Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/Kconfig | 107
-rw-r--r--  drivers/gpu/drm/Makefile | 26
-rw-r--r--  drivers/gpu/drm/README.drm | 43
-rw-r--r--  drivers/gpu/drm/ati_pcigart.c | 181
-rw-r--r--  drivers/gpu/drm/drm_agpsupport.c | 455
-rw-r--r--  drivers/gpu/drm/drm_auth.c | 190
-rw-r--r--  drivers/gpu/drm/drm_bufs.c | 1601
-rw-r--r--  drivers/gpu/drm/drm_context.c | 471
-rw-r--r--  drivers/gpu/drm/drm_dma.c | 180
-rw-r--r--  drivers/gpu/drm/drm_drawable.c | 192
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 540
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 471
-rw-r--r--  drivers/gpu/drm/drm_hashtab.c | 202
-rw-r--r--  drivers/gpu/drm/drm_ioc32.c | 1073
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 352
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 462
-rw-r--r--  drivers/gpu/drm/drm_lock.c | 391
-rw-r--r--  drivers/gpu/drm/drm_memory.c | 186
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 295
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 183
-rw-r--r--  drivers/gpu/drm/drm_proc.c | 557
-rw-r--r--  drivers/gpu/drm/drm_scatter.c | 227
-rw-r--r--  drivers/gpu/drm/drm_sman.c | 353
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 331
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 208
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 673
-rw-r--r--  drivers/gpu/drm/i810/Makefile | 8
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c | 1283
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.c | 97
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.h | 242
-rw-r--r--  drivers/gpu/drm/i830/Makefile | 8
-rw-r--r--  drivers/gpu/drm/i830/i830_dma.c | 1553
-rw-r--r--  drivers/gpu/drm/i830/i830_drv.c | 108
-rw-r--r--  drivers/gpu/drm/i830/i830_drv.h | 292
-rw-r--r--  drivers/gpu/drm/i830/i830_irq.c | 186
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 858
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 605
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 1142
-rw-r--r--  drivers/gpu/drm/i915/i915_ioc32.c | 222
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 623
-rw-r--r--  drivers/gpu/drm/i915/i915_mem.c | 386
-rw-r--r--  drivers/gpu/drm/mga/Makefile | 11
-rw-r--r--  drivers/gpu/drm/mga/mga_dma.c | 1162
-rw-r--r--  drivers/gpu/drm/mga/mga_drv.c | 141
-rw-r--r--  drivers/gpu/drm/mga/mga_drv.h | 687
-rw-r--r--  drivers/gpu/drm/mga/mga_ioc32.c | 231
-rw-r--r--  drivers/gpu/drm/mga/mga_irq.c | 148
-rw-r--r--  drivers/gpu/drm/mga/mga_state.c | 1104
-rw-r--r--  drivers/gpu/drm/mga/mga_ucode.h | 11645
-rw-r--r--  drivers/gpu/drm/mga/mga_warp.c | 193
-rw-r--r--  drivers/gpu/drm/r128/Makefile | 10
-rw-r--r--  drivers/gpu/drm/r128/r128_cce.c | 935
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.c | 103
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.h | 522
-rw-r--r--  drivers/gpu/drm/r128/r128_ioc32.c | 221
-rw-r--r--  drivers/gpu/drm/r128/r128_irq.c | 101
-rw-r--r--  drivers/gpu/drm/r128/r128_state.c | 1681
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 10
-rw-r--r--  drivers/gpu/drm/radeon/r300_cmdbuf.c | 1071
-rw-r--r--  drivers/gpu/drm/radeon/r300_reg.h | 1772
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 1773
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 126
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 1406
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ioc32.c | 424
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq.c | 320
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mem.c | 302
-rw-r--r--  drivers/gpu/drm/radeon/radeon_microcode.h | 1844
-rw-r--r--  drivers/gpu/drm/radeon/radeon_state.c | 3203
-rw-r--r--  drivers/gpu/drm/savage/Makefile | 9
-rw-r--r--  drivers/gpu/drm/savage/savage_bci.c | 1095
-rw-r--r--  drivers/gpu/drm/savage/savage_drv.c | 88
-rw-r--r--  drivers/gpu/drm/savage/savage_drv.h | 575
-rw-r--r--  drivers/gpu/drm/savage/savage_state.c | 1163
-rw-r--r--  drivers/gpu/drm/sis/Makefile | 10
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.c | 117
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.h | 73
-rw-r--r--  drivers/gpu/drm/sis/sis_mm.c | 333
-rw-r--r--  drivers/gpu/drm/tdfx/Makefile | 8
-rw-r--r--  drivers/gpu/drm/tdfx/tdfx_drv.c | 84
-rw-r--r--  drivers/gpu/drm/tdfx/tdfx_drv.h | 47
-rw-r--r--  drivers/gpu/drm/via/Makefile | 8
-rw-r--r--  drivers/gpu/drm/via/via_3d_reg.h | 1650
-rw-r--r--  drivers/gpu/drm/via/via_dma.c | 755
-rw-r--r--  drivers/gpu/drm/via/via_dmablit.c | 816
-rw-r--r--  drivers/gpu/drm/via/via_dmablit.h | 140
-rw-r--r--  drivers/gpu/drm/via/via_drv.c | 100
-rw-r--r--  drivers/gpu/drm/via/via_drv.h | 153
-rw-r--r--  drivers/gpu/drm/via/via_irq.c | 377
-rw-r--r--  drivers/gpu/drm/via/via_map.c | 123
-rw-r--r--  drivers/gpu/drm/via/via_mm.c | 194
-rw-r--r--  drivers/gpu/drm/via/via_verifier.c | 1116
-rw-r--r--  drivers/gpu/drm/via/via_verifier.h | 62
-rw-r--r--  drivers/gpu/drm/via/via_video.c | 93
94 files changed, 57908 insertions, 0 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
new file mode 100644
index 000000000000..610d6fd5bb50
--- /dev/null
+++ b/drivers/gpu/drm/Kconfig
@@ -0,0 +1,107 @@
+#
+# Drm device configuration
+#
+# This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+#
+menuconfig DRM
+	tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
+	depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG
+	help
+	  Kernel-level support for the Direct Rendering Infrastructure (DRI)
+	  introduced in XFree86 4.0. If you say Y here, you need to select
+	  the module that's right for your graphics card from the list below.
+	  These modules provide support for synchronization, security, and
+	  DMA transfers. Please see <http://dri.sourceforge.net/> for more
+	  details.  You should also select and configure AGP
+	  (/dev/agpgart) support.
+
+config DRM_TDFX
+	tristate "3dfx Banshee/Voodoo3+"
+	depends on DRM && PCI
+	help
+	  Choose this option if you have a 3dfx Banshee or Voodoo3 (or later)
+	  graphics card.  If M is selected, the module will be called tdfx.
+
+config DRM_R128
+	tristate "ATI Rage 128"
+	depends on DRM && PCI
+	help
+	  Choose this option if you have an ATI Rage 128 graphics card.  If M
+	  is selected, the module will be called r128.  AGP support for
+	  this card is strongly suggested (unless you have a PCI version).
+
+config DRM_RADEON
+	tristate "ATI Radeon"
+	depends on DRM && PCI
+	help
+	  Choose this option if you have an ATI Radeon graphics card.  There
+	  are both PCI and AGP versions.  You don't need to choose this to
+	  run the Radeon in plain VGA mode.
+
+	  If M is selected, the module will be called radeon.
+
+config DRM_I810
+	tristate "Intel I810"
+	depends on DRM && AGP && AGP_INTEL
+	help
+	  Choose this option if you have an Intel I810 graphics card.  If M is
+	  selected, the module will be called i810.  AGP support is required
+	  for this driver to work.
+
+choice
+	prompt "Intel 830M, 845G, 852GM, 855GM, 865G"
+	depends on DRM && AGP && AGP_INTEL
+	optional
+
+config DRM_I830
+	tristate "i830 driver"
+	help
+	  Choose this option if you have a system that has Intel 830M, 845G,
+	  852GM, 855GM or 865G integrated graphics.  If M is selected, the
+	  module will be called i830.  AGP support is required for this driver
+	  to work. This driver is used by the older X releases X.org 6.7 and
+	  XFree86 4.3. If unsure, build this and i915 as modules and the X server
+	  will load the correct one.
+
+config DRM_I915
+	tristate "i915 driver"
+	help
+	  Choose this option if you have a system that has Intel 830M, 845G,
+	  852GM, 855GM, 865G or 915G integrated graphics.  If M is selected, the
+	  module will be called i915.  AGP support is required for this driver
+	  to work. This driver is used by the Intel driver in X.org 6.8 and
+	  XFree86 4.4 and above. If unsure, build this and i830 as modules and
+	  the X server will load the correct one.
+
+endchoice
+
+config DRM_MGA
+	tristate "Matrox g200/g400"
+	depends on DRM
+	help
+	  Choose this option if you have a Matrox G200, G400 or G450 graphics
+	  card.  If M is selected, the module will be called mga.  AGP
+	  support is required for this driver to work.
+
+config DRM_SIS
+	tristate "SiS video cards"
+	depends on DRM && AGP
+	help
+	  Choose this option if you have a SiS 630 or compatible video
+	  chipset. If M is selected the module will be called sis. AGP
+	  support is required for this driver to work.
+
+config DRM_VIA
+	tristate "Via unichrome video cards"
+	depends on DRM
+	help
+	  Choose this option if you have a Via unichrome or compatible video
+	  chipset. If M is selected the module will be called via.
+
+config DRM_SAVAGE
+	tristate "Savage video cards"
+	depends on DRM
+	help
+	  Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
+	  chipset. If M is selected the module will be called savage.
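For illustration, enabling the DRM core and a couple of the drivers above as modules amounts to a .config fragment along the following lines; the exact set of CONFIG_DRM_* symbols depends on the target hardware:

    CONFIG_DRM=m
    CONFIG_DRM_TDFX=m
    CONFIG_DRM_RADEON=m
    CONFIG_DRM_I915=m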
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
new file mode 100644
index 000000000000..e9f9a97ae00a
--- /dev/null
+++ b/drivers/gpu/drm/Makefile
@@ -0,0 +1,26 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+
+drm-y       :=	drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
+		drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \
+		drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
+		drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
+		drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o
+
+drm-$(CONFIG_COMPAT) += drm_ioc32.o
+
+obj-$(CONFIG_DRM)	+= drm.o
+obj-$(CONFIG_DRM_TDFX)	+= tdfx/
+obj-$(CONFIG_DRM_R128)	+= r128/
+obj-$(CONFIG_DRM_RADEON)+= radeon/
+obj-$(CONFIG_DRM_MGA)	+= mga/
+obj-$(CONFIG_DRM_I810)	+= i810/
+obj-$(CONFIG_DRM_I830)	+= i830/
+obj-$(CONFIG_DRM_I915)  += i915/
+obj-$(CONFIG_DRM_SIS)   += sis/
+obj-$(CONFIG_DRM_SAVAGE)+= savage/
+obj-$(CONFIG_DRM_VIA)	+= via/
+
diff --git a/drivers/gpu/drm/README.drm b/drivers/gpu/drm/README.drm
new file mode 100644
index 000000000000..b5b332722581
--- /dev/null
+++ b/drivers/gpu/drm/README.drm
@@ -0,0 +1,43 @@
+************************************************************
+* For the very latest on DRI development, please see:      *
+*     http://dri.freedesktop.org/                          *
+************************************************************
+
+The Direct Rendering Manager (drm) is a device-independent kernel-level
+device driver that provides support for the XFree86 Direct Rendering
+Infrastructure (DRI).
+
+The DRM supports the Direct Rendering Infrastructure (DRI) in four major
+ways:
+
+    1. The DRM provides synchronized access to the graphics hardware via
+       the use of an optimized two-tiered lock.
+
+    2. The DRM enforces the DRI security policy for access to the graphics
+       hardware by only allowing authenticated X11 clients access to
+       restricted regions of memory.
+
+    3. The DRM provides a generic DMA engine, complete with multiple
+       queues and the ability to detect the need for an OpenGL context
+       switch.
+
+    4. The DRM is extensible via the use of small device-specific modules
+       that rely extensively on the API exported by the DRM module.
+
+
+Documentation on the DRI is available from:
+    http://dri.freedesktop.org/wiki/Documentation
+    http://sourceforge.net/project/showfiles.php?group_id=387
+    http://dri.sourceforge.net/doc/
+
+For specific information about kernel-level support, see:
+
+    The Direct Rendering Manager, Kernel Support for the Direct Rendering
+    Infrastructure
+    http://dri.sourceforge.net/doc/drm_low_level.html
+
+    Hardware Locking for the Direct Rendering Infrastructure
+    http://dri.sourceforge.net/doc/hardware_locking_low_level.html
+
+    A Security Analysis of the Direct Rendering Infrastructure
+    http://dri.sourceforge.net/doc/security_low_level.html
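As a rough sketch of how a client reaches this kernel interface: user space opens a /dev/dri/card* node, usually through libdrm, and drives the module with ioctls. Assuming libdrm's drmOpen()/drmGetVersion() wrappers (not part of this patch), a minimal probe of the radeon driver could look like:

    #include <stdio.h>
    #include <xf86drm.h>    /* libdrm userspace wrappers (assumed available) */

    int main(void)
    {
            drmVersionPtr v;
            int fd = drmOpen("radeon", NULL);  /* opens the matching /dev/dri/card* node */

            if (fd < 0)
                    return 1;
            v = drmGetVersion(fd);             /* DRM_IOCTL_VERSION under the hood */
            if (v) {
                    printf("%s %d.%d.%d\n", v->name, v->version_major,
                           v->version_minor, v->version_patchlevel);
                    drmFreeVersion(v);
            }
            drmClose(fd);
            return 0;
    }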
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
new file mode 100644
index 000000000000..c533d0c9ec61
--- /dev/null
+++ b/drivers/gpu/drm/ati_pcigart.c
@@ -0,0 +1,181 @@
+/**
+ * \file ati_pcigart.c
+ * ATI PCI GART support
+ *
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+# define ATI_PCIGART_PAGE_SIZE		4096	/**< PCI GART page size */
+
+static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
+				       struct drm_ati_pcigart_info *gart_info)
+{
+	gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
+						PAGE_SIZE,
+						gart_info->table_mask);
+	if (gart_info->table_handle == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void drm_ati_free_pcigart_table(struct drm_device *dev,
+				       struct drm_ati_pcigart_info *gart_info)
+{
+	drm_pci_free(dev, gart_info->table_handle);
+	gart_info->table_handle = NULL;
+}
+
+int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
+{
+	struct drm_sg_mem *entry = dev->sg;
+	unsigned long pages;
+	int i;
+	int max_pages;
+
+	/* we need to support large memory configurations */
+	if (!entry) {
+		DRM_ERROR("no scatter/gather memory!\n");
+		return 0;
+	}
+
+	if (gart_info->bus_addr) {
+
+		max_pages = (gart_info->table_size / sizeof(u32));
+		pages = (entry->pages <= max_pages)
+		  ? entry->pages : max_pages;
+
+		for (i = 0; i < pages; i++) {
+			if (!entry->busaddr[i])
+				break;
+			pci_unmap_page(dev->pdev, entry->busaddr[i],
+					 PAGE_SIZE, PCI_DMA_TODEVICE);
+		}
+
+		if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
+			gart_info->bus_addr = 0;
+	}
+
+	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN &&
+	    gart_info->table_handle) {
+		drm_ati_free_pcigart_table(dev, gart_info);
+	}
+
+	return 1;
+}
+EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
+
+int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
+{
+	struct drm_sg_mem *entry = dev->sg;
+	void *address = NULL;
+	unsigned long pages;
+	u32 *pci_gart, page_base;
+	dma_addr_t bus_address = 0;
+	int i, j, ret = 0;
+	int max_pages;
+
+	if (!entry) {
+		DRM_ERROR("no scatter/gather memory!\n");
+		goto done;
+	}
+
+	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
+		DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
+
+		ret = drm_ati_alloc_pcigart_table(dev, gart_info);
+		if (ret) {
+			DRM_ERROR("cannot allocate PCI GART page!\n");
+			goto done;
+		}
+
+		address = gart_info->table_handle->vaddr;
+		bus_address = gart_info->table_handle->busaddr;
+	} else {
+		address = gart_info->addr;
+		bus_address = gart_info->bus_addr;
+		DRM_DEBUG("PCI: Gart Table: VRAM %08LX mapped at %08lX\n",
+			  (unsigned long long)bus_address,
+			  (unsigned long)address);
+	}
+
+	pci_gart = (u32 *) address;
+
+	max_pages = (gart_info->table_size / sizeof(u32));
+	pages = (entry->pages <= max_pages)
+	    ? entry->pages : max_pages;
+
+	memset(pci_gart, 0, max_pages * sizeof(u32));
+
+	for (i = 0; i < pages; i++) {
+		/* we need to support large memory configurations */
+		entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i],
+						 0, PAGE_SIZE, PCI_DMA_TODEVICE);
+		if (entry->busaddr[i] == 0) {
+			DRM_ERROR("unable to map PCIGART pages!\n");
+			drm_ati_pcigart_cleanup(dev, gart_info);
+			address = NULL;
+			bus_address = 0;
+			goto done;
+		}
+		page_base = (u32) entry->busaddr[i];
+
+		for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
+			switch(gart_info->gart_reg_if) {
+			case DRM_ATI_GART_IGP:
+				*pci_gart = cpu_to_le32((page_base) | 0xc);
+				break;
+			case DRM_ATI_GART_PCIE:
+				*pci_gart = cpu_to_le32((page_base >> 8) | 0xc);
+				break;
+			default:
+			case DRM_ATI_GART_PCI:
+				*pci_gart = cpu_to_le32(page_base);
+				break;
+			}
+			pci_gart++;
+			page_base += ATI_PCIGART_PAGE_SIZE;
+		}
+	}
+	ret = 1;
+
+#if defined(__i386__) || defined(__x86_64__)
+	wbinvd();
+#else
+	mb();
+#endif
+
+      done:
+	gart_info->addr = address;
+	gart_info->bus_addr = bus_address;
+	return ret;
+}
+EXPORT_SYMBOL(drm_ati_pcigart_init);
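A driver that wants its GART table in system RAM fills in a struct drm_ati_pcigart_info and calls drm_ati_pcigart_init(), with drm_ati_pcigart_cleanup() on teardown. A minimal sketch follows; the helper name and the field values are illustrative only, since the real radeon/r128 callers derive table_size, the entry format and the DMA mask from their chip state:

    #include "drmP.h"   /* struct drm_device, struct drm_ati_pcigart_info */

    /* Illustrative driver init path; constants are made up for the example. */
    static int example_setup_pcigart(struct drm_device *dev,
                                     struct drm_ati_pcigart_info *gart_info)
    {
            memset(gart_info, 0, sizeof(*gart_info));
            gart_info->gart_table_location = DRM_ATI_GART_MAIN; /* table lives in normal RAM  */
            gart_info->gart_reg_if = DRM_ATI_GART_PCI;          /* plain page-address entries */
            gart_info->table_size = 32768;                      /* illustrative size in bytes */
            gart_info->table_mask = 0xffffffffUL;               /* 32-bit DMA mask for table  */

            if (!drm_ati_pcigart_init(dev, gart_info))          /* returns 1 on success       */
                    return -ENOMEM;
            return 0;
    }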
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
new file mode 100644
index 000000000000..aefa5ac4c0b1
--- /dev/null
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -0,0 +1,455 @@
+/**
+ * \file drm_agpsupport.c
+ * DRM support for AGP/GART backend
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include <linux/module.h>
+
+#if __OS_HAS_AGP
+
+/**
+ * Get AGP information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a (output) drm_agp_info structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been initialized and acquired and fills in the
+ * drm_agp_info structure with the information in drm_agp_head::agp_info.
+ */
+int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
+{
+	DRM_AGP_KERN *kern;
+
+	if (!dev->agp || !dev->agp->acquired)
+		return -EINVAL;
+
+	kern = &dev->agp->agp_info;
+	info->agp_version_major = kern->version.major;
+	info->agp_version_minor = kern->version.minor;
+	info->mode = kern->mode;
+	info->aperture_base = kern->aper_base;
+	info->aperture_size = kern->aper_size * 1024 * 1024;
+	info->memory_allowed = kern->max_memory << PAGE_SHIFT;
+	info->memory_used = kern->current_memory << PAGE_SHIFT;
+	info->id_vendor = kern->device->vendor;
+	info->id_device = kern->device->device;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(drm_agp_info);
+
+int drm_agp_info_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct drm_agp_info *info = data;
+	int err;
+
+	err = drm_agp_info(dev, info);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+/**
+ * Acquire the AGP device.
+ *
+ * \param dev DRM device that is to acquire AGP.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device hasn't been acquired before and calls
+ * \c agp_backend_acquire.
+ */
+int drm_agp_acquire(struct drm_device * dev)
+{
+	if (!dev->agp)
+		return -ENODEV;
+	if (dev->agp->acquired)
+		return -EBUSY;
+	if (!(dev->agp->bridge = agp_backend_acquire(dev->pdev)))
+		return -ENODEV;
+	dev->agp->acquired = 1;
+	return 0;
+}
+
+EXPORT_SYMBOL(drm_agp_acquire);
+
+/**
+ * Acquire the AGP device (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device hasn't been acquired before and calls
+ * \c agp_backend_acquire.
+ */
+int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	return drm_agp_acquire((struct drm_device *) file_priv->minor->dev);
+}
+
+/**
+ * Release the AGP device.
+ *
+ * \param dev DRM device that is to release AGP.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been acquired and calls \c agp_backend_release.
+ */
+int drm_agp_release(struct drm_device * dev)
+{
+	if (!dev->agp || !dev->agp->acquired)
+		return -EINVAL;
+	agp_backend_release(dev->agp->bridge);
+	dev->agp->acquired = 0;
+	return 0;
+}
+EXPORT_SYMBOL(drm_agp_release);
+
+int drm_agp_release_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	return drm_agp_release(dev);
+}
+
+/**
+ * Enable the AGP bus.
+ *
+ * \param dev DRM device that has previously acquired AGP.
+ * \param mode Requested AGP mode.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been acquired but not enabled, and calls
+ * \c agp_enable.
+ */
+int drm_agp_enable(struct drm_device * dev, struct drm_agp_mode mode)
+{
+	if (!dev->agp || !dev->agp->acquired)
+		return -EINVAL;
+
+	dev->agp->mode = mode.mode;
+	agp_enable(dev->agp->bridge, mode.mode);
+	dev->agp->enabled = 1;
+	return 0;
+}
+
+EXPORT_SYMBOL(drm_agp_enable);
+
+int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	struct drm_agp_mode *mode = data;
+
+	return drm_agp_enable(dev, *mode);
+}
+
+/**
+ * Allocate AGP memory.
+ *
+ * \param inode device inode.
+ * \param file_priv file private pointer.
+ * \param cmd command.
+ * \param arg pointer to a drm_agp_buffer structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and has been acquired, allocates the
+ * memory via alloc_agp() and creates a drm_agp_mem entry for it.
+ */
+int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
+{
+	struct drm_agp_mem *entry;
+	DRM_AGP_MEM *memory;
+	unsigned long pages;
+	u32 type;
+
+	if (!dev->agp || !dev->agp->acquired)
+		return -EINVAL;
+	if (!(entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS)))
+		return -ENOMEM;
+
+	memset(entry, 0, sizeof(*entry));
+
+	pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
+	type = (u32) request->type;
+	if (!(memory = drm_alloc_agp(dev, pages, type))) {
+		drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
+		return -ENOMEM;
+	}
+
+	entry->handle = (unsigned long)memory->key + 1;
+	entry->memory = memory;
+	entry->bound = 0;
+	entry->pages = pages;
+	list_add(&entry->head, &dev->agp->memory);
+
+	request->handle = entry->handle;
+	request->physical = memory->physical;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_agp_alloc);
+
+
+int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	struct drm_agp_buffer *request = data;
+
+	return drm_agp_alloc(dev, request);
+}
+
+/**
+ * Search for the AGP memory entry associated with a handle.
+ *
+ * \param dev DRM device structure.
+ * \param handle AGP memory handle.
+ * \return pointer to the drm_agp_mem structure associated with \p handle.
+ *
+ * Walks through drm_agp_head::memory until finding a matching handle.
+ */
+static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev,
+					   unsigned long handle)
+{
+	struct drm_agp_mem *entry;
+
+	list_for_each_entry(entry, &dev->agp->memory, head) {
+		if (entry->handle == handle)
+			return entry;
+	}
+	return NULL;
+}
+
+/**
+ * Unbind AGP memory from the GATT (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_agp_binding structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and acquired, looks up the AGP memory
+ * entry and passes it to the unbind_agp() function.
+ */
+int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
+{
+	struct drm_agp_mem *entry;
+	int ret;
+
+	if (!dev->agp || !dev->agp->acquired)
+		return -EINVAL;
+	if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+		return -EINVAL;
+	if (!entry->bound)
+		return -EINVAL;
+	ret = drm_unbind_agp(entry->memory);
+	if (ret == 0)
+		entry->bound = 0;
+	return ret;
+}
+EXPORT_SYMBOL(drm_agp_unbind);
+
+
+int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	struct drm_agp_binding *request = data;
+
+	return drm_agp_unbind(dev, request);
+}
+
+/**
+ * Bind AGP memory into the GATT (ioctl)
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_agp_binding structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and has been acquired and that no memory
+ * is currently bound into the GATT. Looks up the AGP memory entry and passes
+ * it to the bind_agp() function.
+ */
+int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
+{
+	struct drm_agp_mem *entry;
+	int retcode;
+	int page;
+
+	if (!dev->agp || !dev->agp->acquired)
+		return -EINVAL;
+	if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+		return -EINVAL;
+	if (entry->bound)
+		return -EINVAL;
+	page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
+	if ((retcode = drm_bind_agp(entry->memory, page)))
+		return retcode;
+	entry->bound = dev->agp->base + (page << PAGE_SHIFT);
+	DRM_DEBUG("base = 0x%lx entry->bound = 0x%lx\n",
+		  dev->agp->base, entry->bound);
+	return 0;
+}
+EXPORT_SYMBOL(drm_agp_bind);
+
+
+int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct drm_agp_binding *request = data;
+
+	return drm_agp_bind(dev, request);
+}
+
+/**
+ * Free AGP memory (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_agp_buffer structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device is present and has been acquired and looks up the
+ * AGP memory entry. If the memory is currently bound, unbind it via
+ * unbind_agp(). Frees it via free_agp() as well as the entry itself,
+ * and unlinks it from the doubly linked list it's inserted in.
+ */
+int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
+{
+	struct drm_agp_mem *entry;
+
+	if (!dev->agp || !dev->agp->acquired)
+		return -EINVAL;
+	if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
+		return -EINVAL;
+	if (entry->bound)
+		drm_unbind_agp(entry->memory);
+
+	list_del(&entry->head);
+
+	drm_free_agp(entry->memory, entry->pages);
+	drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
+	return 0;
+}
+EXPORT_SYMBOL(drm_agp_free);
+
+
+
+int drm_agp_free_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct drm_agp_buffer *request = data;
+
+	return drm_agp_free(dev, request);
+}
+
+/**
+ * Initialize the AGP resources.
+ *
+ * \return pointer to a drm_agp_head structure.
+ *
+ * Gets the drm_agp_t structure which is made available by the agpgart module
+ * via the inter_module_* functions. Creates and initializes a drm_agp_head
+ * structure.
+ */
+struct drm_agp_head *drm_agp_init(struct drm_device *dev)
+{
+	struct drm_agp_head *head = NULL;
+
+	if (!(head = drm_alloc(sizeof(*head), DRM_MEM_AGPLISTS)))
+		return NULL;
+	memset((void *)head, 0, sizeof(*head));
+	head->bridge = agp_find_bridge(dev->pdev);
+	if (!head->bridge) {
+		if (!(head->bridge = agp_backend_acquire(dev->pdev))) {
+			drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS);
+			return NULL;
+		}
+		agp_copy_info(head->bridge, &head->agp_info);
+		agp_backend_release(head->bridge);
+	} else {
+		agp_copy_info(head->bridge, &head->agp_info);
+	}
+	if (head->agp_info.chipset == NOT_SUPPORTED) {
+		drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS);
+		return NULL;
+	}
+	INIT_LIST_HEAD(&head->memory);
+	head->cant_use_aperture = head->agp_info.cant_use_aperture;
+	head->page_mask = head->agp_info.page_mask;
+	head->base = head->agp_info.aper_base;
+	return head;
+}
+
+/** Calls agp_allocate_memory() */
+DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data * bridge,
+				     size_t pages, u32 type)
+{
+	return agp_allocate_memory(bridge, pages, type);
+}
+
+/** Calls agp_free_memory() */
+int drm_agp_free_memory(DRM_AGP_MEM * handle)
+{
+	if (!handle)
+		return 0;
+	agp_free_memory(handle);
+	return 1;
+}
+
+/** Calls agp_bind_memory() */
+int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start)
+{
+	if (!handle)
+		return -EINVAL;
+	return agp_bind_memory(handle, start);
+}
+
+/** Calls agp_unbind_memory() */
+int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
+{
+	if (!handle)
+		return -EINVAL;
+	return agp_unbind_memory(handle);
+}
+
+#endif				/* __OS_HAS_AGP */
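From user space these ioctls are normally reached through libdrm, and the expected ordering mirrors the checks above: acquire the backend, enable it, then allocate and bind memory. A sketch assuming libdrm's drmAgp*() wrappers (not part of this patch), with an arbitrary 8 MiB allocation:

    #include <xf86drm.h>    /* libdrm wrappers around the AGP ioctls (assumed available) */

    /* Illustrative only: acquire and enable AGP, then allocate and bind 8 MiB. */
    static int setup_agp(int fd)
    {
            drm_handle_t handle;

            if (drmAgpAcquire(fd))                     /* DRM_IOCTL_AGP_ACQUIRE */
                    return -1;
            if (drmAgpEnable(fd, drmAgpGetMode(fd)))   /* DRM_IOCTL_AGP_ENABLE  */
                    return -1;
            if (drmAgpAlloc(fd, 8 * 1024 * 1024, 0, NULL, &handle))
                    return -1;                         /* 8 MiB of type-0 AGP memory */
            return drmAgpBind(fd, handle, 0);          /* bind at aperture offset 0  */
    }

The teardown path is the reverse: drmAgpUnbind(), drmAgpFree() and finally drmAgpRelease().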
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
new file mode 100644
index 000000000000..a73462723d2d
--- /dev/null
+++ b/drivers/gpu/drm/drm_auth.c
@@ -0,0 +1,190 @@
+/**
+ * \file drm_auth.c
+ * IOCTLs for authentication
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+/**
+ * Find the file with the given magic number.
+ *
+ * \param dev DRM device.
+ * \param magic magic number.
+ *
+ * Searches drm_device::magiclist, among all files with the same hash key, for
+ * the one with the matching magic number, while holding the
+ * drm_device::struct_mutex lock.
+ */
+static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic)
+{
+	struct drm_file *retval = NULL;
+	struct drm_magic_entry *pt;
+	struct drm_hash_item *hash;
+
+	mutex_lock(&dev->struct_mutex);
+	if (!drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
+		pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
+		retval = pt->priv;
+	}
+	mutex_unlock(&dev->struct_mutex);
+	return retval;
+}
+
+/**
+ * Adds a magic number.
+ *
+ * \param dev DRM device.
+ * \param priv file private data.
+ * \param magic magic number.
+ *
+ * Creates a drm_magic_entry structure and appends it to the linked list
+ * associated with the magic number hash key in drm_device::magiclist, while
+ * holding the drm_device::struct_mutex lock.
+ */
+static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,
+			 drm_magic_t magic)
+{
+	struct drm_magic_entry *entry;
+
+	DRM_DEBUG("%d\n", magic);
+
+	entry = drm_alloc(sizeof(*entry), DRM_MEM_MAGIC);
+	if (!entry)
+		return -ENOMEM;
+	memset(entry, 0, sizeof(*entry));
+	entry->priv = priv;
+
+	entry->hash_item.key = (unsigned long)magic;
+	mutex_lock(&dev->struct_mutex);
+	drm_ht_insert_item(&dev->magiclist, &entry->hash_item);
+	list_add_tail(&entry->head, &dev->magicfree);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+/**
+ * Remove a magic number.
+ *
+ * \param dev DRM device.
+ * \param magic magic number.
+ *
+ * Searches and unlinks the entry in drm_device::magiclist with the magic
+ * number hash key, while holding the drm_device::struct_mutex lock.
+ */
+static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic)
+{
+	struct drm_magic_entry *pt;
+	struct drm_hash_item *hash;
+
+	DRM_DEBUG("%d\n", magic);
+
+	mutex_lock(&dev->struct_mutex);
+	if (drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+	pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
+	drm_ht_remove_item(&dev->magiclist, hash);
+	list_del(&pt->head);
+	mutex_unlock(&dev->struct_mutex);
+
+	drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
+
+	return 0;
+}
+
+/**
+ * Get a unique magic number (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a resulting drm_auth structure.
+ * \return zero on success, or a negative number on failure.
+ *
+ * If there is a magic number in drm_file::magic then use it; otherwise
+ * search for a unique non-zero magic number and add it, associating it with
+ * \p file_priv.
+ */
+int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	static drm_magic_t sequence = 0;
+	static DEFINE_SPINLOCK(lock);
+	struct drm_auth *auth = data;
+
+	/* Find unique magic */
+	if (file_priv->magic) {
+		auth->magic = file_priv->magic;
+	} else {
+		do {
+			spin_lock(&lock);
+			if (!sequence)
+				++sequence;	/* reserve 0 */
+			auth->magic = sequence++;
+			spin_unlock(&lock);
+		} while (drm_find_file(dev, auth->magic));
+		file_priv->magic = auth->magic;
+		drm_add_magic(dev, file_priv, auth->magic);
+	}
+
+	DRM_DEBUG("%u\n", auth->magic);
+
+	return 0;
+}
+
+/**
+ * Authenticate with a magic.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_auth structure.
+ * \return zero if authentication succeeded, or a negative number otherwise.
+ *
+ * Checks if \p file_priv is associated with the magic number passed in \arg.
+ */
+int drm_authmagic(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
+{
+	struct drm_auth *auth = data;
+	struct drm_file *file;
+
+	DRM_DEBUG("%u\n", auth->magic);
+	if ((file = drm_find_file(dev, auth->magic))) {
+		file->authenticated = 1;
+		drm_remove_magic(dev, auth->magic);
+		return 0;
+	}
+	return -EINVAL;
+}
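The two ioctls above form the DRI authentication handshake: a client asks the kernel for a magic with DRM_IOCTL_GET_MAGIC, hands it to the X server out of band, and the server validates it with DRM_IOCTL_AUTH_MAGIC. A sketch assuming libdrm's drmGetMagic()/drmAuthMagic() wrappers (not part of this patch):

    #include <xf86drm.h>    /* libdrm wrappers (assumed available); drm_magic_t comes from drm.h */

    /* Illustrative only: client_fd belongs to the client, server_fd to the X server. */
    static int authenticate_client(int client_fd, int server_fd)
    {
            drm_magic_t magic;

            if (drmGetMagic(client_fd, &magic))        /* drm_getmagic() in the kernel  */
                    return -1;
            /* In real DRI the magic travels over the X protocol, not within one process. */
            return drmAuthMagic(server_fd, magic);     /* drm_authmagic() in the kernel */
    }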
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
new file mode 100644
index 000000000000..bde64b84166e
--- /dev/null
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -0,0 +1,1601 @@
+/**
+ * \file drm_bufs.c
+ * Generic buffer template
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/vmalloc.h>
+#include "drmP.h"
+
+unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource)
+{
+	return pci_resource_start(dev->pdev, resource);
+}
+EXPORT_SYMBOL(drm_get_resource_start);
+
+unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource)
+{
+	return pci_resource_len(dev->pdev, resource);
+}
+
+EXPORT_SYMBOL(drm_get_resource_len);
+
+static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
+					     drm_local_map_t *map)
+{
+	struct drm_map_list *entry;
+	list_for_each_entry(entry, &dev->maplist, head) {
+		if (entry->map && map->type == entry->map->type &&
+		    ((entry->map->offset == map->offset) ||
+		     (map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) {
+			return entry;
+		}
+	}
+
+	return NULL;
+}
+
+static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
+			  unsigned long user_token, int hashed_handle)
+{
+	int use_hashed_handle;
+#if (BITS_PER_LONG == 64)
+	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
+#elif (BITS_PER_LONG == 32)
+	use_hashed_handle = hashed_handle;
+#else
+#error Unsupported long size. Neither 64 nor 32 bits.
+#endif
+
+	if (!use_hashed_handle) {
+		int ret;
+		hash->key = user_token >> PAGE_SHIFT;
+		ret = drm_ht_insert_item(&dev->map_hash, hash);
+		if (ret != -EINVAL)
+			return ret;
+	}
+	return drm_ht_just_insert_please(&dev->map_hash, hash,
+					 user_token, 32 - PAGE_SHIFT - 3,
+					 0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
+}
+
+/**
+ * Ioctl to specify a range of memory that is available for mapping by a non-root process.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_map structure.
+ * \return zero on success or a negative value on error.
+ *
+ * Adjusts the memory offset to its absolute value according to the mapping
+ * type.  Adds the map to the map list drm_device::maplist. Adds MTRR's where
+ * applicable and if supported by the kernel.
+ */
+static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
+			   unsigned int size, enum drm_map_type type,
+			   enum drm_map_flags flags,
+			   struct drm_map_list ** maplist)
+{
+	struct drm_map *map;
+	struct drm_map_list *list;
+	drm_dma_handle_t *dmah;
+	unsigned long user_token;
+	int ret;
+
+	map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
+	if (!map)
+		return -ENOMEM;
+
+	map->offset = offset;
+	map->size = size;
+	map->flags = flags;
+	map->type = type;
+
+	/* Only allow shared memory to be removable since we only keep enough
+	 * book keeping information about shared memory to allow for removal
+	 * when processes fork.
+	 */
+	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
+		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+		return -EINVAL;
+	}
+	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
+		  map->offset, map->size, map->type);
+	if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
+		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+		return -EINVAL;
+	}
+	map->mtrr = -1;
+	map->handle = NULL;
+
+	switch (map->type) {
+	case _DRM_REGISTERS:
+	case _DRM_FRAME_BUFFER:
+#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
+		if (map->offset + (map->size-1) < map->offset ||
+		    map->offset < virt_to_phys(high_memory)) {
+			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			return -EINVAL;
+		}
+#endif
+#ifdef __alpha__
+		map->offset += dev->hose->mem_space->start;
+#endif
+		/* Some drivers preinitialize some maps, without the X Server
+		 * needing to be aware of it.  Therefore, we just return success
+		 * when the server tries to create a duplicate map.
+		 */
+		list = drm_find_matching_map(dev, map);
+		if (list != NULL) {
+			if (list->map->size != map->size) {
+				DRM_DEBUG("Matching maps of type %d with "
+					  "mismatched sizes, (%ld vs %ld)\n",
+					  map->type, map->size,
+					  list->map->size);
+				list->map->size = map->size;
+			}
+
+			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			*maplist = list;
+			return 0;
+		}
+
+		if (drm_core_has_MTRR(dev)) {
+			if (map->type == _DRM_FRAME_BUFFER ||
+			    (map->flags & _DRM_WRITE_COMBINING)) {
+				map->mtrr = mtrr_add(map->offset, map->size,
+						     MTRR_TYPE_WRCOMB, 1);
+			}
+		}
+		if (map->type == _DRM_REGISTERS) {
+			map->handle = ioremap(map->offset, map->size);
+			if (!map->handle) {
+				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+				return -ENOMEM;
+			}
+		}
+
+		break;
+	case _DRM_SHM:
+		list = drm_find_matching_map(dev, map);
+		if (list != NULL) {
+			if(list->map->size != map->size) {
+				DRM_DEBUG("Matching maps of type %d with "
+					  "mismatched sizes, (%ld vs %ld)\n",
+					  map->type, map->size, list->map->size);
+				list->map->size = map->size;
+			}
+
+			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			*maplist = list;
+			return 0;
+		}
+		map->handle = vmalloc_user(map->size);
+		DRM_DEBUG("%lu %d %p\n",
+			  map->size, drm_order(map->size), map->handle);
+		if (!map->handle) {
+			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			return -ENOMEM;
+		}
+		map->offset = (unsigned long)map->handle;
+		if (map->flags & _DRM_CONTAINS_LOCK) {
+			/* Prevent a 2nd X Server from creating a 2nd lock */
+			if (dev->lock.hw_lock != NULL) {
+				vfree(map->handle);
+				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+				return -EBUSY;
+			}
+			dev->sigdata.lock = dev->lock.hw_lock = map->handle;	/* Pointer to lock */
+		}
+		break;
+	case _DRM_AGP: {
+		struct drm_agp_mem *entry;
+		int valid = 0;
+
+		if (!drm_core_has_AGP(dev)) {
+			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			return -EINVAL;
+		}
+#ifdef __alpha__
+		map->offset += dev->hose->mem_space->start;
+#endif
+		/* In some cases (i810 driver), user space may have already
+		 * added the AGP base itself, because dev->agp->base previously
+		 * only got set during AGP enable.  So, only add the base
+		 * address if the map's offset isn't already within the
+		 * aperture.
+		 */
+		if (map->offset < dev->agp->base ||
+		    map->offset > dev->agp->base +
+		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
+			map->offset += dev->agp->base;
+		}
+		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */
+
+		/* This assumes the DRM is in total control of AGP space.
+		 * It's not always the case as AGP can be in the control
+		 * of user space (i.e. i810 driver). So this loop will get
+		 * skipped and we double check that dev->agp->memory is
+		 * actually set as well as being invalid before EPERM'ing
+		 */
+		list_for_each_entry(entry, &dev->agp->memory, head) {
+			if ((map->offset >= entry->bound) &&
+			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
+				valid = 1;
+				break;
+			}
+		}
+		if (!list_empty(&dev->agp->memory) && !valid) {
+			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			return -EPERM;
+		}
+		DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);
+
+		break;
+	}
+	case _DRM_SCATTER_GATHER:
+		if (!dev->sg) {
+			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			return -EINVAL;
+		}
+		map->offset += (unsigned long)dev->sg->virtual;
+		break;
+	case _DRM_CONSISTENT:
+		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
+		 * As we're limiting the address to 2^32-1 (or less),
+		 * casting it down to 32 bits is no problem, but we
+		 * need to point to a 64bit variable first. */
+		dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
+		if (!dmah) {
+			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			return -ENOMEM;
+		}
+		map->handle = dmah->vaddr;
+		map->offset = (unsigned long)dmah->busaddr;
+		kfree(dmah);
+		break;
+	default:
+		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+		return -EINVAL;
+	}
+
+	list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
+	if (!list) {
+		if (map->type == _DRM_REGISTERS)
+			iounmap(map->handle);
+		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+		return -EINVAL;
+	}
+	memset(list, 0, sizeof(*list));
+	list->map = map;
+
+	mutex_lock(&dev->struct_mutex);
+	list_add(&list->head, &dev->maplist);
+
+	/* Assign a 32-bit handle */
+	/* We do it here so that dev->struct_mutex protects the increment */
+	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
+		map->offset;
+	ret = drm_map_handle(dev, &list->hash, user_token, 0);
+	if (ret) {
+		if (map->type == _DRM_REGISTERS)
+			iounmap(map->handle);
+		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+		drm_free(list, sizeof(*list), DRM_MEM_MAPS);
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	list->user_token = list->hash.key << PAGE_SHIFT;
+	mutex_unlock(&dev->struct_mutex);
+
+	*maplist = list;
+	return 0;
+	}
+
+int drm_addmap(struct drm_device * dev, unsigned int offset,
+	       unsigned int size, enum drm_map_type type,
+	       enum drm_map_flags flags, drm_local_map_t ** map_ptr)
+{
+	struct drm_map_list *list;
+	int rc;
+
+	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
+	if (!rc)
+		*map_ptr = list->map;
+	return rc;
+}
+
+EXPORT_SYMBOL(drm_addmap);
+
+int drm_addmap_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	struct drm_map *map = data;
+	struct drm_map_list *maplist;
+	int err;
+
+	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP))
+		return -EPERM;
+
+	err = drm_addmap_core(dev, map->offset, map->size, map->type,
+			      map->flags, &maplist);
+
+	if (err)
+		return err;
+
+	/* avoid a warning on 64-bit; this cast isn't very nice, but the
+	 * API is already set, so it's too late to change it */
+	map->handle = (void *)(unsigned long)maplist->user_token;
+	return 0;
+}
+
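User space normally reaches drm_addmap_ioctl() through libdrm. A sketch, assuming libdrm's drmAddMap()/drmMap() wrappers and an entirely hypothetical register BAR address, of how an X server would create and then map a register region:

    #include <xf86drm.h>    /* libdrm wrappers (assumed available) */

    /* Illustrative only: expose and mmap a made-up 64 KiB register BAR at 0xfd000000. */
    static void *map_registers(int fd)
    {
            drm_handle_t handle;
            drmAddress regs;

            if (drmAddMap(fd, 0xfd000000, 0x10000, DRM_REGISTERS, 0, &handle))
                    return NULL;                       /* DRM_IOCTL_ADD_MAP, handled above */
            if (drmMap(fd, handle, 0x10000, &regs))    /* mmap() using the returned token  */
                    return NULL;
            return regs;
    }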
+/**
+ * Remove a map private from list and deallocate resources if the mapping
+ * isn't in use.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a struct drm_map structure.
+ * \return zero on success or a negative value on error.
+ *
+ * Searches for the map on drm_device::maplist, removes it from the list, sees
+ * if it's being used, and frees any associated resources (such as MTRRs) if
+ * it's not in use.
+ *
+ * \sa drm_addmap
+ */
+int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
+{
+	struct drm_map_list *r_list = NULL, *list_t;
+	drm_dma_handle_t dmah;
+	int found = 0;
+
+	/* Find the list entry for the map and remove it */
+	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
+		if (r_list->map == map) {
+			list_del(&r_list->head);
+			drm_ht_remove_key(&dev->map_hash,
+					  r_list->user_token >> PAGE_SHIFT);
+			drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found)
+		return -EINVAL;
+
+	switch (map->type) {
+	case _DRM_REGISTERS:
+		iounmap(map->handle);
+		/* FALLTHROUGH */
+	case _DRM_FRAME_BUFFER:
+		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
+			int retcode;
+			retcode = mtrr_del(map->mtrr, map->offset, map->size);
+			DRM_DEBUG("mtrr_del=%d\n", retcode);
+		}
+		break;
+	case _DRM_SHM:
+		vfree(map->handle);
+		break;
+	case _DRM_AGP:
+	case _DRM_SCATTER_GATHER:
+		break;
+	case _DRM_CONSISTENT:
+		dmah.vaddr = map->handle;
+		dmah.busaddr = map->offset;
+		dmah.size = map->size;
+		__drm_pci_free(dev, &dmah);
+		break;
+	}
+	drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+
+	return 0;
+}
+
+int drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
+{
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = drm_rmmap_locked(dev, map);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_rmmap);
+
+/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
+ * the last close of the device, and this is necessary for cleanup when things
+ * exit uncleanly.  Therefore, having userland manually remove mappings seems
+ * like a pointless exercise since they're going away anyway.
+ *
+ * One use case might be after addmap is allowed for normal users for SHM and
+ * gets used by drivers that the server doesn't need to care about.  This seems
+ * unlikely.
+ */
+int drm_rmmap_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_map *request = data;
+	drm_local_map_t *map = NULL;
+	struct drm_map_list *r_list;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	list_for_each_entry(r_list, &dev->maplist, head) {
+		if (r_list->map &&
+		    r_list->user_token == (unsigned long)request->handle &&
+		    r_list->map->flags & _DRM_REMOVABLE) {
+			map = r_list->map;
+			break;
+		}
+	}
+
+	/* List has wrapped around to the head pointer, or it's empty and we
+	 * didn't find anything.
+	 */
+	if (list_empty(&dev->maplist) || !map) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	/* Register and framebuffer maps are permanent */
+	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
+		mutex_unlock(&dev->struct_mutex);
+		return 0;
+	}
+
+	ret = drm_rmmap_locked(dev, map);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+/**
+ * Cleanup after an error on one of the addbufs() functions.
+ *
+ * \param dev DRM device.
+ * \param entry buffer entry where the error occurred.
+ *
+ * Frees any pages and buffers associated with the given entry.
+ */
+static void drm_cleanup_buf_error(struct drm_device * dev,
+				  struct drm_buf_entry * entry)
+{
+	int i;
+
+	if (entry->seg_count) {
+		for (i = 0; i < entry->seg_count; i++) {
+			if (entry->seglist[i]) {
+				drm_pci_free(dev, entry->seglist[i]);
+			}
+		}
+		drm_free(entry->seglist,
+			 entry->seg_count *
+			 sizeof(*entry->seglist), DRM_MEM_SEGS);
+
+		entry->seg_count = 0;
+	}
+
+	if (entry->buf_count) {
+		for (i = 0; i < entry->buf_count; i++) {
+			if (entry->buflist[i].dev_private) {
+				drm_free(entry->buflist[i].dev_private,
+					 entry->buflist[i].dev_priv_size,
+					 DRM_MEM_BUFS);
+			}
+		}
+		drm_free(entry->buflist,
+			 entry->buf_count *
+			 sizeof(*entry->buflist), DRM_MEM_BUFS);
+
+		entry->buf_count = 0;
+	}
+}
+
+#if __OS_HAS_AGP
+/**
+ * Add AGP buffers for DMA transfers.
+ *
+ * \param dev struct drm_device to which the buffers are to be added.
+ * \param request pointer to a struct drm_buf_desc describing the request.
+ * \return zero on success or a negative number on failure.
+ *
+ * After some sanity checks creates a drm_buf structure for each buffer and
+ * reallocates the buffer list of the same size order to accommodate the new
+ * buffers.
+ */
+int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
+{
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf_entry *entry;
+	struct drm_agp_mem *agp_entry;
+	struct drm_buf *buf;
+	unsigned long offset;
+	unsigned long agp_offset;
+	int count;
+	int order;
+	int size;
+	int alignment;
+	int page_order;
+	int total;
+	int byte_count;
+	int i, valid;
+	struct drm_buf **temp_buflist;
+
+	if (!dma)
+		return -EINVAL;
+
+	count = request->count;
+	order = drm_order(request->size);
+	size = 1 << order;
+
+	alignment = (request->flags & _DRM_PAGE_ALIGN)
+	    ? PAGE_ALIGN(size) : size;
+	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+	total = PAGE_SIZE << page_order;
+
+	byte_count = 0;
+	agp_offset = dev->agp->base + request->agp_start;
+
+	DRM_DEBUG("count:      %d\n", count);
+	DRM_DEBUG("order:      %d\n", order);
+	DRM_DEBUG("size:       %d\n", size);
+	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
+	DRM_DEBUG("alignment:  %d\n", alignment);
+	DRM_DEBUG("page_order: %d\n", page_order);
+	DRM_DEBUG("total:      %d\n", total);
+
+	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+		return -EINVAL;
+	if (dev->queue_count)
+		return -EBUSY;	/* Not while in use */
+
+	/* Make sure buffers are located in AGP memory that we own */
+	valid = 0;
+	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
+		if ((agp_offset >= agp_entry->bound) &&
+		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
+			valid = 1;
+			break;
+		}
+	}
+	if (!list_empty(&dev->agp->memory) && !valid) {
+		DRM_DEBUG("zone invalid\n");
+		return -EINVAL;
+	}
+	spin_lock(&dev->count_lock);
+	if (dev->buf_use) {
+		spin_unlock(&dev->count_lock);
+		return -EBUSY;
+	}
+	atomic_inc(&dev->buf_alloc);
+	spin_unlock(&dev->count_lock);
+
+	mutex_lock(&dev->struct_mutex);
+	entry = &dma->bufs[order];
+	if (entry->buf_count) {
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;	/* May only call once for each order */
+	}
+
+	if (count < 0 || count > 4096) {
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -EINVAL;
+	}
+
+	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+				   DRM_MEM_BUFS);
+	if (!entry->buflist) {
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+	memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+	entry->buf_size = size;
+	entry->page_order = page_order;
+
+	offset = 0;
+
+	while (entry->buf_count < count) {
+		buf = &entry->buflist[entry->buf_count];
+		buf->idx = dma->buf_count + entry->buf_count;
+		buf->total = alignment;
+		buf->order = order;
+		buf->used = 0;
+
+		buf->offset = (dma->byte_count + offset);
+		buf->bus_address = agp_offset + offset;
+		buf->address = (void *)(agp_offset + offset);
+		buf->next = NULL;
+		buf->waiting = 0;
+		buf->pending = 0;
+		init_waitqueue_head(&buf->dma_wait);
+		buf->file_priv = NULL;
+
+		buf->dev_priv_size = dev->driver->dev_priv_size;
+		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
+		if (!buf->dev_private) {
+			/* Set count correctly so we free the proper amount. */
+			entry->buf_count = count;
+			drm_cleanup_buf_error(dev, entry);
+			mutex_unlock(&dev->struct_mutex);
+			atomic_dec(&dev->buf_alloc);
+			return -ENOMEM;
+		}
+		memset(buf->dev_private, 0, buf->dev_priv_size);
+
+		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
+
+		offset += alignment;
+		entry->buf_count++;
+		byte_count += PAGE_SIZE << page_order;
+	}
+
+	DRM_DEBUG("byte_count: %d\n", byte_count);
+
+	temp_buflist = drm_realloc(dma->buflist,
+				   dma->buf_count * sizeof(*dma->buflist),
+				   (dma->buf_count + entry->buf_count)
+				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
+	if (!temp_buflist) {
+		/* Free the entry because it isn't valid */
+		drm_cleanup_buf_error(dev, entry);
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+	dma->buflist = temp_buflist;
+
+	for (i = 0; i < entry->buf_count; i++) {
+		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+	}
+
+	dma->buf_count += entry->buf_count;
+	dma->seg_count += entry->seg_count;
+	dma->page_count += byte_count >> PAGE_SHIFT;
+	dma->byte_count += byte_count;
+
+	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	request->count = entry->buf_count;
+	request->size = size;
+
+	dma->flags = _DRM_DMA_USE_AGP;
+
+	atomic_dec(&dev->buf_alloc);
+	return 0;
+}
+EXPORT_SYMBOL(drm_addbufs_agp);
+#endif				/* __OS_HAS_AGP */
+
+int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
+{
+	struct drm_device_dma *dma = dev->dma;
+	int count;
+	int order;
+	int size;
+	int total;
+	int page_order;
+	struct drm_buf_entry *entry;
+	drm_dma_handle_t *dmah;
+	struct drm_buf *buf;
+	int alignment;
+	unsigned long offset;
+	int i;
+	int byte_count;
+	int page_count;
+	unsigned long *temp_pagelist;
+	struct drm_buf **temp_buflist;
+
+	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
+		return -EINVAL;
+
+	if (!dma)
+		return -EINVAL;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	count = request->count;
+	order = drm_order(request->size);
+	size = 1 << order;
+
+	DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
+		  request->count, request->size, size, order, dev->queue_count);
+
+	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+		return -EINVAL;
+	if (dev->queue_count)
+		return -EBUSY;	/* Not while in use */
+
+	alignment = (request->flags & _DRM_PAGE_ALIGN)
+	    ? PAGE_ALIGN(size) : size;
+	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+	total = PAGE_SIZE << page_order;
+
+	spin_lock(&dev->count_lock);
+	if (dev->buf_use) {
+		spin_unlock(&dev->count_lock);
+		return -EBUSY;
+	}
+	atomic_inc(&dev->buf_alloc);
+	spin_unlock(&dev->count_lock);
+
+	mutex_lock(&dev->struct_mutex);
+	entry = &dma->bufs[order];
+	if (entry->buf_count) {
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;	/* May only call once for each order */
+	}
+
+	if (count < 0 || count > 4096) {
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -EINVAL;
+	}
+
+	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+				   DRM_MEM_BUFS);
+	if (!entry->buflist) {
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+	memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+	entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
+				   DRM_MEM_SEGS);
+	if (!entry->seglist) {
+		drm_free(entry->buflist,
+			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+	memset(entry->seglist, 0, count * sizeof(*entry->seglist));
+
+	/* Keep the original pagelist until we know all the allocations
+	 * have succeeded
+	 */
+	temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
+				  * sizeof(*dma->pagelist), DRM_MEM_PAGES);
+	if (!temp_pagelist) {
+		drm_free(entry->buflist,
+			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
+		drm_free(entry->seglist,
+			 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+	memcpy(temp_pagelist,
+	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
+	DRM_DEBUG("pagelist: %d entries\n",
+		  dma->page_count + (count << page_order));
+
+	entry->buf_size = size;
+	entry->page_order = page_order;
+	byte_count = 0;
+	page_count = 0;
+
+	while (entry->buf_count < count) {
+
+		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
+
+		if (!dmah) {
+			/* Set count correctly so we free the proper amount. */
+			entry->buf_count = count;
+			entry->seg_count = count;
+			drm_cleanup_buf_error(dev, entry);
+			drm_free(temp_pagelist,
+				 (dma->page_count + (count << page_order))
+				 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
+			mutex_unlock(&dev->struct_mutex);
+			atomic_dec(&dev->buf_alloc);
+			return -ENOMEM;
+		}
+		entry->seglist[entry->seg_count++] = dmah;
+		for (i = 0; i < (1 << page_order); i++) {
+			DRM_DEBUG("page %d @ 0x%08lx\n",
+				  dma->page_count + page_count,
+				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
+			temp_pagelist[dma->page_count + page_count++]
+				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
+		}
+		for (offset = 0;
+		     offset + size <= total && entry->buf_count < count;
+		     offset += alignment, ++entry->buf_count) {
+			buf = &entry->buflist[entry->buf_count];
+			buf->idx = dma->buf_count + entry->buf_count;
+			buf->total = alignment;
+			buf->order = order;
+			buf->used = 0;
+			buf->offset = (dma->byte_count + byte_count + offset);
+			buf->address = (void *)(dmah->vaddr + offset);
+			buf->bus_address = dmah->busaddr + offset;
+			buf->next = NULL;
+			buf->waiting = 0;
+			buf->pending = 0;
+			init_waitqueue_head(&buf->dma_wait);
+			buf->file_priv = NULL;
+
+			buf->dev_priv_size = dev->driver->dev_priv_size;
+			buf->dev_private = drm_alloc(buf->dev_priv_size,
+						     DRM_MEM_BUFS);
+			if (!buf->dev_private) {
+				/* Set count correctly so we free the proper amount. */
+				entry->buf_count = count;
+				entry->seg_count = count;
+				drm_cleanup_buf_error(dev, entry);
+				drm_free(temp_pagelist,
+					 (dma->page_count +
+					  (count << page_order))
+					 * sizeof(*dma->pagelist),
+					 DRM_MEM_PAGES);
+				mutex_unlock(&dev->struct_mutex);
+				atomic_dec(&dev->buf_alloc);
+				return -ENOMEM;
+			}
+			memset(buf->dev_private, 0, buf->dev_priv_size);
+
+			DRM_DEBUG("buffer %d @ %p\n",
+				  entry->buf_count, buf->address);
+		}
+		byte_count += PAGE_SIZE << page_order;
+	}
+
+	temp_buflist = drm_realloc(dma->buflist,
+				   dma->buf_count * sizeof(*dma->buflist),
+				   (dma->buf_count + entry->buf_count)
+				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
+	if (!temp_buflist) {
+		/* Free the entry because it isn't valid */
+		drm_cleanup_buf_error(dev, entry);
+		drm_free(temp_pagelist,
+			 (dma->page_count + (count << page_order))
+			 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+	dma->buflist = temp_buflist;
+
+	for (i = 0; i < entry->buf_count; i++) {
+		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+	}
+
+	/* No allocations failed, so now we can replace the original pagelist
+	 * with the new one.
+	 */
+	if (dma->page_count) {
+		drm_free(dma->pagelist,
+			 dma->page_count * sizeof(*dma->pagelist),
+			 DRM_MEM_PAGES);
+	}
+	dma->pagelist = temp_pagelist;
+
+	dma->buf_count += entry->buf_count;
+	dma->seg_count += entry->seg_count;
+	dma->page_count += entry->seg_count << page_order;
+	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	request->count = entry->buf_count;
+	request->size = size;
+
+	if (request->flags & _DRM_PCI_BUFFER_RO)
+		dma->flags = _DRM_DMA_USE_PCI_RO;
+
+	atomic_dec(&dev->buf_alloc);
+	return 0;
+
+}
+EXPORT_SYMBOL(drm_addbufs_pci);
+
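For illustration, a minimal kernel-side sketch of how a driver that manages its own DMA pool might call the exported helper directly; the function name, buffer count and size are assumptions, not taken from any real driver, and real drivers pass hardware-specific values from their setup paths.

#include "drmP.h"

/* Illustrative only: request a pool of page-aligned PCI DMA buffers. */
static int example_setup_dma_pool(struct drm_device *dev)
{
	struct drm_buf_desc request;
	int ret;

	memset(&request, 0, sizeof(request));
	request.count = 64;			/* buffers wanted */
	request.size = 64 * 1024;		/* bytes per buffer; rounded up via drm_order() */
	request.flags = _DRM_PAGE_ALIGN;	/* align each buffer to a page boundary */

	ret = drm_addbufs_pci(dev, &request);
	if (ret)
		return ret;

	/* On success the helper writes back the number of buffers created. */
	DRM_DEBUG("created %d DMA buffers of %d bytes\n",
		  request.count, request.size);
	return 0;
}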
+static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
+{
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf_entry *entry;
+	struct drm_buf *buf;
+	unsigned long offset;
+	unsigned long agp_offset;
+	int count;
+	int order;
+	int size;
+	int alignment;
+	int page_order;
+	int total;
+	int byte_count;
+	int i;
+	struct drm_buf **temp_buflist;
+
+	if (!drm_core_check_feature(dev, DRIVER_SG))
+		return -EINVAL;
+
+	if (!dma)
+		return -EINVAL;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	count = request->count;
+	order = drm_order(request->size);
+	size = 1 << order;
+
+	alignment = (request->flags & _DRM_PAGE_ALIGN)
+	    ? PAGE_ALIGN(size) : size;
+	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+	total = PAGE_SIZE << page_order;
+
+	byte_count = 0;
+	agp_offset = request->agp_start;
+
+	DRM_DEBUG("count:      %d\n", count);
+	DRM_DEBUG("order:      %d\n", order);
+	DRM_DEBUG("size:       %d\n", size);
+	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+	DRM_DEBUG("alignment:  %d\n", alignment);
+	DRM_DEBUG("page_order: %d\n", page_order);
+	DRM_DEBUG("total:      %d\n", total);
+
+	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+		return -EINVAL;
+	if (dev->queue_count)
+		return -EBUSY;	/* Not while in use */
+
+	spin_lock(&dev->count_lock);
+	if (dev->buf_use) {
+		spin_unlock(&dev->count_lock);
+		return -EBUSY;
+	}
+	atomic_inc(&dev->buf_alloc);
+	spin_unlock(&dev->count_lock);
+
+	mutex_lock(&dev->struct_mutex);
+	entry = &dma->bufs[order];
+	if (entry->buf_count) {
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;	/* May only call once for each order */
+	}
+
+	if (count < 0 || count > 4096) {
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -EINVAL;
+	}
+
+	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+				   DRM_MEM_BUFS);
+	if (!entry->buflist) {
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+	memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+	entry->buf_size = size;
+	entry->page_order = page_order;
+
+	offset = 0;
+
+	while (entry->buf_count < count) {
+		buf = &entry->buflist[entry->buf_count];
+		buf->idx = dma->buf_count + entry->buf_count;
+		buf->total = alignment;
+		buf->order = order;
+		buf->used = 0;
+
+		buf->offset = (dma->byte_count + offset);
+		buf->bus_address = agp_offset + offset;
+		buf->address = (void *)(agp_offset + offset
+					+ (unsigned long)dev->sg->virtual);
+		buf->next = NULL;
+		buf->waiting = 0;
+		buf->pending = 0;
+		init_waitqueue_head(&buf->dma_wait);
+		buf->file_priv = NULL;
+
+		buf->dev_priv_size = dev->driver->dev_priv_size;
+		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
+		if (!buf->dev_private) {
+			/* Set count correctly so we free the proper amount. */
+			entry->buf_count = count;
+			drm_cleanup_buf_error(dev, entry);
+			mutex_unlock(&dev->struct_mutex);
+			atomic_dec(&dev->buf_alloc);
+			return -ENOMEM;
+		}
+
+		memset(buf->dev_private, 0, buf->dev_priv_size);
+
+		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
+
+		offset += alignment;
+		entry->buf_count++;
+		byte_count += PAGE_SIZE << page_order;
+	}
+
+	DRM_DEBUG("byte_count: %d\n", byte_count);
+
+	temp_buflist = drm_realloc(dma->buflist,
+				   dma->buf_count * sizeof(*dma->buflist),
+				   (dma->buf_count + entry->buf_count)
+				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
+	if (!temp_buflist) {
+		/* Free the entry because it isn't valid */
+		drm_cleanup_buf_error(dev, entry);
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+	dma->buflist = temp_buflist;
+
+	for (i = 0; i < entry->buf_count; i++) {
+		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+	}
+
+	dma->buf_count += entry->buf_count;
+	dma->seg_count += entry->seg_count;
+	dma->page_count += byte_count >> PAGE_SHIFT;
+	dma->byte_count += byte_count;
+
+	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	request->count = entry->buf_count;
+	request->size = size;
+
+	dma->flags = _DRM_DMA_USE_SG;
+
+	atomic_dec(&dev->buf_alloc);
+	return 0;
+}
+
+static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
+{
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf_entry *entry;
+	struct drm_buf *buf;
+	unsigned long offset;
+	unsigned long agp_offset;
+	int count;
+	int order;
+	int size;
+	int alignment;
+	int page_order;
+	int total;
+	int byte_count;
+	int i;
+	struct drm_buf **temp_buflist;
+
+	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
+		return -EINVAL;
+
+	if (!dma)
+		return -EINVAL;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	count = request->count;
+	order = drm_order(request->size);
+	size = 1 << order;
+
+	alignment = (request->flags & _DRM_PAGE_ALIGN)
+	    ? PAGE_ALIGN(size) : size;
+	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+	total = PAGE_SIZE << page_order;
+
+	byte_count = 0;
+	agp_offset = request->agp_start;
+
+	DRM_DEBUG("count:      %d\n", count);
+	DRM_DEBUG("order:      %d\n", order);
+	DRM_DEBUG("size:       %d\n", size);
+	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+	DRM_DEBUG("alignment:  %d\n", alignment);
+	DRM_DEBUG("page_order: %d\n", page_order);
+	DRM_DEBUG("total:      %d\n", total);
+
+	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+		return -EINVAL;
+	if (dev->queue_count)
+		return -EBUSY;	/* Not while in use */
+
+	spin_lock(&dev->count_lock);
+	if (dev->buf_use) {
+		spin_unlock(&dev->count_lock);
+		return -EBUSY;
+	}
+	atomic_inc(&dev->buf_alloc);
+	spin_unlock(&dev->count_lock);
+
+	mutex_lock(&dev->struct_mutex);
+	entry = &dma->bufs[order];
+	if (entry->buf_count) {
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;	/* May only call once for each order */
+	}
+
+	if (count < 0 || count > 4096) {
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -EINVAL;
+	}
+
+	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+				   DRM_MEM_BUFS);
+	if (!entry->buflist) {
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+	memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+	entry->buf_size = size;
+	entry->page_order = page_order;
+
+	offset = 0;
+
+	while (entry->buf_count < count) {
+		buf = &entry->buflist[entry->buf_count];
+		buf->idx = dma->buf_count + entry->buf_count;
+		buf->total = alignment;
+		buf->order = order;
+		buf->used = 0;
+
+		buf->offset = (dma->byte_count + offset);
+		buf->bus_address = agp_offset + offset;
+		buf->address = (void *)(agp_offset + offset);
+		buf->next = NULL;
+		buf->waiting = 0;
+		buf->pending = 0;
+		init_waitqueue_head(&buf->dma_wait);
+		buf->file_priv = NULL;
+
+		buf->dev_priv_size = dev->driver->dev_priv_size;
+		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
+		if (!buf->dev_private) {
+			/* Set count correctly so we free the proper amount. */
+			entry->buf_count = count;
+			drm_cleanup_buf_error(dev, entry);
+			mutex_unlock(&dev->struct_mutex);
+			atomic_dec(&dev->buf_alloc);
+			return -ENOMEM;
+		}
+		memset(buf->dev_private, 0, buf->dev_priv_size);
+
+		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
+
+		offset += alignment;
+		entry->buf_count++;
+		byte_count += PAGE_SIZE << page_order;
+	}
+
+	DRM_DEBUG("byte_count: %d\n", byte_count);
+
+	temp_buflist = drm_realloc(dma->buflist,
+				   dma->buf_count * sizeof(*dma->buflist),
+				   (dma->buf_count + entry->buf_count)
+				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
+	if (!temp_buflist) {
+		/* Free the entry because it isn't valid */
+		drm_cleanup_buf_error(dev, entry);
+		mutex_unlock(&dev->struct_mutex);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+	dma->buflist = temp_buflist;
+
+	for (i = 0; i < entry->buf_count; i++) {
+		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+	}
+
+	dma->buf_count += entry->buf_count;
+	dma->seg_count += entry->seg_count;
+	dma->page_count += byte_count >> PAGE_SHIFT;
+	dma->byte_count += byte_count;
+
+	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	request->count = entry->buf_count;
+	request->size = size;
+
+	dma->flags = _DRM_DMA_USE_FB;
+
+	atomic_dec(&dev->buf_alloc);
+	return 0;
+}
+
+
+/**
+ * Add buffers for DMA transfers (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a struct drm_buf_desc request.
+ * \return zero on success or a negative number on failure.
+ *
+ * Depending on the memory type specified in drm_buf_desc::flags and the
+ * build options, it dispatches the call to drm_addbufs_agp(),
+ * drm_addbufs_sg(), drm_addbufs_fb() or drm_addbufs_pci() for AGP,
+ * scatter-gather, framebuffer or consistent PCI memory respectively.
+ */
+int drm_addbufs(struct drm_device *dev, void *data,
+		struct drm_file *file_priv)
+{
+	struct drm_buf_desc *request = data;
+	int ret;
+
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+		return -EINVAL;
+
+#if __OS_HAS_AGP
+	if (request->flags & _DRM_AGP_BUFFER)
+		ret = drm_addbufs_agp(dev, request);
+	else
+#endif
+	if (request->flags & _DRM_SG_BUFFER)
+		ret = drm_addbufs_sg(dev, request);
+	else if (request->flags & _DRM_FB_BUFFER)
+		ret = drm_addbufs_fb(dev, request);
+	else
+		ret = drm_addbufs_pci(dev, request);
+
+	return ret;
+}
+
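A hedged user-space sketch of driving this ioctl directly; the file descriptor, AGP start offset, counts and sizes are placeholders, and the include path for drm.h follows libdrm conventions and may differ on a given system.

#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>	/* drm.h as shipped with libdrm; include path may vary */

/* Illustrative only: ask for 32 AGP-backed, page-aligned buffers of 16 KiB. */
static int example_add_agp_bufs(int fd, unsigned long agp_start)
{
	struct drm_buf_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.count = 32;
	desc.size = 16 * 1024;
	desc.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;
	desc.agp_start = agp_start;	/* byte offset into the bound AGP region */

	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc))
		return -1;

	return desc.count;		/* number of buffers actually created */
}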
+/**
+ * Get information about the buffer mappings.
+ *
+ * This was originally meant for debugging purposes, or for use by a
+ * sophisticated client library to determine how best to use the available
+ * buffers (e.g., large buffers can be used for image transfer).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_buf_info structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Increments drm_device::buf_use while holding the drm_device::count_lock
+ * lock, preventing further buffer allocations after this call. Information
+ * about each requested buffer is then copied into user space.
+ */
+int drm_infobufs(struct drm_device *dev, void *data,
+		 struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf_info *request = data;
+	int i;
+	int count;
+
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+		return -EINVAL;
+
+	if (!dma)
+		return -EINVAL;
+
+	spin_lock(&dev->count_lock);
+	if (atomic_read(&dev->buf_alloc)) {
+		spin_unlock(&dev->count_lock);
+		return -EBUSY;
+	}
+	++dev->buf_use;		/* Can't allocate more after this call */
+	spin_unlock(&dev->count_lock);
+
+	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
+		if (dma->bufs[i].buf_count)
+			++count;
+	}
+
+	DRM_DEBUG("count = %d\n", count);
+
+	if (request->count >= count) {
+		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
+			if (dma->bufs[i].buf_count) {
+				struct drm_buf_desc __user *to =
+				    &request->list[count];
+				struct drm_buf_entry *from = &dma->bufs[i];
+				struct drm_freelist *list = &dma->bufs[i].freelist;
+				if (copy_to_user(&to->count,
+						 &from->buf_count,
+						 sizeof(from->buf_count)) ||
+				    copy_to_user(&to->size,
+						 &from->buf_size,
+						 sizeof(from->buf_size)) ||
+				    copy_to_user(&to->low_mark,
+						 &list->low_mark,
+						 sizeof(list->low_mark)) ||
+				    copy_to_user(&to->high_mark,
+						 &list->high_mark,
+						 sizeof(list->high_mark)))
+					return -EFAULT;
+
+				DRM_DEBUG("%d %d %d %d %d\n",
+					  i,
+					  dma->bufs[i].buf_count,
+					  dma->bufs[i].buf_size,
+					  dma->bufs[i].freelist.low_mark,
+					  dma->bufs[i].freelist.high_mark);
+				++count;
+			}
+		}
+	}
+	request->count = count;
+
+	return 0;
+}
+
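The interface lends itself to the usual two-pass query pattern; a user-space sketch follows, where fd is assumed to be an authenticated DRM file descriptor and the drm.h include path follows libdrm conventions.

#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Illustrative only: first learn how many buffer size classes exist,
 * then fetch one drm_buf_desc per class. */
static struct drm_buf_desc *example_query_buf_classes(int fd, int *nclasses)
{
	struct drm_buf_info info;
	struct drm_buf_desc *descs;

	memset(&info, 0, sizeof(info));
	if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info))	/* pass 1: count only */
		return NULL;

	descs = calloc(info.count, sizeof(*descs));
	if (!descs)
		return NULL;

	info.list = descs;
	if (ioctl(fd, DRM_IOCTL_INFO_BUFS, &info)) {	/* pass 2: fill descs[] */
		free(descs);
		return NULL;
	}

	*nclasses = info.count;
	return descs;
}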
+/**
+ * Specifies a low and high water mark for buffer allocation.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg a pointer to a drm_buf_desc structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies that the size order falls between the admissible orders and
+ * updates the low and high water marks of the respective
+ * drm_device_dma::bufs entry.
+ *
+ * \note This ioctl is deprecated and rarely, if ever, used.
+ */
+int drm_markbufs(struct drm_device *dev, void *data,
+		 struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf_desc *request = data;
+	int order;
+	struct drm_buf_entry *entry;
+
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+		return -EINVAL;
+
+	if (!dma)
+		return -EINVAL;
+
+	DRM_DEBUG("%d, %d, %d\n",
+		  request->size, request->low_mark, request->high_mark);
+	order = drm_order(request->size);
+	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+		return -EINVAL;
+	entry = &dma->bufs[order];
+
+	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
+		return -EINVAL;
+	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
+		return -EINVAL;
+
+	entry->freelist.low_mark = request->low_mark;
+	entry->freelist.high_mark = request->high_mark;
+
+	return 0;
+}
+
+/**
+ * Unreserve the buffers in list, previously reserved using drmDMA.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_buf_free structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls free_buffer() for each used buffer.
+ * This function is primarily used for debugging.
+ */
+int drm_freebufs(struct drm_device *dev, void *data,
+		 struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf_free *request = data;
+	int i;
+	int idx;
+	struct drm_buf *buf;
+
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+		return -EINVAL;
+
+	if (!dma)
+		return -EINVAL;
+
+	DRM_DEBUG("%d\n", request->count);
+	for (i = 0; i < request->count; i++) {
+		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
+			return -EFAULT;
+		if (idx < 0 || idx >= dma->buf_count) {
+			DRM_ERROR("Index %d (of %d max)\n",
+				  idx, dma->buf_count - 1);
+			return -EINVAL;
+		}
+		buf = dma->buflist[idx];
+		if (buf->file_priv != file_priv) {
+			DRM_ERROR("Process %d freeing buffer not owned\n",
+				  task_pid_nr(current));
+			return -EINVAL;
+		}
+		drm_free_buffer(dev, buf);
+	}
+
+	return 0;
+}
+
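A short user-space sketch of handing buffers back by index; the indices and file descriptor are placeholders for illustration only.

#include <sys/ioctl.h>
#include <drm/drm.h>

/* Illustrative only: release buffers 5 and 6, which must have been
 * handed out to this file descriptor by a prior DMA request. */
static int example_free_two_bufs(int fd)
{
	int idx[2] = { 5, 6 };
	struct drm_buf_free req = { .count = 2, .list = idx };

	return ioctl(fd, DRM_IOCTL_FREE_BUFS, &req);
}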
+/**
+ * Maps all of the DMA buffers into client-virtual space (ioctl).
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg pointer to a drm_buf_map structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
+ * about each buffer into user space. For PCI buffers, it calls do_mmap() with
+ * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
+ * drm_mmap_dma().
+ */
+int drm_mapbufs(struct drm_device *dev, void *data,
+	        struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	int retcode = 0;
+	const int zero = 0;
+	unsigned long virtual;
+	unsigned long address;
+	struct drm_buf_map *request = data;
+	int i;
+
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+		return -EINVAL;
+
+	if (!dma)
+		return -EINVAL;
+
+	spin_lock(&dev->count_lock);
+	if (atomic_read(&dev->buf_alloc)) {
+		spin_unlock(&dev->count_lock);
+		return -EBUSY;
+	}
+	dev->buf_use++;		/* Can't allocate more after this call */
+	spin_unlock(&dev->count_lock);
+
+	if (request->count >= dma->buf_count) {
+		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
+		    || (drm_core_check_feature(dev, DRIVER_SG)
+			&& (dma->flags & _DRM_DMA_USE_SG))
+		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
+			&& (dma->flags & _DRM_DMA_USE_FB))) {
+			struct drm_map *map = dev->agp_buffer_map;
+			unsigned long token = dev->agp_buffer_token;
+
+			if (!map) {
+				retcode = -EINVAL;
+				goto done;
+			}
+			down_write(&current->mm->mmap_sem);
+			virtual = do_mmap(file_priv->filp, 0, map->size,
+					  PROT_READ | PROT_WRITE,
+					  MAP_SHARED,
+					  token);
+			up_write(&current->mm->mmap_sem);
+		} else {
+			down_write(&current->mm->mmap_sem);
+			virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
+					  PROT_READ | PROT_WRITE,
+					  MAP_SHARED, 0);
+			up_write(&current->mm->mmap_sem);
+		}
+		if (virtual > -1024UL) {
+			/* Real error */
+			retcode = (signed long)virtual;
+			goto done;
+		}
+		request->virtual = (void __user *)virtual;
+
+		for (i = 0; i < dma->buf_count; i++) {
+			if (copy_to_user(&request->list[i].idx,
+					 &dma->buflist[i]->idx,
+					 sizeof(request->list[0].idx))) {
+				retcode = -EFAULT;
+				goto done;
+			}
+			if (copy_to_user(&request->list[i].total,
+					 &dma->buflist[i]->total,
+					 sizeof(request->list[0].total))) {
+				retcode = -EFAULT;
+				goto done;
+			}
+			if (copy_to_user(&request->list[i].used,
+					 &zero, sizeof(zero))) {
+				retcode = -EFAULT;
+				goto done;
+			}
+			address = virtual + dma->buflist[i]->offset;	/* *** */
+			if (copy_to_user(&request->list[i].address,
+					 &address, sizeof(address))) {
+				retcode = -EFAULT;
+				goto done;
+			}
+		}
+	}
+      done:
+	request->count = dma->buf_count;
+	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
+
+	return retcode;
+}
+
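Sketch of the user-space side, assuming nbufs was obtained beforehand (for example by summing the per-class counts returned by DRM_IOCTL_INFO_BUFS); the helper name is hypothetical and the error handling is minimal.

#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Illustrative only: map every DMA buffer into this process and return
 * the per-buffer description array. */
static struct drm_buf_pub *example_map_bufs(int fd, int nbufs)
{
	struct drm_buf_map map;
	struct drm_buf_pub *bufs;

	bufs = calloc(nbufs, sizeof(*bufs));
	if (!bufs)
		return NULL;

	memset(&map, 0, sizeof(map));
	map.count = nbufs;	/* must be >= the kernel's total buffer count */
	map.list = bufs;

	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &map)) {
		free(bufs);
		return NULL;
	}

	/* bufs[i].idx, bufs[i].total and bufs[i].address now describe each
	 * buffer inside the single mapping created by the ioctl. */
	return bufs;
}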
+/**
+ * Compute size order.  Returns the exponent of the smallest power of two
+ * that is greater than or equal to the given number.
+ *
+ * \param size size.
+ * \return order.
+ *
+ * \todo Can be made faster.
+ */
+int drm_order(unsigned long size)
+{
+	int order;
+	unsigned long tmp;
+
+	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
+
+	if (size & (size - 1))
+		++order;
+
+	return order;
+}
+EXPORT_SYMBOL(drm_order);
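To make the rounding behaviour concrete, here is a small stand-alone host-side illustration; it copies the same loop into user space rather than calling the kernel symbol, and the checked values were computed by hand.

#include <assert.h>

/* Host-side copy of the rounding rule implemented by drm_order():
 * the result is the exponent of the smallest power of two >= size. */
static int order_of(unsigned long size)
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;

	if (size & (size - 1))
		++order;

	return order;
}

int main(void)
{
	assert(order_of(4096) == 12);	/* already a power of two */
	assert(order_of(5000) == 13);	/* rounded up to 8192 */
	assert(order_of(1) == 0);
	return 0;
}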
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
new file mode 100644
index 000000000000..d505f695421f
--- /dev/null
+++ b/drivers/gpu/drm/drm_context.c
@@ -0,0 +1,471 @@
+/**
+ * \file drm_context.c
+ * IOCTLs for generic contexts
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * ChangeLog:
+ *  2001-11-16	Torsten Duwe <duwe@caldera.de>
+ *		added context constructor/destructor hooks,
+ *		needed by SiS driver's memory management.
+ */
+
+#include "drmP.h"
+
+/******************************************************************/
+/** \name Context bitmap support */
+/*@{*/
+
+/**
+ * Free a handle from the context bitmap.
+ *
+ * \param dev DRM device.
+ * \param ctx_handle context handle.
+ *
+ * Removes the entry specified by \p ctx_handle from drm_device::ctx_idr,
+ * while holding the drm_device::struct_mutex lock.
+ */
+void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
+{
+	mutex_lock(&dev->struct_mutex);
+	idr_remove(&dev->ctx_idr, ctx_handle);
+	mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * Context bitmap allocation.
+ *
+ * \param dev DRM device.
+ * \return (non-negative) context handle on success or a negative number on failure.
+ *
+ * Allocate a new context handle from drm_device::ctx_idr while holding the
+ * drm_device::struct_mutex lock.
+ */
+static int drm_ctxbitmap_next(struct drm_device * dev)
+{
+	int new_id;
+	int ret;
+
+again:
+	if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
+		DRM_ERROR("Out of memory expanding drawable idr\n");
+		return -ENOMEM;
+	}
+	mutex_lock(&dev->struct_mutex);
+	ret = idr_get_new_above(&dev->ctx_idr, NULL,
+				DRM_RESERVED_CONTEXTS, &new_id);
+	if (ret == -EAGAIN) {
+		mutex_unlock(&dev->struct_mutex);
+		goto again;
+	}
+	mutex_unlock(&dev->struct_mutex);
+	return new_id;
+}
+
+/**
+ * Context bitmap initialization.
+ *
+ * \param dev DRM device.
+ *
+ * Initialise the drm_device::ctx_idr
+ */
+int drm_ctxbitmap_init(struct drm_device * dev)
+{
+	idr_init(&dev->ctx_idr);
+	return 0;
+}
+
+/**
+ * Context bitmap cleanup.
+ *
+ * \param dev DRM device.
+ *
+ * Remove all entries from drm_device::ctx_idr while holding the
+ * drm_device::struct_mutex lock.
+ */
+void drm_ctxbitmap_cleanup(struct drm_device * dev)
+{
+	mutex_lock(&dev->struct_mutex);
+	idr_remove_all(&dev->ctx_idr);
+	mutex_unlock(&dev->struct_mutex);
+}
+
+/*@}*/
+
+/******************************************************************/
+/** \name Per Context SAREA Support */
+/*@{*/
+
+/**
+ * Get per-context SAREA.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx_priv_map structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Looks up the map stored in drm_device::ctx_idr under the given context
+ * handle and returns the corresponding user-space map token.
+ */
+int drm_getsareactx(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_ctx_priv_map *request = data;
+	struct drm_map *map;
+	struct drm_map_list *_entry;
+
+	mutex_lock(&dev->struct_mutex);
+
+	map = idr_find(&dev->ctx_idr, request->ctx_id);
+	if (!map) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	request->handle = NULL;
+	list_for_each_entry(_entry, &dev->maplist, head) {
+		if (_entry->map == map) {
+			request->handle =
+			    (void *)(unsigned long)_entry->user_token;
+			break;
+		}
+	}
+	if (request->handle == NULL)
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * Set per-context SAREA.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx_priv_map structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the mapping specified in \p arg and updates the entry in
+ * drm_device::ctx_idr with it.
+ */
+int drm_setsareactx(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_ctx_priv_map *request = data;
+	struct drm_map *map = NULL;
+	struct drm_map_list *r_list = NULL;
+
+	mutex_lock(&dev->struct_mutex);
+	list_for_each_entry(r_list, &dev->maplist, head) {
+		if (r_list->map
+		    && r_list->user_token == (unsigned long) request->handle)
+			goto found;
+	}
+      bad:
+	mutex_unlock(&dev->struct_mutex);
+	return -EINVAL;
+
+      found:
+	map = r_list->map;
+	if (!map)
+		goto bad;
+
+	if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id)))
+		goto bad;
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+/*@}*/
+
+/******************************************************************/
+/** \name The actual DRM context handling routines */
+/*@{*/
+
+/**
+ * Switch context.
+ *
+ * \param dev DRM device.
+ * \param old old context handle.
+ * \param new new context handle.
+ * \return zero on success or a negative number on failure.
+ *
+ * Attempt to set drm_device::context_flag.
+ */
+static int drm_context_switch(struct drm_device * dev, int old, int new)
+{
+	if (test_and_set_bit(0, &dev->context_flag)) {
+		DRM_ERROR("Reentering -- FIXME\n");
+		return -EBUSY;
+	}
+
+	DRM_DEBUG("Context switch from %d to %d\n", old, new);
+
+	if (new == dev->last_context) {
+		clear_bit(0, &dev->context_flag);
+		return 0;
+	}
+
+	return 0;
+}
+
+/**
+ * Complete context switch.
+ *
+ * \param dev DRM device.
+ * \param new new context handle.
+ * \return zero on success or a negative number on failure.
+ *
+ * Updates drm_device::last_context and drm_device::last_switch. Verifies the
+ * hardware lock is held, clears the drm_device::context_flag and wakes up
+ * drm_device::context_wait.
+ */
+static int drm_context_switch_complete(struct drm_device * dev, int new)
+{
+	dev->last_context = new;	/* PRE/POST: This is the _only_ writer. */
+	dev->last_switch = jiffies;
+
+	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+		DRM_ERROR("Lock isn't held after context switch\n");
+	}
+
+	/* If a context switch is ever initiated
+	   when the kernel holds the lock, release
+	   that lock here. */
+	clear_bit(0, &dev->context_flag);
+	wake_up(&dev->context_wait);
+
+	return 0;
+}
+
+/**
+ * Reserve contexts.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx_res structure.
+ * \return zero on success or a negative number on failure.
+ */
+int drm_resctx(struct drm_device *dev, void *data,
+	       struct drm_file *file_priv)
+{
+	struct drm_ctx_res *res = data;
+	struct drm_ctx ctx;
+	int i;
+
+	if (res->count >= DRM_RESERVED_CONTEXTS) {
+		memset(&ctx, 0, sizeof(ctx));
+		for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
+			ctx.handle = i;
+			if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx)))
+				return -EFAULT;
+		}
+	}
+	res->count = DRM_RESERVED_CONTEXTS;
+
+	return 0;
+}
+
+/**
+ * Add context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Get a new handle for the context and copy to userspace.
+ */
+int drm_addctx(struct drm_device *dev, void *data,
+	       struct drm_file *file_priv)
+{
+	struct drm_ctx_list *ctx_entry;
+	struct drm_ctx *ctx = data;
+
+	ctx->handle = drm_ctxbitmap_next(dev);
+	if (ctx->handle == DRM_KERNEL_CONTEXT) {
+		/* Skip kernel's context and get a new one. */
+		ctx->handle = drm_ctxbitmap_next(dev);
+	}
+	DRM_DEBUG("%d\n", ctx->handle);
+	if (ctx->handle == -1) {
+		DRM_DEBUG("Not enough free contexts.\n");
+		/* Should this return -EBUSY instead? */
+		return -ENOMEM;
+	}
+
+	if (ctx->handle != DRM_KERNEL_CONTEXT) {
+		if (dev->driver->context_ctor)
+			if (!dev->driver->context_ctor(dev, ctx->handle)) {
+				DRM_DEBUG("Running out of ctxs or memory.\n");
+				return -ENOMEM;
+			}
+	}
+
+	ctx_entry = drm_alloc(sizeof(*ctx_entry), DRM_MEM_CTXLIST);
+	if (!ctx_entry) {
+		DRM_DEBUG("out of memory\n");
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&ctx_entry->head);
+	ctx_entry->handle = ctx->handle;
+	ctx_entry->tag = file_priv;
+
+	mutex_lock(&dev->ctxlist_mutex);
+	list_add(&ctx_entry->head, &dev->ctxlist);
+	++dev->ctx_count;
+	mutex_unlock(&dev->ctxlist_mutex);
+
+	return 0;
+}
+
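From user space the context lifecycle is a plain pair of ioctls; a hedged sketch follows, where fd is assumed to be an authenticated, privileged DRM file descriptor and the function name is hypothetical.

#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Illustrative only: create a context, note its handle, then destroy it. */
static int example_context_roundtrip(int fd)
{
	struct drm_ctx ctx;

	memset(&ctx, 0, sizeof(ctx));
	if (ioctl(fd, DRM_IOCTL_ADD_CTX, &ctx))
		return -1;

	/* ctx.handle is now a valid context id to pass to driver ioctls. */

	if (ioctl(fd, DRM_IOCTL_RM_CTX, &ctx))
		return -1;

	return 0;
}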
+int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	/* This does nothing */
+	return 0;
+}
+
+/**
+ * Get context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ */
+int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	struct drm_ctx *ctx = data;
+
+	/* This is 0, because we don't handle any context flags */
+	ctx->flags = 0;
+
+	return 0;
+}
+
+/**
+ * Switch context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls context_switch().
+ */
+int drm_switchctx(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
+{
+	struct drm_ctx *ctx = data;
+
+	DRM_DEBUG("%d\n", ctx->handle);
+	return drm_context_switch(dev, dev->last_context, ctx->handle);
+}
+
+/**
+ * New context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls context_switch_complete().
+ */
+int drm_newctx(struct drm_device *dev, void *data,
+	       struct drm_file *file_priv)
+{
+	struct drm_ctx *ctx = data;
+
+	DRM_DEBUG("%d\n", ctx->handle);
+	drm_context_switch_complete(dev, ctx->handle);
+
+	return 0;
+}
+
+/**
+ * Remove context.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument pointing to a drm_ctx structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * If not the special kernel context, calls ctxbitmap_free() to free the specified context.
+ */
+int drm_rmctx(struct drm_device *dev, void *data,
+	      struct drm_file *file_priv)
+{
+	struct drm_ctx *ctx = data;
+
+	DRM_DEBUG("%d\n", ctx->handle);
+	if (ctx->handle == DRM_KERNEL_CONTEXT + 1) {
+		file_priv->remove_auth_on_close = 1;
+	}
+	if (ctx->handle != DRM_KERNEL_CONTEXT) {
+		if (dev->driver->context_dtor)
+			dev->driver->context_dtor(dev, ctx->handle);
+		drm_ctxbitmap_free(dev, ctx->handle);
+	}
+
+	mutex_lock(&dev->ctxlist_mutex);
+	if (!list_empty(&dev->ctxlist)) {
+		struct drm_ctx_list *pos, *n;
+
+		list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
+			if (pos->handle == ctx->handle) {
+				list_del(&pos->head);
+				drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);
+				--dev->ctx_count;
+			}
+		}
+	}
+	mutex_unlock(&dev->ctxlist_mutex);
+
+	return 0;
+}
+
+/*@}*/
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
new file mode 100644
index 000000000000..7a8e2fba4678
--- /dev/null
+++ b/drivers/gpu/drm/drm_dma.c
@@ -0,0 +1,180 @@
+/**
+ * \file drm_dma.c
+ * DMA IOCTL and function support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+/**
+ * Initialize the DMA data.
+ *
+ * \param dev DRM device.
+ * \return zero on success or a negative value on failure.
+ *
+ * Allocate and initialize a drm_device_dma structure.
+ */
+int drm_dma_setup(struct drm_device *dev)
+{
+	int i;
+
+	dev->dma = drm_alloc(sizeof(*dev->dma), DRM_MEM_DRIVER);
+	if (!dev->dma)
+		return -ENOMEM;
+
+	memset(dev->dma, 0, sizeof(*dev->dma));
+
+	for (i = 0; i <= DRM_MAX_ORDER; i++)
+		memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
+
+	return 0;
+}
+
+/**
+ * Cleanup the DMA resources.
+ *
+ * \param dev DRM device.
+ *
+ * Free all pages associated with DMA buffers, the buffers and pages lists, and
+ * finally the drm_device::dma structure itself.
+ */
+void drm_dma_takedown(struct drm_device *dev)
+{
+	struct drm_device_dma *dma = dev->dma;
+	int i, j;
+
+	if (!dma)
+		return;
+
+	/* Clear dma buffers */
+	for (i = 0; i <= DRM_MAX_ORDER; i++) {
+		if (dma->bufs[i].seg_count) {
+			DRM_DEBUG("order %d: buf_count = %d,"
+				  " seg_count = %d\n",
+				  i,
+				  dma->bufs[i].buf_count,
+				  dma->bufs[i].seg_count);
+			for (j = 0; j < dma->bufs[i].seg_count; j++) {
+				if (dma->bufs[i].seglist[j]) {
+					drm_pci_free(dev, dma->bufs[i].seglist[j]);
+				}
+			}
+			drm_free(dma->bufs[i].seglist,
+				 dma->bufs[i].seg_count
+				 * sizeof(*dma->bufs[0].seglist), DRM_MEM_SEGS);
+		}
+		if (dma->bufs[i].buf_count) {
+			for (j = 0; j < dma->bufs[i].buf_count; j++) {
+				if (dma->bufs[i].buflist[j].dev_private) {
+					drm_free(dma->bufs[i].buflist[j].
+						 dev_private,
+						 dma->bufs[i].buflist[j].
+						 dev_priv_size, DRM_MEM_BUFS);
+				}
+			}
+			drm_free(dma->bufs[i].buflist,
+				 dma->bufs[i].buf_count *
+				 sizeof(*dma->bufs[0].buflist), DRM_MEM_BUFS);
+		}
+	}
+
+	if (dma->buflist) {
+		drm_free(dma->buflist,
+			 dma->buf_count * sizeof(*dma->buflist), DRM_MEM_BUFS);
+	}
+
+	if (dma->pagelist) {
+		drm_free(dma->pagelist,
+			 dma->page_count * sizeof(*dma->pagelist),
+			 DRM_MEM_PAGES);
+	}
+	drm_free(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
+	dev->dma = NULL;
+}
+
+/**
+ * Free a buffer.
+ *
+ * \param dev DRM device.
+ * \param buf buffer to free.
+ *
+ * Resets the fields of \p buf.
+ */
+void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf)
+{
+	if (!buf)
+		return;
+
+	buf->waiting = 0;
+	buf->pending = 0;
+	buf->file_priv = NULL;
+	buf->used = 0;
+
+	if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)
+	    && waitqueue_active(&buf->dma_wait)) {
+		wake_up_interruptible(&buf->dma_wait);
+	}
+}
+
+/**
+ * Reclaim the buffers.
+ *
+ * \param file_priv DRM file private.
+ *
+ * Frees each buffer associated with \p file_priv not already on the hardware.
+ */
+void drm_core_reclaim_buffers(struct drm_device *dev,
+			      struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	int i;
+
+	if (!dma)
+		return;
+	for (i = 0; i < dma->buf_count; i++) {
+		if (dma->buflist[i]->file_priv == file_priv) {
+			switch (dma->buflist[i]->list) {
+			case DRM_LIST_NONE:
+				drm_free_buffer(dev, dma->buflist[i]);
+				break;
+			case DRM_LIST_WAIT:
+				dma->buflist[i]->list = DRM_LIST_RECLAIM;
+				break;
+			default:
+				/* Buffer already on hardware. */
+				break;
+			}
+		}
+	}
+}
+
+EXPORT_SYMBOL(drm_core_reclaim_buffers);
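Drivers that rely on the generic buffer management typically plug this helper straight into their driver structure; the fragment below is a hedged sketch with a hypothetical driver name, showing only the hook assignment and feature flags (field names as used by struct drm_driver in this era).

/* Illustrative fragment of a driver's drm_driver definition. */
static struct drm_driver example_driver = {
	.driver_features = DRIVER_USE_AGP | DRIVER_HAVE_DMA,
	/* let the DRM core hand back a client's buffers when it closes */
	.reclaim_buffers = drm_core_reclaim_buffers,
	/* ... fops, pci_driver, name/desc/date and the rest omitted ... */
};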
diff --git a/drivers/gpu/drm/drm_drawable.c b/drivers/gpu/drm/drm_drawable.c
new file mode 100644
index 000000000000..1839c57663c5
--- /dev/null
+++ b/drivers/gpu/drm/drm_drawable.c
@@ -0,0 +1,192 @@
+/**
+ * \file drm_drawable.c
+ * IOCTLs for drawables
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ * \author Michel Dänzer <michel@tungstengraphics.com>
+ */
+
+/*
+ * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, North Dakota.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+/**
+ * Allocate a drawable ID and memory to store information about it.
+ */
+int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	unsigned long irqflags;
+	struct drm_draw *draw = data;
+	int new_id = 0;
+	int ret;
+
+again:
+	if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) {
+		DRM_ERROR("Out of memory expanding drawable idr\n");
+		return -ENOMEM;
+	}
+
+	spin_lock_irqsave(&dev->drw_lock, irqflags);
+	ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id);
+	if (ret == -EAGAIN) {
+		spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+		goto again;
+	}
+
+	spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+
+	draw->handle = new_id;
+
+	DRM_DEBUG("%d\n", draw->handle);
+
+	return 0;
+}
+
+/**
+ * Free a drawable ID and the memory used to store information about it.
+ */
+int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	struct drm_draw *draw = data;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev->drw_lock, irqflags);
+
+	drm_free(drm_get_drawable_info(dev, draw->handle),
+		 sizeof(struct drm_drawable_info), DRM_MEM_BUFS);
+
+	idr_remove(&dev->drw_idr, draw->handle);
+
+	spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+	DRM_DEBUG("%d\n", draw->handle);
+	return 0;
+}
+
+int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	struct drm_update_draw *update = data;
+	unsigned long irqflags;
+	struct drm_clip_rect *rects;
+	struct drm_drawable_info *info;
+	int err;
+
+	info = idr_find(&dev->drw_idr, update->handle);
+	if (!info) {
+		info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS);
+		if (!info)
+			return -ENOMEM;
+		if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) {
+			DRM_ERROR("No such drawable %d\n", update->handle);
+			drm_free(info, sizeof(*info), DRM_MEM_BUFS);
+			return -EINVAL;
+		}
+	}
+
+	switch (update->type) {
+	case DRM_DRAWABLE_CLIPRECTS:
+		if (update->num != info->num_rects) {
+			rects = drm_alloc(update->num * sizeof(struct drm_clip_rect),
+					 DRM_MEM_BUFS);
+		} else
+			rects = info->rects;
+
+		if (update->num && !rects) {
+			DRM_ERROR("Failed to allocate cliprect memory\n");
+			err = -ENOMEM;
+			goto error;
+		}
+
+		if (update->num && DRM_COPY_FROM_USER(rects,
+						     (struct drm_clip_rect __user *)
+						     (unsigned long)update->data,
+						     update->num *
+						     sizeof(*rects))) {
+			DRM_ERROR("Failed to copy cliprects from userspace\n");
+			err = -EFAULT;
+			goto error;
+		}
+
+		spin_lock_irqsave(&dev->drw_lock, irqflags);
+
+		if (rects != info->rects) {
+			drm_free(info->rects, info->num_rects *
+				 sizeof(struct drm_clip_rect), DRM_MEM_BUFS);
+		}
+
+		info->rects = rects;
+		info->num_rects = update->num;
+
+		spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+
+		DRM_DEBUG("Updated %d cliprects for drawable %d\n",
+			  info->num_rects, update->handle);
+		break;
+	default:
+		DRM_ERROR("Invalid update type %d\n", update->type);
+		return -EINVAL;
+	}
+
+	return 0;
+
+error:
+	if (rects != info->rects)
+		drm_free(rects, update->num * sizeof(struct drm_clip_rect),
+			 DRM_MEM_BUFS);
+
+	return err;
+}
+
+/**
+ * Caller must hold the drawable spinlock!
+ */
+struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id)
+{
+	return idr_find(&dev->drw_idr, id);
+}
+EXPORT_SYMBOL(drm_get_drawable_info);
+
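A hedged kernel-side sketch of how a driver might consume this lookup; the function name is hypothetical, and the drawable spinlock must cover every access to the returned structure because drm_update_drawable_info() can free the rects array at any time.

#include "drmP.h"

/* Illustrative only: dump the cliprects of one drawable while holding
 * the drawable spinlock for the whole traversal. */
static void example_dump_cliprects(struct drm_device *dev, drm_drawable_t id)
{
	struct drm_drawable_info *info;
	unsigned long irqflags;
	int i;

	spin_lock_irqsave(&dev->drw_lock, irqflags);
	info = drm_get_drawable_info(dev, id);
	if (info) {
		for (i = 0; i < info->num_rects; i++)
			DRM_DEBUG("rect %d: %d,%d-%d,%d\n", i,
				  info->rects[i].x1, info->rects[i].y1,
				  info->rects[i].x2, info->rects[i].y2);
	}
	spin_unlock_irqrestore(&dev->drw_lock, irqflags);
}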
+static int drm_drawable_free(int idr, void *p, void *data)
+{
+	struct drm_drawable_info *info = p;
+
+	if (info) {
+		drm_free(info->rects, info->num_rects *
+			 sizeof(struct drm_clip_rect), DRM_MEM_BUFS);
+		drm_free(info, sizeof(*info), DRM_MEM_BUFS);
+	}
+
+	return 0;
+}
+
+void drm_drawable_free_all(struct drm_device *dev)
+{
+	idr_for_each(&dev->drw_idr, drm_drawable_free, NULL);
+	idr_remove_all(&dev->drw_idr);
+}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
new file mode 100644
index 000000000000..564138714bb5
--- /dev/null
+++ b/drivers/gpu/drm/drm_drv.c
@@ -0,0 +1,540 @@
+/**
+ * \file drm_drv.c
+ * Generic driver template
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ *
+ * To use this template, you must at least define the following (samples
+ * given for the MGA driver):
+ *
+ * \code
+ * #define DRIVER_AUTHOR	"VA Linux Systems, Inc."
+ *
+ * #define DRIVER_NAME		"mga"
+ * #define DRIVER_DESC		"Matrox G200/G400"
+ * #define DRIVER_DATE		"20001127"
+ *
+ * #define drm_x		mga_##x
+ * \endcode
+ */
+
+/*
+ * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_core.h"
+
+static int drm_version(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv);
+
+/** Ioctl table */
+static struct drm_ioctl_desc drm_ioctls[] = {
+	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
+	/* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
+	DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+#if __OS_HAS_AGP
+	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+#endif
+
+	DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+};
+
+#define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
+
+/**
+ * Take down the DRM device.
+ *
+ * \param dev DRM device structure.
+ *
+ * Frees every resource in \p dev.
+ *
+ * \sa drm_device
+ */
+int drm_lastclose(struct drm_device * dev)
+{
+	struct drm_magic_entry *pt, *next;
+	struct drm_map_list *r_list, *list_t;
+	struct drm_vma_entry *vma, *vma_temp;
+	int i;
+
+	DRM_DEBUG("\n");
+
+	if (dev->driver->lastclose)
+		dev->driver->lastclose(dev);
+	DRM_DEBUG("driver lastclose completed\n");
+
+	if (dev->unique) {
+		drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
+		dev->unique = NULL;
+		dev->unique_len = 0;
+	}
+
+	if (dev->irq_enabled)
+		drm_irq_uninstall(dev);
+
+	mutex_lock(&dev->struct_mutex);
+
+	/* Free drawable information memory */
+	drm_drawable_free_all(dev);
+	del_timer(&dev->timer);
+
+	/* Clear pid list */
+	if (dev->magicfree.next) {
+		list_for_each_entry_safe(pt, next, &dev->magicfree, head) {
+			list_del(&pt->head);
+			drm_ht_remove_item(&dev->magiclist, &pt->hash_item);
+			drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
+		}
+		drm_ht_remove(&dev->magiclist);
+	}
+
+	/* Clear AGP information */
+	if (drm_core_has_AGP(dev) && dev->agp) {
+		struct drm_agp_mem *entry, *tempe;
+
+		/* Remove AGP resources, but leave dev->agp
+		   intact until drv_cleanup is called. */
+		list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
+			if (entry->bound)
+				drm_unbind_agp(entry->memory);
+			drm_free_agp(entry->memory, entry->pages);
+			drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
+		}
+		INIT_LIST_HEAD(&dev->agp->memory);
+
+		if (dev->agp->acquired)
+			drm_agp_release(dev);
+
+		dev->agp->acquired = 0;
+		dev->agp->enabled = 0;
+	}
+	if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
+		drm_sg_cleanup(dev->sg);
+		dev->sg = NULL;
+	}
+
+	/* Clear vma list (only built for debugging) */
+	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
+		list_del(&vma->head);
+		drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
+	}
+
+	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
+		if (!(r_list->map->flags & _DRM_DRIVER)) {
+			drm_rmmap_locked(dev, r_list->map);
+			r_list = NULL;
+		}
+	}
+
+	if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
+		for (i = 0; i < dev->queue_count; i++) {
+			if (dev->queuelist[i]) {
+				drm_free(dev->queuelist[i],
+					 sizeof(*dev->queuelist[0]),
+					 DRM_MEM_QUEUES);
+				dev->queuelist[i] = NULL;
+			}
+		}
+		drm_free(dev->queuelist,
+			 dev->queue_slots * sizeof(*dev->queuelist),
+			 DRM_MEM_QUEUES);
+		dev->queuelist = NULL;
+	}
+	dev->queue_count = 0;
+
+	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+		drm_dma_takedown(dev);
+
+	if (dev->lock.hw_lock) {
+		dev->sigdata.lock = dev->lock.hw_lock = NULL;	/* SHM removed */
+		dev->lock.file_priv = NULL;
+		wake_up_interruptible(&dev->lock.lock_queue);
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	DRM_DEBUG("lastclose completed\n");
+	return 0;
+}
+
+/**
+ * Module initialization. Called via init_module at module load time, or via
+ * linux/init/main.c (this is not currently supported).
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Walks the driver's PCI ID table, probing each matching device with
+ * pci_get_subsys() and handing it to drm_get_dev(), which registers the
+ * stub minor and performs the per-device initialization.
+ */
+int drm_init(struct drm_driver *driver)
+{
+	struct pci_dev *pdev = NULL;
+	struct pci_device_id *pid;
+	int i;
+
+	DRM_DEBUG("\n");
+
+	for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
+		pid = (struct pci_device_id *)&driver->pci_driver.id_table[i];
+
+		pdev = NULL;
+		/* pass back in pdev to account for multiple identical cards */
+		while ((pdev =
+			pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
+				       pid->subdevice, pdev)) != NULL) {
+			/* stealth mode requires a manual probe */
+			pci_dev_get(pdev);
+			drm_get_dev(pdev, pid, driver);
+		}
+	}
+	return 0;
+}
+
+EXPORT_SYMBOL(drm_init);
+
+/**
+ * Called from drm_exit() at module unload time.
+ *
+ * Cleans up one DRM device, calling drm_lastclose() and releasing the
+ * device's resources.
+ *
+ * \sa drm_init
+ */
+static void drm_cleanup(struct drm_device * dev)
+{
+	DRM_DEBUG("\n");
+
+	if (!dev) {
+		DRM_ERROR("cleanup called no dev\n");
+		return;
+	}
+
+	drm_lastclose(dev);
+
+	if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
+	    dev->agp && dev->agp->agp_mtrr >= 0) {
+		int retval;
+		retval = mtrr_del(dev->agp->agp_mtrr,
+				  dev->agp->agp_info.aper_base,
+				  dev->agp->agp_info.aper_size * 1024 * 1024);
+		DRM_DEBUG("mtrr_del=%d\n", retval);
+	}
+
+	if (drm_core_has_AGP(dev) && dev->agp) {
+		drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
+		dev->agp = NULL;
+	}
+
+	if (dev->driver->unload)
+		dev->driver->unload(dev);
+
+	drm_ht_remove(&dev->map_hash);
+	drm_ctxbitmap_cleanup(dev);
+
+	drm_put_minor(&dev->primary);
+	if (drm_put_dev(dev))
+		DRM_ERROR("Cannot unload module\n");
+}
+
+int drm_minors_cleanup(int id, void *ptr, void *data)
+{
+	struct drm_minor *minor = ptr;
+	struct drm_device *dev;
+	struct drm_driver *driver = data;
+
+	dev = minor->dev;
+	if (minor->dev->driver != driver)
+		return 0;
+
+	if (minor->type != DRM_MINOR_LEGACY)
+		return 0;
+
+	if (dev)
+		pci_dev_put(dev->pdev);
+	drm_cleanup(dev);
+	return 1;
+}
+
+void drm_exit(struct drm_driver *driver)
+{
+	DRM_DEBUG("\n");
+
+	idr_for_each(&drm_minors_idr, &drm_minors_cleanup, driver);
+
+	DRM_INFO("Module unloaded\n");
+}
+
+EXPORT_SYMBOL(drm_exit);
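+
+/*
+ * Minimal usage sketch, assuming a hypothetical "foo" driver: drm_init() and
+ * drm_exit() are meant to be called from the driver's own module init/exit
+ * hooks with its struct drm_driver, whose pci_driver.id_table feeds the
+ * probing loop in drm_init().  All "foo" names below are placeholders; real
+ * drivers also fill in .fops, .ioctls and the version/name fields.
+ *
+ *	static struct pci_device_id foo_pciidlist[] = {
+ *		{ 0x1234, 0x5678, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ *		{ 0, 0, 0 }
+ *	};
+ *
+ *	static struct drm_driver foo_driver = {
+ *		.lastclose = foo_lastclose,
+ *		.pci_driver = {
+ *			.name = "foo",
+ *			.id_table = foo_pciidlist,
+ *		},
+ *	};
+ *
+ *	static int __init foo_module_init(void)
+ *	{
+ *		return drm_init(&foo_driver);
+ *	}
+ *
+ *	static void __exit foo_module_exit(void)
+ *	{
+ *		drm_exit(&foo_driver);
+ *	}
+ *
+ *	module_init(foo_module_init);
+ *	module_exit(foo_module_exit);
+ */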
+
+/** File operations structure */
+static const struct file_operations drm_stub_fops = {
+	.owner = THIS_MODULE,
+	.open = drm_stub_open
+};
+
+static int __init drm_core_init(void)
+{
+	int ret = -ENOMEM;
+
+	idr_init(&drm_minors_idr);
+
+	if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
+		goto err_p1;
+
+	drm_class = drm_sysfs_create(THIS_MODULE, "drm");
+	if (IS_ERR(drm_class)) {
+		printk(KERN_ERR "DRM: Error creating drm class.\n");
+		ret = PTR_ERR(drm_class);
+		goto err_p2;
+	}
+
+	drm_proc_root = proc_mkdir("dri", NULL);
+	if (!drm_proc_root) {
+		DRM_ERROR("Cannot create /proc/dri\n");
+		ret = -1;
+		goto err_p3;
+	}
+
+	drm_mem_init();
+
+	DRM_INFO("Initialized %s %d.%d.%d %s\n",
+		 CORE_NAME, CORE_MAJOR, CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
+	return 0;
+err_p3:
+	drm_sysfs_destroy();
+err_p2:
+	unregister_chrdev(DRM_MAJOR, "drm");
+
+	idr_destroy(&drm_minors_idr);
+err_p1:
+	return ret;
+}
+
+static void __exit drm_core_exit(void)
+{
+	remove_proc_entry("dri", NULL);
+	drm_sysfs_destroy();
+
+	unregister_chrdev(DRM_MAJOR, "drm");
+
+	idr_destroy(&drm_minors_idr);
+}
+
+module_init(drm_core_init);
+module_exit(drm_core_exit);
+
+/**
+ * Get version information
+ *
+ * \param dev DRM device.
+ * \param data pointer to a drm_version structure to be filled in.
+ * \param file_priv DRM file private.
+ * \return zero on success or negative number on failure.
+ *
+ * Fills in the version information in \p data from the driver fields.
+ */
+static int drm_version(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct drm_version *version = data;
+	int len;	/* used by the DRM_COPY() macro below */
+
+	version->version_major = dev->driver->major;
+	version->version_minor = dev->driver->minor;
+	version->version_patchlevel = dev->driver->patchlevel;
+	DRM_COPY(version->name, dev->driver->name);
+	DRM_COPY(version->date, dev->driver->date);
+	DRM_COPY(version->desc, dev->driver->desc);
+
+	return 0;
+}
+
+/**
+ * Called whenever a process performs an ioctl on /dev/drm.
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ *
+ * Looks up the ioctl function in the ::ioctls table, checking for root
+ * privileges where required, and dispatches to the respective function.
+ */
+int drm_ioctl(struct inode *inode, struct file *filp,
+	      unsigned int cmd, unsigned long arg)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct drm_device *dev = file_priv->minor->dev;
+	struct drm_ioctl_desc *ioctl;
+	drm_ioctl_t *func;
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+	int retcode = -EINVAL;
+	char *kdata = NULL;
+
+	atomic_inc(&dev->ioctl_count);
+	atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
+	++file_priv->ioctl_count;
+
+	DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
+		  task_pid_nr(current), cmd, nr,
+		  (long)old_encode_dev(file_priv->minor->device),
+		  file_priv->authenticated);
+
+	if ((nr >= DRM_CORE_IOCTL_COUNT) &&
+	    ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
+		goto err_i1;
+	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) &&
+	    (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
+		ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+	else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) {
+		ioctl = &drm_ioctls[nr];
+		cmd = ioctl->cmd;
+	} else
+		goto err_i1;
+
+	/* Do not trust userspace, use our own definition */
+	func = ioctl->func;
+	/* is there a local override? */
+	if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
+		func = dev->driver->dma_ioctl;
+
+	if (!func) {
+		DRM_DEBUG("no function\n");
+		retcode = -EINVAL;
+	} else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
+		   ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
+		   ((ioctl->flags & DRM_MASTER) && !file_priv->master)) {
+		retcode = -EACCES;
+	} else {
+		if (cmd & (IOC_IN | IOC_OUT)) {
+			kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
+			if (!kdata) {
+				retcode = -ENOMEM;
+				goto err_i1;
+			}
+		}
+
+		if (cmd & IOC_IN) {
+			if (copy_from_user(kdata, (void __user *)arg,
+					   _IOC_SIZE(cmd)) != 0) {
+				retcode = -EFAULT;
+				goto err_i1;
+			}
+		}
+		retcode = func(dev, kdata, file_priv);
+
+		if ((retcode == 0) && (cmd & IOC_OUT)) {
+			if (copy_to_user((void __user *)arg, kdata,
+					 _IOC_SIZE(cmd)) != 0)
+				retcode = -EFAULT;
+		}
+	}
+
+      err_i1:
+	if (kdata)
+		kfree(kdata);
+	atomic_dec(&dev->ioctl_count);
+	if (retcode)
+		DRM_DEBUG("ret = %x\n", retcode);
+	return retcode;
+}
+
+EXPORT_SYMBOL(drm_ioctl);
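+
+/*
+ * Sketch of the driver-private side of the dispatch above, assuming a
+ * hypothetical "foo" driver: entries in dev->driver->ioctls are indexed by
+ * nr - DRM_COMMAND_BASE and carry the DRM_AUTH/DRM_MASTER/DRM_ROOT_ONLY
+ * access flags checked in drm_ioctl().  The ioctl number, payload structure
+ * and handler below are placeholders.
+ *
+ *	static int foo_init_ioctl(struct drm_device *dev, void *data,
+ *				  struct drm_file *file_priv);
+ *
+ *	static struct drm_ioctl_desc foo_ioctls[] = {
+ *		[0] = {
+ *			.cmd = DRM_IOWR(DRM_COMMAND_BASE + 0x00, struct foo_init),
+ *			.func = foo_init_ioctl,
+ *			.flags = DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY,
+ *		},
+ *	};
+ *
+ * with .ioctls = foo_ioctls and .num_ioctls = ARRAY_SIZE(foo_ioctls) set in
+ * the driver's struct drm_driver.
+ */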
+
+drm_local_map_t *drm_getsarea(struct drm_device *dev)
+{
+	struct drm_map_list *entry;
+
+	list_for_each_entry(entry, &dev->maplist, head) {
+		if (entry->map && entry->map->type == _DRM_SHM &&
+		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
+			return entry->map;
+		}
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(drm_getsarea);
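+
+/*
+ * Typical caller, sketched with a hypothetical per-driver private structure:
+ * drivers use this helper during initialization to locate the SAREA, i.e.
+ * the _DRM_SHM map that carries the hardware lock.
+ *
+ *	dev_priv->sarea = drm_getsarea(dev);
+ *	if (!dev_priv->sarea) {
+ *		DRM_ERROR("could not find sarea!\n");
+ *		return -EINVAL;
+ *	}
+ */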
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
new file mode 100644
index 000000000000..851a53f1acce
--- /dev/null
+++ b/drivers/gpu/drm/drm_fops.c
@@ -0,0 +1,471 @@
+/**
+ * \file drm_fops.c
+ * File operations for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Daryll Strauss <daryll@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_sarea.h"
+#include <linux/poll.h>
+#include <linux/smp_lock.h>
+
+static int drm_open_helper(struct inode *inode, struct file *filp,
+			   struct drm_device * dev);
+
+static int drm_setup(struct drm_device * dev)
+{
+	drm_local_map_t *map;
+	int i;
+	int ret;
+	u32 sareapage;
+
+	if (dev->driver->firstopen) {
+		ret = dev->driver->firstopen(dev);
+		if (ret != 0)
+			return ret;
+	}
+
+	dev->magicfree.next = NULL;
+
+	/* prebuild the SAREA */
+	sareapage = max_t(unsigned, SAREA_MAX, PAGE_SIZE);
+	i = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
+	if (i != 0)
+		return i;
+
+	atomic_set(&dev->ioctl_count, 0);
+	atomic_set(&dev->vma_count, 0);
+	dev->buf_use = 0;
+	atomic_set(&dev->buf_alloc, 0);
+
+	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
+		i = drm_dma_setup(dev);
+		if (i < 0)
+			return i;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
+		atomic_set(&dev->counts[i], 0);
+
+	drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER);
+	INIT_LIST_HEAD(&dev->magicfree);
+
+	dev->sigdata.lock = NULL;
+	init_waitqueue_head(&dev->lock.lock_queue);
+	dev->queue_count = 0;
+	dev->queue_reserved = 0;
+	dev->queue_slots = 0;
+	dev->queuelist = NULL;
+	dev->irq_enabled = 0;
+	dev->context_flag = 0;
+	dev->interrupt_flag = 0;
+	dev->dma_flag = 0;
+	dev->last_context = 0;
+	dev->last_switch = 0;
+	dev->last_checked = 0;
+	init_waitqueue_head(&dev->context_wait);
+	dev->if_version = 0;
+
+	dev->ctx_start = 0;
+	dev->lck_start = 0;
+
+	dev->buf_async = NULL;
+	init_waitqueue_head(&dev->buf_readers);
+	init_waitqueue_head(&dev->buf_writers);
+
+	DRM_DEBUG("\n");
+
+	/*
+	 * The kernel's context could be created here, but is now created
+	 * in drm_dma_enqueue.  This is more resource-efficient for
+	 * hardware that does not do DMA, but may mean that
+	 * drm_select_queue fails between the time the interrupt is
+	 * initialized and the time the queues are initialized.
+	 */
+
+	return 0;
+}
+
+/**
+ * Open file.
+ *
+ * \param inode device inode
+ * \param filp file pointer.
+ * \return zero on success or a negative number on failure.
+ *
+ * Looks up the DRM device with the matching minor number, calls
+ * drm_open_helper(), and increments the device open count. If the open count
+ * was previously zero, i.e. this is the first time the device is opened,
+ * drm_setup() is called.
+ */
+int drm_open(struct inode *inode, struct file *filp)
+{
+	struct drm_device *dev = NULL;
+	int minor_id = iminor(inode);
+	struct drm_minor *minor;
+	int retcode = 0;
+
+	minor = idr_find(&drm_minors_idr, minor_id);
+	if (!minor)
+		return -ENODEV;
+
+	if (!(dev = minor->dev))
+		return -ENODEV;
+
+	retcode = drm_open_helper(inode, filp, dev);
+	if (!retcode) {
+		atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
+		spin_lock(&dev->count_lock);
+		if (!dev->open_count++) {
+			spin_unlock(&dev->count_lock);
+			return drm_setup(dev);
+		}
+		spin_unlock(&dev->count_lock);
+	}
+
+	return retcode;
+}
+EXPORT_SYMBOL(drm_open);
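+
+/*
+ * Sketch of how drm_open() and the related helpers exported by the DRM core
+ * are normally wired into a driver's struct drm_driver (hypothetical "foo"
+ * driver; only helpers named in this patch are referenced):
+ *
+ *	static struct drm_driver foo_driver = {
+ *		...
+ *		.fops = {
+ *			.owner = THIS_MODULE,
+ *			.open = drm_open,
+ *			.release = drm_release,
+ *			.ioctl = drm_ioctl,
+ *			.poll = drm_poll,
+ *			.fasync = drm_fasync,
+ *		},
+ *	};
+ *
+ * drm_stub_open() then installs these operations on the file whenever the
+ * corresponding DRM character device is opened.
+ */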
+
+/**
+ * File \c open operation.
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ *
+ * Installs the file operations of the driver that owns this minor number into
+ * \p filp, calls the driver's \c open method, and restores the previous file
+ * operations if that call fails.
+ */
+int drm_stub_open(struct inode *inode, struct file *filp)
+{
+	struct drm_device *dev = NULL;
+	struct drm_minor *minor;
+	int minor_id = iminor(inode);
+	int err = -ENODEV;
+	const struct file_operations *old_fops;
+
+	DRM_DEBUG("\n");
+
+	/* BKL pushdown: note that nothing else serializes idr_find() */
+	lock_kernel();
+	minor = idr_find(&drm_minors_idr, minor_id);
+	if (!minor)
+		goto out;
+
+	if (!(dev = minor->dev))
+		goto out;
+
+	old_fops = filp->f_op;
+	filp->f_op = fops_get(&dev->driver->fops);
+	if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
+		fops_put(filp->f_op);
+		filp->f_op = fops_get(old_fops);
+	}
+	fops_put(old_fops);
+
+out:
+	unlock_kernel();
+	return err;
+}
+
+/**
+ * Check whether DRI will run on this CPU.
+ *
+ * \return non-zero if the DRI will run on this CPU, or zero otherwise.
+ */
+static int drm_cpu_valid(void)
+{
+#if defined(__i386__)
+	if (boot_cpu_data.x86 == 3)
+		return 0;	/* No cmpxchg on a 386 */
+#endif
+#if defined(__sparc__) && !defined(__sparc_v9__)
+	return 0;		/* No cmpxchg before v9 sparc. */
+#endif
+	return 1;
+}
+
+/**
+ * Called whenever a process opens /dev/drm.
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ * \param dev device.
+ * \return zero on success or a negative number on failure.
+ *
+ * Creates and initializes a drm_file structure for the file private data in
+ * \p filp and adds it to the doubly linked list in \p dev.
+ */
+static int drm_open_helper(struct inode *inode, struct file *filp,
+			   struct drm_device * dev)
+{
+	int minor_id = iminor(inode);
+	struct drm_file *priv;
+	int ret;
+
+	if (filp->f_flags & O_EXCL)
+		return -EBUSY;	/* No exclusive opens */
+	if (!drm_cpu_valid())
+		return -EINVAL;
+
+	DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
+
+	priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
+	if (!priv)
+		return -ENOMEM;
+
+	memset(priv, 0, sizeof(*priv));
+	filp->private_data = priv;
+	priv->filp = filp;
+	priv->uid = current->euid;
+	priv->pid = task_pid_nr(current);
+	priv->minor = idr_find(&drm_minors_idr, minor_id);
+	priv->ioctl_count = 0;
+	/* for compatibility root is always authenticated */
+	priv->authenticated = capable(CAP_SYS_ADMIN);
+	priv->lock_count = 0;
+
+	INIT_LIST_HEAD(&priv->lhead);
+
+	if (dev->driver->open) {
+		ret = dev->driver->open(dev, priv);
+		if (ret < 0)
+			goto out_free;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+	if (list_empty(&dev->filelist))
+		priv->master = 1;
+
+	list_add(&priv->lhead, &dev->filelist);
+	mutex_unlock(&dev->struct_mutex);
+
+#ifdef __alpha__
+	/*
+	 * Default the hose
+	 */
+	if (!dev->hose) {
+		struct pci_dev *pci_dev;
+		pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
+		if (pci_dev) {
+			dev->hose = pci_dev->sysdata;
+			pci_dev_put(pci_dev);
+		}
+		if (!dev->hose) {
+			struct pci_bus *b = pci_bus_b(pci_root_buses.next);
+			if (b)
+				dev->hose = b->sysdata;
+		}
+	}
+#endif
+
+	return 0;
+      out_free:
+	drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
+	filp->private_data = NULL;
+	return ret;
+}
+
+/** Set up or tear down SIGIO-based asynchronous notification for the file. */
+int drm_fasync(int fd, struct file *filp, int on)
+{
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	int retcode;
+
+	DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
+		  (long)old_encode_dev(priv->minor->device));
+	retcode = fasync_helper(fd, filp, on, &dev->buf_async);
+	if (retcode < 0)
+		return retcode;
+	return 0;
+}
+EXPORT_SYMBOL(drm_fasync);
+
+/**
+ * Release file.
+ *
+ * \param inode device inode
+ * \param file_priv DRM file private.
+ * \return zero on success or a negative number on failure.
+ *
+ * If the hardware lock is held then free it, and take it again for the kernel
+ * context since it's necessary to reclaim buffers. Unlink the file private
+ * data from its list and free it. Decreases the open count and if it reaches
+ * zero calls drm_lastclose().
+ */
+int drm_release(struct inode *inode, struct file *filp)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct drm_device *dev = file_priv->minor->dev;
+	int retcode = 0;
+
+	lock_kernel();
+
+	DRM_DEBUG("open_count = %d\n", dev->open_count);
+
+	if (dev->driver->preclose)
+		dev->driver->preclose(dev, file_priv);
+
+	/* ========================================================
+	 * Begin inline drm_release
+	 */
+
+	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
+		  task_pid_nr(current),
+		  (long)old_encode_dev(file_priv->minor->device),
+		  dev->open_count);
+
+	if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
+		if (drm_i_have_hw_lock(dev, file_priv)) {
+			dev->driver->reclaim_buffers_locked(dev, file_priv);
+		} else {
+			unsigned long endtime = jiffies + 3 * DRM_HZ;
+			int locked = 0;
+
+			drm_idlelock_take(&dev->lock);
+
+			/*
+			 * Wait for a while.
+			 */
+
+			do {
+				spin_lock_bh(&dev->lock.spinlock);
+				locked = dev->lock.idle_has_lock;
+				spin_unlock_bh(&dev->lock.spinlock);
+				if (locked)
+					break;
+				schedule();
+			} while (!time_after_eq(jiffies, endtime));
+
+			if (!locked) {
+				DRM_ERROR("reclaim_buffers_locked() deadlock. Please rework this\n"
+					  "\tdriver to use reclaim_buffers_idlelocked() instead.\n"
+					  "\tI will go on reclaiming the buffers anyway.\n");
+			}
+
+			dev->driver->reclaim_buffers_locked(dev, file_priv);
+			drm_idlelock_release(&dev->lock);
+		}
+	}
+
+	if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) {
+
+		drm_idlelock_take(&dev->lock);
+		dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
+		drm_idlelock_release(&dev->lock);
+
+	}
+
+	if (drm_i_have_hw_lock(dev, file_priv)) {
+		DRM_DEBUG("File %p released, freeing lock for context %d\n",
+			  filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+
+		drm_lock_free(&dev->lock,
+			      _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+	}
+
+	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+	    !dev->driver->reclaim_buffers_locked) {
+		dev->driver->reclaim_buffers(dev, file_priv);
+	}
+
+	drm_fasync(-1, filp, 0);
+
+	mutex_lock(&dev->ctxlist_mutex);
+	if (!list_empty(&dev->ctxlist)) {
+		struct drm_ctx_list *pos, *n;
+
+		list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
+			if (pos->tag == file_priv &&
+			    pos->handle != DRM_KERNEL_CONTEXT) {
+				if (dev->driver->context_dtor)
+					dev->driver->context_dtor(dev,
+								  pos->handle);
+
+				drm_ctxbitmap_free(dev, pos->handle);
+
+				list_del(&pos->head);
+				drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);
+				--dev->ctx_count;
+			}
+		}
+	}
+	mutex_unlock(&dev->ctxlist_mutex);
+
+	mutex_lock(&dev->struct_mutex);
+	if (file_priv->remove_auth_on_close == 1) {
+		struct drm_file *temp;
+
+		list_for_each_entry(temp, &dev->filelist, lhead)
+			temp->authenticated = 0;
+	}
+	list_del(&file_priv->lhead);
+	mutex_unlock(&dev->struct_mutex);
+
+	if (dev->driver->postclose)
+		dev->driver->postclose(dev, file_priv);
+	drm_free(file_priv, sizeof(*file_priv), DRM_MEM_FILES);
+
+	/* ========================================================
+	 * End inline drm_release
+	 */
+
+	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
+	spin_lock(&dev->count_lock);
+	if (!--dev->open_count) {
+		if (atomic_read(&dev->ioctl_count) || dev->blocked) {
+			DRM_ERROR("Device busy: %d %d\n",
+				  atomic_read(&dev->ioctl_count), dev->blocked);
+			spin_unlock(&dev->count_lock);
+			unlock_kernel();
+			return -EBUSY;
+		}
+		spin_unlock(&dev->count_lock);
+		unlock_kernel();
+		return drm_lastclose(dev);
+	}
+	spin_unlock(&dev->count_lock);
+
+	unlock_kernel();
+
+	return retcode;
+}
+EXPORT_SYMBOL(drm_release);
+
+/** No-op. */
+unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
+{
+	return 0;
+}
+EXPORT_SYMBOL(drm_poll);
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
new file mode 100644
index 000000000000..33160673a7b7
--- /dev/null
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -0,0 +1,202 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Simple open hash table implementation.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+#include "drm_hashtab.h"
+#include <linux/hash.h>
+
+int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
+{
+	unsigned int i;
+
+	ht->size = 1 << order;
+	ht->order = order;
+	ht->fill = 0;
+	ht->table = NULL;
+	ht->use_vmalloc = ((ht->size * sizeof(*ht->table)) > PAGE_SIZE);
+	if (!ht->use_vmalloc) {
+		ht->table = drm_calloc(ht->size, sizeof(*ht->table),
+				       DRM_MEM_HASHTAB);
+	}
+	if (!ht->table) {
+		ht->use_vmalloc = 1;
+		ht->table = vmalloc(ht->size * sizeof(*ht->table));
+	}
+	if (!ht->table) {
+		DRM_ERROR("Out of memory for hash table\n");
+		return -ENOMEM;
+	}
+	for (i = 0; i < ht->size; ++i) {
+		INIT_HLIST_HEAD(&ht->table[i]);
+	}
+	return 0;
+}
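+
+/*
+ * Sizing note: with the threshold above, and assuming 4 KiB pages and 8-byte
+ * pointers, tables of up to 512 buckets (order 9, exactly one page) come from
+ * drm_calloc(), while order 10 (1024 buckets, 8 KiB) and larger fall back to
+ * vmalloc().
+ */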
+
+void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
+{
+	struct drm_hash_item *entry;
+	struct hlist_head *h_list;
+	struct hlist_node *list;
+	unsigned int hashed_key;
+	int count = 0;
+
+	hashed_key = hash_long(key, ht->order);
+	DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
+	h_list = &ht->table[hashed_key];
+	hlist_for_each(list, h_list) {
+		entry = hlist_entry(list, struct drm_hash_item, head);
+		DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
+	}
+}
+
+static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
+					  unsigned long key)
+{
+	struct drm_hash_item *entry;
+	struct hlist_head *h_list;
+	struct hlist_node *list;
+	unsigned int hashed_key;
+
+	hashed_key = hash_long(key, ht->order);
+	h_list = &ht->table[hashed_key];
+	hlist_for_each(list, h_list) {
+		entry = hlist_entry(list, struct drm_hash_item, head);
+		if (entry->key == key)
+			return list;
+		if (entry->key > key)
+			break;
+	}
+	return NULL;
+}
+
+
+int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
+{
+	struct drm_hash_item *entry;
+	struct hlist_head *h_list;
+	struct hlist_node *list, *parent;
+	unsigned int hashed_key;
+	unsigned long key = item->key;
+
+	hashed_key = hash_long(key, ht->order);
+	h_list = &ht->table[hashed_key];
+	parent = NULL;
+	hlist_for_each(list, h_list) {
+		entry = hlist_entry(list, struct drm_hash_item, head);
+		if (entry->key == key)
+			return -EINVAL;
+		if (entry->key > key)
+			break;
+		parent = list;
+	}
+	if (parent) {
+		hlist_add_after(parent, &item->head);
+	} else {
+		hlist_add_head(&item->head, h_list);
+	}
+	return 0;
+}
+
+/*
+ * Insert an item and assign it an unused key of "bits" bits, derived from
+ * the hashed seed and adjusted by "shift" and "add".
+ */
+int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
+			      unsigned long seed, int bits, int shift,
+			      unsigned long add)
+{
+	int ret;
+	unsigned long mask = (1 << bits) - 1;
+	unsigned long first, unshifted_key;
+
+	unshifted_key = hash_long(seed, bits);
+	first = unshifted_key;
+	do {
+		item->key = (unshifted_key << shift) + add;
+		ret = drm_ht_insert_item(ht, item);
+		if (ret)
+			unshifted_key = (unshifted_key + 1) & mask;
+	} while (ret && (unshifted_key != first));
+
+	if (ret) {
+		DRM_ERROR("Available key bit space exhausted\n");
+		return -EINVAL;
+	}
+	return 0;
+}
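+
+/*
+ * Usage sketch: the magic-cookie bookkeeping set up in drm_fops.c creates its
+ * table with drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER) and then
+ * pairs the insert/find/remove helpers on a drm_hash_item embedded in its own
+ * structure.  The snippet below is illustrative rather than a verbatim caller;
+ * drm_ht_insert_item() returns -EINVAL if the key is already in use.
+ *
+ *	struct drm_magic_entry *pt;
+ *	struct drm_hash_item *hash;
+ *
+ *	pt->hash_item.key = magic;
+ *	ret = drm_ht_insert_item(&dev->magiclist, &pt->hash_item);
+ *	...
+ *	if (!drm_ht_find_item(&dev->magiclist, magic, &hash))
+ *		pt = container_of(hash, struct drm_magic_entry, hash_item);
+ */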
+
+int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
+		     struct drm_hash_item **item)
+{
+	struct hlist_node *list;
+
+	list = drm_ht_find_key(ht, key);
+	if (!list)
+		return -EINVAL;
+
+	*item = hlist_entry(list, struct drm_hash_item, head);
+	return 0;
+}
+
+int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
+{
+	struct hlist_node *list;
+
+	list = drm_ht_find_key(ht, key);
+	if (list) {
+		hlist_del_init(list);
+		ht->fill--;
+		return 0;
+	}
+	return -EINVAL;
+}
+
+int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
+{
+	hlist_del_init(&item->head);
+	ht->fill--;
+	return 0;
+}
+
+void drm_ht_remove(struct drm_open_hash *ht)
+{
+	if (ht->table) {
+		if (ht->use_vmalloc)
+			vfree(ht->table);
+		else
+			drm_free(ht->table, ht->size * sizeof(*ht->table),
+				 DRM_MEM_HASHTAB);
+		ht->table = NULL;
+	}
+}
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
new file mode 100644
index 000000000000..90f5a8d9bdcb
--- /dev/null
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -0,0 +1,1073 @@
+/**
+ * \file drm_ioc32.c
+ *
+ * 32-bit ioctl compatibility routines for the DRM.
+ *
+ * \author Paul Mackerras <paulus@samba.org>
+ *
+ * Copyright (C) Paul Mackerras 2005.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <linux/compat.h>
+
+#include "drmP.h"
+#include "drm_core.h"
+
+#define DRM_IOCTL_VERSION32		DRM_IOWR(0x00, drm_version32_t)
+#define DRM_IOCTL_GET_UNIQUE32		DRM_IOWR(0x01, drm_unique32_t)
+#define DRM_IOCTL_GET_MAP32		DRM_IOWR(0x04, drm_map32_t)
+#define DRM_IOCTL_GET_CLIENT32		DRM_IOWR(0x05, drm_client32_t)
+#define DRM_IOCTL_GET_STATS32		DRM_IOR( 0x06, drm_stats32_t)
+
+#define DRM_IOCTL_SET_UNIQUE32		DRM_IOW( 0x10, drm_unique32_t)
+#define DRM_IOCTL_ADD_MAP32		DRM_IOWR(0x15, drm_map32_t)
+#define DRM_IOCTL_ADD_BUFS32		DRM_IOWR(0x16, drm_buf_desc32_t)
+#define DRM_IOCTL_MARK_BUFS32		DRM_IOW( 0x17, drm_buf_desc32_t)
+#define DRM_IOCTL_INFO_BUFS32		DRM_IOWR(0x18, drm_buf_info32_t)
+#define DRM_IOCTL_MAP_BUFS32		DRM_IOWR(0x19, drm_buf_map32_t)
+#define DRM_IOCTL_FREE_BUFS32		DRM_IOW( 0x1a, drm_buf_free32_t)
+
+#define DRM_IOCTL_RM_MAP32		DRM_IOW( 0x1b, drm_map32_t)
+
+#define DRM_IOCTL_SET_SAREA_CTX32	DRM_IOW( 0x1c, drm_ctx_priv_map32_t)
+#define DRM_IOCTL_GET_SAREA_CTX32	DRM_IOWR(0x1d, drm_ctx_priv_map32_t)
+
+#define DRM_IOCTL_RES_CTX32		DRM_IOWR(0x26, drm_ctx_res32_t)
+#define DRM_IOCTL_DMA32			DRM_IOWR(0x29, drm_dma32_t)
+
+#define DRM_IOCTL_AGP_ENABLE32		DRM_IOW( 0x32, drm_agp_mode32_t)
+#define DRM_IOCTL_AGP_INFO32		DRM_IOR( 0x33, drm_agp_info32_t)
+#define DRM_IOCTL_AGP_ALLOC32		DRM_IOWR(0x34, drm_agp_buffer32_t)
+#define DRM_IOCTL_AGP_FREE32		DRM_IOW( 0x35, drm_agp_buffer32_t)
+#define DRM_IOCTL_AGP_BIND32		DRM_IOW( 0x36, drm_agp_binding32_t)
+#define DRM_IOCTL_AGP_UNBIND32		DRM_IOW( 0x37, drm_agp_binding32_t)
+
+#define DRM_IOCTL_SG_ALLOC32		DRM_IOW( 0x38, drm_scatter_gather32_t)
+#define DRM_IOCTL_SG_FREE32		DRM_IOW( 0x39, drm_scatter_gather32_t)
+
+#define DRM_IOCTL_WAIT_VBLANK32		DRM_IOWR(0x3a, drm_wait_vblank32_t)
+
+typedef struct drm_version_32 {
+	int version_major;	  /**< Major version */
+	int version_minor;	  /**< Minor version */
+	int version_patchlevel;	   /**< Patch level */
+	u32 name_len;		  /**< Length of name buffer */
+	u32 name;		  /**< Name of driver */
+	u32 date_len;		  /**< Length of date buffer */
+	u32 date;		  /**< User-space buffer to hold date */
+	u32 desc_len;		  /**< Length of desc buffer */
+	u32 desc;		  /**< User-space buffer to hold desc */
+} drm_version32_t;
+
+static int compat_drm_version(struct file *file, unsigned int cmd,
+			      unsigned long arg)
+{
+	drm_version32_t v32;
+	struct drm_version __user *version;
+	int err;
+
+	if (copy_from_user(&v32, (void __user *)arg, sizeof(v32)))
+		return -EFAULT;
+
+	version = compat_alloc_user_space(sizeof(*version));
+	if (!access_ok(VERIFY_WRITE, version, sizeof(*version)))
+		return -EFAULT;
+	if (__put_user(v32.name_len, &version->name_len)
+	    || __put_user((void __user *)(unsigned long)v32.name,
+			  &version->name)
+	    || __put_user(v32.date_len, &version->date_len)
+	    || __put_user((void __user *)(unsigned long)v32.date,
+			  &version->date)
+	    || __put_user(v32.desc_len, &version->desc_len)
+	    || __put_user((void __user *)(unsigned long)v32.desc,
+			  &version->desc))
+		return -EFAULT;
+
+	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+			DRM_IOCTL_VERSION, (unsigned long)version);
+	if (err)
+		return err;
+
+	if (__get_user(v32.version_major, &version->version_major)
+	    || __get_user(v32.version_minor, &version->version_minor)
+	    || __get_user(v32.version_patchlevel, &version->version_patchlevel)
+	    || __get_user(v32.name_len, &version->name_len)
+	    || __get_user(v32.date_len, &version->date_len)
+	    || __get_user(v32.desc_len, &version->desc_len))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &v32, sizeof(v32)))
+		return -EFAULT;
+	return 0;
+}
+
+typedef struct drm_unique32 {
+	u32 unique_len;	/**< Length of unique */
+	u32 unique;	/**< Unique name for driver instantiation */
+} drm_unique32_t;
+
+static int compat_drm_getunique(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	drm_unique32_t uq32;
+	struct drm_unique __user *u;
+	int err;
+
+	if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
+		return -EFAULT;
+
+	u = compat_alloc_user_space(sizeof(*u));
+	if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
+		return -EFAULT;
+	if (__put_user(uq32.unique_len, &u->unique_len)
+	    || __put_user((void __user *)(unsigned long)uq32.unique,
+			  &u->unique))
+		return -EFAULT;
+
+	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+			DRM_IOCTL_GET_UNIQUE, (unsigned long)u);
+	if (err)
+		return err;
+
+	if (__get_user(uq32.unique_len, &u->unique_len))
+		return -EFAULT;
+	if (copy_to_user((void __user *)arg, &uq32, sizeof(uq32)))
+		return -EFAULT;
+	return 0;
+}
+
+static int compat_drm_setunique(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	drm_unique32_t uq32;
+	struct drm_unique __user *u;
+
+	if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
+		return -EFAULT;
+
+	u = compat_alloc_user_space(sizeof(*u));
+	if (!access_ok(VERIFY_WRITE, u, sizeof(*u)))
+		return -EFAULT;
+	if (__put_user(uq32.unique_len, &u->unique_len)
+	    || __put_user((void __user *)(unsigned long)uq32.unique,
+			  &u->unique))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_SET_UNIQUE, (unsigned long)u);
+}
+
+typedef struct drm_map32 {
+	u32 offset;		/**< Requested physical address (0 for SAREA)*/
+	u32 size;		/**< Requested physical size (bytes) */
+	enum drm_map_type type;	/**< Type of memory to map */
+	enum drm_map_flags flags;	/**< Flags */
+	u32 handle;		/**< User-space: "Handle" to pass to mmap() */
+	int mtrr;		/**< MTRR slot used */
+} drm_map32_t;
+
+static int compat_drm_getmap(struct file *file, unsigned int cmd,
+			     unsigned long arg)
+{
+	drm_map32_t __user *argp = (void __user *)arg;
+	drm_map32_t m32;
+	struct drm_map __user *map;
+	int idx, err;
+	void *handle;
+
+	if (get_user(idx, &argp->offset))
+		return -EFAULT;
+
+	map = compat_alloc_user_space(sizeof(*map));
+	if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+		return -EFAULT;
+	if (__put_user(idx, &map->offset))
+		return -EFAULT;
+
+	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+			DRM_IOCTL_GET_MAP, (unsigned long)map);
+	if (err)
+		return err;
+
+	if (__get_user(m32.offset, &map->offset)
+	    || __get_user(m32.size, &map->size)
+	    || __get_user(m32.type, &map->type)
+	    || __get_user(m32.flags, &map->flags)
+	    || __get_user(handle, &map->handle)
+	    || __get_user(m32.mtrr, &map->mtrr))
+		return -EFAULT;
+
+	m32.handle = (unsigned long)handle;
+	if (copy_to_user(argp, &m32, sizeof(m32)))
+		return -EFAULT;
+	return 0;
+
+}
+
+static int compat_drm_addmap(struct file *file, unsigned int cmd,
+			     unsigned long arg)
+{
+	drm_map32_t __user *argp = (void __user *)arg;
+	drm_map32_t m32;
+	struct drm_map __user *map;
+	int err;
+	void *handle;
+
+	if (copy_from_user(&m32, argp, sizeof(m32)))
+		return -EFAULT;
+
+	map = compat_alloc_user_space(sizeof(*map));
+	if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+		return -EFAULT;
+	if (__put_user(m32.offset, &map->offset)
+	    || __put_user(m32.size, &map->size)
+	    || __put_user(m32.type, &map->type)
+	    || __put_user(m32.flags, &map->flags))
+		return -EFAULT;
+
+	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+			DRM_IOCTL_ADD_MAP, (unsigned long)map);
+	if (err)
+		return err;
+
+	if (__get_user(m32.offset, &map->offset)
+	    || __get_user(m32.mtrr, &map->mtrr)
+	    || __get_user(handle, &map->handle))
+		return -EFAULT;
+
+	m32.handle = (unsigned long)handle;
+	if (m32.handle != (unsigned long)handle && printk_ratelimit())
+		printk(KERN_ERR "compat_drm_addmap truncated handle"
+		       " %p for type %d offset %x\n",
+		       handle, m32.type, m32.offset);
+
+	if (copy_to_user(argp, &m32, sizeof(m32)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int compat_drm_rmmap(struct file *file, unsigned int cmd,
+			    unsigned long arg)
+{
+	drm_map32_t __user *argp = (void __user *)arg;
+	struct drm_map __user *map;
+	u32 handle;
+
+	if (get_user(handle, &argp->handle))
+		return -EFAULT;
+
+	map = compat_alloc_user_space(sizeof(*map));
+	if (!access_ok(VERIFY_WRITE, map, sizeof(*map)))
+		return -EFAULT;
+	if (__put_user((void *)(unsigned long)handle, &map->handle))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_RM_MAP, (unsigned long)map);
+}
+
+typedef struct drm_client32 {
+	int idx;	/**< Which client desired? */
+	int auth;	/**< Is client authenticated? */
+	u32 pid;	/**< Process ID */
+	u32 uid;	/**< User ID */
+	u32 magic;	/**< Magic */
+	u32 iocs;	/**< Ioctl count */
+} drm_client32_t;
+
+static int compat_drm_getclient(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	drm_client32_t c32;
+	drm_client32_t __user *argp = (void __user *)arg;
+	struct drm_client __user *client;
+	int idx, err;
+
+	if (get_user(idx, &argp->idx))
+		return -EFAULT;
+
+	client = compat_alloc_user_space(sizeof(*client));
+	if (!access_ok(VERIFY_WRITE, client, sizeof(*client)))
+		return -EFAULT;
+	if (__put_user(idx, &client->idx))
+		return -EFAULT;
+
+	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+			DRM_IOCTL_GET_CLIENT, (unsigned long)client);
+	if (err)
+		return err;
+
+	if (__get_user(c32.auth, &client->auth)
+	    || __get_user(c32.pid, &client->pid)
+	    || __get_user(c32.uid, &client->uid)
+	    || __get_user(c32.magic, &client->magic)
+	    || __get_user(c32.iocs, &client->iocs))
+		return -EFAULT;
+
+	if (copy_to_user(argp, &c32, sizeof(c32)))
+		return -EFAULT;
+	return 0;
+}
+
+typedef struct drm_stats32 {
+	u32 count;
+	struct {
+		u32 value;
+		enum drm_stat_type type;
+	} data[15];
+} drm_stats32_t;
+
+static int compat_drm_getstats(struct file *file, unsigned int cmd,
+			       unsigned long arg)
+{
+	drm_stats32_t s32;
+	drm_stats32_t __user *argp = (void __user *)arg;
+	struct drm_stats __user *stats;
+	int i, err;
+
+	stats = compat_alloc_user_space(sizeof(*stats));
+	if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats)))
+		return -EFAULT;
+
+	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+			DRM_IOCTL_GET_STATS, (unsigned long)stats);
+	if (err)
+		return err;
+
+	if (__get_user(s32.count, &stats->count))
+		return -EFAULT;
+	for (i = 0; i < 15; ++i)
+		if (__get_user(s32.data[i].value, &stats->data[i].value)
+		    || __get_user(s32.data[i].type, &stats->data[i].type))
+			return -EFAULT;
+
+	if (copy_to_user(argp, &s32, sizeof(s32)))
+		return -EFAULT;
+	return 0;
+}
+
+typedef struct drm_buf_desc32 {
+	int count;		 /**< Number of buffers of this size */
+	int size;		 /**< Size in bytes */
+	int low_mark;		 /**< Low water mark */
+	int high_mark;		 /**< High water mark */
+	int flags;
+	u32 agp_start;		 /**< Start address in the AGP aperture */
+} drm_buf_desc32_t;
+
+static int compat_drm_addbufs(struct file *file, unsigned int cmd,
+			      unsigned long arg)
+{
+	drm_buf_desc32_t __user *argp = (void __user *)arg;
+	struct drm_buf_desc __user *buf;
+	int err;
+	unsigned long agp_start;
+
+	buf = compat_alloc_user_space(sizeof(*buf));
+	if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf))
+	    || !access_ok(VERIFY_WRITE, argp, sizeof(*argp)))
+		return -EFAULT;
+
+	if (__copy_in_user(buf, argp, offsetof(drm_buf_desc32_t, agp_start))
+	    || __get_user(agp_start, &argp->agp_start)
+	    || __put_user(agp_start, &buf->agp_start))
+		return -EFAULT;
+
+	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+			DRM_IOCTL_ADD_BUFS, (unsigned long)buf);
+	if (err)
+		return err;
+
+	if (__copy_in_user(argp, buf, offsetof(drm_buf_desc32_t, agp_start))
+	    || __get_user(agp_start, &buf->agp_start)
+	    || __put_user(agp_start, &argp->agp_start))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int compat_drm_markbufs(struct file *file, unsigned int cmd,
+			       unsigned long arg)
+{
+	drm_buf_desc32_t b32;
+	drm_buf_desc32_t __user *argp = (void __user *)arg;
+	struct drm_buf_desc __user *buf;
+
+	if (copy_from_user(&b32, argp, sizeof(b32)))
+		return -EFAULT;
+
+	buf = compat_alloc_user_space(sizeof(*buf));
+	if (!access_ok(VERIFY_WRITE, buf, sizeof(*buf)))
+		return -EFAULT;
+
+	if (__put_user(b32.size, &buf->size)
+	    || __put_user(b32.low_mark, &buf->low_mark)
+	    || __put_user(b32.high_mark, &buf->high_mark))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_MARK_BUFS, (unsigned long)buf);
+}
+
+typedef struct drm_buf_info32 {
+	int count;		/**< Entries in list */
+	u32 list;
+} drm_buf_info32_t;
+
+static int compat_drm_infobufs(struct file *file, unsigned int cmd,
+			       unsigned long arg)
+{
+	drm_buf_info32_t req32;
+	drm_buf_info32_t __user *argp = (void __user *)arg;
+	drm_buf_desc32_t __user *to;
+	struct drm_buf_info __user *request;
+	struct drm_buf_desc __user *list;
+	size_t nbytes;
+	int i, err;
+	int count, actual;
+
+	if (copy_from_user(&req32, argp, sizeof(req32)))
+		return -EFAULT;
+
+	count = req32.count;
+	to = (drm_buf_desc32_t __user *) (unsigned long)req32.list;
+	if (count < 0)
+		count = 0;
+	if (count > 0
+	    && !access_ok(VERIFY_WRITE, to, count * sizeof(drm_buf_desc32_t)))
+		return -EFAULT;
+
+	nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc);
+	request = compat_alloc_user_space(nbytes);
+	if (!access_ok(VERIFY_WRITE, request, nbytes))
+		return -EFAULT;
+	list = (struct drm_buf_desc *) (request + 1);
+
+	if (__put_user(count, &request->count)
+	    || __put_user(list, &request->list))
+		return -EFAULT;
+
+	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+			DRM_IOCTL_INFO_BUFS, (unsigned long)request);
+	if (err)
+		return err;
+
+	if (__get_user(actual, &request->count))
+		return -EFAULT;
+	if (count >= actual)
+		for (i = 0; i < actual; ++i)
+			if (__copy_in_user(&to[i], &list[i],
+					   offsetof(struct drm_buf_desc, flags)))
+				return -EFAULT;
+
+	if (__put_user(actual, &argp->count))
+		return -EFAULT;
+
+	return 0;
+}
+
+typedef struct drm_buf_pub32 {
+	int idx;		/**< Index into the master buffer list */
+	int total;		/**< Buffer size */
+	int used;		/**< Amount of buffer in use (for DMA) */
+	u32 address;		/**< Address of buffer */
+} drm_buf_pub32_t;
+
+typedef struct drm_buf_map32 {
+	int count;		/**< Length of the buffer list */
+	u32 virtual;		/**< Mmap'd area in user-virtual */
+	u32 list;		/**< Buffer information */
+} drm_buf_map32_t;
+
+static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
+			      unsigned long arg)
+{
+	drm_buf_map32_t __user *argp = (void __user *)arg;
+	drm_buf_map32_t req32;
+	drm_buf_pub32_t __user *list32;
+	struct drm_buf_map __user *request;
+	struct drm_buf_pub __user *list;
+	int i, err;
+	int count, actual;
+	size_t nbytes;
+	void __user *addr;
+
+	if (copy_from_user(&req32, argp, sizeof(req32)))
+		return -EFAULT;
+	count = req32.count;
+	list32 = (void __user *)(unsigned long)req32.list;
+
+	if (count < 0)
+		return -EINVAL;
+	nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub);
+	request = compat_alloc_user_space(nbytes);
+	if (!access_ok(VERIFY_WRITE, request, nbytes))
+		return -EFAULT;
+	list = (struct drm_buf_pub *) (request + 1);
+
+	if (__put_user(count, &request->count)
+	    || __put_user(list, &request->list))
+		return -EFAULT;
+
+	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+			DRM_IOCTL_MAP_BUFS, (unsigned long)request);
+	if (err)
+		return err;
+
+	if (__get_user(actual, &request->count))
+		return -EFAULT;
+	if (count >= actual)
+		for (i = 0; i < actual; ++i)
+			if (__copy_in_user(&list32[i], &list[i],
+					   offsetof(struct drm_buf_pub, address))
+			    || __get_user(addr, &list[i].address)
+			    || __put_user((unsigned long)addr,
+					  &list32[i].address))
+				return -EFAULT;
+
+	if (__put_user(actual, &argp->count)
+	    || __get_user(addr, &request->virtual)
+	    || __put_user((unsigned long)addr, &argp->virtual))
+		return -EFAULT;
+
+	return 0;
+}
+
+typedef struct drm_buf_free32 {
+	int count;
+	u32 list;
+} drm_buf_free32_t;
+
+static int compat_drm_freebufs(struct file *file, unsigned int cmd,
+			       unsigned long arg)
+{
+	drm_buf_free32_t req32;
+	struct drm_buf_free __user *request;
+	drm_buf_free32_t __user *argp = (void __user *)arg;
+
+	if (copy_from_user(&req32, argp, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+		return -EFAULT;
+	if (__put_user(req32.count, &request->count)
+	    || __put_user((int __user *)(unsigned long)req32.list,
+			  &request->list))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_FREE_BUFS, (unsigned long)request);
+}
+
+typedef struct drm_ctx_priv_map32 {
+	unsigned int ctx_id;	 /**< Context requesting private mapping */
+	u32 handle;		/**< Handle of map */
+} drm_ctx_priv_map32_t;
+
+static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
+				  unsigned long arg)
+{
+	drm_ctx_priv_map32_t req32;
+	struct drm_ctx_priv_map __user *request;
+	drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
+
+	if (copy_from_user(&req32, argp, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+		return -EFAULT;
+	if (__put_user(req32.ctx_id, &request->ctx_id)
+	    || __put_user((void *)(unsigned long)req32.handle,
+			  &request->handle))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request);
+}
+
+static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
+				  unsigned long arg)
+{
+	struct drm_ctx_priv_map __user *request;
+	drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
+	int err;
+	unsigned int ctx_id;
+	void *handle;
+
+	if (!access_ok(VERIFY_WRITE, argp, sizeof(*argp))
+	    || __get_user(ctx_id, &argp->ctx_id))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
+		return -EFAULT;
+	if (__put_user(ctx_id, &request->ctx_id))
+		return -EFAULT;
+
+	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+			DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request);
+	if (err)
+		return err;
+
+	if (__get_user(handle, &request->handle)
+	    || __put_user((unsigned long)handle, &argp->handle))
+		return -EFAULT;
+
+	return 0;
+}
+
+typedef struct drm_ctx_res32 {
+	int count;
+	u32 contexts;
+} drm_ctx_res32_t;
+
+static int compat_drm_resctx(struct file *file, unsigned int cmd,
+			     unsigned long arg)
+{
+	drm_ctx_res32_t __user *argp = (void __user *)arg;
+	drm_ctx_res32_t res32;
+	struct drm_ctx_res __user *res;
+	int err;
+
+	if (copy_from_user(&res32, argp, sizeof(res32)))
+		return -EFAULT;
+
+	res = compat_alloc_user_space(sizeof(*res));
+	if (!access_ok(VERIFY_WRITE, res, sizeof(*res)))
+		return -EFAULT;
+	if (__put_user(res32.count, &res->count)
+	    || __put_user((struct drm_ctx __user *) (unsigned long)res32.contexts,
+			  &res->contexts))
+		return -EFAULT;
+
+	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+			DRM_IOCTL_RES_CTX, (unsigned long)res);
+	if (err)
+		return err;
+
+	if (__get_user(res32.count, &res->count)
+	    || __put_user(res32.count, &argp->count))
+		return -EFAULT;
+
+	return 0;
+}
+
+typedef struct drm_dma32 {
+	int context;		  /**< Context handle */
+	int send_count;		  /**< Number of buffers to send */
+	u32 send_indices;	  /**< List of handles to buffers */
+	u32 send_sizes;		  /**< Lengths of data to send */
+	enum drm_dma_flags flags;		  /**< Flags */
+	int request_count;	  /**< Number of buffers requested */
+	int request_size;	  /**< Desired size for buffers */
+	u32 request_indices;	  /**< Buffer information */
+	u32 request_sizes;
+	int granted_count;	  /**< Number of buffers granted */
+} drm_dma32_t;
+
+static int compat_drm_dma(struct file *file, unsigned int cmd,
+			  unsigned long arg)
+{
+	drm_dma32_t d32;
+	drm_dma32_t __user *argp = (void __user *)arg;
+	struct drm_dma __user *d;
+	int err;
+
+	if (copy_from_user(&d32, argp, sizeof(d32)))
+		return -EFAULT;
+
+	d = compat_alloc_user_space(sizeof(*d));
+	if (!access_ok(VERIFY_WRITE, d, sizeof(*d)))
+		return -EFAULT;
+
+	if (__put_user(d32.context, &d->context)
+	    || __put_user(d32.send_count, &d->send_count)
+	    || __put_user((int __user *)(unsigned long)d32.send_indices,
+			  &d->send_indices)
+	    || __put_user((int __user *)(unsigned long)d32.send_sizes,
+			  &d->send_sizes)
+	    || __put_user(d32.flags, &d->flags)
+	    || __put_user(d32.request_count, &d->request_count)
+	    || __put_user((int __user *)(unsigned long)d32.request_indices,
+			  &d->request_indices)
+	    || __put_user((int __user *)(unsigned long)d32.request_sizes,
+			  &d->request_sizes))
+		return -EFAULT;
+
+	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+			DRM_IOCTL_DMA, (unsigned long)d);
+	if (err)
+		return err;
+
+	if (__get_user(d32.request_size, &d->request_size)
+	    || __get_user(d32.granted_count, &d->granted_count)
+	    || __put_user(d32.request_size, &argp->request_size)
+	    || __put_user(d32.granted_count, &argp->granted_count))
+		return -EFAULT;
+
+	return 0;
+}
+
+#if __OS_HAS_AGP
+typedef struct drm_agp_mode32 {
+	u32 mode;	/**< AGP mode */
+} drm_agp_mode32_t;
+
+static int compat_drm_agp_enable(struct file *file, unsigned int cmd,
+				 unsigned long arg)
+{
+	drm_agp_mode32_t __user *argp = (void __user *)arg;
+	drm_agp_mode32_t m32;
+	struct drm_agp_mode __user *mode;
+
+	if (get_user(m32.mode, &argp->mode))
+		return -EFAULT;
+
+	mode = compat_alloc_user_space(sizeof(*mode));
+	if (put_user(m32.mode, &mode->mode))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_AGP_ENABLE, (unsigned long)mode);
+}
+
+typedef struct drm_agp_info32 {
+	int agp_version_major;
+	int agp_version_minor;
+	u32 mode;
+	u32 aperture_base;	/* physical address */
+	u32 aperture_size;	/* bytes */
+	u32 memory_allowed;	/* bytes */
+	u32 memory_used;
+
+	/* PCI information */
+	unsigned short id_vendor;
+	unsigned short id_device;
+} drm_agp_info32_t;
+
+static int compat_drm_agp_info(struct file *file, unsigned int cmd,
+			       unsigned long arg)
+{
+	drm_agp_info32_t __user *argp = (void __user *)arg;
+	drm_agp_info32_t i32;
+	struct drm_agp_info __user *info;
+	int err;
+
+	info = compat_alloc_user_space(sizeof(*info));
+	if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
+		return -EFAULT;
+
+	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+			DRM_IOCTL_AGP_INFO, (unsigned long)info);
+	if (err)
+		return err;
+
+	if (__get_user(i32.agp_version_major, &info->agp_version_major)
+	    || __get_user(i32.agp_version_minor, &info->agp_version_minor)
+	    || __get_user(i32.mode, &info->mode)
+	    || __get_user(i32.aperture_base, &info->aperture_base)
+	    || __get_user(i32.aperture_size, &info->aperture_size)
+	    || __get_user(i32.memory_allowed, &info->memory_allowed)
+	    || __get_user(i32.memory_used, &info->memory_used)
+	    || __get_user(i32.id_vendor, &info->id_vendor)
+	    || __get_user(i32.id_device, &info->id_device))
+		return -EFAULT;
+
+	if (copy_to_user(argp, &i32, sizeof(i32)))
+		return -EFAULT;
+
+	return 0;
+}
+
+typedef struct drm_agp_buffer32 {
+	u32 size;	/**< In bytes -- will round to page boundary */
+	u32 handle;	/**< Used for binding / unbinding */
+	u32 type;	/**< Type of memory to allocate */
+	u32 physical;	/**< Physical used by i810 */
+} drm_agp_buffer32_t;
+
+static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	drm_agp_buffer32_t __user *argp = (void __user *)arg;
+	drm_agp_buffer32_t req32;
+	struct drm_agp_buffer __user *request;
+	int err;
+
+	if (copy_from_user(&req32, argp, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user(req32.size, &request->size)
+	    || __put_user(req32.type, &request->type))
+		return -EFAULT;
+
+	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+			DRM_IOCTL_AGP_ALLOC, (unsigned long)request);
+	if (err)
+		return err;
+
+	if (__get_user(req32.handle, &request->handle)
+	    || __get_user(req32.physical, &request->physical)
+	    || copy_to_user(argp, &req32, sizeof(req32))) {
+		drm_ioctl(file->f_path.dentry->d_inode, file,
+			  DRM_IOCTL_AGP_FREE, (unsigned long)request);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int compat_drm_agp_free(struct file *file, unsigned int cmd,
+			       unsigned long arg)
+{
+	drm_agp_buffer32_t __user *argp = (void __user *)arg;
+	struct drm_agp_buffer __user *request;
+	u32 handle;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || get_user(handle, &argp->handle)
+	    || __put_user(handle, &request->handle))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_AGP_FREE, (unsigned long)request);
+}
+
+typedef struct drm_agp_binding32 {
+	u32 handle;	/**< From drm_agp_buffer */
+	u32 offset;	/**< In bytes -- will round to page boundary */
+} drm_agp_binding32_t;
+
+static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
+			       unsigned long arg)
+{
+	drm_agp_binding32_t __user *argp = (void __user *)arg;
+	drm_agp_binding32_t req32;
+	struct drm_agp_binding __user *request;
+
+	if (copy_from_user(&req32, argp, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user(req32.handle, &request->handle)
+	    || __put_user(req32.offset, &request->offset))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_AGP_BIND, (unsigned long)request);
+}
+
+static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
+				 unsigned long arg)
+{
+	drm_agp_binding32_t __user *argp = (void __user *)arg;
+	struct drm_agp_binding __user *request;
+	u32 handle;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || get_user(handle, &argp->handle)
+	    || __put_user(handle, &request->handle))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_AGP_UNBIND, (unsigned long)request);
+}
+#endif				/* __OS_HAS_AGP */
+
+typedef struct drm_scatter_gather32 {
+	u32 size;	/**< In bytes -- will round to page boundary */
+	u32 handle;	/**< Used for mapping / unmapping */
+} drm_scatter_gather32_t;
+
+static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
+			       unsigned long arg)
+{
+	drm_scatter_gather32_t __user *argp = (void __user *)arg;
+	struct drm_scatter_gather __user *request;
+	int err;
+	unsigned long x;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
+	    || __get_user(x, &argp->size)
+	    || __put_user(x, &request->size))
+		return -EFAULT;
+
+	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+			DRM_IOCTL_SG_ALLOC, (unsigned long)request);
+	if (err)
+		return err;
+
+	/* XXX not sure about the handle conversion here... */
+	if (__get_user(x, &request->handle)
+	    || __put_user(x >> PAGE_SHIFT, &argp->handle))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int compat_drm_sg_free(struct file *file, unsigned int cmd,
+			      unsigned long arg)
+{
+	drm_scatter_gather32_t __user *argp = (void __user *)arg;
+	struct drm_scatter_gather __user *request;
+	unsigned long x;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || !access_ok(VERIFY_WRITE, argp, sizeof(*argp))
+	    || __get_user(x, &argp->handle)
+	    || __put_user(x << PAGE_SHIFT, &request->handle))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_SG_FREE, (unsigned long)request);
+}
+
+struct drm_wait_vblank_request32 {
+	enum drm_vblank_seq_type type;
+	unsigned int sequence;
+	u32 signal;
+};
+
+struct drm_wait_vblank_reply32 {
+	enum drm_vblank_seq_type type;
+	unsigned int sequence;
+	s32 tval_sec;
+	s32 tval_usec;
+};
+
+typedef union drm_wait_vblank32 {
+	struct drm_wait_vblank_request32 request;
+	struct drm_wait_vblank_reply32 reply;
+} drm_wait_vblank32_t;
+
+static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
+				  unsigned long arg)
+{
+	drm_wait_vblank32_t __user *argp = (void __user *)arg;
+	drm_wait_vblank32_t req32;
+	union drm_wait_vblank __user *request;
+	int err;
+
+	if (copy_from_user(&req32, argp, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user(req32.request.type, &request->request.type)
+	    || __put_user(req32.request.sequence, &request->request.sequence)
+	    || __put_user(req32.request.signal, &request->request.signal))
+		return -EFAULT;
+
+	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+			DRM_IOCTL_WAIT_VBLANK, (unsigned long)request);
+	if (err)
+		return err;
+
+	if (__get_user(req32.reply.type, &request->reply.type)
+	    || __get_user(req32.reply.sequence, &request->reply.sequence)
+	    || __get_user(req32.reply.tval_sec, &request->reply.tval_sec)
+	    || __get_user(req32.reply.tval_usec, &request->reply.tval_usec))
+		return -EFAULT;
+
+	if (copy_to_user(argp, &req32, sizeof(req32)))
+		return -EFAULT;
+
+	return 0;
+}
+
+drm_ioctl_compat_t *drm_compat_ioctls[] = {
+	[DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
+	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
+	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
+	[DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT32)] = compat_drm_getclient,
+	[DRM_IOCTL_NR(DRM_IOCTL_GET_STATS32)] = compat_drm_getstats,
+	[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE32)] = compat_drm_setunique,
+	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP32)] = compat_drm_addmap,
+	[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS32)] = compat_drm_addbufs,
+	[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS32)] = compat_drm_markbufs,
+	[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS32)] = compat_drm_infobufs,
+	[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS32)] = compat_drm_mapbufs,
+	[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS32)] = compat_drm_freebufs,
+	[DRM_IOCTL_NR(DRM_IOCTL_RM_MAP32)] = compat_drm_rmmap,
+	[DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX32)] = compat_drm_setsareactx,
+	[DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX32)] = compat_drm_getsareactx,
+	[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX32)] = compat_drm_resctx,
+	[DRM_IOCTL_NR(DRM_IOCTL_DMA32)] = compat_drm_dma,
+#if __OS_HAS_AGP
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE32)] = compat_drm_agp_enable,
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO32)] = compat_drm_agp_info,
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC32)] = compat_drm_agp_alloc,
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE32)] = compat_drm_agp_free,
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND32)] = compat_drm_agp_bind,
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND32)] = compat_drm_agp_unbind,
+#endif
+	[DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC32)] = compat_drm_sg_alloc,
+	[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE32)] = compat_drm_sg_free,
+	[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK32)] = compat_drm_wait_vblank,
+};
+
+/**
+ * Called whenever a 32-bit process running under a 64-bit kernel
+ * performs an ioctl on /dev/drm.
+ *
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ */
+long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+	drm_ioctl_compat_t *fn;
+	int ret;
+
+	/* Assume that ioctls without an explicit compat routine will just
+	 * work.  This may not always be a good assumption, but it's better
+	 * than always failing.
+	 */
+	if (nr >= ARRAY_SIZE(drm_compat_ioctls))
+		return drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
+
+	fn = drm_compat_ioctls[nr];
+
+	lock_kernel();		/* XXX for now */
+	if (fn != NULL)
+		ret = (*fn) (filp, cmd, arg);
+	else
+		ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
+	unlock_kernel();
+
+	return ret;
+}
+
+EXPORT_SYMBOL(drm_compat_ioctl);
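+
+/**
+ * \par Example
+ * Illustrative sketch only, not part of this file: a driver with no
+ * driver-private 32-bit ioctls can point the compat entry of its file
+ * operations straight at drm_compat_ioctl().  The name \c foo_driver is
+ * hypothetical.
+ * \code
+ *	static struct drm_driver foo_driver = {
+ *		.fops = {
+ *			.owner = THIS_MODULE,
+ *			.open = drm_open,
+ *			.release = drm_release,
+ *			.ioctl = drm_ioctl,
+ *			.mmap = drm_mmap,
+ *			.poll = drm_poll,
+ *			.fasync = drm_fasync,
+ *			.compat_ioctl = drm_compat_ioctl,
+ *		},
+ *	};
+ * \endcode
+ */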
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
new file mode 100644
index 000000000000..16829fb3089d
--- /dev/null
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -0,0 +1,352 @@
+/**
+ * \file drm_ioctl.c
+ * IOCTL processing for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Jan  8 09:01:26 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_core.h"
+
+#include "linux/pci.h"
+
+/**
+ * Get the bus id.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_unique structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Copies the bus id from drm_device::unique into user space.
+ */
+int drm_getunique(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
+{
+	struct drm_unique *u = data;
+
+	if (u->unique_len >= dev->unique_len) {
+		if (copy_to_user(u->unique, dev->unique, dev->unique_len))
+			return -EFAULT;
+	}
+	u->unique_len = dev->unique_len;
+
+	return 0;
+}
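+
+/**
+ * \par Example
+ * Illustrative sketch only: userspace typically calls this ioctl twice, first
+ * with \c unique_len zero to learn the length, then again with a buffer of
+ * that size.  \c fd is assumed to be an open DRM file descriptor.
+ * \code
+ *	struct drm_unique u;
+ *	char *busid;
+ *
+ *	memset(&u, 0, sizeof(u));
+ *	ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u);	// returns only the length
+ *	busid = malloc(u.unique_len + 1);
+ *	u.unique = busid;
+ *	ioctl(fd, DRM_IOCTL_GET_UNIQUE, &u);	// copies the bus id
+ *	busid[u.unique_len] = '\0';
+ * \endcode
+ */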
+
+/**
+ * Set the bus id.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_unique structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Copies the bus id from userspace into drm_device::unique, and verifies that
+ * it matches the device this DRM is attached to (EINVAL otherwise).  Deprecated
+ * in interface version 1.1 and will return EBUSY when setversion has requested
+ * version 1.1 or greater.
+ */
+int drm_setunique(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
+{
+	struct drm_unique *u = data;
+	int domain, bus, slot, func, ret;
+
+	if (dev->unique_len || dev->unique)
+		return -EBUSY;
+
+	if (!u->unique_len || u->unique_len > 1024)
+		return -EINVAL;
+
+	dev->unique_len = u->unique_len;
+	dev->unique = drm_alloc(u->unique_len + 1, DRM_MEM_DRIVER);
+	if (!dev->unique)
+		return -ENOMEM;
+	if (copy_from_user(dev->unique, u->unique, dev->unique_len))
+		return -EFAULT;
+
+	dev->unique[dev->unique_len] = '\0';
+
+	dev->devname =
+	    drm_alloc(strlen(dev->driver->pci_driver.name) +
+		      strlen(dev->unique) + 2, DRM_MEM_DRIVER);
+	if (!dev->devname)
+		return -ENOMEM;
+
+	sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
+		dev->unique);
+
+	/* Return error if the busid submitted doesn't match the device's actual
+	 * busid.
+	 */
+	ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
+	if (ret != 3)
+		return -EINVAL;
+	domain = bus >> 8;
+	bus &= 0xff;
+
+	if ((domain != drm_get_pci_domain(dev)) ||
+	    (bus != dev->pdev->bus->number) ||
+	    (slot != PCI_SLOT(dev->pdev->devfn)) ||
+	    (func != PCI_FUNC(dev->pdev->devfn)))
+		return -EINVAL;
+
+	return 0;
+}
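+
+/**
+ * \par Example
+ * Illustrative sketch only: the submitted bus id must use the
+ * "PCI:bus:slot:func" form parsed above, where the first field carries
+ * (domain << 8) | bus.  \c fd is assumed to be an open DRM file descriptor.
+ * \code
+ *	struct drm_unique u;
+ *	char busid[] = "PCI:1:0:0";	// domain 0, bus 1, slot 0, function 0
+ *
+ *	u.unique_len = strlen(busid);
+ *	u.unique = busid;
+ *	ioctl(fd, DRM_IOCTL_SET_UNIQUE, &u);	// EBUSY if already set, EINVAL on mismatch
+ * \endcode
+ */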
+
+static int drm_set_busid(struct drm_device * dev)
+{
+	int len;
+
+	if (dev->unique != NULL)
+		return 0;
+
+	dev->unique_len = 40;
+	dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER);
+	if (dev->unique == NULL)
+		return -ENOMEM;
+
+	len = snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d",
+		       drm_get_pci_domain(dev), dev->pdev->bus->number,
+		       PCI_SLOT(dev->pdev->devfn),
+		       PCI_FUNC(dev->pdev->devfn));
+
+	if (len > dev->unique_len)
+		DRM_ERROR("Unique buffer overflowed\n");
+
+	dev->devname =
+	    drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len +
+		      2, DRM_MEM_DRIVER);
+	if (dev->devname == NULL)
+		return -ENOMEM;
+
+	sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
+		dev->unique);
+
+	return 0;
+}
+
+/**
+ * Get mapping information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_map structure.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the mapping with the specified offset and copies its information
+ * into userspace.
+ */
+int drm_getmap(struct drm_device *dev, void *data,
+	       struct drm_file *file_priv)
+{
+	struct drm_map *map = data;
+	struct drm_map_list *r_list = NULL;
+	struct list_head *list;
+	int idx;
+	int i;
+
+	idx = map->offset;
+
+	mutex_lock(&dev->struct_mutex);
+	if (idx < 0) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	i = 0;
+	list_for_each(list, &dev->maplist) {
+		if (i == idx) {
+			r_list = list_entry(list, struct drm_map_list, head);
+			break;
+		}
+		i++;
+	}
+	if (!r_list || !r_list->map) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	map->offset = r_list->map->offset;
+	map->size = r_list->map->size;
+	map->type = r_list->map->type;
+	map->flags = r_list->map->flags;
+	map->handle = (void *)(unsigned long) r_list->user_token;
+	map->mtrr = r_list->map->mtrr;
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
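+
+/**
+ * \par Example
+ * Illustrative sketch only: because drm_map::offset is used as a list index
+ * on input, userspace can walk all maps by counting up until the ioctl fails
+ * with EINVAL.  \c fd is assumed to be an open DRM file descriptor.
+ * \code
+ *	struct drm_map map;
+ *	int i;
+ *
+ *	for (i = 0; ; i++) {
+ *		memset(&map, 0, sizeof(map));
+ *		map.offset = i;			// index into the map list
+ *		if (ioctl(fd, DRM_IOCTL_GET_MAP, &map))
+ *			break;			// ran past the end of the list
+ *		printf("map %d: offset 0x%lx size 0x%lx type %d\n",
+ *		       i, map.offset, map.size, (int)map.type);
+ *	}
+ * \endcode
+ */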
+
+/**
+ * Get client information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_client structure.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches for the client with the specified index and copies its information
+ * into userspace.
+ */
+int drm_getclient(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
+{
+	struct drm_client *client = data;
+	struct drm_file *pt;
+	int idx;
+	int i;
+
+	idx = client->idx;
+	mutex_lock(&dev->struct_mutex);
+
+	i = 0;
+	list_for_each_entry(pt, &dev->filelist, lhead) {
+		if (i++ >= idx) {
+			client->auth = pt->authenticated;
+			client->pid = pt->pid;
+			client->uid = pt->uid;
+			client->magic = pt->magic;
+			client->iocs = pt->ioctl_count;
+			mutex_unlock(&dev->struct_mutex);
+
+			return 0;
+		}
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	return -EINVAL;
+}
+
+/**
+ * Get statistics information.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_stats structure.
+ *
+ * \return zero on success or a negative number on failure.
+ */
+int drm_getstats(struct drm_device *dev, void *data,
+		 struct drm_file *file_priv)
+{
+	struct drm_stats *stats = data;
+	int i;
+
+	memset(stats, 0, sizeof(*stats));
+
+	mutex_lock(&dev->struct_mutex);
+
+	for (i = 0; i < dev->counters; i++) {
+		if (dev->types[i] == _DRM_STAT_LOCK)
+			stats->data[i].value =
+			    (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0);
+		else
+			stats->data[i].value = atomic_read(&dev->counts[i]);
+		stats->data[i].type = dev->types[i];
+	}
+
+	stats->count = dev->counters;
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+/**
+ * Setversion ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_set_version structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Sets the requested interface version
+ */
+int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	struct drm_set_version *sv = data;
+	int if_version, retcode = 0;
+
+	if (sv->drm_di_major != -1) {
+		if (sv->drm_di_major != DRM_IF_MAJOR ||
+		    sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) {
+			retcode = -EINVAL;
+			goto done;
+		}
+		if_version = DRM_IF_VERSION(sv->drm_di_major,
+					    sv->drm_di_minor);
+		dev->if_version = max(if_version, dev->if_version);
+		if (sv->drm_di_minor >= 1) {
+			/*
+			 * Version 1.1 includes tying of DRM to specific device
+			 */
+			drm_set_busid(dev);
+		}
+	}
+
+	if (sv->drm_dd_major != -1) {
+		if (sv->drm_dd_major != dev->driver->major ||
+		    sv->drm_dd_minor < 0 || sv->drm_dd_minor >
+		    dev->driver->minor) {
+			retcode = -EINVAL;
+			goto done;
+		}
+
+		if (dev->driver->set_version)
+			dev->driver->set_version(dev, sv);
+	}
+
+done:
+	sv->drm_di_major = DRM_IF_MAJOR;
+	sv->drm_di_minor = DRM_IF_MINOR;
+	sv->drm_dd_major = dev->driver->major;
+	sv->drm_dd_minor = dev->driver->minor;
+
+	return retcode;
+}
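+
+/**
+ * \par Example
+ * Illustrative sketch only: a client opts into interface 1.1 (which ties the
+ * DRM to a specific bus id, see drm_set_busid()) while leaving the driver
+ * version untouched.  \c fd is assumed to be an open DRM file descriptor.
+ * \code
+ *	struct drm_set_version sv;
+ *
+ *	sv.drm_di_major = 1;
+ *	sv.drm_di_minor = 1;
+ *	sv.drm_dd_major = -1;	// -1 means "don't care"
+ *	sv.drm_dd_minor = -1;
+ *	ioctl(fd, DRM_IOCTL_SET_VERSION, &sv);
+ *	// on return sv holds the versions the kernel actually supports
+ * \endcode
+ */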
+
+/** No-op ioctl. */
+int drm_noop(struct drm_device *dev, void *data,
+	     struct drm_file *file_priv)
+{
+	DRM_DEBUG("\n");
+	return 0;
+}
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
new file mode 100644
index 000000000000..089c015c01d1
--- /dev/null
+++ b/drivers/gpu/drm/drm_irq.c
@@ -0,0 +1,462 @@
+/**
+ * \file drm_irq.c
+ * IRQ support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
+ *
+ * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+#include <linux/interrupt.h>	/* For task queue support */
+
+/**
+ * Get interrupt from bus id.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_irq_busid structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Finds the PCI device with the specified bus id and gets its IRQ number.
+ * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
+ * to that of the device this DRM instance is attached to.
+ */
+int drm_irq_by_busid(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	struct drm_irq_busid *p = data;
+
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+		return -EINVAL;
+
+	if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
+	    (p->busnum & 0xff) != dev->pdev->bus->number ||
+	    p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
+		return -EINVAL;
+
+	p->irq = dev->irq;
+
+	DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
+		  p->irq);
+
+	return 0;
+}
+
+/**
+ * Install IRQ handler.
+ *
+ * \param dev DRM device.
+ * \param irq IRQ number.
+ *
+ * Initializes the IRQ-related data and sets up drm_device::vbl_queue. Installs the handler, calling the driver
+ * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
+ * before and after the installation.
+ */
+static int drm_irq_install(struct drm_device * dev)
+{
+	int ret;
+	unsigned long sh_flags = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+		return -EINVAL;
+
+	if (dev->irq == 0)
+		return -EINVAL;
+
+	mutex_lock(&dev->struct_mutex);
+
+	/* Driver must have been initialized */
+	if (!dev->dev_private) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	if (dev->irq_enabled) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EBUSY;
+	}
+	dev->irq_enabled = 1;
+	mutex_unlock(&dev->struct_mutex);
+
+	DRM_DEBUG("irq=%d\n", dev->irq);
+
+	if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
+		init_waitqueue_head(&dev->vbl_queue);
+
+		spin_lock_init(&dev->vbl_lock);
+
+		INIT_LIST_HEAD(&dev->vbl_sigs);
+		INIT_LIST_HEAD(&dev->vbl_sigs2);
+
+		dev->vbl_pending = 0;
+	}
+
+	/* Before installing handler */
+	dev->driver->irq_preinstall(dev);
+
+	/* Install handler */
+	if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
+		sh_flags = IRQF_SHARED;
+
+	ret = request_irq(dev->irq, dev->driver->irq_handler,
+			  sh_flags, dev->devname, dev);
+	if (ret < 0) {
+		mutex_lock(&dev->struct_mutex);
+		dev->irq_enabled = 0;
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	/* After installing handler */
+	dev->driver->irq_postinstall(dev);
+
+	return 0;
+}
+
+/**
+ * Uninstall the IRQ handler.
+ *
+ * \param dev DRM device.
+ *
+ * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
+ */
+int drm_irq_uninstall(struct drm_device * dev)
+{
+	int irq_enabled;
+
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+		return -EINVAL;
+
+	mutex_lock(&dev->struct_mutex);
+	irq_enabled = dev->irq_enabled;
+	dev->irq_enabled = 0;
+	mutex_unlock(&dev->struct_mutex);
+
+	if (!irq_enabled)
+		return -EINVAL;
+
+	DRM_DEBUG("irq=%d\n", dev->irq);
+
+	dev->driver->irq_uninstall(dev);
+
+	free_irq(dev->irq, dev);
+
+	dev->locked_tasklet_func = NULL;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(drm_irq_uninstall);
+
+/**
+ * IRQ control ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_control structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Calls irq_install() or irq_uninstall() according to \p arg.
+ */
+int drm_control(struct drm_device *dev, void *data,
+		struct drm_file *file_priv)
+{
+	struct drm_control *ctl = data;
+
+	/* If the driver doesn't have IRQ support we fall back silently for
+	 * compatibility reasons; this used to be a separate function in
+	 * drm_dma.h.
+	 */
+
+	switch (ctl->func) {
+	case DRM_INST_HANDLER:
+		if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+			return 0;
+		if (dev->if_version < DRM_IF_VERSION(1, 2) &&
+		    ctl->irq != dev->irq)
+			return -EINVAL;
+		return drm_irq_install(dev);
+	case DRM_UNINST_HANDLER:
+		if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
+			return 0;
+		return drm_irq_uninstall(dev);
+	default:
+		return -EINVAL;
+	}
+}
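+
+/**
+ * \par Example
+ * Illustrative sketch only: installing the interrupt handler from userspace.
+ * \c fd is assumed to be a privileged (master) DRM file descriptor and \c irq
+ * the interrupt number reported by DRM_IOCTL_IRQ_BUSID.
+ * \code
+ *	struct drm_control ctl;
+ *
+ *	ctl.func = DRM_INST_HANDLER;
+ *	ctl.irq = irq;		// checked against dev->irq on pre-1.2 interfaces
+ *	ioctl(fd, DRM_IOCTL_CONTROL, &ctl);
+ * \endcode
+ */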
+
+/**
+ * Wait for VBLANK.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param data user argument, pointing to a drm_wait_vblank structure.
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the IRQ is installed.
+ *
+ * If a signal is requested, checks whether this task has already scheduled
+ * the same signal for the same vblank sequence number, in which case there is
+ * nothing to be done. If the number of tasks waiting for the interrupt
+ * exceeds 100, the function fails. Otherwise a new entry is added to
+ * drm_device::vbl_sigs for this task.
+ *
+ * If a signal is not requested, then calls vblank_wait().
+ */
+int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	union drm_wait_vblank *vblwait = data;
+	struct timeval now;
+	int ret = 0;
+	unsigned int flags, seq;
+
+	if ((!dev->irq) || (!dev->irq_enabled))
+		return -EINVAL;
+
+	if (vblwait->request.type &
+	    ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
+		DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
+			  vblwait->request.type,
+			  (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
+		return -EINVAL;
+	}
+
+	flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
+
+	if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ?
+				    DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL))
+		return -EINVAL;
+
+	seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2
+			  : &dev->vbl_received);
+
+	switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
+	case _DRM_VBLANK_RELATIVE:
+		vblwait->request.sequence += seq;
+		vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
+		/* fall through */
+	case _DRM_VBLANK_ABSOLUTE:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
+	    (seq - vblwait->request.sequence) <= (1<<23)) {
+		vblwait->request.sequence = seq + 1;
+	}
+
+	if (flags & _DRM_VBLANK_SIGNAL) {
+		unsigned long irqflags;
+		struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
+				      ? &dev->vbl_sigs2 : &dev->vbl_sigs;
+		struct drm_vbl_sig *vbl_sig;
+
+		spin_lock_irqsave(&dev->vbl_lock, irqflags);
+
+		/* Check if this task has already scheduled the same signal
+		 * for the same vblank sequence number; nothing to be done in
+		 * that case
+		 */
+		list_for_each_entry(vbl_sig, vbl_sigs, head) {
+			if (vbl_sig->sequence == vblwait->request.sequence
+			    && vbl_sig->info.si_signo ==
+			    vblwait->request.signal
+			    && vbl_sig->task == current) {
+				spin_unlock_irqrestore(&dev->vbl_lock,
+						       irqflags);
+				vblwait->reply.sequence = seq;
+				goto done;
+			}
+		}
+
+		if (dev->vbl_pending >= 100) {
+			spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+			return -EBUSY;
+		}
+
+		dev->vbl_pending++;
+
+		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+		vbl_sig = drm_alloc(sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER);
+		if (!vbl_sig)
+			return -ENOMEM;
+
+		memset((void *)vbl_sig, 0, sizeof(*vbl_sig));
+
+		vbl_sig->sequence = vblwait->request.sequence;
+		vbl_sig->info.si_signo = vblwait->request.signal;
+		vbl_sig->task = current;
+
+		spin_lock_irqsave(&dev->vbl_lock, irqflags);
+
+		list_add_tail(&vbl_sig->head, vbl_sigs);
+
+		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+		vblwait->reply.sequence = seq;
+	} else {
+		if (flags & _DRM_VBLANK_SECONDARY) {
+			if (dev->driver->vblank_wait2)
+				ret = dev->driver->vblank_wait2(dev, &vblwait->request.sequence);
+		} else if (dev->driver->vblank_wait)
+			ret = dev->driver->vblank_wait(dev,
+						       &vblwait->request.sequence);
+
+		do_gettimeofday(&now);
+		vblwait->reply.tval_sec = now.tv_sec;
+		vblwait->reply.tval_usec = now.tv_usec;
+	}
+
+      done:
+	return ret;
+}
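+
+/**
+ * \par Example
+ * Illustrative sketch only: blocking until the next vertical blank on the
+ * primary pipe.  \c fd is assumed to be an open DRM file descriptor.
+ * \code
+ *	union drm_wait_vblank vbl;
+ *
+ *	memset(&vbl, 0, sizeof(vbl));
+ *	vbl.request.type = _DRM_VBLANK_RELATIVE;	// sequence counts from "now"
+ *	vbl.request.sequence = 1;			// i.e. the very next vblank
+ *	if (ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl) == 0)
+ *		printf("vblank %u at %ld.%06ld\n", vbl.reply.sequence,
+ *		       (long)vbl.reply.tval_sec, (long)vbl.reply.tval_usec);
+ * \endcode
+ */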
+
+/**
+ * Send the VBLANK signals.
+ *
+ * \param dev DRM device.
+ *
+ * Sends a signal for each task in drm_device::vbl_sigs and empties the list.
+ */
+void drm_vbl_send_signals(struct drm_device * dev)
+{
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&dev->vbl_lock, flags);
+
+	for (i = 0; i < 2; i++) {
+		struct drm_vbl_sig *vbl_sig, *tmp;
+		struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
+		unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
+						   &dev->vbl_received);
+
+		list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
+			if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
+				vbl_sig->info.si_code = vbl_seq;
+				send_sig_info(vbl_sig->info.si_signo,
+					      &vbl_sig->info, vbl_sig->task);
+
+				list_del(&vbl_sig->head);
+
+				drm_free(vbl_sig, sizeof(*vbl_sig),
+					 DRM_MEM_DRIVER);
+
+				dev->vbl_pending--;
+			}
+		}
+	}
+
+	spin_unlock_irqrestore(&dev->vbl_lock, flags);
+}
+
+EXPORT_SYMBOL(drm_vbl_send_signals);
+
+/**
+ * Tasklet wrapper function.
+ *
+ * \param data DRM device in disguise.
+ *
+ * Attempts to grab the HW lock and calls the driver callback on success. On
+ * failure, leaves the lock marked as contended so the callback can be called
+ * from drm_unlock().
+ */
+static void drm_locked_tasklet_func(unsigned long data)
+{
+	struct drm_device *dev = (struct drm_device *)data;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+
+	if (!dev->locked_tasklet_func ||
+	    !drm_lock_take(&dev->lock,
+			   DRM_KERNEL_CONTEXT)) {
+		spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+		return;
+	}
+
+	dev->lock.lock_time = jiffies;
+	atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
+
+	dev->locked_tasklet_func(dev);
+
+	drm_lock_free(&dev->lock,
+		      DRM_KERNEL_CONTEXT);
+
+	dev->locked_tasklet_func = NULL;
+
+	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+}
+
+/**
+ * Schedule a tasklet to call back a driver hook with the HW lock held.
+ *
+ * \param dev DRM device.
+ * \param func Driver callback.
+ *
+ * This is intended for triggering actions that require the HW lock from an
+ * interrupt handler. The lock will be grabbed ASAP after the interrupt handler
+ * completes. Note that the callback may be called from interrupt or process
+ * context, so it must not make any assumptions about this. Also, the HW lock will
+ * be held with the kernel context or any client context.
+ */
+void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device *))
+{
+	unsigned long irqflags;
+	static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0);
+
+	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ) ||
+	    test_bit(TASKLET_STATE_SCHED, &drm_tasklet.state))
+		return;
+
+	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+
+	if (dev->locked_tasklet_func) {
+		spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+		return;
+	}
+
+	dev->locked_tasklet_func = func;
+
+	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+
+	drm_tasklet.data = (unsigned long)dev;
+
+	tasklet_hi_schedule(&drm_tasklet);
+}
+EXPORT_SYMBOL(drm_locked_tasklet);
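+
+/**
+ * \par Example
+ * Illustrative sketch only: a driver interrupt handler that must poke the
+ * hardware with the heavyweight lock held defers that work through
+ * drm_locked_tasklet() instead of trying to take the lock in interrupt
+ * context.  The \c foo_* names are hypothetical.
+ * \code
+ *	static void foo_flush_pending(struct drm_device *dev)
+ *	{
+ *		// runs with the HW lock held, from the tasklet or drm_unlock()
+ *	}
+ *
+ *	irqreturn_t foo_irq_handler(DRM_IRQ_ARGS)
+ *	{
+ *		struct drm_device *dev = (struct drm_device *) arg;
+ *
+ *		drm_locked_tasklet(dev, foo_flush_pending);
+ *		return IRQ_HANDLED;
+ *	}
+ * \endcode
+ */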
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
new file mode 100644
index 000000000000..0998723cde79
--- /dev/null
+++ b/drivers/gpu/drm/drm_lock.c
@@ -0,0 +1,391 @@
+/**
+ * \file drm_lock.c
+ * IOCTLs for locking
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+static int drm_notifier(void *priv);
+
+/**
+ * Lock ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_lock structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Add the current task to the lock wait queue, and attempt to take the lock.
+ */
+int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	DECLARE_WAITQUEUE(entry, current);
+	struct drm_lock *lock = data;
+	int ret = 0;
+
+	++file_priv->lock_count;
+
+	if (lock->context == DRM_KERNEL_CONTEXT) {
+		DRM_ERROR("Process %d using kernel context %d\n",
+			  task_pid_nr(current), lock->context);
+		return -EINVAL;
+	}
+
+	DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
+		  lock->context, task_pid_nr(current),
+		  dev->lock.hw_lock->lock, lock->flags);
+
+	if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
+		if (lock->context < 0)
+			return -EINVAL;
+
+	add_wait_queue(&dev->lock.lock_queue, &entry);
+	spin_lock_bh(&dev->lock.spinlock);
+	dev->lock.user_waiters++;
+	spin_unlock_bh(&dev->lock.spinlock);
+	for (;;) {
+		__set_current_state(TASK_INTERRUPTIBLE);
+		if (!dev->lock.hw_lock) {
+			/* Device has been unregistered */
+			ret = -EINTR;
+			break;
+		}
+		if (drm_lock_take(&dev->lock, lock->context)) {
+			dev->lock.file_priv = file_priv;
+			dev->lock.lock_time = jiffies;
+			atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
+			break;	/* Got lock */
+		}
+
+		/* Contention */
+		schedule();
+		if (signal_pending(current)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+	}
+	spin_lock_bh(&dev->lock.spinlock);
+	dev->lock.user_waiters--;
+	spin_unlock_bh(&dev->lock.spinlock);
+	__set_current_state(TASK_RUNNING);
+	remove_wait_queue(&dev->lock.lock_queue, &entry);
+
+	DRM_DEBUG("%d %s\n", lock->context,
+		  ret ? "interrupted" : "has lock");
+	if (ret)
+		return ret;
+
+	sigemptyset(&dev->sigmask);
+	sigaddset(&dev->sigmask, SIGSTOP);
+	sigaddset(&dev->sigmask, SIGTSTP);
+	sigaddset(&dev->sigmask, SIGTTIN);
+	sigaddset(&dev->sigmask, SIGTTOU);
+	dev->sigdata.context = lock->context;
+	dev->sigdata.lock = dev->lock.hw_lock;
+	block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
+
+	if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY))
+		dev->driver->dma_ready(dev);
+
+	if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
+	{
+		if (dev->driver->dma_quiescent(dev)) {
+			DRM_DEBUG("%d waiting for DMA quiescent\n",
+				  lock->context);
+			return -EBUSY;
+		}
+	}
+
+	if (dev->driver->kernel_context_switch &&
+	    dev->last_context != lock->context) {
+		dev->driver->kernel_context_switch(dev, dev->last_context,
+						   lock->context);
+	}
+
+	return 0;
+}
+
+/**
+ * Unlock ioctl.
+ *
+ * \param inode device inode.
+ * \param file_priv DRM file private.
+ * \param cmd command.
+ * \param arg user argument, pointing to a drm_lock structure.
+ * \return zero on success or negative number on failure.
+ *
+ * Transfer and free the lock.
+ */
+int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	struct drm_lock *lock = data;
+	unsigned long irqflags;
+
+	if (lock->context == DRM_KERNEL_CONTEXT) {
+		DRM_ERROR("Process %d using kernel context %d\n",
+			  task_pid_nr(current), lock->context);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&dev->tasklet_lock, irqflags);
+
+	if (dev->locked_tasklet_func) {
+		dev->locked_tasklet_func(dev);
+
+		dev->locked_tasklet_func = NULL;
+	}
+
+	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
+
+	atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
+
+	/* kernel_context_switch isn't used by any of the x86 drm
+	 * modules but is required by the Sparc driver.
+	 */
+	if (dev->driver->kernel_context_switch_unlock)
+		dev->driver->kernel_context_switch_unlock(dev);
+	else {
+		if (drm_lock_free(&dev->lock, lock->context)) {
+			/* FIXME: Should really bail out here. */
+		}
+	}
+
+	unblock_all_signals();
+	return 0;
+}
+
+/**
+ * Take the heavyweight lock.
+ *
+ * \param lock lock pointer.
+ * \param context locking context.
+ * \return one if the lock is held, or zero otherwise.
+ *
+ * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
+ */
+int drm_lock_take(struct drm_lock_data *lock_data,
+		  unsigned int context)
+{
+	unsigned int old, new, prev;
+	volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+	spin_lock_bh(&lock_data->spinlock);
+	do {
+		old = *lock;
+		if (old & _DRM_LOCK_HELD)
+			new = old | _DRM_LOCK_CONT;
+		else {
+			new = context | _DRM_LOCK_HELD |
+				((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
+				 _DRM_LOCK_CONT : 0);
+		}
+		prev = cmpxchg(lock, old, new);
+	} while (prev != old);
+	spin_unlock_bh(&lock_data->spinlock);
+
+	if (_DRM_LOCKING_CONTEXT(old) == context) {
+		if (old & _DRM_LOCK_HELD) {
+			if (context != DRM_KERNEL_CONTEXT) {
+				DRM_ERROR("%d holds heavyweight lock\n",
+					  context);
+			}
+			return 0;
+		}
+	}
+
+	if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
+		/* Have lock */
+		return 1;
+	}
+	return 0;
+}
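+
+/**
+ * \par Example
+ * Illustrative note only: the lock word manipulated by the cmpxchg loops in
+ * this file encodes the owner and the contention state as defined in drm.h:
+ * bit 31 is _DRM_LOCK_HELD, bit 30 is _DRM_LOCK_CONT, and the remaining bits
+ * hold the owning context id.
+ * \code
+ *	unsigned int lck = 3 | _DRM_LOCK_HELD;	// context 3 holds the lock
+ *
+ *	_DRM_LOCK_IS_HELD(lck);			// non-zero
+ *	_DRM_LOCKING_CONTEXT(lck);		// 3
+ *	lck |= _DRM_LOCK_CONT;			// somebody else is now waiting
+ * \endcode
+ */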
+
+/**
+ * This takes a lock forcibly and hands it to context. Should ONLY be used
+ * inside *_unlock to give the lock to the kernel before calling *_dma_schedule.
+ *
+ * \param dev DRM device.
+ * \param lock lock pointer.
+ * \param context locking context.
+ * \return always one.
+ *
+ * Resets the lock file pointer.
+ * Marks the lock as held by the given context, via the \p cmpxchg instruction.
+ */
+static int drm_lock_transfer(struct drm_lock_data *lock_data,
+			     unsigned int context)
+{
+	unsigned int old, new, prev;
+	volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+	lock_data->file_priv = NULL;
+	do {
+		old = *lock;
+		new = context | _DRM_LOCK_HELD;
+		prev = cmpxchg(lock, old, new);
+	} while (prev != old);
+	return 1;
+}
+
+/**
+ * Free lock.
+ *
+ * \param dev DRM device.
+ * \param lock lock.
+ * \param context context.
+ *
+ * Resets the lock file pointer.
+ * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
+ * waiting on the lock queue.
+ */
+int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
+{
+	unsigned int old, new, prev;
+	volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+	spin_lock_bh(&lock_data->spinlock);
+	if (lock_data->kernel_waiters != 0) {
+		drm_lock_transfer(lock_data, 0);
+		lock_data->idle_has_lock = 1;
+		spin_unlock_bh(&lock_data->spinlock);
+		return 1;
+	}
+	spin_unlock_bh(&lock_data->spinlock);
+
+	do {
+		old = *lock;
+		new = _DRM_LOCKING_CONTEXT(old);
+		prev = cmpxchg(lock, old, new);
+	} while (prev != old);
+
+	if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
+		DRM_ERROR("%d freed heavyweight lock held by %d\n",
+			  context, _DRM_LOCKING_CONTEXT(old));
+		return 1;
+	}
+	wake_up_interruptible(&lock_data->lock_queue);
+	return 0;
+}
+
+/**
+ * If we get here, it means that the process has called DRM_IOCTL_LOCK
+ * without calling DRM_IOCTL_UNLOCK.
+ *
+ * If the lock is not held, then let the signal proceed as usual.  If the lock
+ * is held, then set the contended flag and keep the signal blocked.
+ *
+ * \param priv pointer to a drm_sigdata structure.
+ * \return one if the signal should be delivered normally, or zero if the
+ * signal should be blocked.
+ */
+static int drm_notifier(void *priv)
+{
+	struct drm_sigdata *s = (struct drm_sigdata *) priv;
+	unsigned int old, new, prev;
+
+	/* Allow signal delivery if lock isn't held */
+	if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock)
+	    || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context)
+		return 1;
+
+	/* Otherwise, set flag to force call to
+	   drmUnlock */
+	do {
+		old = s->lock->lock;
+		new = old | _DRM_LOCK_CONT;
+		prev = cmpxchg(&s->lock->lock, old, new);
+	} while (prev != old);
+	return 0;
+}
+
+/**
+ * This function returns immediately and takes the hw lock with the kernel
+ * context if it is free; otherwise it is given the highest priority when and
+ * if the lock is eventually released.
+ *
+ * This guarantees that the kernel will _eventually_ get the lock _unless_ it
+ * is held by a blocked process. (In the latter case an explicit wait for the
+ * hardware lock would cause a deadlock, which is why the "idlelock" was
+ * invented.)
+ *
+ * This should be sufficient to wait for GPU idle without
+ * having to worry about starvation.
+ */
+
+void drm_idlelock_take(struct drm_lock_data *lock_data)
+{
+	int ret = 0;
+
+	spin_lock_bh(&lock_data->spinlock);
+	lock_data->kernel_waiters++;
+	if (!lock_data->idle_has_lock) {
+
+		spin_unlock_bh(&lock_data->spinlock);
+		ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
+		spin_lock_bh(&lock_data->spinlock);
+
+		if (ret == 1)
+			lock_data->idle_has_lock = 1;
+	}
+	spin_unlock_bh(&lock_data->spinlock);
+}
+EXPORT_SYMBOL(drm_idlelock_take);
+
+void drm_idlelock_release(struct drm_lock_data *lock_data)
+{
+	unsigned int old, prev;
+	volatile unsigned int *lock = &lock_data->hw_lock->lock;
+
+	spin_lock_bh(&lock_data->spinlock);
+	if (--lock_data->kernel_waiters == 0) {
+		if (lock_data->idle_has_lock) {
+			do {
+				old = *lock;
+				prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
+			} while (prev != old);
+			wake_up_interruptible(&lock_data->lock_queue);
+			lock_data->idle_has_lock = 0;
+		}
+	}
+	spin_unlock_bh(&lock_data->spinlock);
+}
+EXPORT_SYMBOL(drm_idlelock_release);
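+
+/**
+ * \par Example
+ * Illustrative sketch only: a driver that needs the GPU quiescent (for
+ * instance before evicting memory) brackets the wait with the idlelock so it
+ * cannot deadlock against a blocked lock holder.  \c foo_wait_for_idle is a
+ * hypothetical driver helper.
+ * \code
+ *	drm_idlelock_take(&dev->lock);
+ *	foo_wait_for_idle(dev);
+ *	drm_idlelock_release(&dev->lock);
+ * \endcode
+ */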
+
+
+int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
+{
+	return (file_priv->lock_count && dev->lock.hw_lock &&
+		_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
+		dev->lock.file_priv == file_priv);
+}
+
+EXPORT_SYMBOL(drm_i_have_hw_lock);
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
new file mode 100644
index 000000000000..0177012845c6
--- /dev/null
+++ b/drivers/gpu/drm/drm_memory.c
@@ -0,0 +1,186 @@
+/**
+ * \file drm_memory.c
+ * Memory management wrappers for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Thu Feb  4 14:00:34 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/highmem.h>
+#include "drmP.h"
+
+#ifdef DEBUG_MEMORY
+#include "drm_memory_debug.h"
+#else
+
+/** No-op. */
+void drm_mem_init(void)
+{
+}
+
+/**
+ * Called when "/proc/dri/%dev%/mem" is read.
+ *
+ * \param buf output buffer.
+ * \param start start of output data.
+ * \param offset requested start offset.
+ * \param len requested number of bytes.
+ * \param eof whether there is no more data to return.
+ * \param data private data.
+ * \return number of written bytes.
+ *
+ * No-op.
+ */
+int drm_mem_info(char *buf, char **start, off_t offset,
+		 int len, int *eof, void *data)
+{
+	return 0;
+}
+
+/** Wrapper around kmalloc() and kfree() */
+void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area)
+{
+	void *pt;
+
+	if (!(pt = kmalloc(size, GFP_KERNEL)))
+		return NULL;
+	if (oldpt && oldsize) {
+		memcpy(pt, oldpt, oldsize);
+		kfree(oldpt);
+	}
+	return pt;
+}
+
+#if __OS_HAS_AGP
+static void *agp_remap(unsigned long offset, unsigned long size,
+		       struct drm_device * dev)
+{
+	unsigned long *phys_addr_map, i, num_pages =
+	    PAGE_ALIGN(size) / PAGE_SIZE;
+	struct drm_agp_mem *agpmem;
+	struct page **page_map;
+	void *addr;
+
+	size = PAGE_ALIGN(size);
+
+#ifdef __alpha__
+	offset -= dev->hose->mem_space->start;
+#endif
+
+	list_for_each_entry(agpmem, &dev->agp->memory, head)
+		if (agpmem->bound <= offset
+		    && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
+		    (offset + size))
+			break;
+	if (!agpmem)
+		return NULL;
+
+	/*
+	 * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
+	 * the CPU do not get remapped by the GART.  We fix this by using the kernel's
+	 * page-table instead (that's probably faster anyhow...).
+	 */
+	/* note: use vmalloc() because num_pages could be large... */
+	page_map = vmalloc(num_pages * sizeof(struct page *));
+	if (!page_map)
+		return NULL;
+
+	phys_addr_map =
+	    agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
+	for (i = 0; i < num_pages; ++i)
+		page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
+	addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
+	vfree(page_map);
+
+	return addr;
+}
+
+/** Wrapper around agp_allocate_memory() */
+DRM_AGP_MEM *drm_alloc_agp(struct drm_device * dev, int pages, u32 type)
+{
+	return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
+}
+
+/** Wrapper around agp_free_memory() */
+int drm_free_agp(DRM_AGP_MEM * handle, int pages)
+{
+	return drm_agp_free_memory(handle) ? 0 : -EINVAL;
+}
+
+/** Wrapper around agp_bind_memory() */
+int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
+{
+	return drm_agp_bind_memory(handle, start);
+}
+
+/** Wrapper around agp_unbind_memory() */
+int drm_unbind_agp(DRM_AGP_MEM * handle)
+{
+	return drm_agp_unbind_memory(handle);
+}
+
+#else  /*  __OS_HAS_AGP  */
+static inline void *agp_remap(unsigned long offset, unsigned long size,
+			      struct drm_device * dev)
+{
+	return NULL;
+}
+
+#endif				/* agp */
+
+#endif				/* debug_memory */
+
+void drm_core_ioremap(struct drm_map *map, struct drm_device *dev)
+{
+	if (drm_core_has_AGP(dev) &&
+	    dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+		map->handle = agp_remap(map->offset, map->size, dev);
+	else
+		map->handle = ioremap(map->offset, map->size);
+}
+EXPORT_SYMBOL(drm_core_ioremap);
+
+void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev)
+{
+	map->handle = ioremap_wc(map->offset, map->size);
+}
+EXPORT_SYMBOL(drm_core_ioremap_wc);
+
+void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev)
+{
+	if (!map->handle || !map->size)
+		return;
+
+	if (drm_core_has_AGP(dev) &&
+	    dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP)
+		vunmap(map->handle);
+	else
+		iounmap(map->handle);
+}
+EXPORT_SYMBOL(drm_core_ioremapfree);
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
new file mode 100644
index 000000000000..dcff9e9b52e3
--- /dev/null
+++ b/drivers/gpu/drm/drm_mm.c
@@ -0,0 +1,295 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+
+/*
+ * Generic simple memory manager implementation. Intended to be used as a base
+ * class implementation for more advanced memory managers.
+ *
+ * Note that the algorithm used is quite simple; there might be substantial
+ * performance gains if a smarter free list is implemented. Currently it is
+ * just an unordered stack of free regions, which could easily be improved by
+ * using an RB-tree instead, at least if heavy fragmentation is expected.
+ *
+ * Aligned allocations could also be improved.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+#include <linux/slab.h>
+
+unsigned long drm_mm_tail_space(struct drm_mm *mm)
+{
+	struct list_head *tail_node;
+	struct drm_mm_node *entry;
+
+	tail_node = mm->ml_entry.prev;
+	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+	if (!entry->free)
+		return 0;
+
+	return entry->size;
+}
+
+int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
+{
+	struct list_head *tail_node;
+	struct drm_mm_node *entry;
+
+	tail_node = mm->ml_entry.prev;
+	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+	if (!entry->free)
+		return -ENOMEM;
+
+	if (entry->size <= size)
+		return -ENOMEM;
+
+	entry->size -= size;
+	return 0;
+}
+
+
+static int drm_mm_create_tail_node(struct drm_mm *mm,
+			    unsigned long start,
+			    unsigned long size)
+{
+	struct drm_mm_node *child;
+
+	child = (struct drm_mm_node *)
+		drm_alloc(sizeof(*child), DRM_MEM_MM);
+	if (!child)
+		return -ENOMEM;
+
+	child->free = 1;
+	child->size = size;
+	child->start = start;
+	child->mm = mm;
+
+	list_add_tail(&child->ml_entry, &mm->ml_entry);
+	list_add_tail(&child->fl_entry, &mm->fl_entry);
+
+	return 0;
+}
+
+
+int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
+{
+	struct list_head *tail_node;
+	struct drm_mm_node *entry;
+
+	tail_node = mm->ml_entry.prev;
+	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
+	if (!entry->free) {
+		return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
+	}
+	entry->size += size;
+	return 0;
+}
+
+static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
+					    unsigned long size)
+{
+	struct drm_mm_node *child;
+
+	child = (struct drm_mm_node *)
+		drm_alloc(sizeof(*child), DRM_MEM_MM);
+	if (!child)
+		return NULL;
+
+	INIT_LIST_HEAD(&child->fl_entry);
+
+	child->free = 0;
+	child->size = size;
+	child->start = parent->start;
+	child->mm = parent->mm;
+
+	list_add_tail(&child->ml_entry, &parent->ml_entry);
+
+	parent->size -= size;
+	parent->start += size;
+	return child;
+}
+
+
+
+struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
+				unsigned long size, unsigned alignment)
+{
+
+	struct drm_mm_node *align_splitoff = NULL;
+	struct drm_mm_node *child;
+	unsigned tmp = 0;
+
+	if (alignment)
+		tmp = parent->start % alignment;
+
+	if (tmp) {
+		align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
+		if (!align_splitoff)
+			return NULL;
+	}
+
+	if (parent->size == size) {
+		list_del_init(&parent->fl_entry);
+		parent->free = 0;
+		return parent;
+	} else {
+		child = drm_mm_split_at_start(parent, size);
+	}
+
+	if (align_splitoff)
+		drm_mm_put_block(align_splitoff);
+
+	return child;
+}
+
+/*
+ * Put a block. Merge with the previous and / or next block if they are free.
+ * Otherwise add to the free stack.
+ */
+
+void drm_mm_put_block(struct drm_mm_node * cur)
+{
+
+	struct drm_mm *mm = cur->mm;
+	struct list_head *cur_head = &cur->ml_entry;
+	struct list_head *root_head = &mm->ml_entry;
+	struct drm_mm_node *prev_node = NULL;
+	struct drm_mm_node *next_node;
+
+	int merged = 0;
+
+	if (cur_head->prev != root_head) {
+		prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
+		if (prev_node->free) {
+			prev_node->size += cur->size;
+			merged = 1;
+		}
+	}
+	if (cur_head->next != root_head) {
+		next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry);
+		if (next_node->free) {
+			if (merged) {
+				prev_node->size += next_node->size;
+				list_del(&next_node->ml_entry);
+				list_del(&next_node->fl_entry);
+				drm_free(next_node, sizeof(*next_node),
+					 DRM_MEM_MM);
+			} else {
+				next_node->size += cur->size;
+				next_node->start = cur->start;
+				merged = 1;
+			}
+		}
+	}
+	if (!merged) {
+		cur->free = 1;
+		list_add(&cur->fl_entry, &mm->fl_entry);
+	} else {
+		list_del(&cur->ml_entry);
+		drm_free(cur, sizeof(*cur), DRM_MEM_MM);
+	}
+}
+
+struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
+				  unsigned long size,
+				  unsigned alignment, int best_match)
+{
+	struct list_head *list;
+	const struct list_head *free_stack = &mm->fl_entry;
+	struct drm_mm_node *entry;
+	struct drm_mm_node *best;
+	unsigned long best_size;
+	unsigned wasted;
+
+	best = NULL;
+	best_size = ~0UL;
+
+	list_for_each(list, free_stack) {
+		entry = list_entry(list, struct drm_mm_node, fl_entry);
+		wasted = 0;
+
+		if (entry->size < size)
+			continue;
+
+		if (alignment) {
+			register unsigned tmp = entry->start % alignment;
+			if (tmp)
+				wasted += alignment - tmp;
+		}
+
+
+		if (entry->size >= size + wasted) {
+			if (!best_match)
+				return entry;
+			if (entry->size < best_size) {
+				best = entry;
+				best_size = entry->size;
+			}
+		}
+	}
+
+	return best;
+}
+
+int drm_mm_clean(struct drm_mm * mm)
+{
+	struct list_head *head = &mm->ml_entry;
+
+	return (head->next->next == head);
+}
+
+int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
+{
+	INIT_LIST_HEAD(&mm->ml_entry);
+	INIT_LIST_HEAD(&mm->fl_entry);
+
+	return drm_mm_create_tail_node(mm, start, size);
+}
+
+
+void drm_mm_takedown(struct drm_mm * mm)
+{
+	struct list_head *bnode = mm->fl_entry.next;
+	struct drm_mm_node *entry;
+
+	entry = list_entry(bnode, struct drm_mm_node, fl_entry);
+
+	if (entry->ml_entry.next != &mm->ml_entry ||
+	    entry->fl_entry.next != &mm->fl_entry) {
+		DRM_ERROR("Memory manager not clean. Delaying takedown\n");
+		return;
+	}
+
+	list_del(&entry->fl_entry);
+	list_del(&entry->ml_entry);
+
+	drm_free(entry, sizeof(*entry), DRM_MEM_MM);
+}
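+
+/**
+ * \par Example
+ * Illustrative sketch only: typical lifecycle of a manager.  The units of
+ * \c start and \c size are up to the caller (bytes here); the manager only
+ * does interval arithmetic on them.
+ * \code
+ *	struct drm_mm mm;
+ *	struct drm_mm_node *node;
+ *
+ *	drm_mm_init(&mm, 0, 16 * 1024 * 1024);		// manage a 16 MiB range
+ *
+ *	node = drm_mm_search_free(&mm, 4096, PAGE_SIZE, 0);
+ *	if (node)
+ *		node = drm_mm_get_block(node, 4096, PAGE_SIZE);
+ *	// node->start / node->size now describe the allocation
+ *
+ *	if (node)
+ *		drm_mm_put_block(node);
+ *	drm_mm_takedown(&mm);
+ * \endcode
+ */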
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
new file mode 100644
index 000000000000..b55d5bc6ea61
--- /dev/null
+++ b/drivers/gpu/drm/drm_pci.c
@@ -0,0 +1,183 @@
+/* drm_pci.h -- PCI DMA memory management wrappers for DRM -*- linux-c -*- */
+/**
+ * \file drm_pci.c
+ * \brief Functions and ioctls to manage PCI memory
+ *
+ * \warning These interfaces aren't stable yet.
+ *
+ * \todo Implement the remaining ioctls for the PCI pools.
+ * \todo The wrappers here are so thin that they would be better off inlined.
+ *
+ * \author José Fonseca <jrfonseca@tungstengraphics.com>
+ * \author Leif Delgass <ldelgass@retinalburn.net>
+ */
+
+/*
+ * Copyright 2003 José Fonseca.
+ * Copyright 2003 Leif Delgass.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+ * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include "drmP.h"
+
+/**********************************************************************/
+/** \name PCI memory */
+/*@{*/
+
+/**
+ * \brief Allocate a PCI consistent memory block, for DMA.
+ */
+drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align,
+				dma_addr_t maxaddr)
+{
+	drm_dma_handle_t *dmah;
+#if 1
+	unsigned long addr;
+	size_t sz;
+#endif
+#ifdef DRM_DEBUG_MEMORY
+	int area = DRM_MEM_DMA;
+
+	spin_lock(&drm_mem_lock);
+	if ((drm_ram_used >> PAGE_SHIFT)
+	    > (DRM_RAM_PERCENT * drm_ram_available) / 100) {
+		spin_unlock(&drm_mem_lock);
+		return 0;
+	}
+	spin_unlock(&drm_mem_lock);
+#endif
+
+	/* pci_alloc_consistent only guarantees alignment to the smallest
+	 * PAGE_SIZE order which is greater than or equal to the requested size.
+	 * Return NULL here for now to make sure nobody tries for larger alignment
+	 * Return NULL here for now to make sure nobody tries for larger alignment.
+	if (align > size)
+		return NULL;
+
+	if (pci_set_dma_mask(dev->pdev, maxaddr) != 0) {
+		DRM_ERROR("Setting pci dma mask failed\n");
+		return NULL;
+	}
+
+	dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
+	if (!dmah)
+		return NULL;
+
+	dmah->size = size;
+	dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, &dmah->busaddr, GFP_KERNEL | __GFP_COMP);
+
+#ifdef DRM_DEBUG_MEMORY
+	if (dmah->vaddr == NULL) {
+		spin_lock(&drm_mem_lock);
+		++drm_mem_stats[area].fail_count;
+		spin_unlock(&drm_mem_lock);
+		kfree(dmah);
+		return NULL;
+	}
+
+	spin_lock(&drm_mem_lock);
+	++drm_mem_stats[area].succeed_count;
+	drm_mem_stats[area].bytes_allocated += size;
+	drm_ram_used += size;
+	spin_unlock(&drm_mem_lock);
+#else
+	if (dmah->vaddr == NULL) {
+		kfree(dmah);
+		return NULL;
+	}
+#endif
+
+	memset(dmah->vaddr, 0, size);
+
+	/* XXX - Is virt_to_page() legal for consistent mem? */
+	/* Reserve */
+	for (addr = (unsigned long)dmah->vaddr, sz = size;
+	     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
+		SetPageReserved(virt_to_page(addr));
+	}
+
+	return dmah;
+}
+
+EXPORT_SYMBOL(drm_pci_alloc);
+
+/**
+ * \brief Free a PCI consistent memory block without freeing its descriptor.
+ *
+ * This function is for internal use in the Linux-specific DRM core code.
+ */
+void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
+{
+#if 1
+	unsigned long addr;
+	size_t sz;
+#endif
+#ifdef DRM_DEBUG_MEMORY
+	int area = DRM_MEM_DMA;
+	int alloc_count;
+	int free_count;
+#endif
+
+	if (!dmah->vaddr) {
+#ifdef DRM_DEBUG_MEMORY
+		DRM_MEM_ERROR(area, "Attempt to free address 0\n");
+#endif
+	} else {
+		/* XXX - Is virt_to_page() legal for consistent mem? */
+		/* Unreserve */
+		for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
+		     sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
+			ClearPageReserved(virt_to_page(addr));
+		}
+		dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
+				  dmah->busaddr);
+	}
+
+#ifdef DRM_DEBUG_MEMORY
+	spin_lock(&drm_mem_lock);
+	free_count = ++drm_mem_stats[area].free_count;
+	alloc_count = drm_mem_stats[area].succeed_count;
+	drm_mem_stats[area].bytes_freed += dmah->size;
+	drm_ram_used -= dmah->size;
+	spin_unlock(&drm_mem_lock);
+	if (free_count > alloc_count) {
+		DRM_MEM_ERROR(area,
+			      "Excess frees: %d frees, %d allocs\n",
+			      free_count, alloc_count);
+	}
+#endif
+
+}
+
+/**
+ * \brief Free a PCI consistent memory block
+ */
+void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
+{
+	__drm_pci_free(dev, dmah);
+	kfree(dmah);
+}
+
+EXPORT_SYMBOL(drm_pci_free);
+
+/*@}*/
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
new file mode 100644
index 000000000000..93b1e0475c93
--- /dev/null
+++ b/drivers/gpu/drm/drm_proc.c
@@ -0,0 +1,557 @@
+/**
+ * \file drm_proc.c
+ * /proc support for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ *
+ * \par Acknowledgements:
+ *    Matthew J Sottek <matthew.j.sottek@intel.com> sent in a patch to fix
+ *    the problem with the proc files not outputting all their information.
+ */
+
+/*
+ * Created: Mon Jan 11 09:48:47 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+static int drm_name_info(char *buf, char **start, off_t offset,
+			 int request, int *eof, void *data);
+static int drm_vm_info(char *buf, char **start, off_t offset,
+		       int request, int *eof, void *data);
+static int drm_clients_info(char *buf, char **start, off_t offset,
+			    int request, int *eof, void *data);
+static int drm_queues_info(char *buf, char **start, off_t offset,
+			   int request, int *eof, void *data);
+static int drm_bufs_info(char *buf, char **start, off_t offset,
+			 int request, int *eof, void *data);
+#if DRM_DEBUG_CODE
+static int drm_vma_info(char *buf, char **start, off_t offset,
+			int request, int *eof, void *data);
+#endif
+
+/**
+ * Proc file list.
+ */
+static struct drm_proc_list {
+	const char *name;	/**< file name */
+	int (*f) (char *, char **, off_t, int, int *, void *);		/**< proc callback*/
+} drm_proc_list[] = {
+	{"name", drm_name_info},
+	{"mem", drm_mem_info},
+	{"vm", drm_vm_info},
+	{"clients", drm_clients_info},
+	{"queues", drm_queues_info},
+	{"bufs", drm_bufs_info},
+#if DRM_DEBUG_CODE
+	{"vma", drm_vma_info},
+#endif
+};
+
+#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
+
+/**
+ * Initialize the DRI proc filesystem for a device.
+ *
+ * \param minor DRM minor to attach the proc entries to.
+ * \param minor_id device minor number.
+ * \param root DRI proc dir entry ("/proc/dri").
+ * \return zero on success, or -1 on failure.
+ *
+ * Create the device proc root entry "/proc/dri/%minor%/" and each entry in
+ * drm_proc_list as "/proc/dri/%minor%/%name%".
+ */
+int drm_proc_init(struct drm_minor *minor, int minor_id,
+		  struct proc_dir_entry *root)
+{
+	struct proc_dir_entry *ent;
+	int i, j;
+	char name[64];
+
+	sprintf(name, "%d", minor_id);
+	minor->dev_root = proc_mkdir(name, root);
+	if (!minor->dev_root) {
+		DRM_ERROR("Cannot create /proc/dri/%s\n", name);
+		return -1;
+	}
+
+	for (i = 0; i < DRM_PROC_ENTRIES; i++) {
+		ent = create_proc_entry(drm_proc_list[i].name,
+					S_IFREG | S_IRUGO, minor->dev_root);
+		if (!ent) {
+			DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
+				  name, drm_proc_list[i].name);
+			for (j = 0; j < i; j++)
+				remove_proc_entry(drm_proc_list[j].name,
+						  minor->dev_root);
+			remove_proc_entry(name, root);
+			minor->dev_root = NULL;
+			return -1;
+		}
+		ent->read_proc = drm_proc_list[i].f;
+		ent->data = minor;
+	}
+
+	return 0;
+}
+
+/**
+ * Cleanup the proc filesystem resources.
+ *
+ * \param minor DRM minor whose proc entries are removed.
+ * \param root DRI proc dir entry ("/proc/dri").
+ * \return always zero.
+ *
+ * Remove all proc entries created by proc_init().
+ */
+int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
+{
+	int i;
+	char name[64];
+
+	if (!root || !minor->dev_root)
+		return 0;
+
+	for (i = 0; i < DRM_PROC_ENTRIES; i++)
+		remove_proc_entry(drm_proc_list[i].name, minor->dev_root);
+	sprintf(name, "%d", minor->index);
+	remove_proc_entry(name, root);
+
+	return 0;
+}
+
+/**
+ * Called when "/proc/dri/.../name" is read.
+ *
+ * \param buf output buffer.
+ * \param start start of output data.
+ * \param offset requested start offset.
+ * \param request requested number of bytes.
+ * \param eof whether there is no more data to return.
+ * \param data private data.
+ * \return number of written bytes.
+ *
+ * Prints the device name together with the bus id if available.
+ */
+static int drm_name_info(char *buf, char **start, off_t offset, int request,
+			 int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	int len = 0;
+
+	if (offset > DRM_PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	*start = &buf[offset];
+	*eof = 0;
+
+	if (dev->unique) {
+		DRM_PROC_PRINT("%s %s %s\n",
+			       dev->driver->pci_driver.name,
+			       pci_name(dev->pdev), dev->unique);
+	} else {
+		DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name,
+			       pci_name(dev->pdev));
+	}
+
+	if (len > request + offset)
+		return request;
+	*eof = 1;
+	return len - offset;
+}
+
+/**
+ * Called when "/proc/dri/.../vm" is read.
+ *
+ * \param buf output buffer.
+ * \param start start of output data.
+ * \param offset requested start offset.
+ * \param request requested number of bytes.
+ * \param eof whether there is no more data to return.
+ * \param data private data.
+ * \return number of written bytes.
+ *
+ * Prints information about all mappings in drm_device::maplist.
+ */
+static int drm__vm_info(char *buf, char **start, off_t offset, int request,
+			int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	int len = 0;
+	struct drm_map *map;
+	struct drm_map_list *r_list;
+
+	/* Hardcoded from _DRM_FRAME_BUFFER, _DRM_REGISTERS, _DRM_SHM,
+	   _DRM_AGP, _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
+	const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
+	const char *type;
+	int i;
+
+	if (offset > DRM_PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	*start = &buf[offset];
+	*eof = 0;
+
+	DRM_PROC_PRINT("slot	 offset	      size type flags	 "
+		       "address mtrr\n\n");
+	i = 0;
+	list_for_each_entry(r_list, &dev->maplist, head) {
+		map = r_list->map;
+		if (!map)
+			continue;
+		if (map->type < 0 || map->type > 5)
+			type = "??";
+		else
+			type = types[map->type];
+		DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s  0x%02x 0x%08lx ",
+			       i,
+			       map->offset,
+			       map->size, type, map->flags,
+			       (unsigned long) r_list->user_token);
+		if (map->mtrr < 0) {
+			DRM_PROC_PRINT("none\n");
+		} else {
+			DRM_PROC_PRINT("%4d\n", map->mtrr);
+		}
+		i++;
+	}
+
+	if (len > request + offset)
+		return request;
+	*eof = 1;
+	return len - offset;
+}
+
+/**
+ * Simply calls _vm_info() while holding the drm_device::struct_mutex lock.
+ */
+static int drm_vm_info(char *buf, char **start, off_t offset, int request,
+		       int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = drm__vm_info(buf, start, offset, request, eof, data);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+/**
+ * Called when "/proc/dri/.../queues" is read.
+ *
+ * \param buf output buffer.
+ * \param start start of output data.
+ * \param offset requested start offset.
+ * \param request requested number of bytes.
+ * \param eof whether there is no more data to return.
+ * \param data private data.
+ * \return number of written bytes.
+ */
+static int drm__queues_info(char *buf, char **start, off_t offset,
+			    int request, int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	int len = 0;
+	int i;
+	struct drm_queue *q;
+
+	if (offset > DRM_PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	*start = &buf[offset];
+	*eof = 0;
+
+	DRM_PROC_PRINT("  ctx/flags   use   fin"
+		       "   blk/rw/rwf  wait    flushed	   queued"
+		       "      locks\n\n");
+	for (i = 0; i < dev->queue_count; i++) {
+		q = dev->queuelist[i];
+		atomic_inc(&q->use_count);
+		DRM_PROC_PRINT_RET(atomic_dec(&q->use_count),
+				   "%5d/0x%03x %5d %5d"
+				   " %5d/%c%c/%c%c%c %5Zd\n",
+				   i,
+				   q->flags,
+				   atomic_read(&q->use_count),
+				   atomic_read(&q->finalization),
+				   atomic_read(&q->block_count),
+				   atomic_read(&q->block_read) ? 'r' : '-',
+				   atomic_read(&q->block_write) ? 'w' : '-',
+				   waitqueue_active(&q->read_queue) ? 'r' : '-',
+				   waitqueue_active(&q->
+						    write_queue) ? 'w' : '-',
+				   waitqueue_active(&q->
+						    flush_queue) ? 'f' : '-',
+				   DRM_BUFCOUNT(&q->waitlist));
+		atomic_dec(&q->use_count);
+	}
+
+	if (len > request + offset)
+		return request;
+	*eof = 1;
+	return len - offset;
+}
+
+/**
+ * Simply calls _queues_info() while holding the drm_device::struct_mutex lock.
+ */
+static int drm_queues_info(char *buf, char **start, off_t offset, int request,
+			   int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = drm__queues_info(buf, start, offset, request, eof, data);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+/**
+ * Called when "/proc/dri/.../bufs" is read.
+ *
+ * \param buf output buffer.
+ * \param start start of output data.
+ * \param offset requested start offset.
+ * \param request requested number of bytes.
+ * \param eof whether there is no more data to return.
+ * \param data private data.
+ * \return number of written bytes.
+ */
+static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
+			  int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	int len = 0;
+	struct drm_device_dma *dma = dev->dma;
+	int i;
+
+	if (!dma || offset > DRM_PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	*start = &buf[offset];
+	*eof = 0;
+
+	DRM_PROC_PRINT(" o     size count  free	 segs pages    kB\n\n");
+	for (i = 0; i <= DRM_MAX_ORDER; i++) {
+		if (dma->bufs[i].buf_count)
+			DRM_PROC_PRINT("%2d %8d %5d %5d %5d %5d %5ld\n",
+				       i,
+				       dma->bufs[i].buf_size,
+				       dma->bufs[i].buf_count,
+				       atomic_read(&dma->bufs[i]
+						   .freelist.count),
+				       dma->bufs[i].seg_count,
+				       dma->bufs[i].seg_count
+				       * (1 << dma->bufs[i].page_order),
+				       (dma->bufs[i].seg_count
+					* (1 << dma->bufs[i].page_order))
+				       * PAGE_SIZE / 1024);
+	}
+	DRM_PROC_PRINT("\n");
+	for (i = 0; i < dma->buf_count; i++) {
+		if (i && !(i % 32))
+			DRM_PROC_PRINT("\n");
+		DRM_PROC_PRINT(" %d", dma->buflist[i]->list);
+	}
+	DRM_PROC_PRINT("\n");
+
+	if (len > request + offset)
+		return request;
+	*eof = 1;
+	return len - offset;
+}
+
+/**
+ * Simply calls _bufs_info() while holding the drm_device::struct_mutex lock.
+ */
+static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
+			 int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = drm__bufs_info(buf, start, offset, request, eof, data);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+/**
+ * Called when "/proc/dri/.../clients" is read.
+ *
+ * \param buf output buffer.
+ * \param start start of output data.
+ * \param offset requested start offset.
+ * \param request requested number of bytes.
+ * \param eof whether there is no more data to return.
+ * \param data private data.
+ * \return number of written bytes.
+ */
+static int drm__clients_info(char *buf, char **start, off_t offset,
+			     int request, int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	int len = 0;
+	struct drm_file *priv;
+
+	if (offset > DRM_PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	*start = &buf[offset];
+	*eof = 0;
+
+	DRM_PROC_PRINT("a dev	pid    uid	magic	  ioctls\n\n");
+	list_for_each_entry(priv, &dev->filelist, lhead) {
+		DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n",
+			       priv->authenticated ? 'y' : 'n',
+			       priv->minor->index,
+			       priv->pid,
+			       priv->uid, priv->magic, priv->ioctl_count);
+	}
+
+	if (len > request + offset)
+		return request;
+	*eof = 1;
+	return len - offset;
+}
+
+/**
+ * Simply calls _clients_info() while holding the drm_device::struct_mutex lock.
+ */
+static int drm_clients_info(char *buf, char **start, off_t offset,
+			    int request, int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = drm__clients_info(buf, start, offset, request, eof, data);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+#if DRM_DEBUG_CODE
+
+static int drm__vma_info(char *buf, char **start, off_t offset, int request,
+			 int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	int len = 0;
+	struct drm_vma_entry *pt;
+	struct vm_area_struct *vma;
+#if defined(__i386__)
+	unsigned int pgprot;
+#endif
+
+	if (offset > DRM_PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	*start = &buf[offset];
+	*eof = 0;
+
+	DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
+		       atomic_read(&dev->vma_count),
+		       high_memory, virt_to_phys(high_memory));
+	list_for_each_entry(pt, &dev->vmalist, head) {
+		if (!(vma = pt->vma))
+			continue;
+		DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
+			       pt->pid,
+			       vma->vm_start,
+			       vma->vm_end,
+			       vma->vm_flags & VM_READ ? 'r' : '-',
+			       vma->vm_flags & VM_WRITE ? 'w' : '-',
+			       vma->vm_flags & VM_EXEC ? 'x' : '-',
+			       vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
+			       vma->vm_flags & VM_LOCKED ? 'l' : '-',
+			       vma->vm_flags & VM_IO ? 'i' : '-',
+			       vma->vm_pgoff);
+
+#if defined(__i386__)
+		pgprot = pgprot_val(vma->vm_page_prot);
+		DRM_PROC_PRINT(" %c%c%c%c%c%c%c%c%c",
+			       pgprot & _PAGE_PRESENT ? 'p' : '-',
+			       pgprot & _PAGE_RW ? 'w' : 'r',
+			       pgprot & _PAGE_USER ? 'u' : 's',
+			       pgprot & _PAGE_PWT ? 't' : 'b',
+			       pgprot & _PAGE_PCD ? 'u' : 'c',
+			       pgprot & _PAGE_ACCESSED ? 'a' : '-',
+			       pgprot & _PAGE_DIRTY ? 'd' : '-',
+			       pgprot & _PAGE_PSE ? 'm' : 'k',
+			       pgprot & _PAGE_GLOBAL ? 'g' : 'l');
+#endif
+		DRM_PROC_PRINT("\n");
+	}
+
+	if (len > request + offset)
+		return request;
+	*eof = 1;
+	return len - offset;
+}
+
+static int drm_vma_info(char *buf, char **start, off_t offset, int request,
+			int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = drm__vma_info(buf, start, offset, request, eof, data);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+#endif
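
Each handler above follows the same legacy read_proc contract: bail out once
offset passes DRM_PROC_LIMIT, point *start back into the caller's buffer,
accumulate output through DRM_PROC_PRINT, and clamp the final length against
request and offset. A sketch of one more entry written under that contract;
drm_foo_info and the fields it prints are hypothetical, and DRM_PROC_PRINT is
assumed to append to buf and advance len as in drmP.h:

	/* Hedged sketch of an extra /proc/dri/<minor>/ entry following the
	 * pattern used by drm_name_info() above. */
	static int drm_foo_info(char *buf, char **start, off_t offset,
				int request, int *eof, void *data)
	{
		struct drm_minor *minor = (struct drm_minor *) data;
		struct drm_device *dev = minor->dev;
		int len = 0;

		if (offset > DRM_PROC_LIMIT) {	/* nothing past the window */
			*eof = 1;
			return 0;
		}

		*start = &buf[offset];
		*eof = 0;

		DRM_PROC_PRINT("pci id: %04x:%04x\n",
			       dev->pci_vendor, dev->pci_device);

		if (len > request + offset)	/* more data than was asked for */
			return request;
		*eof = 1;
		return len - offset;
	}

Hooking it up would only require an extra {"foo", drm_foo_info} row in the
drm_proc_list[] table above.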
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
new file mode 100644
index 000000000000..b2b0f3d41714
--- /dev/null
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -0,0 +1,227 @@
+/**
+ * \file drm_scatter.c
+ * IOCTLs to manage scatter/gather memory
+ *
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/vmalloc.h>
+#include "drmP.h"
+
+#define DEBUG_SCATTER 0
+
+static inline void *drm_vmalloc_dma(unsigned long size)
+{
+#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
+	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
+#else
+	return vmalloc_32(size);
+#endif
+}
+
+void drm_sg_cleanup(struct drm_sg_mem * entry)
+{
+	struct page *page;
+	int i;
+
+	for (i = 0; i < entry->pages; i++) {
+		page = entry->pagelist[i];
+		if (page)
+			ClearPageReserved(page);
+	}
+
+	vfree(entry->virtual);
+
+	drm_free(entry->busaddr,
+		 entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES);
+	drm_free(entry->pagelist,
+		 entry->pages * sizeof(*entry->pagelist), DRM_MEM_PAGES);
+	drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
+}
+
+#ifdef _LP64
+# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
+#else
+# define ScatterHandle(x) (unsigned int)(x)
+#endif
+
+int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
+{
+	struct drm_sg_mem *entry;
+	unsigned long pages, i, j;
+
+	DRM_DEBUG("\n");
+
+	if (!drm_core_check_feature(dev, DRIVER_SG))
+		return -EINVAL;
+
+	if (dev->sg)
+		return -EINVAL;
+
+	entry = drm_alloc(sizeof(*entry), DRM_MEM_SGLISTS);
+	if (!entry)
+		return -ENOMEM;
+
+	memset(entry, 0, sizeof(*entry));
+	pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
+	DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
+
+	entry->pages = pages;
+	entry->pagelist = drm_alloc(pages * sizeof(*entry->pagelist),
+				    DRM_MEM_PAGES);
+	if (!entry->pagelist) {
+		drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
+		return -ENOMEM;
+	}
+
+	memset(entry->pagelist, 0, pages * sizeof(*entry->pagelist));
+
+	entry->busaddr = drm_alloc(pages * sizeof(*entry->busaddr),
+				   DRM_MEM_PAGES);
+	if (!entry->busaddr) {
+		drm_free(entry->pagelist,
+			 entry->pages * sizeof(*entry->pagelist),
+			 DRM_MEM_PAGES);
+		drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
+		return -ENOMEM;
+	}
+	memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr));
+
+	entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
+	if (!entry->virtual) {
+		drm_free(entry->busaddr,
+			 entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES);
+		drm_free(entry->pagelist,
+			 entry->pages * sizeof(*entry->pagelist),
+			 DRM_MEM_PAGES);
+		drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
+		return -ENOMEM;
+	}
+
+	/* This also forces the mapping of COW pages, so our page list
+	 * will be valid.  Please don't remove it...
+	 */
+	memset(entry->virtual, 0, pages << PAGE_SHIFT);
+
+	entry->handle = ScatterHandle((unsigned long)entry->virtual);
+
+	DRM_DEBUG("handle  = %08lx\n", entry->handle);
+	DRM_DEBUG("virtual = %p\n", entry->virtual);
+
+	for (i = (unsigned long)entry->virtual, j = 0; j < pages;
+	     i += PAGE_SIZE, j++) {
+		entry->pagelist[j] = vmalloc_to_page((void *)i);
+		if (!entry->pagelist[j])
+			goto failed;
+		SetPageReserved(entry->pagelist[j]);
+	}
+
+	request->handle = entry->handle;
+
+	dev->sg = entry;
+
+#if DEBUG_SCATTER
+	/* Verify that each page points to its virtual address, and vice
+	 * versa.
+	 */
+	{
+		int error = 0;
+
+		for (i = 0; i < pages; i++) {
+			unsigned long *tmp;
+
+			tmp = page_address(entry->pagelist[i]);
+			for (j = 0;
+			     j < PAGE_SIZE / sizeof(unsigned long);
+			     j++, tmp++) {
+				*tmp = 0xcafebabe;
+			}
+			tmp = (unsigned long *)((u8 *) entry->virtual +
+						(PAGE_SIZE * i));
+			for (j = 0;
+			     j < PAGE_SIZE / sizeof(unsigned long);
+			     j++, tmp++) {
+				if (*tmp != 0xcafebabe && error == 0) {
+					error = 1;
+					DRM_ERROR("Scatter allocation error, "
+						  "pagelist does not match "
+						  "virtual mapping\n");
+				}
+			}
+			tmp = page_address(entry->pagelist[i]);
+			for (j = 0;
+			     j < PAGE_SIZE / sizeof(unsigned long);
+			     j++, tmp++) {
+				*tmp = 0;
+			}
+		}
+		if (error == 0)
+			DRM_ERROR("Scatter allocation matches pagelist\n");
+	}
+#endif
+
+	return 0;
+
+      failed:
+	drm_sg_cleanup(entry);
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_sg_alloc);
+
+
+int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct drm_scatter_gather *request = data;
+
+	return drm_sg_alloc(dev, request);
+
+}
+
+int drm_sg_free(struct drm_device *dev, void *data,
+		struct drm_file *file_priv)
+{
+	struct drm_scatter_gather *request = data;
+	struct drm_sg_mem *entry;
+
+	if (!drm_core_check_feature(dev, DRIVER_SG))
+		return -EINVAL;
+
+	entry = dev->sg;
+	dev->sg = NULL;
+
+	if (!entry || entry->handle != request->handle)
+		return -EINVAL;
+
+	DRM_DEBUG("virtual  = %p\n", entry->virtual);
+
+	drm_sg_cleanup(entry);
+
+	return 0;
+}
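
drm_sg_alloc() above backs the request with DMA32-capable vmalloc pages,
reserves each one, and publishes a handle folded from the kernel virtual
address by ScatterHandle(). A driver that sets DRIVER_SG mostly just forwards
the ioctl, but it can also call drm_sg_alloc() directly and walk dev->sg
afterwards; a rough sketch, with foo_setup_sg() and its use of the page list
purely illustrative:

	/* Hedged sketch: build a scatter/gather backing store from inside a
	 * driver and walk its page list.  Error handling trimmed. */
	static int foo_setup_sg(struct drm_device *dev, unsigned long bytes)
	{
		struct drm_scatter_gather request;
		int i, ret;

		memset(&request, 0, sizeof(request));
		request.size = bytes;

		ret = drm_sg_alloc(dev, &request);	/* fills request.handle */
		if (ret)
			return ret;	/* e.g. -EINVAL if dev->sg already exists */

		/* dev->sg now points at the drm_sg_mem built above */
		for (i = 0; i < dev->sg->pages; i++) {
			struct page *page = dev->sg->pagelist[i];
			/* per-page hardware setup (GART entries etc.) goes here */
			(void)page;
		}
		return 0;
	}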
diff --git a/drivers/gpu/drm/drm_sman.c b/drivers/gpu/drm/drm_sman.c
new file mode 100644
index 000000000000..926f146390ce
--- /dev/null
+++ b/drivers/gpu/drm/drm_sman.c
@@ -0,0 +1,353 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Simple memory manager interface that keeps track of allocated regions on a
+ * per-"owner" basis. All regions associated with an "owner" can be released
+ * with a single call, typically when the "owner" ceases to exist. The owner
+ * is any "unsigned long" identifier, typically a pointer to a file private
+ * struct or a context identifier.
+ *
+ * Authors:
+ * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drm_sman.h"
+
+struct drm_owner_item {
+	struct drm_hash_item owner_hash;
+	struct list_head sman_list;
+	struct list_head mem_blocks;
+};
+
+void drm_sman_takedown(struct drm_sman * sman)
+{
+	drm_ht_remove(&sman->user_hash_tab);
+	drm_ht_remove(&sman->owner_hash_tab);
+	if (sman->mm)
+		drm_free(sman->mm, sman->num_managers * sizeof(*sman->mm),
+			 DRM_MEM_MM);
+}
+
+EXPORT_SYMBOL(drm_sman_takedown);
+
+int
+drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
+	      unsigned int user_order, unsigned int owner_order)
+{
+	int ret = 0;
+
+	sman->mm = (struct drm_sman_mm *) drm_calloc(num_managers, sizeof(*sman->mm),
+						DRM_MEM_MM);
+	if (!sman->mm) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	sman->num_managers = num_managers;
+	INIT_LIST_HEAD(&sman->owner_items);
+	ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
+	if (ret)
+		goto out1;
+	ret = drm_ht_create(&sman->user_hash_tab, user_order);
+	if (!ret)
+		goto out;
+
+	drm_ht_remove(&sman->owner_hash_tab);
+out1:
+	drm_free(sman->mm, num_managers * sizeof(*sman->mm), DRM_MEM_MM);
+out:
+	return ret;
+}
+
+EXPORT_SYMBOL(drm_sman_init);
+
+static void *drm_sman_mm_allocate(void *private, unsigned long size,
+				  unsigned alignment)
+{
+	struct drm_mm *mm = (struct drm_mm *) private;
+	struct drm_mm_node *tmp;
+
+	tmp = drm_mm_search_free(mm, size, alignment, 1);
+	if (!tmp) {
+		return NULL;
+	}
+	tmp = drm_mm_get_block(tmp, size, alignment);
+	return tmp;
+}
+
+static void drm_sman_mm_free(void *private, void *ref)
+{
+	struct drm_mm_node *node = (struct drm_mm_node *) ref;
+
+	drm_mm_put_block(node);
+}
+
+static void drm_sman_mm_destroy(void *private)
+{
+	struct drm_mm *mm = (struct drm_mm *) private;
+	drm_mm_takedown(mm);
+	drm_free(mm, sizeof(*mm), DRM_MEM_MM);
+}
+
+static unsigned long drm_sman_mm_offset(void *private, void *ref)
+{
+	struct drm_mm_node *node = (struct drm_mm_node *) ref;
+	return node->start;
+}
+
+int
+drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
+		   unsigned long start, unsigned long size)
+{
+	struct drm_sman_mm *sman_mm;
+	struct drm_mm *mm;
+	int ret;
+
+	BUG_ON(manager >= sman->num_managers);
+
+	sman_mm = &sman->mm[manager];
+	mm = drm_calloc(1, sizeof(*mm), DRM_MEM_MM);
+	if (!mm) {
+		return -ENOMEM;
+	}
+	sman_mm->private = mm;
+	ret = drm_mm_init(mm, start, size);
+
+	if (ret) {
+		drm_free(mm, sizeof(*mm), DRM_MEM_MM);
+		return ret;
+	}
+
+	sman_mm->allocate = drm_sman_mm_allocate;
+	sman_mm->free = drm_sman_mm_free;
+	sman_mm->destroy = drm_sman_mm_destroy;
+	sman_mm->offset = drm_sman_mm_offset;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(drm_sman_set_range);
+
+int
+drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
+		     struct drm_sman_mm * allocator)
+{
+	BUG_ON(manager >= sman->num_managers);
+	sman->mm[manager] = *allocator;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_sman_set_manager);
+
+static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
+						 unsigned long owner)
+{
+	int ret;
+	struct drm_hash_item *owner_hash_item;
+	struct drm_owner_item *owner_item;
+
+	ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
+	if (!ret) {
+		return drm_hash_entry(owner_hash_item, struct drm_owner_item,
+				      owner_hash);
+	}
+
+	owner_item = drm_calloc(1, sizeof(*owner_item), DRM_MEM_MM);
+	if (!owner_item)
+		goto out;
+
+	INIT_LIST_HEAD(&owner_item->mem_blocks);
+	owner_item->owner_hash.key = owner;
+	if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash))
+		goto out1;
+
+	list_add_tail(&owner_item->sman_list, &sman->owner_items);
+	return owner_item;
+
+out1:
+	drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM);
+out:
+	return NULL;
+}
+
+struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager,
+				    unsigned long size, unsigned alignment,
+				    unsigned long owner)
+{
+	void *tmp;
+	struct drm_sman_mm *sman_mm;
+	struct drm_owner_item *owner_item;
+	struct drm_memblock_item *memblock;
+
+	BUG_ON(manager >= sman->num_managers);
+
+	sman_mm = &sman->mm[manager];
+	tmp = sman_mm->allocate(sman_mm->private, size, alignment);
+
+	if (!tmp) {
+		return NULL;
+	}
+
+	memblock = drm_calloc(1, sizeof(*memblock), DRM_MEM_MM);
+
+	if (!memblock)
+		goto out;
+
+	memblock->mm_info = tmp;
+	memblock->mm = sman_mm;
+	memblock->sman = sman;
+
+	if (drm_ht_just_insert_please
+	    (&sman->user_hash_tab, &memblock->user_hash,
+	     (unsigned long)memblock, 32, 0, 0))
+		goto out1;
+
+	owner_item = drm_sman_get_owner_item(sman, owner);
+	if (!owner_item)
+		goto out2;
+
+	list_add_tail(&memblock->owner_list, &owner_item->mem_blocks);
+
+	return memblock;
+
+out2:
+	drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
+out1:
+	drm_free(memblock, sizeof(*memblock), DRM_MEM_MM);
+out:
+	sman_mm->free(sman_mm->private, tmp);
+
+	return NULL;
+}
+
+EXPORT_SYMBOL(drm_sman_alloc);
+
+static void drm_sman_free(struct drm_memblock_item *item)
+{
+	struct drm_sman *sman = item->sman;
+
+	list_del(&item->owner_list);
+	drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
+	item->mm->free(item->mm->private, item->mm_info);
+	drm_free(item, sizeof(*item), DRM_MEM_MM);
+}
+
+int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
+{
+	struct drm_hash_item *hash_item;
+	struct drm_memblock_item *memblock_item;
+
+	if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
+		return -EINVAL;
+
+	memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item,
+				       user_hash);
+	drm_sman_free(memblock_item);
+	return 0;
+}
+
+EXPORT_SYMBOL(drm_sman_free_key);
+
+static void drm_sman_remove_owner(struct drm_sman *sman,
+				  struct drm_owner_item *owner_item)
+{
+	list_del(&owner_item->sman_list);
+	drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
+	drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM);
+}
+
+int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
+{
+
+	struct drm_hash_item *hash_item;
+	struct drm_owner_item *owner_item;
+
+	if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
+		return -1;
+	}
+
+	owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
+	if (owner_item->mem_blocks.next == &owner_item->mem_blocks) {
+		drm_sman_remove_owner(sman, owner_item);
+		return -1;
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(drm_sman_owner_clean);
+
+static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
+				      struct drm_owner_item *owner_item)
+{
+	struct drm_memblock_item *entry, *next;
+
+	list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
+				 owner_list) {
+		drm_sman_free(entry);
+	}
+	drm_sman_remove_owner(sman, owner_item);
+}
+
+void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner)
+{
+
+	struct drm_hash_item *hash_item;
+	struct drm_owner_item *owner_item;
+
+	if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
+
+		return;
+	}
+
+	owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
+	drm_sman_do_owner_cleanup(sman, owner_item);
+}
+
+EXPORT_SYMBOL(drm_sman_owner_cleanup);
+
+void drm_sman_cleanup(struct drm_sman *sman)
+{
+	struct drm_owner_item *entry, *next;
+	unsigned int i;
+	struct drm_sman_mm *sman_mm;
+
+	list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
+		drm_sman_do_owner_cleanup(sman, entry);
+	}
+	if (sman->mm) {
+		for (i = 0; i < sman->num_managers; ++i) {
+			sman_mm = &sman->mm[i];
+			if (sman_mm->private) {
+				sman_mm->destroy(sman_mm->private);
+				sman_mm->private = NULL;
+			}
+		}
+	}
+}
+
+EXPORT_SYMBOL(drm_sman_cleanup);
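
Putting the pieces above together, the intended life cycle is drm_sman_init()
once, drm_sman_set_range() (or drm_sman_set_manager()) per memory pool,
drm_sman_alloc() keyed by an owner cookie such as the file_priv pointer,
drm_sman_owner_cleanup() when that owner goes away, and
drm_sman_cleanup()/drm_sman_takedown() at unload. A condensed, hedged sketch;
foo_sman_example(), the 64 KiB allocation and the hash orders are illustrative:

	/* Hedged sketch of the drm_sman life cycle for a single memory pool.
	 * Locking omitted for brevity. */
	static int foo_sman_example(struct drm_sman *sman, unsigned long pool_size,
				    struct drm_file *file_priv)
	{
		struct drm_memblock_item *block;
		int ret;

		/* one manager, 2^12 user hash buckets, 2^8 owner hash buckets */
		ret = drm_sman_init(sman, 1, 12, 8);
		if (ret)
			return ret;

		/* manager 0 hands out offsets in [0, pool_size) */
		ret = drm_sman_set_range(sman, 0, 0, pool_size);
		if (ret)
			goto out;

		/* allocate 64 KiB for this client; the owner is the file pointer */
		block = drm_sman_alloc(sman, 0, 64 * 1024, 0,
				       (unsigned long)file_priv);
		if (!block)
			ret = -ENOMEM;

		/* on file close, everything this owner still holds goes away */
		drm_sman_owner_cleanup(sman, (unsigned long)file_priv);
	out:
		drm_sman_cleanup(sman);
		drm_sman_takedown(sman);
		return ret;
	}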
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
new file mode 100644
index 000000000000..c2f584f3b46c
--- /dev/null
+++ b/drivers/gpu/drm/drm_stub.c
@@ -0,0 +1,331 @@
+/**
+ * \file drm_stub.h
+ * Stub support
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ */
+
+/*
+ * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
+ *
+ * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include "drmP.h"
+#include "drm_core.h"
+
+unsigned int drm_debug = 0;	/* 1 to enable debug output */
+EXPORT_SYMBOL(drm_debug);
+
+MODULE_AUTHOR(CORE_AUTHOR);
+MODULE_DESCRIPTION(CORE_DESC);
+MODULE_LICENSE("GPL and additional rights");
+MODULE_PARM_DESC(debug, "Enable debug output");
+
+module_param_named(debug, drm_debug, int, 0600);
+
+struct idr drm_minors_idr;
+
+struct class *drm_class;
+struct proc_dir_entry *drm_proc_root;
+
+static int drm_minor_get_id(struct drm_device *dev, int type)
+{
+	int new_id;
+	int ret;
+	int base = 0, limit = 63;
+
+again:
+	if (idr_pre_get(&drm_minors_idr, GFP_KERNEL) == 0) {
+		DRM_ERROR("Out of memory expanding drawable idr\n");
+		return -ENOMEM;
+	}
+	mutex_lock(&dev->struct_mutex);
+	ret = idr_get_new_above(&drm_minors_idr, NULL,
+				base, &new_id);
+	mutex_unlock(&dev->struct_mutex);
+	if (ret == -EAGAIN) {
+		goto again;
+	} else if (ret) {
+		return ret;
+	}
+
+	if (new_id >= limit) {
+		idr_remove(&drm_minors_idr, new_id);
+		return -EINVAL;
+	}
+	return new_id;
+}
+
+static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
+			   const struct pci_device_id *ent,
+			   struct drm_driver *driver)
+{
+	int retcode;
+
+	INIT_LIST_HEAD(&dev->filelist);
+	INIT_LIST_HEAD(&dev->ctxlist);
+	INIT_LIST_HEAD(&dev->vmalist);
+	INIT_LIST_HEAD(&dev->maplist);
+
+	spin_lock_init(&dev->count_lock);
+	spin_lock_init(&dev->drw_lock);
+	spin_lock_init(&dev->tasklet_lock);
+	spin_lock_init(&dev->lock.spinlock);
+	init_timer(&dev->timer);
+	mutex_init(&dev->struct_mutex);
+	mutex_init(&dev->ctxlist_mutex);
+
+	idr_init(&dev->drw_idr);
+
+	dev->pdev = pdev;
+	dev->pci_device = pdev->device;
+	dev->pci_vendor = pdev->vendor;
+
+#ifdef __alpha__
+	dev->hose = pdev->sysdata;
+#endif
+	dev->irq = pdev->irq;
+
+	if (drm_ht_create(&dev->map_hash, 12)) {
+		return -ENOMEM;
+	}
+
+	/* the DRM has 6 basic counters */
+	dev->counters = 6;
+	dev->types[0] = _DRM_STAT_LOCK;
+	dev->types[1] = _DRM_STAT_OPENS;
+	dev->types[2] = _DRM_STAT_CLOSES;
+	dev->types[3] = _DRM_STAT_IOCTLS;
+	dev->types[4] = _DRM_STAT_LOCKS;
+	dev->types[5] = _DRM_STAT_UNLOCKS;
+
+	dev->driver = driver;
+
+	if (drm_core_has_AGP(dev)) {
+		if (drm_device_is_agp(dev))
+			dev->agp = drm_agp_init(dev);
+		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP)
+		    && (dev->agp == NULL)) {
+			DRM_ERROR("Cannot initialize the agpgart module.\n");
+			retcode = -EINVAL;
+			goto error_out_unreg;
+		}
+		if (drm_core_has_MTRR(dev)) {
+			if (dev->agp)
+				dev->agp->agp_mtrr =
+				    mtrr_add(dev->agp->agp_info.aper_base,
+					     dev->agp->agp_info.aper_size *
+					     1024 * 1024, MTRR_TYPE_WRCOMB, 1);
+		}
+	}
+
+	if (dev->driver->load)
+		if ((retcode = dev->driver->load(dev, ent->driver_data)))
+			goto error_out_unreg;
+
+	retcode = drm_ctxbitmap_init(dev);
+	if (retcode) {
+		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+		goto error_out_unreg;
+	}
+
+	return 0;
+
+      error_out_unreg:
+	drm_lastclose(dev);
+	return retcode;
+}
+
+
+/**
+ * Get a secondary minor number.
+ *
+ * \param dev device data structure
+ * \param minor structure to hold the assigned minor
+ * \return zero on success or a negative number on failure.
+ *
+ * Search for an empty entry, initialize it to the given parameters, and
+ * create the proc entries via drm_proc_init(). This routine assigns
+ * minor numbers to secondary heads of multi-headed cards.
+ */
+static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
+{
+	struct drm_minor *new_minor;
+	int ret;
+	int minor_id;
+
+	DRM_DEBUG("\n");
+
+	minor_id = drm_minor_get_id(dev, type);
+	if (minor_id < 0)
+		return minor_id;
+
+	new_minor = kzalloc(sizeof(struct drm_minor), GFP_KERNEL);
+	if (!new_minor) {
+		ret = -ENOMEM;
+		goto err_idr;
+	}
+
+	new_minor->type = type;
+	new_minor->device = MKDEV(DRM_MAJOR, minor_id);
+	new_minor->dev = dev;
+	new_minor->index = minor_id;
+
+	idr_replace(&drm_minors_idr, new_minor, minor_id);
+
+	if (type == DRM_MINOR_LEGACY) {
+		ret = drm_proc_init(new_minor, minor_id, drm_proc_root);
+		if (ret) {
+			DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
+			goto err_mem;
+		}
+	} else
+		new_minor->dev_root = NULL;
+
+	ret = drm_sysfs_device_add(new_minor);
+	if (ret) {
+		printk(KERN_ERR
+		       "DRM: Error sysfs_device_add.\n");
+		goto err_g2;
+	}
+	*minor = new_minor;
+
+	DRM_DEBUG("new minor assigned %d\n", minor_id);
+	return 0;
+
+
+err_g2:
+	if (new_minor->type == DRM_MINOR_LEGACY)
+		drm_proc_cleanup(new_minor, drm_proc_root);
+err_mem:
+	kfree(new_minor);
+err_idr:
+	idr_remove(&drm_minors_idr, minor_id);
+	*minor = NULL;
+	return ret;
+}
+
+/**
+ * Register.
+ *
+ * \param pdev - PCI device structure
+ * \param ent entry from the PCI ID table with device type flags
+ * \return zero on success or a negative number on failure.
+ *
+ * Attempt to get inter-module "drm" information. If we are first,
+ * register the character device and inter-module information.
+ * Try to register; if registration fails, back out the previous work.
+ */
+int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
+		struct drm_driver *driver)
+{
+	struct drm_device *dev;
+	int ret;
+
+	DRM_DEBUG("\n");
+
+	dev = drm_calloc(1, sizeof(*dev), DRM_MEM_STUB);
+	if (!dev)
+		return -ENOMEM;
+
+	ret = pci_enable_device(pdev);
+	if (ret)
+		goto err_g1;
+
+	pci_set_master(pdev);
+	if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
+		printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
+		goto err_g2;
+	}
+	if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
+		goto err_g2;
+
+	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+		 driver->name, driver->major, driver->minor, driver->patchlevel,
+		 driver->date, dev->primary->index);
+
+	return 0;
+
+err_g2:
+	pci_disable_device(pdev);
+err_g1:
+	drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
+	return ret;
+}
+
+/**
+ * Put a device minor number.
+ *
+ * \param dev device data structure
+ * \return always zero
+ *
+ * Frees the device's unique identifier and name strings and then releases
+ * the drm_device structure itself.
+ */
+int drm_put_dev(struct drm_device * dev)
+{
+	DRM_DEBUG("release primary %s\n", dev->driver->pci_driver.name);
+
+	if (dev->unique) {
+		drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
+		dev->unique = NULL;
+		dev->unique_len = 0;
+	}
+	if (dev->devname) {
+		drm_free(dev->devname, strlen(dev->devname) + 1,
+			 DRM_MEM_DRIVER);
+		dev->devname = NULL;
+	}
+	drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
+	return 0;
+}
+
+/**
+ * Put a secondary minor number.
+ *
+ * \param sec_minor - structure to be released
+ * \return always zero
+ *
+ * Cleans up the proc resources. Not legal for this to be the
+ * last minor released.
+ *
+ */
+int drm_put_minor(struct drm_minor **minor_p)
+{
+	struct drm_minor *minor = *minor_p;
+	DRM_DEBUG("release secondary minor %d\n", minor->index);
+
+	if (minor->type == DRM_MINOR_LEGACY)
+		drm_proc_cleanup(minor, drm_proc_root);
+	drm_sysfs_device_remove(minor);
+
+	idr_remove(&drm_minors_idr, minor->index);
+
+	kfree(minor);
+	*minor_p = NULL;
+	return 0;
+}
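
drm_get_dev() is what a driver calls from its PCI probe path: it allocates the
drm_device, enables the PCI device, fills in the bookkeeping via
drm_fill_in_dev() and registers the legacy minor. A sketch of the glue, with
foo_driver and foo_pci_probe() as placeholders; the __devinit annotation
follows the conventions of this kernel era:

	/* Hedged sketch: wiring drm_get_dev() into a PCI probe callback.
	 * foo_driver would be a fully populated struct drm_driver. */
	static struct drm_driver foo_driver;

	static int __devinit foo_pci_probe(struct pci_dev *pdev,
					   const struct pci_device_id *ent)
	{
		/* allocates the drm_device, enables the PCI device and
		 * registers the primary (legacy) minor in /proc/dri and sysfs */
		return drm_get_dev(pdev, ent, &foo_driver);
	}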
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
new file mode 100644
index 000000000000..af211a0ef179
--- /dev/null
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -0,0 +1,208 @@
+
+/*
+ * drm_sysfs.c - Modifications to drm_sysfs_class.c to support
+ *               extra sysfs attribute from DRM. Normal drm_sysfs_class
+ *               does not allow adding attributes.
+ *
+ * Copyright (c) 2004 Jon Smirl <jonsmirl@gmail.com>
+ * Copyright (c) 2003-2004 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (c) 2003-2004 IBM Corp.
+ *
+ * This file is released under the GPLv2
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/kdev_t.h>
+#include <linux/err.h>
+
+#include "drm_core.h"
+#include "drmP.h"
+
+#define to_drm_minor(d) container_of(d, struct drm_minor, kdev)
+
+/**
+ * drm_sysfs_suspend - DRM class suspend hook
+ * @dev: Linux device to suspend
+ * @state: power state to enter
+ *
+ * Just figures out what the actual struct drm_device associated with
+ * @dev is and calls its suspend hook, if present.
+ */
+static int drm_sysfs_suspend(struct device *dev, pm_message_t state)
+{
+	struct drm_minor *drm_minor = to_drm_minor(dev);
+	struct drm_device *drm_dev = drm_minor->dev;
+
+	if (drm_dev->driver->suspend)
+		return drm_dev->driver->suspend(drm_dev, state);
+
+	return 0;
+}
+
+/**
+ * drm_sysfs_resume - DRM class resume hook
+ * @dev: Linux device to resume
+ *
+ * Just figures out what the actual struct drm_device associated with
+ * @dev is and calls its resume hook, if present.
+ */
+static int drm_sysfs_resume(struct device *dev)
+{
+	struct drm_minor *drm_minor = to_drm_minor(dev);
+	struct drm_device *drm_dev = drm_minor->dev;
+
+	if (drm_dev->driver->resume)
+		return drm_dev->driver->resume(drm_dev);
+
+	return 0;
+}
+
+/* Display the version of drm_core. This doesn't work right in current design */
+static ssize_t version_show(struct class *dev, char *buf)
+{
+	return sprintf(buf, "%s %d.%d.%d %s\n", CORE_NAME, CORE_MAJOR,
+		       CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
+}
+
+static CLASS_ATTR(version, S_IRUGO, version_show, NULL);
+
+/**
+ * drm_sysfs_create - create a struct drm_sysfs_class structure
+ * @owner: pointer to the module that is to "own" this struct drm_sysfs_class
+ * @name: pointer to a string for the name of this class.
+ *
+ * This is used to create DRM class pointer that can then be used
+ * in calls to drm_sysfs_device_add().
+ *
+ * Note, the pointer created here is to be destroyed when finished by making a
+ * call to drm_sysfs_destroy().
+ */
+struct class *drm_sysfs_create(struct module *owner, char *name)
+{
+	struct class *class;
+	int err;
+
+	class = class_create(owner, name);
+	if (IS_ERR(class)) {
+		err = PTR_ERR(class);
+		goto err_out;
+	}
+
+	class->suspend = drm_sysfs_suspend;
+	class->resume = drm_sysfs_resume;
+
+	err = class_create_file(class, &class_attr_version);
+	if (err)
+		goto err_out_class;
+
+	return class;
+
+err_out_class:
+	class_destroy(class);
+err_out:
+	return ERR_PTR(err);
+}
+
+/**
+ * drm_sysfs_destroy - destroys DRM class
+ *
+ * Destroy the DRM device class.
+ */
+void drm_sysfs_destroy(void)
+{
+	if ((drm_class == NULL) || (IS_ERR(drm_class)))
+		return;
+	class_remove_file(drm_class, &class_attr_version);
+	class_destroy(drm_class);
+}
+
+static ssize_t show_dri(struct device *device, struct device_attribute *attr,
+			char *buf)
+{
+	struct drm_minor *drm_minor = to_drm_minor(device);
+	struct drm_device *drm_dev = drm_minor->dev;
+	if (drm_dev->driver->dri_library_name)
+		return drm_dev->driver->dri_library_name(drm_dev, buf);
+	return snprintf(buf, PAGE_SIZE, "%s\n", drm_dev->driver->pci_driver.name);
+}
+
+static struct device_attribute device_attrs[] = {
+	__ATTR(dri_library_name, S_IRUGO, show_dri, NULL),
+};
+
+/**
+ * drm_sysfs_device_release - do nothing
+ * @dev: Linux device
+ *
+ * Normally, this would free the DRM device associated with @dev, along
+ * with cleaning up any other stuff.  But we do that in the DRM core, so
+ * this function can just return and hope that the core does its job.
+ */
+static void drm_sysfs_device_release(struct device *dev)
+{
+	return;
+}
+
+/**
+ * drm_sysfs_device_add - adds a class device to sysfs for a character driver
+ * @dev: DRM device to be added
+ * @head: DRM head in question
+ *
+ * Add a DRM device to the DRM's device model class.  We use @dev's PCI device
+ * as the parent for the Linux device, and make sure it has a file containing
+ * the driver we're using (for userspace compatibility).
+ */
+int drm_sysfs_device_add(struct drm_minor *minor)
+{
+	int err;
+	int i, j;
+	char *minor_str;
+
+	minor->kdev.parent = &minor->dev->pdev->dev;
+	minor->kdev.class = drm_class;
+	minor->kdev.release = drm_sysfs_device_release;
+	minor->kdev.devt = minor->device;
+	minor_str = "card%d";
+
+	snprintf(minor->kdev.bus_id, BUS_ID_SIZE, minor_str, minor->index);
+
+	err = device_register(&minor->kdev);
+	if (err) {
+		DRM_ERROR("device add failed: %d\n", err);
+		goto err_out;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(device_attrs); i++) {
+		err = device_create_file(&minor->kdev, &device_attrs[i]);
+		if (err)
+			goto err_out_files;
+	}
+
+	return 0;
+
+err_out_files:
+	if (i > 0)
+		for (j = 0; j < i; j++)
+			device_remove_file(&minor->kdev, &device_attrs[j]);
+	device_unregister(&minor->kdev);
+err_out:
+
+	return err;
+}
+
+/**
+ * drm_sysfs_device_remove - remove DRM device
+ * @dev: DRM device to remove
+ *
+ * This call unregisters and cleans up a class device that was created with a
+ * call to drm_sysfs_device_add()
+ */
+void drm_sysfs_device_remove(struct drm_minor *minor)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(device_attrs); i++)
+		device_remove_file(&minor->kdev, &device_attrs[i]);
+	device_unregister(&minor->kdev);
+}
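
The dri_library_name attribute falls back to the PCI driver name unless the
driver supplies its own hook. A driver that wants userspace to load a
differently named DRI client library could provide something like the
following; the foo names are hypothetical and the hook's return type is
assumed to be int here, inferred from the call in show_dri() above:

	/* Hedged sketch of a driver-provided dri_library_name() hook. */
	static int foo_dri_library_name(struct drm_device *dev, char *buf)
	{
		/* tells userspace to load foo_dri.so instead of a library
		 * named after the PCI driver */
		return snprintf(buf, PAGE_SIZE, "foo\n");
	}

The hook would then be plugged into the driver's struct drm_driver
initializer as .dri_library_name = foo_dri_library_name.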
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
new file mode 100644
index 000000000000..c234c6f24a8d
--- /dev/null
+++ b/drivers/gpu/drm/drm_vm.c
@@ -0,0 +1,673 @@
+/**
+ * \file drm_vm.c
+ * Memory mapping for DRM
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#if defined(__ia64__)
+#include <linux/efi.h>
+#endif
+
+static void drm_vm_open(struct vm_area_struct *vma);
+static void drm_vm_close(struct vm_area_struct *vma);
+
+static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
+{
+	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
+
+#if defined(__i386__) || defined(__x86_64__)
+	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
+		pgprot_val(tmp) |= _PAGE_PCD;
+		pgprot_val(tmp) &= ~_PAGE_PWT;
+	}
+#elif defined(__powerpc__)
+	pgprot_val(tmp) |= _PAGE_NO_CACHE;
+	if (map_type == _DRM_REGISTERS)
+		pgprot_val(tmp) |= _PAGE_GUARDED;
+#elif defined(__ia64__)
+	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
+				    vma->vm_start))
+		tmp = pgprot_writecombine(tmp);
+	else
+		tmp = pgprot_noncached(tmp);
+#elif defined(__sparc__)
+	tmp = pgprot_noncached(tmp);
+#endif
+	return tmp;
+}
+
+static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
+{
+	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
+
+#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
+	tmp |= _PAGE_NO_CACHE;
+#endif
+	return tmp;
+}
+
+/**
+ * \c fault method for AGP virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param address access address.
+ * \return pointer to the page structure.
+ *
+ * Find the right map and if it's AGP memory find the real physical page to
+ * map, get the page, increment the use count and return it.
+ */
+#if __OS_HAS_AGP
+static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct drm_file *priv = vma->vm_file->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_map *map = NULL;
+	struct drm_map_list *r_list;
+	struct drm_hash_item *hash;
+
+	/*
+	 * Find the right map
+	 */
+	if (!drm_core_has_AGP(dev))
+		goto vm_fault_error;
+
+	if (!dev->agp || !dev->agp->cant_use_aperture)
+		goto vm_fault_error;
+
+	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
+		goto vm_fault_error;
+
+	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
+	map = r_list->map;
+
+	if (map && map->type == _DRM_AGP) {
+		/*
+		 * Using vm_pgoff as a selector forces us to use this unusual
+		 * addressing scheme.
+		 */
+		unsigned long offset = (unsigned long)vmf->virtual_address -
+								vma->vm_start;
+		unsigned long baddr = map->offset + offset;
+		struct drm_agp_mem *agpmem;
+		struct page *page;
+
+#ifdef __alpha__
+		/*
+		 * Adjust to a bus-relative address
+		 */
+		baddr -= dev->hose->mem_space->start;
+#endif
+
+		/*
+		 * It's AGP memory - find the real physical page to map
+		 */
+		list_for_each_entry(agpmem, &dev->agp->memory, head) {
+			if (agpmem->bound <= baddr &&
+			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
+				break;
+		}
+
+		if (!agpmem)
+			goto vm_fault_error;
+
+		/*
+		 * Get the page, inc the use count, and return it
+		 */
+		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
+		page = virt_to_page(__va(agpmem->memory->memory[offset]));
+		get_page(page);
+		vmf->page = page;
+
+		DRM_DEBUG
+		    ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
+		     baddr, __va(agpmem->memory->memory[offset]), offset,
+		     page_count(page));
+		return 0;
+	}
+vm_fault_error:
+	return VM_FAULT_SIGBUS;	/* Disallow mremap */
+}
+#else				/* __OS_HAS_AGP */
+static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	return VM_FAULT_SIGBUS;
+}
+#endif				/* __OS_HAS_AGP */
+
+/**
+ * \c fault method for shared virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param address access address.
+ * \return pointer to the page structure.
+ *
+ * Get the mapping, find the real physical page to map, get the page, and
+ * return it.
+ */
+static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct drm_map *map = (struct drm_map *) vma->vm_private_data;
+	unsigned long offset;
+	unsigned long i;
+	struct page *page;
+
+	if (!map)
+		return VM_FAULT_SIGBUS;	/* Nothing allocated */
+
+	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
+	i = (unsigned long)map->handle + offset;
+	page = vmalloc_to_page((void *)i);
+	if (!page)
+		return VM_FAULT_SIGBUS;
+	get_page(page);
+	vmf->page = page;
+
+	DRM_DEBUG("shm_fault 0x%lx\n", offset);
+	return 0;
+}
+
+/**
+ * \c close method for shared virtual memory.
+ *
+ * \param vma virtual memory area.
+ *
+ * Deletes map information if we are the last
+ * person to close a mapping and it's not in the global maplist.
+ */
+static void drm_vm_shm_close(struct vm_area_struct *vma)
+{
+	struct drm_file *priv = vma->vm_file->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_vma_entry *pt, *temp;
+	struct drm_map *map;
+	struct drm_map_list *r_list;
+	int found_maps = 0;
+
+	DRM_DEBUG("0x%08lx,0x%08lx\n",
+		  vma->vm_start, vma->vm_end - vma->vm_start);
+	atomic_dec(&dev->vma_count);
+
+	map = vma->vm_private_data;
+
+	mutex_lock(&dev->struct_mutex);
+	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
+		if (pt->vma->vm_private_data == map)
+			found_maps++;
+		if (pt->vma == vma) {
+			list_del(&pt->head);
+			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
+		}
+	}
+
+	/* We were the only map that was found */
+	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
+		/* Check to see if we are in the maplist; if we are not, then
+		 * we delete this mapping's information.
+		 */
+		found_maps = 0;
+		list_for_each_entry(r_list, &dev->maplist, head) {
+			if (r_list->map == map)
+				found_maps++;
+		}
+
+		if (!found_maps) {
+			drm_dma_handle_t dmah;
+
+			switch (map->type) {
+			case _DRM_REGISTERS:
+			case _DRM_FRAME_BUFFER:
+				if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
+					int retcode;
+					retcode = mtrr_del(map->mtrr,
+							   map->offset,
+							   map->size);
+					DRM_DEBUG("mtrr_del = %d\n", retcode);
+				}
+				iounmap(map->handle);
+				break;
+			case _DRM_SHM:
+				vfree(map->handle);
+				break;
+			case _DRM_AGP:
+			case _DRM_SCATTER_GATHER:
+				break;
+			case _DRM_CONSISTENT:
+				dmah.vaddr = map->handle;
+				dmah.busaddr = map->offset;
+				dmah.size = map->size;
+				__drm_pci_free(dev, &dmah);
+				break;
+			}
+			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+		}
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * \c fault method for DMA virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param address access address.
+ * \return pointer to the page structure.
+ *
+ * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
+ */
+static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct drm_file *priv = vma->vm_file->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_device_dma *dma = dev->dma;
+	unsigned long offset;
+	unsigned long page_nr;
+	struct page *page;
+
+	if (!dma)
+		return VM_FAULT_SIGBUS;	/* Error */
+	if (!dma->pagelist)
+		return VM_FAULT_SIGBUS;	/* Nothing allocated */
+
+	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
+	page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
+	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
+
+	get_page(page);
+	vmf->page = page;
+
+	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
+	return 0;
+}
+
+/**
+ * \c fault method for scatter-gather virtual memory.
+ *
+ * \param vma virtual memory area.
+ * \param address access address.
+ * \return pointer to the page structure.
+ *
+ * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
+ */
+static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct drm_map *map = (struct drm_map *) vma->vm_private_data;
+	struct drm_file *priv = vma->vm_file->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_sg_mem *entry = dev->sg;
+	unsigned long offset;
+	unsigned long map_offset;
+	unsigned long page_offset;
+	struct page *page;
+
+	if (!entry)
+		return VM_FAULT_SIGBUS;	/* Error */
+	if (!entry->pagelist)
+		return VM_FAULT_SIGBUS;	/* Nothing allocated */
+
+	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
+	map_offset = map->offset - (unsigned long)dev->sg->virtual;
+	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
+	page = entry->pagelist[page_offset];
+	get_page(page);
+	vmf->page = page;
+
+	return 0;
+}
+
+static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	return drm_do_vm_fault(vma, vmf);
+}
+
+static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	return drm_do_vm_shm_fault(vma, vmf);
+}
+
+static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	return drm_do_vm_dma_fault(vma, vmf);
+}
+
+static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	return drm_do_vm_sg_fault(vma, vmf);
+}
+
+/** AGP virtual memory operations */
+static struct vm_operations_struct drm_vm_ops = {
+	.fault = drm_vm_fault,
+	.open = drm_vm_open,
+	.close = drm_vm_close,
+};
+
+/** Shared virtual memory operations */
+static struct vm_operations_struct drm_vm_shm_ops = {
+	.fault = drm_vm_shm_fault,
+	.open = drm_vm_open,
+	.close = drm_vm_shm_close,
+};
+
+/** DMA virtual memory operations */
+static struct vm_operations_struct drm_vm_dma_ops = {
+	.fault = drm_vm_dma_fault,
+	.open = drm_vm_open,
+	.close = drm_vm_close,
+};
+
+/** Scatter-gather virtual memory operations */
+static struct vm_operations_struct drm_vm_sg_ops = {
+	.fault = drm_vm_sg_fault,
+	.open = drm_vm_open,
+	.close = drm_vm_close,
+};
+
+/**
+ * \c open method for all virtual memory types.
+ *
+ * \param vma virtual memory area.
+ *
+ * Create a new drm_vma_entry structure as the \p vma private data entry and
+ * add it to drm_device::vmalist.
+ */
+static void drm_vm_open_locked(struct vm_area_struct *vma)
+{
+	struct drm_file *priv = vma->vm_file->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_vma_entry *vma_entry;
+
+	DRM_DEBUG("0x%08lx,0x%08lx\n",
+		  vma->vm_start, vma->vm_end - vma->vm_start);
+	atomic_inc(&dev->vma_count);
+
+	vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
+	if (vma_entry) {
+		vma_entry->vma = vma;
+		vma_entry->pid = current->pid;
+		list_add(&vma_entry->head, &dev->vmalist);
+	}
+}
+
+static void drm_vm_open(struct vm_area_struct *vma)
+{
+	struct drm_file *priv = vma->vm_file->private_data;
+	struct drm_device *dev = priv->minor->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	drm_vm_open_locked(vma);
+	mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * \c close method for all virtual memory types.
+ *
+ * \param vma virtual memory area.
+ *
+ * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
+ * free it.
+ */
+static void drm_vm_close(struct vm_area_struct *vma)
+{
+	struct drm_file *priv = vma->vm_file->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_vma_entry *pt, *temp;
+
+	DRM_DEBUG("0x%08lx,0x%08lx\n",
+		  vma->vm_start, vma->vm_end - vma->vm_start);
+	atomic_dec(&dev->vma_count);
+
+	mutex_lock(&dev->struct_mutex);
+	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
+		if (pt->vma == vma) {
+			list_del(&pt->head);
+			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
+			break;
+		}
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * mmap DMA memory.
+ *
+ * \param file_priv DRM file private.
+ * \param vma virtual memory area.
+ * \return zero on success or a negative number on failure.
+ *
+ * Sets the virtual memory area operations structure to vm_dma_ops, the file
+ * pointer, and calls vm_open().
+ */
+static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev;
+	struct drm_device_dma *dma;
+	unsigned long length = vma->vm_end - vma->vm_start;
+
+	dev = priv->minor->dev;
+	dma = dev->dma;
+	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
+		  vma->vm_start, vma->vm_end, vma->vm_pgoff);
+
+	/* Length must match exact page count */
+	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
+		return -EINVAL;
+	}
+
+	if (!capable(CAP_SYS_ADMIN) &&
+	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
+		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+#if defined(__i386__) || defined(__x86_64__)
+		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
+#else
+		/* Ye gads this is ugly.  With more thought
+		   we could move this up higher and use
+		   `protection_map' instead.  */
+		vma->vm_page_prot =
+		    __pgprot(pte_val
+			     (pte_wrprotect
+			      (__pte(pgprot_val(vma->vm_page_prot)))));
+#endif
+	}
+
+	vma->vm_ops = &drm_vm_dma_ops;
+
+	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
+	vma->vm_flags |= VM_DONTEXPAND;
+
+	vma->vm_file = filp;	/* Needed for drm_vm_open() */
+	drm_vm_open_locked(vma);
+	return 0;
+}
+
+unsigned long drm_core_get_map_ofs(struct drm_map * map)
+{
+	return map->offset;
+}
+
+EXPORT_SYMBOL(drm_core_get_map_ofs);
+
+unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
+{
+#ifdef __alpha__
+	return dev->hose->dense_mem_base - dev->hose->mem_space->start;
+#else
+	return 0;
+#endif
+}
+
+EXPORT_SYMBOL(drm_core_get_reg_ofs);
+
+/**
+ * mmap DRM memory.
+ *
+ * \param file_priv DRM file private.
+ * \param vma virtual memory area.
+ * \return zero on success or a negative number on failure.
+ *
+ * If the virtual memory area has no offset associated with it then it's a DMA
+ * area, so calls mmap_dma(). Otherwise looks the map up in drm_device::map_hash,
+ * checks that the restricted flag is not set, sets the virtual memory operations
+ * according to the mapping type and remaps the pages. Finally sets the file
+ * pointer and calls vm_open().
+ */
+static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_map *map = NULL;
+	unsigned long offset = 0;
+	struct drm_hash_item *hash;
+
+	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
+		  vma->vm_start, vma->vm_end, vma->vm_pgoff);
+
+	if (!priv->authenticated)
+		return -EACCES;
+
+	/* We check for "dma". On Apple's UniNorth, it's valid to have
+	 * the AGP mapped at physical address 0
+	 * --BenH.
+	 */
+	if (!vma->vm_pgoff
+#if __OS_HAS_AGP
+	    && (!dev->agp
+		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
+#endif
+	    )
+		return drm_mmap_dma(filp, vma);
+
+	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
+		DRM_ERROR("Could not find map\n");
+		return -EINVAL;
+	}
+
+	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
+	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
+		return -EPERM;
+
+	/* Check for valid size. */
+	if (map->size < vma->vm_end - vma->vm_start)
+		return -EINVAL;
+
+	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
+		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
+#if defined(__i386__) || defined(__x86_64__)
+		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
+#else
+		/* Ye gads this is ugly.  With more thought
+		   we could move this up higher and use
+		   `protection_map' instead.  */
+		vma->vm_page_prot =
+		    __pgprot(pte_val
+			     (pte_wrprotect
+			      (__pte(pgprot_val(vma->vm_page_prot)))));
+#endif
+	}
+
+	switch (map->type) {
+	case _DRM_AGP:
+		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
+			/*
+			 * On some platforms the CPU can't access the bus DMA address directly,
+			 * so for memory of type DRM_AGP we sort out the real physical pages
+			 * and mappings in fault().
+			 */
+#if defined(__powerpc__)
+			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+#endif
+			vma->vm_ops = &drm_vm_ops;
+			break;
+		}
+		/* fall through to _DRM_FRAME_BUFFER... */
+	case _DRM_FRAME_BUFFER:
+	case _DRM_REGISTERS:
+		offset = dev->driver->get_reg_ofs(dev);
+		vma->vm_flags |= VM_IO;	/* not in core dump */
+		vma->vm_page_prot = drm_io_prot(map->type, vma);
+		if (io_remap_pfn_range(vma, vma->vm_start,
+				       (map->offset + offset) >> PAGE_SHIFT,
+				       vma->vm_end - vma->vm_start,
+				       vma->vm_page_prot))
+			return -EAGAIN;
+		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
+			  " offset = 0x%lx\n",
+			  map->type,
+			  vma->vm_start, vma->vm_end, map->offset + offset);
+		vma->vm_ops = &drm_vm_ops;
+		break;
+	case _DRM_CONSISTENT:
+		/* Consistent memory is really like shared memory. But
+		 * it's allocated in a different way, so avoid fault */
+		if (remap_pfn_range(vma, vma->vm_start,
+		    page_to_pfn(virt_to_page(map->handle)),
+		    vma->vm_end - vma->vm_start, vma->vm_page_prot))
+			return -EAGAIN;
+		vma->vm_page_prot = drm_dma_prot(map->type, vma);
+	/* fall through to _DRM_SHM */
+	case _DRM_SHM:
+		vma->vm_ops = &drm_vm_shm_ops;
+		vma->vm_private_data = (void *)map;
+		/* Don't let this area swap.  Change when
+		   DRM_KERNEL advisory is supported. */
+		vma->vm_flags |= VM_RESERVED;
+		break;
+	case _DRM_SCATTER_GATHER:
+		vma->vm_ops = &drm_vm_sg_ops;
+		vma->vm_private_data = (void *)map;
+		vma->vm_flags |= VM_RESERVED;
+		vma->vm_page_prot = drm_dma_prot(map->type, vma);
+		break;
+	default:
+		return -EINVAL;	/* This should never happen. */
+	}
+	vma->vm_flags |= VM_RESERVED;	/* Don't swap */
+	vma->vm_flags |= VM_DONTEXPAND;
+
+	vma->vm_file = filp;	/* Needed for drm_vm_open() */
+	drm_vm_open_locked(vma);
+	return 0;
+}
+
+int drm_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = drm_mmap_locked(filp, vma);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_mmap);
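For orientation, here is a minimal userspace sketch of the path that ends in drm_mmap() above; it is illustrative only, not part of this patch, and the helper name and device node are assumptions. The offset handed to mmap(2) is the map token the driver published (what drm_mmap_locked() looks up in dev->map_hash); a zero page offset is instead routed to drm_mmap_dma(), except on Apple UniNorth AGP.

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/types.h>

/* Map one DRM region; 'map_offset' is the token published by the driver.
 * The fd is deliberately left open: the mapping belongs to that DRM file. */
static void *map_drm_region(const char *node, off_t map_offset, size_t size)
{
	int fd = open(node, O_RDWR);		/* e.g. "/dev/dri/card0" */

	if (fd < 0)
		return MAP_FAILED;
	/* This call ends up in drm_mmap() -> drm_mmap_locked() above. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, map_offset);
}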
diff --git a/drivers/gpu/drm/i810/Makefile b/drivers/gpu/drm/i810/Makefile
new file mode 100644
index 000000000000..43844ecafcc5
--- /dev/null
+++ b/drivers/gpu/drm/i810/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+i810-y := i810_drv.o i810_dma.o
+
+obj-$(CONFIG_DRM_I810)	+= i810.o
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
new file mode 100644
index 000000000000..e5de8ea41544
--- /dev/null
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -0,0 +1,1283 @@
+/* i810_dma.c -- DMA support for the i810 -*- linux-c -*-
+ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
+ *	    Jeff Hartmann <jhartmann@valinux.com>
+ *          Keith Whitwell <keith@tungstengraphics.com>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i810_drm.h"
+#include "i810_drv.h"
+#include <linux/interrupt.h>	/* For task queue support */
+#include <linux/delay.h>
+#include <linux/pagemap.h>
+
+#define I810_BUF_FREE		2
+#define I810_BUF_CLIENT		1
+#define I810_BUF_HARDWARE	0
+
+#define I810_BUF_UNMAPPED 0
+#define I810_BUF_MAPPED   1
+
+static struct drm_buf *i810_freelist_get(struct drm_device * dev)
+{
+	struct drm_device_dma *dma = dev->dma;
+	int i;
+	int used;
+
+	/* Linear search might not be the best solution */
+
+	for (i = 0; i < dma->buf_count; i++) {
+		struct drm_buf *buf = dma->buflist[i];
+		drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+		/* In use is already a pointer */
+		used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
+			       I810_BUF_CLIENT);
+		if (used == I810_BUF_FREE) {
+			return buf;
+		}
+	}
+	return NULL;
+}
+
+/* This should only be called if the buffer has not been sent to the hardware
+ * yet; the hardware updates in_use for us once it's on the ring buffer.
+ */
+
+static int i810_freelist_put(struct drm_device * dev, struct drm_buf * buf)
+{
+	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+	int used;
+
+	/* In use is already a pointer */
+	used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE);
+	if (used != I810_BUF_CLIENT) {
+		DRM_ERROR("Freeing buffer that's not in use : %d\n", buf->idx);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev;
+	drm_i810_private_t *dev_priv;
+	struct drm_buf *buf;
+	drm_i810_buf_priv_t *buf_priv;
+
+	lock_kernel();
+	dev = priv->minor->dev;
+	dev_priv = dev->dev_private;
+	buf = dev_priv->mmap_buffer;
+	buf_priv = buf->dev_private;
+
+	vma->vm_flags |= (VM_IO | VM_DONTCOPY);
+	vma->vm_file = filp;
+
+	buf_priv->currently_mapped = I810_BUF_MAPPED;
+	unlock_kernel();
+
+	if (io_remap_pfn_range(vma, vma->vm_start,
+			       vma->vm_pgoff,
+			       vma->vm_end - vma->vm_start, vma->vm_page_prot))
+		return -EAGAIN;
+	return 0;
+}
+
+static const struct file_operations i810_buffer_fops = {
+	.open = drm_open,
+	.release = drm_release,
+	.ioctl = drm_ioctl,
+	.mmap = i810_mmap_buffers,
+	.fasync = drm_fasync,
+};
+
+static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
+{
+	struct drm_device *dev = file_priv->minor->dev;
+	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	const struct file_operations *old_fops;
+	int retcode = 0;
+
+	if (buf_priv->currently_mapped == I810_BUF_MAPPED)
+		return -EINVAL;
+
+	down_write(&current->mm->mmap_sem);
+	old_fops = file_priv->filp->f_op;
+	file_priv->filp->f_op = &i810_buffer_fops;
+	dev_priv->mmap_buffer = buf;
+	buf_priv->virtual = (void *)do_mmap(file_priv->filp, 0, buf->total,
+					    PROT_READ | PROT_WRITE,
+					    MAP_SHARED, buf->bus_address);
+	dev_priv->mmap_buffer = NULL;
+	file_priv->filp->f_op = old_fops;
+	if (IS_ERR(buf_priv->virtual)) {
+		/* Real error */
+		DRM_ERROR("mmap error\n");
+		retcode = PTR_ERR(buf_priv->virtual);
+		buf_priv->virtual = NULL;
+	}
+	up_write(&current->mm->mmap_sem);
+
+	return retcode;
+}
+
+static int i810_unmap_buffer(struct drm_buf * buf)
+{
+	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+	int retcode = 0;
+
+	if (buf_priv->currently_mapped != I810_BUF_MAPPED)
+		return -EINVAL;
+
+	down_write(&current->mm->mmap_sem);
+	retcode = do_munmap(current->mm,
+			    (unsigned long)buf_priv->virtual,
+			    (size_t) buf->total);
+	up_write(&current->mm->mmap_sem);
+
+	buf_priv->currently_mapped = I810_BUF_UNMAPPED;
+	buf_priv->virtual = NULL;
+
+	return retcode;
+}
+
+static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d,
+			       struct drm_file *file_priv)
+{
+	struct drm_buf *buf;
+	drm_i810_buf_priv_t *buf_priv;
+	int retcode = 0;
+
+	buf = i810_freelist_get(dev);
+	if (!buf) {
+		retcode = -ENOMEM;
+		DRM_DEBUG("retcode=%d\n", retcode);
+		return retcode;
+	}
+
+	retcode = i810_map_buffer(buf, file_priv);
+	if (retcode) {
+		i810_freelist_put(dev, buf);
+		DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
+		return retcode;
+	}
+	buf->file_priv = file_priv;
+	buf_priv = buf->dev_private;
+	d->granted = 1;
+	d->request_idx = buf->idx;
+	d->request_size = buf->total;
+	d->virtual = buf_priv->virtual;
+
+	return retcode;
+}
+
+static int i810_dma_cleanup(struct drm_device * dev)
+{
+	struct drm_device_dma *dma = dev->dma;
+
+	/* Make sure interrupts are disabled here because the uninstall ioctl
+	 * may not have been called from userspace and after dev_private
+	 * is freed, it's too late.
+	 */
+	if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ) && dev->irq_enabled)
+		drm_irq_uninstall(dev);
+
+	if (dev->dev_private) {
+		int i;
+		drm_i810_private_t *dev_priv =
+		    (drm_i810_private_t *) dev->dev_private;
+
+		if (dev_priv->ring.virtual_start) {
+			drm_core_ioremapfree(&dev_priv->ring.map, dev);
+		}
+		if (dev_priv->hw_status_page) {
+			pci_free_consistent(dev->pdev, PAGE_SIZE,
+					    dev_priv->hw_status_page,
+					    dev_priv->dma_status_page);
+			/* Need to rewrite hardware status page */
+			I810_WRITE(0x02080, 0x1ffff000);
+		}
+		drm_free(dev->dev_private, sizeof(drm_i810_private_t),
+			 DRM_MEM_DRIVER);
+		dev->dev_private = NULL;
+
+		for (i = 0; i < dma->buf_count; i++) {
+			struct drm_buf *buf = dma->buflist[i];
+			drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+
+			if (buf_priv->kernel_virtual && buf->total)
+				drm_core_ioremapfree(&buf_priv->map, dev);
+		}
+	}
+	return 0;
+}
+
+static int i810_wait_ring(struct drm_device * dev, int n)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
+	int iters = 0;
+	unsigned long end;
+	unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+
+	end = jiffies + (HZ * 3);
+	while (ring->space < n) {
+		ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+		ring->space = ring->head - (ring->tail + 8);
+		if (ring->space < 0)
+			ring->space += ring->Size;
+
+		if (ring->head != last_head) {
+			end = jiffies + (HZ * 3);
+			last_head = ring->head;
+		}
+
+		iters++;
+		if (time_before(end, jiffies)) {
+			DRM_ERROR("space: %d wanted %d\n", ring->space, n);
+			DRM_ERROR("lockup\n");
+			goto out_wait_ring;
+		}
+		udelay(1);
+	}
+
+      out_wait_ring:
+	return iters;
+}
+
+static void i810_kernel_lost_context(struct drm_device * dev)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
+
+	ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+	ring->tail = I810_READ(LP_RING + RING_TAIL);
+	ring->space = ring->head - (ring->tail + 8);
+	if (ring->space < 0)
+		ring->space += ring->Size;
+}
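A worked example of the free-space formula shared by i810_wait_ring() and i810_kernel_lost_context(), with assumed register values: for a 64 KiB ring (Size = 0x10000), head = 0x100 and tail = 0xff00 give space = 0x100 - (0xff00 + 8) = -0xfe08; that is negative, so adding Size leaves 0x1f8 (504) bytes that can still be written before the tail would run into the head.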
+
+static int i810_freelist_init(struct drm_device * dev, drm_i810_private_t * dev_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	int my_idx = 24;
+	u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx);
+	int i;
+
+	if (dma->buf_count > 1019) {
+		/* Not enough space in the status page for the freelist */
+		return -EINVAL;
+	}
+
+	for (i = 0; i < dma->buf_count; i++) {
+		struct drm_buf *buf = dma->buflist[i];
+		drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+
+		buf_priv->in_use = hw_status++;
+		buf_priv->my_use_idx = my_idx;
+		my_idx += 4;
+
+		*buf_priv->in_use = I810_BUF_FREE;
+
+		buf_priv->map.offset = buf->bus_address;
+		buf_priv->map.size = buf->total;
+		buf_priv->map.type = _DRM_AGP;
+		buf_priv->map.flags = 0;
+		buf_priv->map.mtrr = 0;
+
+		drm_core_ioremap(&buf_priv->map, dev);
+		buf_priv->kernel_virtual = buf_priv->map.handle;
+
+	}
+	return 0;
+}
+
+static int i810_dma_initialize(struct drm_device * dev,
+			       drm_i810_private_t * dev_priv,
+			       drm_i810_init_t * init)
+{
+	struct drm_map_list *r_list;
+	memset(dev_priv, 0, sizeof(drm_i810_private_t));
+
+	list_for_each_entry(r_list, &dev->maplist, head) {
+		if (r_list->map &&
+		    r_list->map->type == _DRM_SHM &&
+		    r_list->map->flags & _DRM_CONTAINS_LOCK) {
+			dev_priv->sarea_map = r_list->map;
+			break;
+		}
+	}
+	if (!dev_priv->sarea_map) {
+		dev->dev_private = (void *)dev_priv;
+		i810_dma_cleanup(dev);
+		DRM_ERROR("can not find sarea!\n");
+		return -EINVAL;
+	}
+	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
+	if (!dev_priv->mmio_map) {
+		dev->dev_private = (void *)dev_priv;
+		i810_dma_cleanup(dev);
+		DRM_ERROR("can not find mmio map!\n");
+		return -EINVAL;
+	}
+	dev->agp_buffer_token = init->buffers_offset;
+	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
+	if (!dev->agp_buffer_map) {
+		dev->dev_private = (void *)dev_priv;
+		i810_dma_cleanup(dev);
+		DRM_ERROR("can not find dma buffer map!\n");
+		return -EINVAL;
+	}
+
+	dev_priv->sarea_priv = (drm_i810_sarea_t *)
+	    ((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset);
+
+	dev_priv->ring.Start = init->ring_start;
+	dev_priv->ring.End = init->ring_end;
+	dev_priv->ring.Size = init->ring_size;
+
+	dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
+	dev_priv->ring.map.size = init->ring_size;
+	dev_priv->ring.map.type = _DRM_AGP;
+	dev_priv->ring.map.flags = 0;
+	dev_priv->ring.map.mtrr = 0;
+
+	drm_core_ioremap(&dev_priv->ring.map, dev);
+
+	if (dev_priv->ring.map.handle == NULL) {
+		dev->dev_private = (void *)dev_priv;
+		i810_dma_cleanup(dev);
+		DRM_ERROR("can not ioremap virtual address for"
+			  " ring buffer\n");
+		return -ENOMEM;
+	}
+
+	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+
+	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
+
+	dev_priv->w = init->w;
+	dev_priv->h = init->h;
+	dev_priv->pitch = init->pitch;
+	dev_priv->back_offset = init->back_offset;
+	dev_priv->depth_offset = init->depth_offset;
+	dev_priv->front_offset = init->front_offset;
+
+	dev_priv->overlay_offset = init->overlay_offset;
+	dev_priv->overlay_physical = init->overlay_physical;
+
+	dev_priv->front_di1 = init->front_offset | init->pitch_bits;
+	dev_priv->back_di1 = init->back_offset | init->pitch_bits;
+	dev_priv->zi1 = init->depth_offset | init->pitch_bits;
+
+	/* Program Hardware Status Page */
+	dev_priv->hw_status_page =
+	    pci_alloc_consistent(dev->pdev, PAGE_SIZE,
+				 &dev_priv->dma_status_page);
+	if (!dev_priv->hw_status_page) {
+		dev->dev_private = (void *)dev_priv;
+		i810_dma_cleanup(dev);
+		DRM_ERROR("Can not allocate hardware status page\n");
+		return -ENOMEM;
+	}
+	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
+
+	I810_WRITE(0x02080, dev_priv->dma_status_page);
+	DRM_DEBUG("Enabled hardware status page\n");
+
+	/* Now we need to init our freelist */
+	if (i810_freelist_init(dev, dev_priv) != 0) {
+		dev->dev_private = (void *)dev_priv;
+		i810_dma_cleanup(dev);
+		DRM_ERROR("Not enough space in the status page for"
+			  " the freelist\n");
+		return -ENOMEM;
+	}
+	dev->dev_private = (void *)dev_priv;
+
+	return 0;
+}
+
+static int i810_dma_init(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i810_private_t *dev_priv;
+	drm_i810_init_t *init = data;
+	int retcode = 0;
+
+	switch (init->func) {
+	case I810_INIT_DMA_1_4:
+		DRM_INFO("Using v1.4 init.\n");
+		dev_priv = drm_alloc(sizeof(drm_i810_private_t),
+				     DRM_MEM_DRIVER);
+		if (dev_priv == NULL)
+			return -ENOMEM;
+		retcode = i810_dma_initialize(dev, dev_priv, init);
+		break;
+
+	case I810_CLEANUP_DMA:
+		DRM_INFO("DMA Cleanup\n");
+		retcode = i810_dma_cleanup(dev);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return retcode;
+}
+
+/* The most efficient way to verify state for the i810 is to check it as it
+ * is emitted.  Non-conformant state is silently dropped.
+ *
+ * Use 'volatile' & local var tmp to force the emitted values to be
+ * identical to the verified ones.
+ */
+static void i810EmitContextVerified(struct drm_device * dev,
+				    volatile unsigned int *code)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	int i, j = 0;
+	unsigned int tmp;
+	RING_LOCALS;
+
+	BEGIN_LP_RING(I810_CTX_SETUP_SIZE);
+
+	OUT_RING(GFX_OP_COLOR_FACTOR);
+	OUT_RING(code[I810_CTXREG_CF1]);
+
+	OUT_RING(GFX_OP_STIPPLE);
+	OUT_RING(code[I810_CTXREG_ST1]);
+
+	for (i = 4; i < I810_CTX_SETUP_SIZE; i++) {
+		tmp = code[i];
+
+		if ((tmp & (7 << 29)) == (3 << 29) &&
+		    (tmp & (0x1f << 24)) < (0x1d << 24)) {
+			OUT_RING(tmp);
+			j++;
+		} else
+			printk("context state dropped!!!\n");
+	}
+
+	if (j & 1)
+		OUT_RING(0);
+
+	ADVANCE_LP_RING();
+}
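The filter in the loop above (and the identical one in i810EmitTexVerified() below) only forwards client state words whose top three bits select the GFX_OP class (0x3) and whose 5-bit opcode is below 0x1d; everything else is dropped. A standalone sketch of that check, purely illustrative and not part of the driver:

#include <stdio.h>

/* Mirrors the acceptance test applied to client-supplied state words. */
static int i810_state_word_allowed(unsigned int word)
{
	return (word & (7u << 29)) == (3u << 29) &&
	       (word & (0x1fu << 24)) < (0x1du << 24);
}

int main(void)
{
	unsigned int ok  = (3u << 29) | (0x05u << 24);		/* forwarded */
	unsigned int bad = (3u << 29) | (0x1du << 24) | 0x2;	/* dropped   */

	printf("%d %d\n", i810_state_word_allowed(ok),
	       i810_state_word_allowed(bad));	/* prints "1 0" */
	return 0;
}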
+
+static void i810EmitTexVerified(struct drm_device * dev, volatile unsigned int *code)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	int i, j = 0;
+	unsigned int tmp;
+	RING_LOCALS;
+
+	BEGIN_LP_RING(I810_TEX_SETUP_SIZE);
+
+	OUT_RING(GFX_OP_MAP_INFO);
+	OUT_RING(code[I810_TEXREG_MI1]);
+	OUT_RING(code[I810_TEXREG_MI2]);
+	OUT_RING(code[I810_TEXREG_MI3]);
+
+	for (i = 4; i < I810_TEX_SETUP_SIZE; i++) {
+		tmp = code[i];
+
+		if ((tmp & (7 << 29)) == (3 << 29) &&
+		    (tmp & (0x1f << 24)) < (0x1d << 24)) {
+			OUT_RING(tmp);
+			j++;
+		} else
+			printk("texture state dropped!!!\n");
+	}
+
+	if (j & 1)
+		OUT_RING(0);
+
+	ADVANCE_LP_RING();
+}
+
+/* Need to do some additional checking when setting the dest buffer.
+ */
+static void i810EmitDestVerified(struct drm_device * dev,
+				 volatile unsigned int *code)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	unsigned int tmp;
+	RING_LOCALS;
+
+	BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2);
+
+	tmp = code[I810_DESTREG_DI1];
+	if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
+		OUT_RING(CMD_OP_DESTBUFFER_INFO);
+		OUT_RING(tmp);
+	} else
+		DRM_DEBUG("bad di1 %x (allow %x or %x)\n",
+			  tmp, dev_priv->front_di1, dev_priv->back_di1);
+
+	/* invariant:
+	 */
+	OUT_RING(CMD_OP_Z_BUFFER_INFO);
+	OUT_RING(dev_priv->zi1);
+
+	OUT_RING(GFX_OP_DESTBUFFER_VARS);
+	OUT_RING(code[I810_DESTREG_DV1]);
+
+	OUT_RING(GFX_OP_DRAWRECT_INFO);
+	OUT_RING(code[I810_DESTREG_DR1]);
+	OUT_RING(code[I810_DESTREG_DR2]);
+	OUT_RING(code[I810_DESTREG_DR3]);
+	OUT_RING(code[I810_DESTREG_DR4]);
+	OUT_RING(0);
+
+	ADVANCE_LP_RING();
+}
+
+static void i810EmitState(struct drm_device * dev)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	unsigned int dirty = sarea_priv->dirty;
+
+	DRM_DEBUG("%x\n", dirty);
+
+	if (dirty & I810_UPLOAD_BUFFERS) {
+		i810EmitDestVerified(dev, sarea_priv->BufferState);
+		sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS;
+	}
+
+	if (dirty & I810_UPLOAD_CTX) {
+		i810EmitContextVerified(dev, sarea_priv->ContextState);
+		sarea_priv->dirty &= ~I810_UPLOAD_CTX;
+	}
+
+	if (dirty & I810_UPLOAD_TEX0) {
+		i810EmitTexVerified(dev, sarea_priv->TexState[0]);
+		sarea_priv->dirty &= ~I810_UPLOAD_TEX0;
+	}
+
+	if (dirty & I810_UPLOAD_TEX1) {
+		i810EmitTexVerified(dev, sarea_priv->TexState[1]);
+		sarea_priv->dirty &= ~I810_UPLOAD_TEX1;
+	}
+}
+
+/* need to verify
+ */
+static void i810_dma_dispatch_clear(struct drm_device * dev, int flags,
+				    unsigned int clear_color,
+				    unsigned int clear_zval)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	int nbox = sarea_priv->nbox;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
+	int pitch = dev_priv->pitch;
+	int cpp = 2;
+	int i;
+	RING_LOCALS;
+
+	if (dev_priv->current_page == 1) {
+		unsigned int tmp = flags;
+
+		flags &= ~(I810_FRONT | I810_BACK);
+		if (tmp & I810_FRONT)
+			flags |= I810_BACK;
+		if (tmp & I810_BACK)
+			flags |= I810_FRONT;
+	}
+
+	i810_kernel_lost_context(dev);
+
+	if (nbox > I810_NR_SAREA_CLIPRECTS)
+		nbox = I810_NR_SAREA_CLIPRECTS;
+
+	for (i = 0; i < nbox; i++, pbox++) {
+		unsigned int x = pbox->x1;
+		unsigned int y = pbox->y1;
+		unsigned int width = (pbox->x2 - x) * cpp;
+		unsigned int height = pbox->y2 - y;
+		unsigned int start = y * pitch + x * cpp;
+
+		if (pbox->x1 > pbox->x2 ||
+		    pbox->y1 > pbox->y2 ||
+		    pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
+			continue;
+
+		if (flags & I810_FRONT) {
+			BEGIN_LP_RING(6);
+			OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
+			OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
+			OUT_RING((height << 16) | width);
+			OUT_RING(start);
+			OUT_RING(clear_color);
+			OUT_RING(0);
+			ADVANCE_LP_RING();
+		}
+
+		if (flags & I810_BACK) {
+			BEGIN_LP_RING(6);
+			OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
+			OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
+			OUT_RING((height << 16) | width);
+			OUT_RING(dev_priv->back_offset + start);
+			OUT_RING(clear_color);
+			OUT_RING(0);
+			ADVANCE_LP_RING();
+		}
+
+		if (flags & I810_DEPTH) {
+			BEGIN_LP_RING(6);
+			OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_COLOR_BLT | 0x3);
+			OUT_RING(BR13_SOLID_PATTERN | (0xF0 << 16) | pitch);
+			OUT_RING((height << 16) | width);
+			OUT_RING(dev_priv->depth_offset + start);
+			OUT_RING(clear_zval);
+			OUT_RING(0);
+			ADVANCE_LP_RING();
+		}
+	}
+}
+
+static void i810_dma_dispatch_swap(struct drm_device * dev)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	int nbox = sarea_priv->nbox;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
+	int pitch = dev_priv->pitch;
+	int cpp = 2;
+	int i;
+	RING_LOCALS;
+
+	DRM_DEBUG("swapbuffers\n");
+
+	i810_kernel_lost_context(dev);
+
+	if (nbox > I810_NR_SAREA_CLIPRECTS)
+		nbox = I810_NR_SAREA_CLIPRECTS;
+
+	for (i = 0; i < nbox; i++, pbox++) {
+		unsigned int w = pbox->x2 - pbox->x1;
+		unsigned int h = pbox->y2 - pbox->y1;
+		unsigned int dst = pbox->x1 * cpp + pbox->y1 * pitch;
+		unsigned int start = dst;
+
+		if (pbox->x1 > pbox->x2 ||
+		    pbox->y1 > pbox->y2 ||
+		    pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
+			continue;
+
+		BEGIN_LP_RING(6);
+		OUT_RING(BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4);
+		OUT_RING(pitch | (0xCC << 16));
+		OUT_RING((h << 16) | (w * cpp));
+		if (dev_priv->current_page == 0)
+			OUT_RING(dev_priv->front_offset + start);
+		else
+			OUT_RING(dev_priv->back_offset + start);
+		OUT_RING(pitch);
+		if (dev_priv->current_page == 0)
+			OUT_RING(dev_priv->back_offset + start);
+		else
+			OUT_RING(dev_priv->front_offset + start);
+		ADVANCE_LP_RING();
+	}
+}
+
+static void i810_dma_dispatch_vertex(struct drm_device * dev,
+				     struct drm_buf * buf, int discard, int used)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	struct drm_clip_rect *box = sarea_priv->boxes;
+	int nbox = sarea_priv->nbox;
+	unsigned long address = (unsigned long)buf->bus_address;
+	unsigned long start = address - dev->agp->base;
+	int i = 0;
+	RING_LOCALS;
+
+	i810_kernel_lost_context(dev);
+
+	if (nbox > I810_NR_SAREA_CLIPRECTS)
+		nbox = I810_NR_SAREA_CLIPRECTS;
+
+	if (used > 4 * 1024)
+		used = 0;
+
+	if (sarea_priv->dirty)
+		i810EmitState(dev);
+
+	if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
+		unsigned int prim = (sarea_priv->vertex_prim & PR_MASK);
+
+		*(u32 *) buf_priv->kernel_virtual =
+		    ((GFX_OP_PRIMITIVE | prim | ((used / 4) - 2)));
+
+		if (used & 4) {
+			*(u32 *) ((char *) buf_priv->kernel_virtual + used) = 0;
+			used += 4;
+		}
+
+		i810_unmap_buffer(buf);
+	}
+
+	if (used) {
+		do {
+			if (i < nbox) {
+				BEGIN_LP_RING(4);
+				OUT_RING(GFX_OP_SCISSOR | SC_UPDATE_SCISSOR |
+					 SC_ENABLE);
+				OUT_RING(GFX_OP_SCISSOR_INFO);
+				OUT_RING(box[i].x1 | (box[i].y1 << 16));
+				OUT_RING((box[i].x2 -
+					  1) | ((box[i].y2 - 1) << 16));
+				ADVANCE_LP_RING();
+			}
+
+			BEGIN_LP_RING(4);
+			OUT_RING(CMD_OP_BATCH_BUFFER);
+			OUT_RING(start | BB1_PROTECTED);
+			OUT_RING(start + used - 4);
+			OUT_RING(0);
+			ADVANCE_LP_RING();
+
+		} while (++i < nbox);
+	}
+
+	if (discard) {
+		dev_priv->counter++;
+
+		(void)cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
+			      I810_BUF_HARDWARE);
+
+		BEGIN_LP_RING(8);
+		OUT_RING(CMD_STORE_DWORD_IDX);
+		OUT_RING(20);
+		OUT_RING(dev_priv->counter);
+		OUT_RING(CMD_STORE_DWORD_IDX);
+		OUT_RING(buf_priv->my_use_idx);
+		OUT_RING(I810_BUF_FREE);
+		OUT_RING(CMD_REPORT_HEAD);
+		OUT_RING(0);
+		ADVANCE_LP_RING();
+	}
+}
+
+static void i810_dma_dispatch_flip(struct drm_device * dev)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	int pitch = dev_priv->pitch;
+	RING_LOCALS;
+
+	DRM_DEBUG("page=%d pfCurrentPage=%d\n",
+		  dev_priv->current_page,
+		  dev_priv->sarea_priv->pf_current_page);
+
+	i810_kernel_lost_context(dev);
+
+	BEGIN_LP_RING(2);
+	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+
+	BEGIN_LP_RING(I810_DEST_SETUP_SIZE + 2);
+	/* On i815 at least ASYNC is buggy */
+	/* pitch<<5 is from 11.2.8 p158,
+	   it's the pitch / 8 then left-shifted 8,
+	   so (pitch >> 3) << 8 */
+	OUT_RING(CMD_OP_FRONTBUFFER_INFO | (pitch << 5) /*| ASYNC_FLIP */ );
+	if (dev_priv->current_page == 0) {
+		OUT_RING(dev_priv->back_offset);
+		dev_priv->current_page = 1;
+	} else {
+		OUT_RING(dev_priv->front_offset);
+		dev_priv->current_page = 0;
+	}
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+
+	BEGIN_LP_RING(2);
+	OUT_RING(CMD_OP_WAIT_FOR_EVENT | WAIT_FOR_PLANE_A_FLIP);
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+
+	/* Publish the new page-flip state.  The client-side 3D driver reads
+	 * pf_current_page from the SAREA to learn which buffer is displayed
+	 * and throttles its framerate accordingly.
+	 */
+	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
+
+}
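To sanity-check the pitch arithmetic in the comment inside i810_dma_dispatch_flip() with an assumed pitch of 4096 bytes: pitch / 8 = 512 and 512 << 8 = 131072, which equals 4096 << 5, so (pitch >> 3) << 8 and pitch << 5 agree whenever the pitch is a multiple of 8.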
+
+static void i810_dma_quiescent(struct drm_device * dev)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+
+	i810_kernel_lost_context(dev);
+
+	BEGIN_LP_RING(4);
+	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
+	OUT_RING(CMD_REPORT_HEAD);
+	OUT_RING(0);
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+
+	i810_wait_ring(dev, dev_priv->ring.Size - 8);
+}
+
+static int i810_flush_queue(struct drm_device * dev)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	struct drm_device_dma *dma = dev->dma;
+	int i, ret = 0;
+	RING_LOCALS;
+
+	i810_kernel_lost_context(dev);
+
+	BEGIN_LP_RING(2);
+	OUT_RING(CMD_REPORT_HEAD);
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+
+	i810_wait_ring(dev, dev_priv->ring.Size - 8);
+
+	for (i = 0; i < dma->buf_count; i++) {
+		struct drm_buf *buf = dma->buflist[i];
+		drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+
+		int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE,
+				   I810_BUF_FREE);
+
+		if (used == I810_BUF_HARDWARE)
+			DRM_DEBUG("reclaimed from HARDWARE\n");
+		if (used == I810_BUF_CLIENT)
+			DRM_DEBUG("still on client\n");
+	}
+
+	return ret;
+}
+
+/* Must be called with the lock held */
+static void i810_reclaim_buffers(struct drm_device * dev,
+				 struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	int i;
+
+	if (!dma)
+		return;
+	if (!dev->dev_private)
+		return;
+	if (!dma->buflist)
+		return;
+
+	i810_flush_queue(dev);
+
+	for (i = 0; i < dma->buf_count; i++) {
+		struct drm_buf *buf = dma->buflist[i];
+		drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+
+		if (buf->file_priv == file_priv && buf_priv) {
+			int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
+					   I810_BUF_FREE);
+
+			if (used == I810_BUF_CLIENT)
+				DRM_DEBUG("reclaimed from client\n");
+			if (buf_priv->currently_mapped == I810_BUF_MAPPED)
+				buf_priv->currently_mapped = I810_BUF_UNMAPPED;
+		}
+	}
+}
+
+static int i810_flush_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv)
+{
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	i810_flush_queue(dev);
+	return 0;
+}
+
+static int i810_dma_vertex(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
+	u32 *hw_status = dev_priv->hw_status_page;
+	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
+	    dev_priv->sarea_priv;
+	drm_i810_vertex_t *vertex = data;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DRM_DEBUG("idx %d used %d discard %d\n",
+		  vertex->idx, vertex->used, vertex->discard);
+
+	if (vertex->idx < 0 || vertex->idx >= dma->buf_count)
+		return -EINVAL;
+
+	i810_dma_dispatch_vertex(dev,
+				 dma->buflist[vertex->idx],
+				 vertex->discard, vertex->used);
+
+	atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
+	atomic_inc(&dev->counts[_DRM_STAT_DMA]);
+	sarea_priv->last_enqueue = dev_priv->counter - 1;
+	sarea_priv->last_dispatch = (int)hw_status[5];
+
+	return 0;
+}
+
+static int i810_clear_bufs(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	drm_i810_clear_t *clear = data;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	/* GH: Someone's doing nasty things... */
+	if (!dev->dev_private) {
+		return -EINVAL;
+	}
+
+	i810_dma_dispatch_clear(dev, clear->flags,
+				clear->clear_color, clear->clear_depth);
+	return 0;
+}
+
+static int i810_swap_bufs(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	i810_dma_dispatch_swap(dev);
+	return 0;
+}
+
+static int i810_getage(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
+	u32 *hw_status = dev_priv->hw_status_page;
+	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
+	    dev_priv->sarea_priv;
+
+	sarea_priv->last_dispatch = (int)hw_status[5];
+	return 0;
+}
+
+static int i810_getbuf(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	int retcode = 0;
+	drm_i810_dma_t *d = data;
+	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
+	u32 *hw_status = dev_priv->hw_status_page;
+	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
+	    dev_priv->sarea_priv;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	d->granted = 0;
+
+	retcode = i810_dma_get_buffer(dev, d, file_priv);
+
+	DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
+		  task_pid_nr(current), retcode, d->granted);
+
+	sarea_priv->last_dispatch = (int)hw_status[5];
+
+	return retcode;
+}
+
+static int i810_copybuf(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	/* Never copy - 2.4.x doesn't need it */
+	return 0;
+}
+
+static int i810_docopy(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	/* Never copy - 2.4.x doesn't need it */
+	return 0;
+}
+
+static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf, int used,
+				 unsigned int last_render)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
+	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	unsigned long address = (unsigned long)buf->bus_address;
+	unsigned long start = address - dev->agp->base;
+	int u;
+	RING_LOCALS;
+
+	i810_kernel_lost_context(dev);
+
+	u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_HARDWARE);
+	if (u != I810_BUF_CLIENT) {
+		DRM_DEBUG("MC found buffer that isn't mine!\n");
+	}
+
+	if (used > 4 * 1024)
+		used = 0;
+
+	sarea_priv->dirty = 0x7f;
+
+	DRM_DEBUG("addr 0x%lx, used 0x%x\n", address, used);
+
+	dev_priv->counter++;
+	DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
+	DRM_DEBUG("start : %lx\n", start);
+	DRM_DEBUG("used : %d\n", used);
+	DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
+
+	if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
+		if (used & 4) {
+			*(u32 *) ((char *) buf_priv->virtual + used) = 0;
+			used += 4;
+		}
+
+		i810_unmap_buffer(buf);
+	}
+	BEGIN_LP_RING(4);
+	OUT_RING(CMD_OP_BATCH_BUFFER);
+	OUT_RING(start | BB1_PROTECTED);
+	OUT_RING(start + used - 4);
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+
+	BEGIN_LP_RING(8);
+	OUT_RING(CMD_STORE_DWORD_IDX);
+	OUT_RING(buf_priv->my_use_idx);
+	OUT_RING(I810_BUF_FREE);
+	OUT_RING(0);
+
+	OUT_RING(CMD_STORE_DWORD_IDX);
+	OUT_RING(16);
+	OUT_RING(last_render);
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+}
+
+static int i810_dma_mc(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
+	u32 *hw_status = dev_priv->hw_status_page;
+	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
+	    dev_priv->sarea_priv;
+	drm_i810_mc_t *mc = data;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (mc->idx >= dma->buf_count || mc->idx < 0)
+		return -EINVAL;
+
+	i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
+			     mc->last_render);
+
+	atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
+	atomic_inc(&dev->counts[_DRM_STAT_DMA]);
+	sarea_priv->last_enqueue = dev_priv->counter - 1;
+	sarea_priv->last_dispatch = (int)hw_status[5];
+
+	return 0;
+}
+
+static int i810_rstatus(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
+
+	return (int)(((u32 *) (dev_priv->hw_status_page))[4]);
+}
+
+static int i810_ov0_info(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
+	drm_i810_overlay_t *ov = data;
+
+	ov->offset = dev_priv->overlay_offset;
+	ov->physical = dev_priv->overlay_physical;
+
+	return 0;
+}
+
+static int i810_fstatus(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+	return I810_READ(0x30008);
+}
+
+static int i810_ov0_flip(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	/* Tell the overlay to update */
+	I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000);
+
+	return 0;
+}
+
+/* Not sure why this isn't set all the time:
+ */
+static void i810_do_init_pageflip(struct drm_device * dev)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+
+	DRM_DEBUG("\n");
+	dev_priv->page_flipping = 1;
+	dev_priv->current_page = 0;
+	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
+}
+
+static int i810_do_cleanup_pageflip(struct drm_device * dev)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+
+	DRM_DEBUG("\n");
+	if (dev_priv->current_page != 0)
+		i810_dma_dispatch_flip(dev);
+
+	dev_priv->page_flipping = 0;
+	return 0;
+}
+
+static int i810_flip_bufs(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	drm_i810_private_t *dev_priv = dev->dev_private;
+
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (!dev_priv->page_flipping)
+		i810_do_init_pageflip(dev);
+
+	i810_dma_dispatch_flip(dev);
+	return 0;
+}
+
+int i810_driver_load(struct drm_device *dev, unsigned long flags)
+{
+	/* i810 has 4 more counters */
+	dev->counters += 4;
+	dev->types[6] = _DRM_STAT_IRQ;
+	dev->types[7] = _DRM_STAT_PRIMARY;
+	dev->types[8] = _DRM_STAT_SECONDARY;
+	dev->types[9] = _DRM_STAT_DMA;
+
+	return 0;
+}
+
+void i810_driver_lastclose(struct drm_device * dev)
+{
+	i810_dma_cleanup(dev);
+}
+
+void i810_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+{
+	if (dev->dev_private) {
+		drm_i810_private_t *dev_priv = dev->dev_private;
+		if (dev_priv->page_flipping) {
+			i810_do_cleanup_pageflip(dev);
+		}
+	}
+}
+
+void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
+					struct drm_file *file_priv)
+{
+	i810_reclaim_buffers(dev, file_priv);
+}
+
+int i810_driver_dma_quiescent(struct drm_device * dev)
+{
+	i810_dma_quiescent(dev);
+	return 0;
+}
+
+struct drm_ioctl_desc i810_ioctls[] = {
+	DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH)
+};
+
+int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
+
+/**
+ * Determine if the device really is AGP or not.
+ *
+ * All Intel graphics chipsets are treated as AGP, even if they are really
+ * PCI-e.
+ *
+ * \param dev   The device to be tested.
+ *
+ * \returns
+ * A value of 1 is always returned to indicate every i810 is AGP.
+ */
+int i810_driver_device_is_agp(struct drm_device * dev)
+{
+	return 1;
+}
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
new file mode 100644
index 000000000000..fabb9a817966
--- /dev/null
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -0,0 +1,97 @@
+/* i810_drv.c -- I810 driver -*- linux-c -*-
+ * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Rickard E. (Rik) Faith <faith@valinux.com>
+ *    Jeff Hartmann <jhartmann@valinux.com>
+ *    Gareth Hughes <gareth@valinux.com>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i810_drm.h"
+#include "i810_drv.h"
+
+#include "drm_pciids.h"
+
+static struct pci_device_id pciidlist[] = {
+	i810_PCI_IDS
+};
+
+static struct drm_driver driver = {
+	.driver_features =
+	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
+	    DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
+	.dev_priv_size = sizeof(drm_i810_buf_priv_t),
+	.load = i810_driver_load,
+	.lastclose = i810_driver_lastclose,
+	.preclose = i810_driver_preclose,
+	.device_is_agp = i810_driver_device_is_agp,
+	.reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
+	.dma_quiescent = i810_driver_dma_quiescent,
+	.get_map_ofs = drm_core_get_map_ofs,
+	.get_reg_ofs = drm_core_get_reg_ofs,
+	.ioctls = i810_ioctls,
+	.fops = {
+		 .owner = THIS_MODULE,
+		 .open = drm_open,
+		 .release = drm_release,
+		 .ioctl = drm_ioctl,
+		 .mmap = drm_mmap,
+		 .poll = drm_poll,
+		 .fasync = drm_fasync,
+	},
+
+	.pci_driver = {
+		 .name = DRIVER_NAME,
+		 .id_table = pciidlist,
+	},
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static int __init i810_init(void)
+{
+	driver.num_ioctls = i810_max_ioctl;
+	return drm_init(&driver);
+}
+
+static void __exit i810_exit(void)
+{
+	drm_exit(&driver);
+}
+
+module_init(i810_init);
+module_exit(i810_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
new file mode 100644
index 000000000000..0118849a5672
--- /dev/null
+++ b/drivers/gpu/drm/i810/i810_drv.h
@@ -0,0 +1,242 @@
+/* i810_drv.h -- Private header for the i810 driver -*- linux-c -*-
+ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
+ *	    Jeff Hartmann <jhartmann@valinux.com>
+ *
+ */
+
+#ifndef _I810_DRV_H_
+#define _I810_DRV_H_
+
+/* General customization:
+ */
+
+#define DRIVER_AUTHOR		"VA Linux Systems Inc."
+
+#define DRIVER_NAME		"i810"
+#define DRIVER_DESC		"Intel i810"
+#define DRIVER_DATE		"20030605"
+
+/* Interface history
+ *
+ * 1.1   - XFree86 4.1
+ * 1.2   - XvMC interfaces
+ *       - XFree86 4.2
+ * 1.2.1 - Disable copying code (leave stub ioctls for backwards compatibility)
+ *       - Remove requirement for interrupt (leave stubs again)
+ * 1.3   - Add page flipping.
+ * 1.4   - fix DRM interface
+ */
+#define DRIVER_MAJOR		1
+#define DRIVER_MINOR		4
+#define DRIVER_PATCHLEVEL	0
+
+typedef struct drm_i810_buf_priv {
+	u32 *in_use;
+	int my_use_idx;
+	int currently_mapped;
+	void *virtual;
+	void *kernel_virtual;
+	drm_local_map_t map;
+} drm_i810_buf_priv_t;
+
+typedef struct _drm_i810_ring_buffer {
+	int tail_mask;
+	unsigned long Start;
+	unsigned long End;
+	unsigned long Size;
+	u8 *virtual_start;
+	int head;
+	int tail;
+	int space;
+	drm_local_map_t map;
+} drm_i810_ring_buffer_t;
+
+typedef struct drm_i810_private {
+	struct drm_map *sarea_map;
+	struct drm_map *mmio_map;
+
+	drm_i810_sarea_t *sarea_priv;
+	drm_i810_ring_buffer_t ring;
+
+	void *hw_status_page;
+	unsigned long counter;
+
+	dma_addr_t dma_status_page;
+
+	struct drm_buf *mmap_buffer;
+
+	u32 front_di1, back_di1, zi1;
+
+	int back_offset;
+	int depth_offset;
+	int overlay_offset;
+	int overlay_physical;
+	int w, h;
+	int pitch;
+	int back_pitch;
+	int depth_pitch;
+
+	int do_boxes;
+	int dma_used;
+
+	int current_page;
+	int page_flipping;
+
+	wait_queue_head_t irq_queue;
+	atomic_t irq_received;
+	atomic_t irq_emitted;
+
+	int front_offset;
+} drm_i810_private_t;
+
+				/* i810_dma.c */
+extern int i810_driver_dma_quiescent(struct drm_device * dev);
+extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
+					       struct drm_file *file_priv);
+extern int i810_driver_load(struct drm_device *, unsigned long flags);
+extern void i810_driver_lastclose(struct drm_device * dev);
+extern void i810_driver_preclose(struct drm_device * dev,
+				 struct drm_file *file_priv);
+extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
+					       struct drm_file *file_priv);
+extern int i810_driver_device_is_agp(struct drm_device * dev);
+
+extern struct drm_ioctl_desc i810_ioctls[];
+extern int i810_max_ioctl;
+
+#define I810_BASE(reg)		((unsigned long) \
+				dev_priv->mmio_map->handle)
+#define I810_ADDR(reg)		(I810_BASE(reg) + reg)
+#define I810_DEREF(reg)		*(__volatile__ int *)I810_ADDR(reg)
+#define I810_READ(reg)		I810_DEREF(reg)
+#define I810_WRITE(reg,val)	do { I810_DEREF(reg) = val; } while (0)
+#define I810_DEREF16(reg)	*(__volatile__ u16 *)I810_ADDR(reg)
+#define I810_READ16(reg)	I810_DEREF16(reg)
+#define I810_WRITE16(reg,val)	do { I810_DEREF16(reg) = val; } while (0)
+
+#define I810_VERBOSE 0
+#define RING_LOCALS	unsigned int outring, ringmask; \
+                        volatile char *virt;
+
+#define BEGIN_LP_RING(n) do {						\
+	if (I810_VERBOSE)                                               \
+		DRM_DEBUG("BEGIN_LP_RING(%d)\n", n);			\
+	if (dev_priv->ring.space < n*4)					\
+		i810_wait_ring(dev, n*4);				\
+	dev_priv->ring.space -= n*4;					\
+	outring = dev_priv->ring.tail;					\
+	ringmask = dev_priv->ring.tail_mask;				\
+	virt = dev_priv->ring.virtual_start;				\
+} while (0)
+
+#define ADVANCE_LP_RING() do {				        \
+	if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n");	\
+	dev_priv->ring.tail = outring;				\
+	I810_WRITE(LP_RING + RING_TAIL, outring);	        \
+} while(0)
+
+#define OUT_RING(n) do {				                \
+	if (I810_VERBOSE) DRM_DEBUG("   OUT_RING %x\n", (int)(n));	\
+	*(volatile unsigned int *)(virt + outring) = n;	                \
+	outring += 4;					                \
+	outring &= ringmask;			                        \
+} while (0)
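Taken together, the three macros are used as in the fragment below, which mirrors the pattern already present in i810_dma.c (it only compiles inside a driver function where dev and dev_priv are in scope): reserve n dwords, emit exactly n of them, then advance the tail.

	RING_LOCALS;

	BEGIN_LP_RING(2);
	OUT_RING(CMD_REPORT_HEAD);
	OUT_RING(0);
	ADVANCE_LP_RING();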
+
+#define GFX_OP_USER_INTERRUPT		((0<<29)|(2<<23))
+#define GFX_OP_BREAKPOINT_INTERRUPT	((0<<29)|(1<<23))
+#define CMD_REPORT_HEAD			(7<<23)
+#define CMD_STORE_DWORD_IDX		((0x21<<23) | 0x1)
+#define CMD_OP_BATCH_BUFFER  ((0x0<<29)|(0x30<<23)|0x1)
+
+#define INST_PARSER_CLIENT   0x00000000
+#define INST_OP_FLUSH        0x02000000
+#define INST_FLUSH_MAP_CACHE 0x00000001
+
+#define BB1_START_ADDR_MASK   (~0x7)
+#define BB1_PROTECTED         (1<<0)
+#define BB1_UNPROTECTED       (0<<0)
+#define BB2_END_ADDR_MASK     (~0x7)
+
+#define I810REG_HWSTAM		0x02098
+#define I810REG_INT_IDENTITY_R	0x020a4
+#define I810REG_INT_MASK_R	0x020a8
+#define I810REG_INT_ENABLE_R	0x020a0
+
+#define LP_RING			0x2030
+#define HP_RING			0x2040
+#define RING_TAIL		0x00
+#define TAIL_ADDR		0x000FFFF8
+#define RING_HEAD		0x04
+#define HEAD_WRAP_COUNT		0xFFE00000
+#define HEAD_WRAP_ONE		0x00200000
+#define HEAD_ADDR		0x001FFFFC
+#define RING_START		0x08
+#define START_ADDR		0x00FFFFF8
+#define RING_LEN		0x0C
+#define RING_NR_PAGES		0x000FF000
+#define RING_REPORT_MASK	0x00000006
+#define RING_REPORT_64K		0x00000002
+#define RING_REPORT_128K	0x00000004
+#define RING_NO_REPORT		0x00000000
+#define RING_VALID_MASK		0x00000001
+#define RING_VALID		0x00000001
+#define RING_INVALID		0x00000000
+
+#define GFX_OP_SCISSOR         ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define SC_UPDATE_SCISSOR       (0x1<<1)
+#define SC_ENABLE_MASK          (0x1<<0)
+#define SC_ENABLE               (0x1<<0)
+
+#define GFX_OP_SCISSOR_INFO    ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
+#define SCI_YMIN_MASK      (0xffff<<16)
+#define SCI_XMIN_MASK      (0xffff<<0)
+#define SCI_YMAX_MASK      (0xffff<<16)
+#define SCI_XMAX_MASK      (0xffff<<0)
+
+#define GFX_OP_COLOR_FACTOR      ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
+#define GFX_OP_STIPPLE           ((0x3<<29)|(0x1d<<24)|(0x83<<16))
+#define GFX_OP_MAP_INFO          ((0x3<<29)|(0x1d<<24)|0x2)
+#define GFX_OP_DESTBUFFER_VARS   ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
+#define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
+#define GFX_OP_PRIMITIVE         ((0x3<<29)|(0x1f<<24))
+
+#define CMD_OP_Z_BUFFER_INFO     ((0x0<<29)|(0x16<<23))
+#define CMD_OP_DESTBUFFER_INFO   ((0x0<<29)|(0x15<<23))
+#define CMD_OP_FRONTBUFFER_INFO  ((0x0<<29)|(0x14<<23))
+#define CMD_OP_WAIT_FOR_EVENT    ((0x0<<29)|(0x03<<23))
+
+#define BR00_BITBLT_CLIENT   0x40000000
+#define BR00_OP_COLOR_BLT    0x10000000
+#define BR00_OP_SRC_COPY_BLT 0x10C00000
+#define BR13_SOLID_PATTERN   0x80000000
+
+#define WAIT_FOR_PLANE_A_SCANLINES (1<<1)
+#define WAIT_FOR_PLANE_A_FLIP      (1<<2)
+#define WAIT_FOR_VBLANK (1<<3)
+
+#endif
diff --git a/drivers/gpu/drm/i830/Makefile b/drivers/gpu/drm/i830/Makefile
new file mode 100644
index 000000000000..c642ee0b238c
--- /dev/null
+++ b/drivers/gpu/drm/i830/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+i830-y := i830_drv.o i830_dma.o i830_irq.o
+
+obj-$(CONFIG_DRM_I830) += i830.o
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
new file mode 100644
index 000000000000..a86ab30b4620
--- /dev/null
+++ b/drivers/gpu/drm/i830/i830_dma.c
@@ -0,0 +1,1553 @@
+/* i830_dma.c -- DMA support for the I830 -*- linux-c -*-
+ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
+ *	    Jeff Hartmann <jhartmann@valinux.com>
+ *	    Keith Whitwell <keith@tungstengraphics.com>
+ *	    Abraham vd Merwe <abraham@2d3d.co.za>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i830_drm.h"
+#include "i830_drv.h"
+#include <linux/interrupt.h>	/* For task queue support */
+#include <linux/pagemap.h>
+#include <linux/delay.h>
+#include <asm/uaccess.h>
+
+#define I830_BUF_FREE		2
+#define I830_BUF_CLIENT		1
+#define I830_BUF_HARDWARE	0
+
+#define I830_BUF_UNMAPPED 0
+#define I830_BUF_MAPPED   1
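+
+/* Buffer ownership is tracked with a 32-bit word in the hardware status
+ * page (buf_priv->in_use): FREE means available, CLIENT means handed out
+ * to userspace, HARDWARE means queued on the ring.  State transitions use
+ * cmpxchg(), so the freelist needs no extra locking.
+ */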
+
+static struct drm_buf *i830_freelist_get(struct drm_device * dev)
+{
+	struct drm_device_dma *dma = dev->dma;
+	int i;
+	int used;
+
+	/* Linear search might not be the best solution */
+
+	for (i = 0; i < dma->buf_count; i++) {
+		struct drm_buf *buf = dma->buflist[i];
+		drm_i830_buf_priv_t *buf_priv = buf->dev_private;
+		/* in_use is already a pointer, so cmpxchg() on it directly */
+		used = cmpxchg(buf_priv->in_use, I830_BUF_FREE,
+			       I830_BUF_CLIENT);
+		if (used == I830_BUF_FREE) {
+			return buf;
+		}
+	}
+	return NULL;
+}
+
+/* This should only be called if the buffer has not been sent to the hardware
+ * yet; the hardware updates in_use for us once it's on the ring buffer.
+ */
+
+static int i830_freelist_put(struct drm_device * dev, struct drm_buf * buf)
+{
+	drm_i830_buf_priv_t *buf_priv = buf->dev_private;
+	int used;
+
+	/* in_use is already a pointer, so cmpxchg() on it directly */
+	used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, I830_BUF_FREE);
+	if (used != I830_BUF_CLIENT) {
+		DRM_ERROR("Freeing buffer that's not in use: %d\n", buf->idx);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev;
+	drm_i830_private_t *dev_priv;
+	struct drm_buf *buf;
+	drm_i830_buf_priv_t *buf_priv;
+
+	lock_kernel();
+	dev = priv->minor->dev;
+	dev_priv = dev->dev_private;
+	buf = dev_priv->mmap_buffer;
+	buf_priv = buf->dev_private;
+
+	vma->vm_flags |= (VM_IO | VM_DONTCOPY);
+	vma->vm_file = filp;
+
+	buf_priv->currently_mapped = I830_BUF_MAPPED;
+	unlock_kernel();
+
+	if (io_remap_pfn_range(vma, vma->vm_start,
+			       vma->vm_pgoff,
+			       vma->vm_end - vma->vm_start, vma->vm_page_prot))
+		return -EAGAIN;
+	return 0;
+}
+
+static const struct file_operations i830_buffer_fops = {
+	.open = drm_open,
+	.release = drm_release,
+	.ioctl = drm_ioctl,
+	.mmap = i830_mmap_buffers,
+	.fasync = drm_fasync,
+};
+
+static int i830_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
+{
+	struct drm_device *dev = file_priv->minor->dev;
+	drm_i830_buf_priv_t *buf_priv = buf->dev_private;
+	drm_i830_private_t *dev_priv = dev->dev_private;
+	const struct file_operations *old_fops;
+	unsigned long virtual;
+	int retcode = 0;
+
+	if (buf_priv->currently_mapped == I830_BUF_MAPPED)
+		return -EINVAL;
+
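+	/* Temporarily swap in i830_buffer_fops so that the do_mmap() below is
+	 * routed to i830_mmap_buffers(), which maps the buffer stashed in
+	 * dev_priv->mmap_buffer into the client's address space.
+	 */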
+	down_write(&current->mm->mmap_sem);
+	old_fops = file_priv->filp->f_op;
+	file_priv->filp->f_op = &i830_buffer_fops;
+	dev_priv->mmap_buffer = buf;
+	virtual = do_mmap(file_priv->filp, 0, buf->total, PROT_READ | PROT_WRITE,
+			  MAP_SHARED, buf->bus_address);
+	dev_priv->mmap_buffer = NULL;
+	file_priv->filp->f_op = old_fops;
+	if (IS_ERR((void *)virtual)) {	/* ugh */
+		/* Real error */
+		DRM_ERROR("mmap error\n");
+		retcode = PTR_ERR((void *)virtual);
+		buf_priv->virtual = NULL;
+	} else {
+		buf_priv->virtual = (void __user *)virtual;
+	}
+	up_write(&current->mm->mmap_sem);
+
+	return retcode;
+}
+
+static int i830_unmap_buffer(struct drm_buf * buf)
+{
+	drm_i830_buf_priv_t *buf_priv = buf->dev_private;
+	int retcode = 0;
+
+	if (buf_priv->currently_mapped != I830_BUF_MAPPED)
+		return -EINVAL;
+
+	down_write(&current->mm->mmap_sem);
+	retcode = do_munmap(current->mm,
+			    (unsigned long)buf_priv->virtual,
+			    (size_t) buf->total);
+	up_write(&current->mm->mmap_sem);
+
+	buf_priv->currently_mapped = I830_BUF_UNMAPPED;
+	buf_priv->virtual = NULL;
+
+	return retcode;
+}
+
+static int i830_dma_get_buffer(struct drm_device * dev, drm_i830_dma_t * d,
+			       struct drm_file *file_priv)
+{
+	struct drm_buf *buf;
+	drm_i830_buf_priv_t *buf_priv;
+	int retcode = 0;
+
+	buf = i830_freelist_get(dev);
+	if (!buf) {
+		retcode = -ENOMEM;
+		DRM_DEBUG("retcode=%d\n", retcode);
+		return retcode;
+	}
+
+	retcode = i830_map_buffer(buf, file_priv);
+	if (retcode) {
+		i830_freelist_put(dev, buf);
+		DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
+		return retcode;
+	}
+	buf->file_priv = file_priv;
+	buf_priv = buf->dev_private;
+	d->granted = 1;
+	d->request_idx = buf->idx;
+	d->request_size = buf->total;
+	d->virtual = buf_priv->virtual;
+
+	return retcode;
+}
+
+static int i830_dma_cleanup(struct drm_device * dev)
+{
+	struct drm_device_dma *dma = dev->dma;
+
+	/* Make sure interrupts are disabled here because the uninstall ioctl
+	 * may not have been called from userspace and after dev_private
+	 * is freed, it's too late.
+	 */
+	if (dev->irq_enabled)
+		drm_irq_uninstall(dev);
+
+	if (dev->dev_private) {
+		int i;
+		drm_i830_private_t *dev_priv =
+		    (drm_i830_private_t *) dev->dev_private;
+
+		if (dev_priv->ring.virtual_start) {
+			drm_core_ioremapfree(&dev_priv->ring.map, dev);
+		}
+		if (dev_priv->hw_status_page) {
+			pci_free_consistent(dev->pdev, PAGE_SIZE,
+					    dev_priv->hw_status_page,
+					    dev_priv->dma_status_page);
+			/* Need to rewrite hardware status page */
+			I830_WRITE(0x02080, 0x1ffff000);
+		}
+
+		drm_free(dev->dev_private, sizeof(drm_i830_private_t),
+			 DRM_MEM_DRIVER);
+		dev->dev_private = NULL;
+
+		for (i = 0; i < dma->buf_count; i++) {
+			struct drm_buf *buf = dma->buflist[i];
+			drm_i830_buf_priv_t *buf_priv = buf->dev_private;
+			if (buf_priv->kernel_virtual && buf->total)
+				drm_core_ioremapfree(&buf_priv->map, dev);
+		}
+	}
+	return 0;
+}
+
+int i830_wait_ring(struct drm_device * dev, int n, const char *caller)
+{
+	drm_i830_private_t *dev_priv = dev->dev_private;
+	drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
+	int iters = 0;
+	unsigned long end;
+	unsigned int last_head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+
+	end = jiffies + (HZ * 3);
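+
+	/* Wait until at least n bytes are free in the ring.  The three second
+	 * watchdog restarts whenever the head advances, so only a truly stuck
+	 * ring takes the lockup path below.
+	 */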
+	while (ring->space < n) {
+		ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+		ring->space = ring->head - (ring->tail + 8);
+		if (ring->space < 0)
+			ring->space += ring->Size;
+
+		if (ring->head != last_head) {
+			end = jiffies + (HZ * 3);
+			last_head = ring->head;
+		}
+
+		iters++;
+		if (time_before(end, jiffies)) {
+			DRM_ERROR("space: %d wanted %d\n", ring->space, n);
+			DRM_ERROR("lockup\n");
+			goto out_wait_ring;
+		}
+		udelay(1);
+		dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
+	}
+
+      out_wait_ring:
+	return iters;
+}
+
+static void i830_kernel_lost_context(struct drm_device * dev)
+{
+	drm_i830_private_t *dev_priv = dev->dev_private;
+	drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
+
+	ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+	ring->tail = I830_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
+	ring->space = ring->head - (ring->tail + 8);
+	if (ring->space < 0)
+		ring->space += ring->Size;
+
+	if (ring->head == ring->tail)
+		dev_priv->sarea_priv->perf_boxes |= I830_BOX_RING_EMPTY;
+}
+
+static int i830_freelist_init(struct drm_device * dev, drm_i830_private_t * dev_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	int my_idx = 36;
+	u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx);
+	int i;
+
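+	/* Each buffer gets a 32-bit in-use word in the hardware status page,
+	 * starting at byte offset 36; the lower offsets are reserved for
+	 * values the hardware writes back, such as the last-dispatch counter.
+	 */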
+	if (dma->buf_count > 1019) {
+		/* Not enough space in the status page for the freelist */
+		return -EINVAL;
+	}
+
+	for (i = 0; i < dma->buf_count; i++) {
+		struct drm_buf *buf = dma->buflist[i];
+		drm_i830_buf_priv_t *buf_priv = buf->dev_private;
+
+		buf_priv->in_use = hw_status++;
+		buf_priv->my_use_idx = my_idx;
+		my_idx += 4;
+
+		*buf_priv->in_use = I830_BUF_FREE;
+
+		buf_priv->map.offset = buf->bus_address;
+		buf_priv->map.size = buf->total;
+		buf_priv->map.type = _DRM_AGP;
+		buf_priv->map.flags = 0;
+		buf_priv->map.mtrr = 0;
+
+		drm_core_ioremap(&buf_priv->map, dev);
+		buf_priv->kernel_virtual = buf_priv->map.handle;
+	}
+	return 0;
+}
+
+static int i830_dma_initialize(struct drm_device * dev,
+			       drm_i830_private_t * dev_priv,
+			       drm_i830_init_t * init)
+{
+	struct drm_map_list *r_list;
+
+	memset(dev_priv, 0, sizeof(drm_i830_private_t));
+
+	list_for_each_entry(r_list, &dev->maplist, head) {
+		if (r_list->map &&
+		    r_list->map->type == _DRM_SHM &&
+		    r_list->map->flags & _DRM_CONTAINS_LOCK) {
+			dev_priv->sarea_map = r_list->map;
+			break;
+		}
+	}
+
+	if (!dev_priv->sarea_map) {
+		dev->dev_private = (void *)dev_priv;
+		i830_dma_cleanup(dev);
+		DRM_ERROR("can not find sarea!\n");
+		return -EINVAL;
+	}
+	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
+	if (!dev_priv->mmio_map) {
+		dev->dev_private = (void *)dev_priv;
+		i830_dma_cleanup(dev);
+		DRM_ERROR("can not find mmio map!\n");
+		return -EINVAL;
+	}
+	dev->agp_buffer_token = init->buffers_offset;
+	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
+	if (!dev->agp_buffer_map) {
+		dev->dev_private = (void *)dev_priv;
+		i830_dma_cleanup(dev);
+		DRM_ERROR("can not find dma buffer map!\n");
+		return -EINVAL;
+	}
+
+	dev_priv->sarea_priv = (drm_i830_sarea_t *)
+	    ((u8 *) dev_priv->sarea_map->handle + init->sarea_priv_offset);
+
+	dev_priv->ring.Start = init->ring_start;
+	dev_priv->ring.End = init->ring_end;
+	dev_priv->ring.Size = init->ring_size;
+
+	dev_priv->ring.map.offset = dev->agp->base + init->ring_start;
+	dev_priv->ring.map.size = init->ring_size;
+	dev_priv->ring.map.type = _DRM_AGP;
+	dev_priv->ring.map.flags = 0;
+	dev_priv->ring.map.mtrr = 0;
+
+	drm_core_ioremap(&dev_priv->ring.map, dev);
+
+	if (dev_priv->ring.map.handle == NULL) {
+		dev->dev_private = (void *)dev_priv;
+		i830_dma_cleanup(dev);
+		DRM_ERROR("can not ioremap virtual address for"
+			  " ring buffer\n");
+		return -ENOMEM;
+	}
+
+	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+
+	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
+
+	dev_priv->w = init->w;
+	dev_priv->h = init->h;
+	dev_priv->pitch = init->pitch;
+	dev_priv->back_offset = init->back_offset;
+	dev_priv->depth_offset = init->depth_offset;
+	dev_priv->front_offset = init->front_offset;
+
+	dev_priv->front_di1 = init->front_offset | init->pitch_bits;
+	dev_priv->back_di1 = init->back_offset | init->pitch_bits;
+	dev_priv->zi1 = init->depth_offset | init->pitch_bits;
+
+	DRM_DEBUG("front_di1 %x\n", dev_priv->front_di1);
+	DRM_DEBUG("back_offset %x\n", dev_priv->back_offset);
+	DRM_DEBUG("back_di1 %x\n", dev_priv->back_di1);
+	DRM_DEBUG("pitch_bits %x\n", init->pitch_bits);
+
+	dev_priv->cpp = init->cpp;
+	/* We use separate values as placeholders for a future mechanism for
+	 * private backbuffer/depthbuffer usage.
+	 */
+
+	dev_priv->back_pitch = init->back_pitch;
+	dev_priv->depth_pitch = init->depth_pitch;
+	dev_priv->do_boxes = 0;
+	dev_priv->use_mi_batchbuffer_start = 0;
+
+	/* Program Hardware Status Page */
+	dev_priv->hw_status_page =
+	    pci_alloc_consistent(dev->pdev, PAGE_SIZE,
+				 &dev_priv->dma_status_page);
+	if (!dev_priv->hw_status_page) {
+		dev->dev_private = (void *)dev_priv;
+		i830_dma_cleanup(dev);
+		DRM_ERROR("Can not allocate hardware status page\n");
+		return -ENOMEM;
+	}
+	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
+
+	I830_WRITE(0x02080, dev_priv->dma_status_page);
+	DRM_DEBUG("Enabled hardware status page\n");
+
+	/* Now we need to init our freelist */
+	if (i830_freelist_init(dev, dev_priv) != 0) {
+		dev->dev_private = (void *)dev_priv;
+		i830_dma_cleanup(dev);
+		DRM_ERROR("Not enough space in the status page for"
+			  " the freelist\n");
+		return -ENOMEM;
+	}
+	dev->dev_private = (void *)dev_priv;
+
+	return 0;
+}
+
+static int i830_dma_init(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i830_private_t *dev_priv;
+	drm_i830_init_t *init = data;
+	int retcode = 0;
+
+	switch (init->func) {
+	case I830_INIT_DMA:
+		dev_priv = drm_alloc(sizeof(drm_i830_private_t),
+				     DRM_MEM_DRIVER);
+		if (dev_priv == NULL)
+			return -ENOMEM;
+		retcode = i830_dma_initialize(dev, dev_priv, init);
+		break;
+	case I830_CLEANUP_DMA:
+		retcode = i830_dma_cleanup(dev);
+		break;
+	default:
+		retcode = -EINVAL;
+		break;
+	}
+
+	return retcode;
+}
+
+#define GFX_OP_STIPPLE           ((0x3<<29)|(0x1d<<24)|(0x83<<16))
+#define ST1_ENABLE               (1<<16)
+#define ST1_MASK                 (0xffff)
+
+/* The most efficient way to verify state for the i830 is to check it as it
+ * is emitted.  Non-conformant state is silently dropped.
+ */
+static void i830EmitContextVerified(struct drm_device * dev, unsigned int *code)
+{
+	drm_i830_private_t *dev_priv = dev->dev_private;
+	int i, j = 0;
+	unsigned int tmp;
+	RING_LOCALS;
+
+	BEGIN_LP_RING(I830_CTX_SETUP_SIZE + 4);
+
+	for (i = 0; i < I830_CTXREG_BLENDCOLR0; i++) {
+		tmp = code[i];
+		if ((tmp & (7 << 29)) == CMD_3D &&
+		    (tmp & (0x1f << 24)) < (0x1d << 24)) {
+			OUT_RING(tmp);
+			j++;
+		} else {
+			DRM_ERROR("Skipping %d\n", i);
+		}
+	}
+
+	OUT_RING(STATE3D_CONST_BLEND_COLOR_CMD);
+	OUT_RING(code[I830_CTXREG_BLENDCOLR]);
+	j += 2;
+
+	for (i = I830_CTXREG_VF; i < I830_CTXREG_MCSB0; i++) {
+		tmp = code[i];
+		if ((tmp & (7 << 29)) == CMD_3D &&
+		    (tmp & (0x1f << 24)) < (0x1d << 24)) {
+			OUT_RING(tmp);
+			j++;
+		} else {
+			DRM_ERROR("Skipping %d\n", i);
+		}
+	}
+
+	OUT_RING(STATE3D_MAP_COORD_SETBIND_CMD);
+	OUT_RING(code[I830_CTXREG_MCSB1]);
+	j += 2;
+
+	if (j & 1)
+		OUT_RING(0);
+
+	ADVANCE_LP_RING();
+}
+
+static void i830EmitTexVerified(struct drm_device * dev, unsigned int *code)
+{
+	drm_i830_private_t *dev_priv = dev->dev_private;
+	int i, j = 0;
+	unsigned int tmp;
+	RING_LOCALS;
+
+	if (code[I830_TEXREG_MI0] == GFX_OP_MAP_INFO ||
+	    (code[I830_TEXREG_MI0] & ~(0xf * LOAD_TEXTURE_MAP0)) ==
+	    (STATE3D_LOAD_STATE_IMMEDIATE_2 | 4)) {
+
+		BEGIN_LP_RING(I830_TEX_SETUP_SIZE);
+
+		OUT_RING(code[I830_TEXREG_MI0]);	/* TM0LI */
+		OUT_RING(code[I830_TEXREG_MI1]);	/* TM0S0 */
+		OUT_RING(code[I830_TEXREG_MI2]);	/* TM0S1 */
+		OUT_RING(code[I830_TEXREG_MI3]);	/* TM0S2 */
+		OUT_RING(code[I830_TEXREG_MI4]);	/* TM0S3 */
+		OUT_RING(code[I830_TEXREG_MI5]);	/* TM0S4 */
+
+		for (i = 6; i < I830_TEX_SETUP_SIZE; i++) {
+			tmp = code[i];
+			OUT_RING(tmp);
+			j++;
+		}
+
+		if (j & 1)
+			OUT_RING(0);
+
+		ADVANCE_LP_RING();
+	} else
+		printk(KERN_ERR "rejected packet %x\n", code[0]);
+}
+
+static void i830EmitTexBlendVerified(struct drm_device * dev,
+				     unsigned int *code, unsigned int num)
+{
+	drm_i830_private_t *dev_priv = dev->dev_private;
+	int i, j = 0;
+	unsigned int tmp;
+	RING_LOCALS;
+
+	if (!num)
+		return;
+
+	BEGIN_LP_RING(num + 1);
+
+	for (i = 0; i < num; i++) {
+		tmp = code[i];
+		OUT_RING(tmp);
+		j++;
+	}
+
+	if (j & 1)
+		OUT_RING(0);
+
+	ADVANCE_LP_RING();
+}
+
+static void i830EmitTexPalette(struct drm_device * dev,
+			       unsigned int *palette, int number, int is_shared)
+{
+	drm_i830_private_t *dev_priv = dev->dev_private;
+	int i;
+	RING_LOCALS;
+
+	return;
+
+	BEGIN_LP_RING(258);
+
+	if (is_shared == 1) {
+		OUT_RING(CMD_OP_MAP_PALETTE_LOAD |
+			 MAP_PALETTE_NUM(0) | MAP_PALETTE_BOTH);
+	} else {
+		OUT_RING(CMD_OP_MAP_PALETTE_LOAD | MAP_PALETTE_NUM(number));
+	}
+	for (i = 0; i < 256; i++) {
+		OUT_RING(palette[i]);
+	}
+	OUT_RING(0);
+	/* KW:  WHERE IS THE ADVANCE_LP_RING?  This is effectively a noop!
+	 */
+}
+
+/* Need to do some additional checking when setting the dest buffer.
+ */
+static void i830EmitDestVerified(struct drm_device * dev, unsigned int *code)
+{
+	drm_i830_private_t *dev_priv = dev->dev_private;
+	unsigned int tmp;
+	RING_LOCALS;
+
+	BEGIN_LP_RING(I830_DEST_SETUP_SIZE + 10);
+
+	tmp = code[I830_DESTREG_CBUFADDR];
+	if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
+		if (((int)outring) & 8) {
+			OUT_RING(0);
+			OUT_RING(0);
+		}
+
+		OUT_RING(CMD_OP_DESTBUFFER_INFO);
+		OUT_RING(BUF_3D_ID_COLOR_BACK |
+			 BUF_3D_PITCH(dev_priv->back_pitch * dev_priv->cpp) |
+			 BUF_3D_USE_FENCE);
+		OUT_RING(tmp);
+		OUT_RING(0);
+
+		OUT_RING(CMD_OP_DESTBUFFER_INFO);
+		OUT_RING(BUF_3D_ID_DEPTH | BUF_3D_USE_FENCE |
+			 BUF_3D_PITCH(dev_priv->depth_pitch * dev_priv->cpp));
+		OUT_RING(dev_priv->zi1);
+		OUT_RING(0);
+	} else {
+		DRM_ERROR("bad di1 %x (allow %x or %x)\n",
+			  tmp, dev_priv->front_di1, dev_priv->back_di1);
+	}
+
+	/* invariant:
+	 */
+
+	OUT_RING(GFX_OP_DESTBUFFER_VARS);
+	OUT_RING(code[I830_DESTREG_DV1]);
+
+	OUT_RING(GFX_OP_DRAWRECT_INFO);
+	OUT_RING(code[I830_DESTREG_DR1]);
+	OUT_RING(code[I830_DESTREG_DR2]);
+	OUT_RING(code[I830_DESTREG_DR3]);
+	OUT_RING(code[I830_DESTREG_DR4]);
+
+	/* Need to verify this */
+	tmp = code[I830_DESTREG_SENABLE];
+	if ((tmp & ~0x3) == GFX_OP_SCISSOR_ENABLE) {
+		OUT_RING(tmp);
+	} else {
+		DRM_ERROR("bad scissor enable\n");
+		OUT_RING(0);
+	}
+
+	OUT_RING(GFX_OP_SCISSOR_RECT);
+	OUT_RING(code[I830_DESTREG_SR1]);
+	OUT_RING(code[I830_DESTREG_SR2]);
+	OUT_RING(0);
+
+	ADVANCE_LP_RING();
+}
+
+static void i830EmitStippleVerified(struct drm_device * dev, unsigned int *code)
+{
+	drm_i830_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+
+	BEGIN_LP_RING(2);
+	OUT_RING(GFX_OP_STIPPLE);
+	OUT_RING(code[1]);
+	ADVANCE_LP_RING();
+}
+
+static void i830EmitState(struct drm_device * dev)
+{
+	drm_i830_private_t *dev_priv = dev->dev_private;
+	drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	unsigned int dirty = sarea_priv->dirty;
+
+	DRM_DEBUG("%s %x\n", __func__, dirty);
+
+	if (dirty & I830_UPLOAD_BUFFERS) {
+		i830EmitDestVerified(dev, sarea_priv->BufferState);
+		sarea_priv->dirty &= ~I830_UPLOAD_BUFFERS;
+	}
+
+	if (dirty & I830_UPLOAD_CTX) {
+		i830EmitContextVerified(dev, sarea_priv->ContextState);
+		sarea_priv->dirty &= ~I830_UPLOAD_CTX;
+	}
+
+	if (dirty & I830_UPLOAD_TEX0) {
+		i830EmitTexVerified(dev, sarea_priv->TexState[0]);
+		sarea_priv->dirty &= ~I830_UPLOAD_TEX0;
+	}
+
+	if (dirty & I830_UPLOAD_TEX1) {
+		i830EmitTexVerified(dev, sarea_priv->TexState[1]);
+		sarea_priv->dirty &= ~I830_UPLOAD_TEX1;
+	}
+
+	if (dirty & I830_UPLOAD_TEXBLEND0) {
+		i830EmitTexBlendVerified(dev, sarea_priv->TexBlendState[0],
+					 sarea_priv->TexBlendStateWordsUsed[0]);
+		sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND0;
+	}
+
+	if (dirty & I830_UPLOAD_TEXBLEND1) {
+		i830EmitTexBlendVerified(dev, sarea_priv->TexBlendState[1],
+					 sarea_priv->TexBlendStateWordsUsed[1]);
+		sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND1;
+	}
+
+	if (dirty & I830_UPLOAD_TEX_PALETTE_SHARED) {
+		i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 1);
+	} else {
+		if (dirty & I830_UPLOAD_TEX_PALETTE_N(0)) {
+			i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 0);
+			sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(0);
+		}
+		if (dirty & I830_UPLOAD_TEX_PALETTE_N(1)) {
+			i830EmitTexPalette(dev, sarea_priv->Palette[1], 1, 0);
+			sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(1);
+		}
+
+		/* 1.3:
+		 */
+#if 0
+		if (dirty & I830_UPLOAD_TEX_PALETTE_N(2)) {
+			i830EmitTexPalette(dev, sarea_priv->Palette2[0], 0, 0);
+			sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2);
+		}
+		if (dirty & I830_UPLOAD_TEX_PALETTE_N(3)) {
+			i830EmitTexPalette(dev, sarea_priv->Palette2[1], 1, 0);
+			sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2);
+		}
+#endif
+	}
+
+	/* 1.3:
+	 */
+	if (dirty & I830_UPLOAD_STIPPLE) {
+		i830EmitStippleVerified(dev, sarea_priv->StippleState);
+		sarea_priv->dirty &= ~I830_UPLOAD_STIPPLE;
+	}
+
+	if (dirty & I830_UPLOAD_TEX2) {
+		i830EmitTexVerified(dev, sarea_priv->TexState2);
+		sarea_priv->dirty &= ~I830_UPLOAD_TEX2;
+	}
+
+	if (dirty & I830_UPLOAD_TEX3) {
+		i830EmitTexVerified(dev, sarea_priv->TexState3);
+		sarea_priv->dirty &= ~I830_UPLOAD_TEX3;
+	}
+
+	if (dirty & I830_UPLOAD_TEXBLEND2) {
+		i830EmitTexBlendVerified(dev,
+					 sarea_priv->TexBlendState2,
+					 sarea_priv->TexBlendStateWordsUsed2);
+
+		sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND2;
+	}
+
+	if (dirty & I830_UPLOAD_TEXBLEND3) {
+		i830EmitTexBlendVerified(dev,
+					 sarea_priv->TexBlendState3,
+					 sarea_priv->TexBlendStateWordsUsed3);
+		sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND3;
+	}
+}
+
+/* ================================================================
+ * Performance monitoring functions
+ */
+
+static void i830_fill_box(struct drm_device * dev,
+			  int x, int y, int w, int h, int r, int g, int b)
+{
+	drm_i830_private_t *dev_priv = dev->dev_private;
+	u32 color;
+	unsigned int BR13, CMD;
+	RING_LOCALS;
+
+	BR13 = (0xF0 << 16) | (dev_priv->pitch * dev_priv->cpp) | (1 << 24);
+	CMD = XY_COLOR_BLT_CMD;
+	x += dev_priv->sarea_priv->boxes[0].x1;
+	y += dev_priv->sarea_priv->boxes[0].y1;
+
+	if (dev_priv->cpp == 4) {
+		BR13 |= (1 << 25);
+		CMD |= (XY_COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB);
+		color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
+	} else {
+		color = (((r & 0xf8) << 8) |
+			 ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
+	}
+
+	BEGIN_LP_RING(6);
+	OUT_RING(CMD);
+	OUT_RING(BR13);
+	OUT_RING((y << 16) | x);
+	OUT_RING(((y + h) << 16) | (x + w));
+
+	if (dev_priv->current_page == 1) {
+		OUT_RING(dev_priv->front_offset);
+	} else {
+		OUT_RING(dev_priv->back_offset);
+	}
+
+	OUT_RING(color);
+	ADVANCE_LP_RING();
+}
+
+static void i830_cp_performance_boxes(struct drm_device * dev)
+{
+	drm_i830_private_t *dev_priv = dev->dev_private;
+
+	/* Purple box for page flipping
+	 */
+	if (dev_priv->sarea_priv->perf_boxes & I830_BOX_FLIP)
+		i830_fill_box(dev, 4, 4, 8, 8, 255, 0, 255);
+
+	/* Red box if we have to wait for idle at any point
+	 */
+	if (dev_priv->sarea_priv->perf_boxes & I830_BOX_WAIT)
+		i830_fill_box(dev, 16, 4, 8, 8, 255, 0, 0);
+
+	/* Blue box: lost context?
+	 */
+	if (dev_priv->sarea_priv->perf_boxes & I830_BOX_LOST_CONTEXT)
+		i830_fill_box(dev, 28, 4, 8, 8, 0, 0, 255);
+
+	/* Yellow box for texture swaps
+	 */
+	if (dev_priv->sarea_priv->perf_boxes & I830_BOX_TEXTURE_LOAD)
+		i830_fill_box(dev, 40, 4, 8, 8, 255, 255, 0);
+
+	/* Green box if hardware never idles (as far as we can tell)
+	 */
+	if (!(dev_priv->sarea_priv->perf_boxes & I830_BOX_RING_EMPTY))
+		i830_fill_box(dev, 64, 4, 8, 8, 0, 255, 0);
+
+	/* Draw bars indicating number of buffers allocated
+	 * (not a great measure, easily confused)
+	 */
+	if (dev_priv->dma_used) {
+		int bar = dev_priv->dma_used / 10240;
+		if (bar > 100)
+			bar = 100;
+		if (bar < 1)
+			bar = 1;
+		i830_fill_box(dev, 4, 16, bar, 4, 196, 128, 128);
+		dev_priv->dma_used = 0;
+	}
+
+	dev_priv->sarea_priv->perf_boxes = 0;
+}
+
+static void i830_dma_dispatch_clear(struct drm_device * dev, int flags,
+				    unsigned int clear_color,
+				    unsigned int clear_zval,
+				    unsigned int clear_depthmask)
+{
+	drm_i830_private_t *dev_priv = dev->dev_private;
+	drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	int nbox = sarea_priv->nbox;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
+	int pitch = dev_priv->pitch;
+	int cpp = dev_priv->cpp;
+	int i;
+	unsigned int BR13, CMD, D_CMD;
+	RING_LOCALS;
+
+	if (dev_priv->current_page == 1) {
+		unsigned int tmp = flags;
+
+		flags &= ~(I830_FRONT | I830_BACK);
+		if (tmp & I830_FRONT)
+			flags |= I830_BACK;
+		if (tmp & I830_BACK)
+			flags |= I830_FRONT;
+	}
+
+	i830_kernel_lost_context(dev);
+
+	switch (cpp) {
+	case 2:
+		BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24);
+		D_CMD = CMD = XY_COLOR_BLT_CMD;
+		break;
+	case 4:
+		BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24) | (1 << 25);
+		CMD = (XY_COLOR_BLT_CMD | XY_COLOR_BLT_WRITE_ALPHA |
+		       XY_COLOR_BLT_WRITE_RGB);
+		D_CMD = XY_COLOR_BLT_CMD;
+		if (clear_depthmask & 0x00ffffff)
+			D_CMD |= XY_COLOR_BLT_WRITE_RGB;
+		if (clear_depthmask & 0xff000000)
+			D_CMD |= XY_COLOR_BLT_WRITE_ALPHA;
+		break;
+	default:
+		BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24);
+		D_CMD = CMD = XY_COLOR_BLT_CMD;
+		break;
+	}
+
+	if (nbox > I830_NR_SAREA_CLIPRECTS)
+		nbox = I830_NR_SAREA_CLIPRECTS;
+
+	for (i = 0; i < nbox; i++, pbox++) {
+		if (pbox->x1 > pbox->x2 ||
+		    pbox->y1 > pbox->y2 ||
+		    pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
+			continue;
+
+		if (flags & I830_FRONT) {
+			DRM_DEBUG("clear front\n");
+			BEGIN_LP_RING(6);
+			OUT_RING(CMD);
+			OUT_RING(BR13);
+			OUT_RING((pbox->y1 << 16) | pbox->x1);
+			OUT_RING((pbox->y2 << 16) | pbox->x2);
+			OUT_RING(dev_priv->front_offset);
+			OUT_RING(clear_color);
+			ADVANCE_LP_RING();
+		}
+
+		if (flags & I830_BACK) {
+			DRM_DEBUG("clear back\n");
+			BEGIN_LP_RING(6);
+			OUT_RING(CMD);
+			OUT_RING(BR13);
+			OUT_RING((pbox->y1 << 16) | pbox->x1);
+			OUT_RING((pbox->y2 << 16) | pbox->x2);
+			OUT_RING(dev_priv->back_offset);
+			OUT_RING(clear_color);
+			ADVANCE_LP_RING();
+		}
+
+		if (flags & I830_DEPTH) {
+			DRM_DEBUG("clear depth\n");
+			BEGIN_LP_RING(6);
+			OUT_RING(D_CMD);
+			OUT_RING(BR13);
+			OUT_RING((pbox->y1 << 16) | pbox->x1);
+			OUT_RING((pbox->y2 << 16) | pbox->x2);
+			OUT_RING(dev_priv->depth_offset);
+			OUT_RING(clear_zval);
+			ADVANCE_LP_RING();
+		}
+	}
+}
+
+static void i830_dma_dispatch_swap(struct drm_device * dev)
+{
+	drm_i830_private_t *dev_priv = dev->dev_private;
+	drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	int nbox = sarea_priv->nbox;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
+	int pitch = dev_priv->pitch;
+	int cpp = dev_priv->cpp;
+	int i;
+	unsigned int CMD, BR13;
+	RING_LOCALS;
+
+	DRM_DEBUG("swapbuffers\n");
+
+	i830_kernel_lost_context(dev);
+
+	if (dev_priv->do_boxes)
+		i830_cp_performance_boxes(dev);
+
+	switch (cpp) {
+	case 2:
+		BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24);
+		CMD = XY_SRC_COPY_BLT_CMD;
+		break;
+	case 4:
+		BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24) | (1 << 25);
+		CMD = (XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
+		       XY_SRC_COPY_BLT_WRITE_RGB);
+		break;
+	default:
+		BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24);
+		CMD = XY_SRC_COPY_BLT_CMD;
+		break;
+	}
+
+	if (nbox > I830_NR_SAREA_CLIPRECTS)
+		nbox = I830_NR_SAREA_CLIPRECTS;
+
+	for (i = 0; i < nbox; i++, pbox++) {
+		if (pbox->x1 > pbox->x2 ||
+		    pbox->y1 > pbox->y2 ||
+		    pbox->x2 > dev_priv->w || pbox->y2 > dev_priv->h)
+			continue;
+
+		DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n",
+			  pbox->x1, pbox->y1, pbox->x2, pbox->y2);
+
+		BEGIN_LP_RING(8);
+		OUT_RING(CMD);
+		OUT_RING(BR13);
+		OUT_RING((pbox->y1 << 16) | pbox->x1);
+		OUT_RING((pbox->y2 << 16) | pbox->x2);
+
+		if (dev_priv->current_page == 0)
+			OUT_RING(dev_priv->front_offset);
+		else
+			OUT_RING(dev_priv->back_offset);
+
+		OUT_RING((pbox->y1 << 16) | pbox->x1);
+		OUT_RING(BR13 & 0xffff);
+
+		if (dev_priv->current_page == 0)
+			OUT_RING(dev_priv->back_offset);
+		else
+			OUT_RING(dev_priv->front_offset);
+
+		ADVANCE_LP_RING();
+	}
+}
+
+static void i830_dma_dispatch_flip(struct drm_device * dev)
+{
+	drm_i830_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+
+	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
+		  __func__,
+		  dev_priv->current_page,
+		  dev_priv->sarea_priv->pf_current_page);
+
+	i830_kernel_lost_context(dev);
+
+	if (dev_priv->do_boxes) {
+		dev_priv->sarea_priv->perf_boxes |= I830_BOX_FLIP;
+		i830_cp_performance_boxes(dev);
+	}
+
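+	/* Flip sequence: flush the map cache, point the display plane at the
+	 * other buffer with an asynchronous flip, then stall the ring until
+	 * the plane A flip has completed.
+	 */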
+	BEGIN_LP_RING(2);
+	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+
+	BEGIN_LP_RING(6);
+	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
+	OUT_RING(0);
+	if (dev_priv->current_page == 0) {
+		OUT_RING(dev_priv->back_offset);
+		dev_priv->current_page = 1;
+	} else {
+		OUT_RING(dev_priv->front_offset);
+		dev_priv->current_page = 0;
+	}
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+
+	BEGIN_LP_RING(2);
+	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+
+	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
+}
+
+static void i830_dma_dispatch_vertex(struct drm_device * dev,
+				     struct drm_buf * buf, int discard, int used)
+{
+	drm_i830_private_t *dev_priv = dev->dev_private;
+	drm_i830_buf_priv_t *buf_priv = buf->dev_private;
+	drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	struct drm_clip_rect *box = sarea_priv->boxes;
+	int nbox = sarea_priv->nbox;
+	unsigned long address = (unsigned long)buf->bus_address;
+	unsigned long start = address - dev->agp->base;
+	int i = 0, u;
+	RING_LOCALS;
+
+	i830_kernel_lost_context(dev);
+
+	if (nbox > I830_NR_SAREA_CLIPRECTS)
+		nbox = I830_NR_SAREA_CLIPRECTS;
+
+	if (discard) {
+		u = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
+			    I830_BUF_HARDWARE);
+		if (u != I830_BUF_CLIENT) {
+			DRM_DEBUG("vertex buffer not owned by client\n");
+		}
+	}
+
+	if (used > 4 * 1023)
+		used = 0;
+
+	if (sarea_priv->dirty)
+		i830EmitState(dev);
+
+	DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n",
+		  address, used, nbox);
+
+	dev_priv->counter++;
+	DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
+	DRM_DEBUG("i830_dma_dispatch\n");
+	DRM_DEBUG("start : %lx\n", start);
+	DRM_DEBUG("used : %d\n", used);
+	DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
+
+	if (buf_priv->currently_mapped == I830_BUF_MAPPED) {
+		u32 *vp = buf_priv->kernel_virtual;
+
+		vp[0] = (GFX_OP_PRIMITIVE |
+			 sarea_priv->vertex_prim | ((used / 4) - 2));
+
+		if (dev_priv->use_mi_batchbuffer_start) {
+			vp[used / 4] = MI_BATCH_BUFFER_END;
+			used += 4;
+		}
+
+		if (used & 4) {
+			vp[used / 4] = 0;
+			used += 4;
+		}
+
+		i830_unmap_buffer(buf);
+	}
+
+	if (used) {
+		do {
+			if (i < nbox) {
+				BEGIN_LP_RING(6);
+				OUT_RING(GFX_OP_DRAWRECT_INFO);
+				OUT_RING(sarea_priv->
+					 BufferState[I830_DESTREG_DR1]);
+				OUT_RING(box[i].x1 | (box[i].y1 << 16));
+				OUT_RING(box[i].x2 | (box[i].y2 << 16));
+				OUT_RING(sarea_priv->
+					 BufferState[I830_DESTREG_DR4]);
+				OUT_RING(0);
+				ADVANCE_LP_RING();
+			}
+
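+			/* Kick off the batch either with MI_BATCH_BUFFER_START
+			 * (start address only) or with the two-address
+			 * MI_BATCH_BUFFER form, as selected by userspace via
+			 * the I830_SETPARAM ioctl.
+			 */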
+			if (dev_priv->use_mi_batchbuffer_start) {
+				BEGIN_LP_RING(2);
+				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
+				OUT_RING(start | MI_BATCH_NON_SECURE);
+				ADVANCE_LP_RING();
+			} else {
+				BEGIN_LP_RING(4);
+				OUT_RING(MI_BATCH_BUFFER);
+				OUT_RING(start | MI_BATCH_NON_SECURE);
+				OUT_RING(start + used - 4);
+				OUT_RING(0);
+				ADVANCE_LP_RING();
+			}
+
+		} while (++i < nbox);
+	}
+
+	if (discard) {
+		dev_priv->counter++;
+
+		(void)cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
+			      I830_BUF_HARDWARE);
+
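+		/* Ask the hardware to retire the buffer: once the ring reaches
+		 * these commands it stores the new dispatch counter and writes
+		 * I830_BUF_FREE into this buffer's in-use word in the status
+		 * page.
+		 */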
+		BEGIN_LP_RING(8);
+		OUT_RING(CMD_STORE_DWORD_IDX);
+		OUT_RING(20);
+		OUT_RING(dev_priv->counter);
+		OUT_RING(CMD_STORE_DWORD_IDX);
+		OUT_RING(buf_priv->my_use_idx);
+		OUT_RING(I830_BUF_FREE);
+		OUT_RING(CMD_REPORT_HEAD);
+		OUT_RING(0);
+		ADVANCE_LP_RING();
+	}
+}
+
+static void i830_dma_quiescent(struct drm_device * dev)
+{
+	drm_i830_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+
+	i830_kernel_lost_context(dev);
+
+	BEGIN_LP_RING(4);
+	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
+	OUT_RING(CMD_REPORT_HEAD);
+	OUT_RING(0);
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+
+	i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
+}
+
+static int i830_flush_queue(struct drm_device * dev)
+{
+	drm_i830_private_t *dev_priv = dev->dev_private;
+	struct drm_device_dma *dma = dev->dma;
+	int i, ret = 0;
+	RING_LOCALS;
+
+	i830_kernel_lost_context(dev);
+
+	BEGIN_LP_RING(2);
+	OUT_RING(CMD_REPORT_HEAD);
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+
+	i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
+
+	for (i = 0; i < dma->buf_count; i++) {
+		struct drm_buf *buf = dma->buflist[i];
+		drm_i830_buf_priv_t *buf_priv = buf->dev_private;
+
+		int used = cmpxchg(buf_priv->in_use, I830_BUF_HARDWARE,
+				   I830_BUF_FREE);
+
+		if (used == I830_BUF_HARDWARE)
+			DRM_DEBUG("reclaimed from HARDWARE\n");
+		if (used == I830_BUF_CLIENT)
+			DRM_DEBUG("still on client\n");
+	}
+
+	return ret;
+}
+
+/* Must be called with the lock held */
+static void i830_reclaim_buffers(struct drm_device * dev, struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	int i;
+
+	if (!dma)
+		return;
+	if (!dev->dev_private)
+		return;
+	if (!dma->buflist)
+		return;
+
+	i830_flush_queue(dev);
+
+	for (i = 0; i < dma->buf_count; i++) {
+		struct drm_buf *buf = dma->buflist[i];
+		drm_i830_buf_priv_t *buf_priv = buf->dev_private;
+
+		if (buf->file_priv == file_priv && buf_priv) {
+			int used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
+					   I830_BUF_FREE);
+
+			if (used == I830_BUF_CLIENT)
+				DRM_DEBUG("reclaimed from client\n");
+			if (buf_priv->currently_mapped == I830_BUF_MAPPED)
+				buf_priv->currently_mapped = I830_BUF_UNMAPPED;
+		}
+	}
+}
+
+static int i830_flush_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv)
+{
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	i830_flush_queue(dev);
+	return 0;
+}
+
+static int i830_dma_vertex(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
+	u32 *hw_status = dev_priv->hw_status_page;
+	drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
+	    dev_priv->sarea_priv;
+	drm_i830_vertex_t *vertex = data;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DRM_DEBUG("i830 dma vertex, idx %d used %d discard %d\n",
+		  vertex->idx, vertex->used, vertex->discard);
+
+	if (vertex->idx < 0 || vertex->idx >= dma->buf_count)
+		return -EINVAL;
+
+	i830_dma_dispatch_vertex(dev,
+				 dma->buflist[vertex->idx],
+				 vertex->discard, vertex->used);
+
+	sarea_priv->last_enqueue = dev_priv->counter - 1;
+	sarea_priv->last_dispatch = (int)hw_status[5];
+
+	return 0;
+}
+
+static int i830_clear_bufs(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
+{
+	drm_i830_clear_t *clear = data;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	/* GH: Someone's doing nasty things... */
+	if (!dev->dev_private) {
+		return -EINVAL;
+	}
+
+	i830_dma_dispatch_clear(dev, clear->flags,
+				clear->clear_color,
+				clear->clear_depth, clear->clear_depthmask);
+	return 0;
+}
+
+static int i830_swap_bufs(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	DRM_DEBUG("i830_swap_bufs\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	i830_dma_dispatch_swap(dev);
+	return 0;
+}
+
+/* Not sure why this isn't set all the time:
+ */
+static void i830_do_init_pageflip(struct drm_device * dev)
+{
+	drm_i830_private_t *dev_priv = dev->dev_private;
+
+	DRM_DEBUG("%s\n", __func__);
+	dev_priv->page_flipping = 1;
+	dev_priv->current_page = 0;
+	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
+}
+
+static int i830_do_cleanup_pageflip(struct drm_device * dev)
+{
+	drm_i830_private_t *dev_priv = dev->dev_private;
+
+	DRM_DEBUG("%s\n", __func__);
+	if (dev_priv->current_page != 0)
+		i830_dma_dispatch_flip(dev);
+
+	dev_priv->page_flipping = 0;
+	return 0;
+}
+
+static int i830_flip_bufs(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	drm_i830_private_t *dev_priv = dev->dev_private;
+
+	DRM_DEBUG("%s\n", __func__);
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (!dev_priv->page_flipping)
+		i830_do_init_pageflip(dev);
+
+	i830_dma_dispatch_flip(dev);
+	return 0;
+}
+
+static int i830_getage(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
+	u32 *hw_status = dev_priv->hw_status_page;
+	drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
+	    dev_priv->sarea_priv;
+
+	sarea_priv->last_dispatch = (int)hw_status[5];
+	return 0;
+}
+
+static int i830_getbuf(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	int retcode = 0;
+	drm_i830_dma_t *d = data;
+	drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
+	u32 *hw_status = dev_priv->hw_status_page;
+	drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
+	    dev_priv->sarea_priv;
+
+	DRM_DEBUG("getbuf\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	d->granted = 0;
+
+	retcode = i830_dma_get_buffer(dev, d, file_priv);
+
+	DRM_DEBUG("i830_dma: %d returning %d, granted = %d\n",
+		  task_pid_nr(current), retcode, d->granted);
+
+	sarea_priv->last_dispatch = (int)hw_status[5];
+
+	return retcode;
+}
+
+static int i830_copybuf(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	/* Never copy - 2.4.x doesn't need it */
+	return 0;
+}
+
+static int i830_docopy(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	return 0;
+}
+
+static int i830_getparam(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i830_private_t *dev_priv = dev->dev_private;
+	drm_i830_getparam_t *param = data;
+	int value;
+
+	if (!dev_priv) {
+		DRM_ERROR("%s called with no initialization\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (param->param) {
+	case I830_PARAM_IRQ_ACTIVE:
+		value = dev->irq_enabled;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (copy_to_user(param->value, &value, sizeof(int))) {
+		DRM_ERROR("copy_to_user\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int i830_setparam(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i830_private_t *dev_priv = dev->dev_private;
+	drm_i830_setparam_t *param = data;
+
+	if (!dev_priv) {
+		DRM_ERROR("%s called with no initialization\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (param->param) {
+	case I830_SETPARAM_USE_MI_BATCHBUFFER_START:
+		dev_priv->use_mi_batchbuffer_start = param->value;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int i830_driver_load(struct drm_device *dev, unsigned long flags)
+{
+	/* i830 has 4 more counters */
+	dev->counters += 4;
+	dev->types[6] = _DRM_STAT_IRQ;
+	dev->types[7] = _DRM_STAT_PRIMARY;
+	dev->types[8] = _DRM_STAT_SECONDARY;
+	dev->types[9] = _DRM_STAT_DMA;
+
+	return 0;
+}
+
+void i830_driver_lastclose(struct drm_device * dev)
+{
+	i830_dma_cleanup(dev);
+}
+
+void i830_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+{
+	if (dev->dev_private) {
+		drm_i830_private_t *dev_priv = dev->dev_private;
+		if (dev_priv->page_flipping) {
+			i830_do_cleanup_pageflip(dev);
+		}
+	}
+}
+
+void i830_driver_reclaim_buffers_locked(struct drm_device * dev, struct drm_file *file_priv)
+{
+	i830_reclaim_buffers(dev, file_priv);
+}
+
+int i830_driver_dma_quiescent(struct drm_device * dev)
+{
+	i830_dma_quiescent(dev);
+	return 0;
+}
+
+struct drm_ioctl_desc i830_ioctls[] = {
+	DRM_IOCTL_DEF(DRM_I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_I830_VERTEX, i830_dma_vertex, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I830_CLEAR, i830_clear_bufs, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I830_FLUSH, i830_flush_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I830_GETAGE, i830_getage, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I830_GETBUF, i830_getbuf, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I830_SWAP, i830_swap_bufs, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I830_COPY, i830_copybuf, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I830_DOCOPY, i830_docopy, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I830_FLIP, i830_flip_bufs, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I830_GETPARAM, i830_getparam, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I830_SETPARAM, i830_setparam, DRM_AUTH)
+};
+
+int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
+
+/**
+ * Determine if the device really is AGP or not.
+ *
+ * All Intel graphics chipsets are treated as AGP, even if they are really
+ * PCI-e.
+ *
+ * \param dev   The device to be tested.
+ *
+ * \returns
+ * A value of 1 is always returned to indicate that every i8xx is AGP.
+ */
+int i830_driver_device_is_agp(struct drm_device * dev)
+{
+	return 1;
+}
diff --git a/drivers/gpu/drm/i830/i830_drv.c b/drivers/gpu/drm/i830/i830_drv.c
new file mode 100644
index 000000000000..389597e4a623
--- /dev/null
+++ b/drivers/gpu/drm/i830/i830_drv.c
@@ -0,0 +1,108 @@
+/* i830_drv.c -- I830 driver -*- linux-c -*-
+ * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Rickard E. (Rik) Faith <faith@valinux.com>
+ *    Jeff Hartmann <jhartmann@valinux.com>
+ *    Gareth Hughes <gareth@valinux.com>
+ *    Abraham vd Merwe <abraham@2d3d.co.za>
+ *    Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i830_drm.h"
+#include "i830_drv.h"
+
+#include "drm_pciids.h"
+
+static struct pci_device_id pciidlist[] = {
+	i830_PCI_IDS
+};
+
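+/* With USE_IRQS defined to 0 in i830_drv.h, the IRQ feature flags and
+ * hooks below are compiled out and the driver polls the ring instead.
+ */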
+static struct drm_driver driver = {
+	.driver_features =
+	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
+	    DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
+#if USE_IRQS
+	.driver_features |= DRIVER_HAVE_IRQ | DRIVER_SHARED_IRQ,
+#endif
+	.dev_priv_size = sizeof(drm_i830_buf_priv_t),
+	.load = i830_driver_load,
+	.lastclose = i830_driver_lastclose,
+	.preclose = i830_driver_preclose,
+	.device_is_agp = i830_driver_device_is_agp,
+	.reclaim_buffers_locked = i830_driver_reclaim_buffers_locked,
+	.dma_quiescent = i830_driver_dma_quiescent,
+	.get_map_ofs = drm_core_get_map_ofs,
+	.get_reg_ofs = drm_core_get_reg_ofs,
+#if USE_IRQS
+	.irq_preinstall = i830_driver_irq_preinstall,
+	.irq_postinstall = i830_driver_irq_postinstall,
+	.irq_uninstall = i830_driver_irq_uninstall,
+	.irq_handler = i830_driver_irq_handler,
+#endif
+	.ioctls = i830_ioctls,
+	.fops = {
+		 .owner = THIS_MODULE,
+		 .open = drm_open,
+		 .release = drm_release,
+		 .ioctl = drm_ioctl,
+		 .mmap = drm_mmap,
+		 .poll = drm_poll,
+		 .fasync = drm_fasync,
+	},
+
+	.pci_driver = {
+		 .name = DRIVER_NAME,
+		 .id_table = pciidlist,
+	},
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static int __init i830_init(void)
+{
+	driver.num_ioctls = i830_max_ioctl;
+	return drm_init(&driver);
+}
+
+static void __exit i830_exit(void)
+{
+	drm_exit(&driver);
+}
+
+module_init(i830_init);
+module_exit(i830_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
new file mode 100644
index 000000000000..b5bf8cc0fdaa
--- /dev/null
+++ b/drivers/gpu/drm/i830/i830_drv.h
@@ -0,0 +1,292 @@
+/* i830_drv.h -- Private header for the I830 driver -*- linux-c -*-
+ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
+ *	    Jeff Hartmann <jhartmann@valinux.com>
+ *
+ */
+
+#ifndef _I830_DRV_H_
+#define _I830_DRV_H_
+
+/* General customization:
+ */
+
+#define DRIVER_AUTHOR		"VA Linux Systems Inc."
+
+#define DRIVER_NAME		"i830"
+#define DRIVER_DESC		"Intel 830M"
+#define DRIVER_DATE		"20021108"
+
+/* Interface history:
+ *
+ * 1.1: Original.
+ * 1.2: ?
+ * 1.3: New irq emit/wait ioctls.
+ *      New pageflip ioctl.
+ *      New getparam ioctl.
+ *      State for texunits 3&4 in sarea.
+ *      New (alternative) layout for texture state.
+ */
+#define DRIVER_MAJOR		1
+#define DRIVER_MINOR		3
+#define DRIVER_PATCHLEVEL	2
+
+/* The driver will work either way: IRQs save CPU time when waiting for
+ * the card, but are subject to subtle interactions between the BIOS,
+ * the hardware and the driver.
+ */
+/* XXX: Add vblank support? */
+#define USE_IRQS 0
+
+typedef struct drm_i830_buf_priv {
+	u32 *in_use;
+	int my_use_idx;
+	int currently_mapped;
+	void __user *virtual;
+	void *kernel_virtual;
+	drm_local_map_t map;
+} drm_i830_buf_priv_t;
+
+typedef struct _drm_i830_ring_buffer {
+	int tail_mask;
+	unsigned long Start;
+	unsigned long End;
+	unsigned long Size;
+	u8 *virtual_start;
+	int head;
+	int tail;
+	int space;
+	drm_local_map_t map;
+} drm_i830_ring_buffer_t;
+
+typedef struct drm_i830_private {
+	struct drm_map *sarea_map;
+	struct drm_map *mmio_map;
+
+	drm_i830_sarea_t *sarea_priv;
+	drm_i830_ring_buffer_t ring;
+
+	void *hw_status_page;
+	unsigned long counter;
+
+	dma_addr_t dma_status_page;
+
+	struct drm_buf *mmap_buffer;
+
+	u32 front_di1, back_di1, zi1;
+
+	int back_offset;
+	int depth_offset;
+	int front_offset;
+	int w, h;
+	int pitch;
+	int back_pitch;
+	int depth_pitch;
+	unsigned int cpp;
+
+	int do_boxes;
+	int dma_used;
+
+	int current_page;
+	int page_flipping;
+
+	wait_queue_head_t irq_queue;
+	atomic_t irq_received;
+	atomic_t irq_emitted;
+
+	int use_mi_batchbuffer_start;
+
+} drm_i830_private_t;
+
+extern struct drm_ioctl_desc i830_ioctls[];
+extern int i830_max_ioctl;
+
+/* i830_irq.c */
+extern int i830_irq_emit(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+extern int i830_irq_wait(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+
+extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS);
+extern void i830_driver_irq_preinstall(struct drm_device * dev);
+extern void i830_driver_irq_postinstall(struct drm_device * dev);
+extern void i830_driver_irq_uninstall(struct drm_device * dev);
+extern int i830_driver_load(struct drm_device *, unsigned long flags);
+extern void i830_driver_preclose(struct drm_device * dev,
+				 struct drm_file *file_priv);
+extern void i830_driver_lastclose(struct drm_device * dev);
+extern void i830_driver_reclaim_buffers_locked(struct drm_device * dev,
+					       struct drm_file *file_priv);
+extern int i830_driver_dma_quiescent(struct drm_device * dev);
+extern int i830_driver_device_is_agp(struct drm_device * dev);
+
+#define I830_READ(reg)          DRM_READ32(dev_priv->mmio_map, reg)
+#define I830_WRITE(reg,val)     DRM_WRITE32(dev_priv->mmio_map, reg, val)
+#define I830_READ16(reg)        DRM_READ16(dev_priv->mmio_map, reg)
+#define I830_WRITE16(reg,val)   DRM_WRITE16(dev_priv->mmio_map, reg, val)
+
+#define I830_VERBOSE 0
+
+#define RING_LOCALS	unsigned int outring, ringmask, outcount; \
+                        volatile char *virt;
+
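+/* Ring emission helpers: BEGIN_LP_RING() reserves space (calling
+ * i830_wait_ring() if the ring is too full), OUT_RING() writes one dword
+ * at the current tail, and ADVANCE_LP_RING() publishes the new tail to
+ * the hardware through the LP_RING tail register.
+ */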
+#define BEGIN_LP_RING(n) do {				\
+	if (I830_VERBOSE)				\
+		printk("BEGIN_LP_RING(%d)\n", (n));	\
+	if (dev_priv->ring.space < n*4)			\
+		i830_wait_ring(dev, n*4, __func__);		\
+	outcount = 0;					\
+	outring = dev_priv->ring.tail;			\
+	ringmask = dev_priv->ring.tail_mask;		\
+	virt = dev_priv->ring.virtual_start;		\
+} while (0)
+
+#define OUT_RING(n) do {					\
+	if (I830_VERBOSE) printk("   OUT_RING %x\n", (int)(n));	\
+	*(volatile unsigned int *)(virt + outring) = n;		\
+        outcount++;						\
+	outring += 4;						\
+	outring &= ringmask;					\
+} while (0)
+
+#define ADVANCE_LP_RING() do {						\
+	if (I830_VERBOSE) printk("ADVANCE_LP_RING %x\n", outring);	\
+	dev_priv->ring.tail = outring;					\
+	dev_priv->ring.space -= outcount * 4;				\
+	I830_WRITE(LP_RING + RING_TAIL, outring);			\
+} while(0)
+
+extern int i830_wait_ring(struct drm_device * dev, int n, const char *caller);
+
+#define GFX_OP_USER_INTERRUPT		((0<<29)|(2<<23))
+#define GFX_OP_BREAKPOINT_INTERRUPT	((0<<29)|(1<<23))
+#define CMD_REPORT_HEAD			(7<<23)
+#define CMD_STORE_DWORD_IDX		((0x21<<23) | 0x1)
+#define CMD_OP_BATCH_BUFFER  ((0x0<<29)|(0x30<<23)|0x1)
+
+#define STATE3D_LOAD_STATE_IMMEDIATE_2      ((0x3<<29)|(0x1d<<24)|(0x03<<16))
+#define LOAD_TEXTURE_MAP0                   (1<<11)
+
+#define INST_PARSER_CLIENT   0x00000000
+#define INST_OP_FLUSH        0x02000000
+#define INST_FLUSH_MAP_CACHE 0x00000001
+
+#define BB1_START_ADDR_MASK   (~0x7)
+#define BB1_PROTECTED         (1<<0)
+#define BB1_UNPROTECTED       (0<<0)
+#define BB2_END_ADDR_MASK     (~0x7)
+
+#define I830REG_HWSTAM		0x02098
+#define I830REG_INT_IDENTITY_R	0x020a4
+#define I830REG_INT_MASK_R	0x020a8
+#define I830REG_INT_ENABLE_R	0x020a0
+
+#define I830_IRQ_RESERVED ((1<<13)|(3<<2))
+
+#define LP_RING			0x2030
+#define HP_RING			0x2040
+#define RING_TAIL		0x00
+#define TAIL_ADDR		0x001FFFF8
+#define RING_HEAD		0x04
+#define HEAD_WRAP_COUNT		0xFFE00000
+#define HEAD_WRAP_ONE		0x00200000
+#define HEAD_ADDR		0x001FFFFC
+#define RING_START		0x08
+#define START_ADDR		0xFFFFF000
+#define RING_LEN		0x0C
+#define RING_NR_PAGES		0x001FF000
+#define RING_REPORT_MASK	0x00000006
+#define RING_REPORT_64K		0x00000002
+#define RING_REPORT_128K	0x00000004
+#define RING_NO_REPORT		0x00000000
+#define RING_VALID_MASK		0x00000001
+#define RING_VALID		0x00000001
+#define RING_INVALID		0x00000000
+
+#define GFX_OP_SCISSOR         ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define SC_UPDATE_SCISSOR       (0x1<<1)
+#define SC_ENABLE_MASK          (0x1<<0)
+#define SC_ENABLE               (0x1<<0)
+
+#define GFX_OP_SCISSOR_INFO    ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
+#define SCI_YMIN_MASK      (0xffff<<16)
+#define SCI_XMIN_MASK      (0xffff<<0)
+#define SCI_YMAX_MASK      (0xffff<<16)
+#define SCI_XMAX_MASK      (0xffff<<0)
+
+#define GFX_OP_SCISSOR_ENABLE	 ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define GFX_OP_SCISSOR_RECT	 ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
+#define GFX_OP_COLOR_FACTOR      ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
+#define GFX_OP_STIPPLE           ((0x3<<29)|(0x1d<<24)|(0x83<<16))
+#define GFX_OP_MAP_INFO          ((0x3<<29)|(0x1d<<24)|0x4)
+#define GFX_OP_DESTBUFFER_VARS   ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
+#define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
+#define GFX_OP_PRIMITIVE         ((0x3<<29)|(0x1f<<24))
+
+#define CMD_OP_DESTBUFFER_INFO	 ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
+
+#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
+#define ASYNC_FLIP                (1<<22)
+
+#define CMD_3D                          (0x3<<29)
+#define STATE3D_CONST_BLEND_COLOR_CMD   (CMD_3D|(0x1d<<24)|(0x88<<16))
+#define STATE3D_MAP_COORD_SETBIND_CMD   (CMD_3D|(0x1d<<24)|(0x02<<16))
+
+#define BR00_BITBLT_CLIENT   0x40000000
+#define BR00_OP_COLOR_BLT    0x10000000
+#define BR00_OP_SRC_COPY_BLT 0x10C00000
+#define BR13_SOLID_PATTERN   0x80000000
+
+#define BUF_3D_ID_COLOR_BACK    (0x3<<24)
+#define BUF_3D_ID_DEPTH         (0x7<<24)
+#define BUF_3D_USE_FENCE        (1<<23)
+#define BUF_3D_PITCH(x)         (((x)/4)<<2)
+
+#define CMD_OP_MAP_PALETTE_LOAD	((3<<29)|(0x1d<<24)|(0x82<<16)|255)
+#define MAP_PALETTE_NUM(x)	((x<<8) & (1<<8))
+#define MAP_PALETTE_BOTH	(1<<11)
+
+#define XY_COLOR_BLT_CMD		((2<<29)|(0x50<<22)|0x4)
+#define XY_COLOR_BLT_WRITE_ALPHA	(1<<21)
+#define XY_COLOR_BLT_WRITE_RGB		(1<<20)
+
+#define XY_SRC_COPY_BLT_CMD             ((2<<29)|(0x53<<22)|6)
+#define XY_SRC_COPY_BLT_WRITE_ALPHA     (1<<21)
+#define XY_SRC_COPY_BLT_WRITE_RGB       (1<<20)
+
+#define MI_BATCH_BUFFER		((0x30<<23)|1)
+#define MI_BATCH_BUFFER_START	(0x31<<23)
+#define MI_BATCH_BUFFER_END	(0xA<<23)
+#define MI_BATCH_NON_SECURE	(1)
+
+#define MI_WAIT_FOR_EVENT       ((0x3<<23))
+#define MI_WAIT_FOR_PLANE_A_FLIP      (1<<2)
+#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
+
+#define MI_LOAD_SCAN_LINES_INCL  ((0x12<<23))
+
+#endif
diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
new file mode 100644
index 000000000000..91ec2bb497e9
--- /dev/null
+++ b/drivers/gpu/drm/i830/i830_irq.c
@@ -0,0 +1,186 @@
+/* i830_irq.c -- IRQ support for the I830 -*- linux-c -*-
+ *
+ * Copyright 2002 Tungsten Graphics, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Keith Whitwell <keith@tungstengraphics.com>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i830_drm.h"
+#include "i830_drv.h"
+#include <linux/interrupt.h>	/* For task queue support */
+#include <linux/delay.h>
+
+irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
+	u16 temp;
+
+	temp = I830_READ16(I830REG_INT_IDENTITY_R);
+	DRM_DEBUG("%x\n", temp);
+
+	if (!(temp & 2))
+		return IRQ_NONE;
+
+	I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
+
+	atomic_inc(&dev_priv->irq_received);
+	wake_up_interruptible(&dev_priv->irq_queue);
+
+	return IRQ_HANDLED;
+}
+
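+/* Interrupt sequencing: i830_emit_irq() queues a GFX_OP_USER_INTERRUPT on
+ * the ring and returns a sequence number; i830_wait_irq() then sleeps on
+ * irq_queue until irq_received catches up with that number or a three
+ * second timeout expires.
+ */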
+static int i830_emit_irq(struct drm_device * dev)
+{
+	drm_i830_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+
+	DRM_DEBUG("%s\n", __func__);
+
+	atomic_inc(&dev_priv->irq_emitted);
+
+	BEGIN_LP_RING(2);
+	OUT_RING(0);
+	OUT_RING(GFX_OP_USER_INTERRUPT);
+	ADVANCE_LP_RING();
+
+	return atomic_read(&dev_priv->irq_emitted);
+}
+
+static int i830_wait_irq(struct drm_device * dev, int irq_nr)
+{
+	drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
+	DECLARE_WAITQUEUE(entry, current);
+	unsigned long end = jiffies + HZ * 3;
+	int ret = 0;
+
+	DRM_DEBUG("%s\n", __func__);
+
+	if (atomic_read(&dev_priv->irq_received) >= irq_nr)
+		return 0;
+
+	dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
+
+	add_wait_queue(&dev_priv->irq_queue, &entry);
+
+	for (;;) {
+		__set_current_state(TASK_INTERRUPTIBLE);
+		if (atomic_read(&dev_priv->irq_received) >= irq_nr)
+			break;
+		if ((signed)(end - jiffies) <= 0) {
+			DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
+				  I830_READ16(I830REG_INT_IDENTITY_R),
+				  I830_READ16(I830REG_INT_MASK_R),
+				  I830_READ16(I830REG_INT_ENABLE_R),
+				  I830_READ16(I830REG_HWSTAM));
+
+			ret = -EBUSY;	/* Lockup?  Missed irq? */
+			break;
+		}
+		schedule_timeout(HZ * 3);
+		if (signal_pending(current)) {
+			ret = -EINTR;
+			break;
+		}
+	}
+
+	__set_current_state(TASK_RUNNING);
+	remove_wait_queue(&dev_priv->irq_queue, &entry);
+	return ret;
+}
+
+/* Needs the lock as it touches the ring.
+ */
+int i830_irq_emit(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
+{
+	drm_i830_private_t *dev_priv = dev->dev_private;
+	drm_i830_irq_emit_t *emit = data;
+	int result;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (!dev_priv) {
+		DRM_ERROR("%s called with no initialization\n", __func__);
+		return -EINVAL;
+	}
+
+	result = i830_emit_irq(dev);
+
+	if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
+		DRM_ERROR("copy_to_user\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* Doesn't need the hardware lock.
+ */
+int i830_irq_wait(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
+{
+	drm_i830_private_t *dev_priv = dev->dev_private;
+	drm_i830_irq_wait_t *irqwait = data;
+
+	if (!dev_priv) {
+		DRM_ERROR("%s called with no initialization\n", __func__);
+		return -EINVAL;
+	}
+
+	return i830_wait_irq(dev, irqwait->irq_seq);
+}
+
+/* drm_dma.h hooks
+*/
+void i830_driver_irq_preinstall(struct drm_device * dev)
+{
+	drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
+
+	I830_WRITE16(I830REG_HWSTAM, 0xffff);
+	I830_WRITE16(I830REG_INT_MASK_R, 0x0);
+	I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
+	atomic_set(&dev_priv->irq_received, 0);
+	atomic_set(&dev_priv->irq_emitted, 0);
+	init_waitqueue_head(&dev_priv->irq_queue);
+}
+
+void i830_driver_irq_postinstall(struct drm_device * dev)
+{
+	drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
+
+	I830_WRITE16(I830REG_INT_ENABLE_R, 0x2);
+}
+
+void i830_driver_irq_uninstall(struct drm_device * dev)
+{
+	drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
+	if (!dev_priv)
+		return;
+
+	I830_WRITE16(I830REG_INT_MASK_R, 0xffff);
+	I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
+}
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
new file mode 100644
index 000000000000..a9e60464df74
--- /dev/null
+++ b/drivers/gpu/drm/i915/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o
+
+i915-$(CONFIG_COMPAT)   += i915_ioc32.o
+
+obj-$(CONFIG_DRM_I915)  += i915.o
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
new file mode 100644
index 000000000000..88974342933c
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -0,0 +1,858 @@
+/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
+ */
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/* Really want an OS-independent resettable timer.  Would like to have
+ * this loop run for (eg) 3 sec, but have the timer reset every time
+ * the head pointer changes, so that EBUSY only happens if the ring
+ * actually stalls for (eg) 3 seconds.
+ */
+int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+	u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+	int i;
+
+	for (i = 0; i < 10000; i++) {
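+		/* Free space is head - (tail + 8), wrapped modulo the ring
+		 * size; the 8-byte slack keeps the tail from ever quite
+		 * catching up with the head.
+		 */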
+		ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+		ring->space = ring->head - (ring->tail + 8);
+		if (ring->space < 0)
+			ring->space += ring->Size;
+		if (ring->space >= n)
+			return 0;
+
+		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+
+		if (ring->head != last_head)
+			i = 0;
+
+		last_head = ring->head;
+	}
+
+	return -EBUSY;
+}
+
+void i915_kernel_lost_context(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+
+	ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+	ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
+	ring->space = ring->head - (ring->tail + 8);
+	if (ring->space < 0)
+		ring->space += ring->Size;
+
+	if (ring->head == ring->tail)
+		dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
+}
+
+static int i915_dma_cleanup(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	/* Make sure interrupts are disabled here because the uninstall ioctl
+	 * may not have been called from userspace, and once dev_private
+	 * is freed it is too late.
+	 */
+	if (dev->irq)
+		drm_irq_uninstall(dev);
+
+	if (dev_priv->ring.virtual_start) {
+		drm_core_ioremapfree(&dev_priv->ring.map, dev);
+		dev_priv->ring.virtual_start = 0;
+		dev_priv->ring.map.handle = 0;
+		dev_priv->ring.map.size = 0;
+	}
+
+	if (dev_priv->status_page_dmah) {
+		drm_pci_free(dev, dev_priv->status_page_dmah);
+		dev_priv->status_page_dmah = NULL;
+		/* Need to rewrite hardware status page */
+		I915_WRITE(0x02080, 0x1ffff000);
+	}
+
+	if (dev_priv->status_gfx_addr) {
+		dev_priv->status_gfx_addr = 0;
+		drm_core_ioremapfree(&dev_priv->hws_map, dev);
+		I915_WRITE(0x2080, 0x1ffff000);
+	}
+
+	return 0;
+}
+
+static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	dev_priv->sarea = drm_getsarea(dev);
+	if (!dev_priv->sarea) {
+		DRM_ERROR("can not find sarea!\n");
+		i915_dma_cleanup(dev);
+		return -EINVAL;
+	}
+
+	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
+	if (!dev_priv->mmio_map) {
+		i915_dma_cleanup(dev);
+		DRM_ERROR("can not find mmio map!\n");
+		return -EINVAL;
+	}
+
+	dev_priv->sarea_priv = (drm_i915_sarea_t *)
+	    ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
+
+	dev_priv->ring.Start = init->ring_start;
+	dev_priv->ring.End = init->ring_end;
+	dev_priv->ring.Size = init->ring_size;
+	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
+
+	dev_priv->ring.map.offset = init->ring_start;
+	dev_priv->ring.map.size = init->ring_size;
+	dev_priv->ring.map.type = 0;
+	dev_priv->ring.map.flags = 0;
+	dev_priv->ring.map.mtrr = 0;
+
+	drm_core_ioremap(&dev_priv->ring.map, dev);
+
+	if (dev_priv->ring.map.handle == NULL) {
+		i915_dma_cleanup(dev);
+		DRM_ERROR("can not ioremap virtual address for"
+			  " ring buffer\n");
+		return -ENOMEM;
+	}
+
+	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+
+	dev_priv->cpp = init->cpp;
+	dev_priv->back_offset = init->back_offset;
+	dev_priv->front_offset = init->front_offset;
+	dev_priv->current_page = 0;
+	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
+
+	/* We use separate values as placeholders for mechanisms that will
+	 * handle private backbuffer/depthbuffer usage.
+	 */
+	dev_priv->use_mi_batchbuffer_start = 0;
+	if (IS_I965G(dev)) /* 965 doesn't support older method */
+		dev_priv->use_mi_batchbuffer_start = 1;
+
+	/* Allow hardware batchbuffers unless told otherwise.
+	 */
+	dev_priv->allow_batchbuffer = 1;
+
+	/* Program Hardware Status Page */
+	if (!I915_NEED_GFX_HWS(dev)) {
+		dev_priv->status_page_dmah =
+			drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
+
+		if (!dev_priv->status_page_dmah) {
+			i915_dma_cleanup(dev);
+			DRM_ERROR("Can not allocate hardware status page\n");
+			return -ENOMEM;
+		}
+		dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+		dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
+
+		memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+		I915_WRITE(0x02080, dev_priv->dma_status_page);
+	}
+	DRM_DEBUG("Enabled hardware status page\n");
+	return 0;
+}
+
+static int i915_dma_resume(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	DRM_DEBUG("%s\n", __func__);
+
+	if (!dev_priv->sarea) {
+		DRM_ERROR("can not find sarea!\n");
+		return -EINVAL;
+	}
+
+	if (!dev_priv->mmio_map) {
+		DRM_ERROR("can not find mmio map!\n");
+		return -EINVAL;
+	}
+
+	if (dev_priv->ring.map.handle == NULL) {
+		DRM_ERROR("can not ioremap virtual address for"
+			  " ring buffer\n");
+		return -ENOMEM;
+	}
+
+	/* Program Hardware Status Page */
+	if (!dev_priv->hw_status_page) {
+		DRM_ERROR("Can not find hardware status page\n");
+		return -EINVAL;
+	}
+	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
+
+	if (dev_priv->status_gfx_addr != 0)
+		I915_WRITE(0x02080, dev_priv->status_gfx_addr);
+	else
+		I915_WRITE(0x02080, dev_priv->dma_status_page);
+	DRM_DEBUG("Enabled hardware status page\n");
+
+	return 0;
+}
+
+static int i915_dma_init(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i915_init_t *init = data;
+	int retcode = 0;
+
+	switch (init->func) {
+	case I915_INIT_DMA:
+		retcode = i915_initialize(dev, init);
+		break;
+	case I915_CLEANUP_DMA:
+		retcode = i915_dma_cleanup(dev);
+		break;
+	case I915_RESUME_DMA:
+		retcode = i915_dma_resume(dev);
+		break;
+	default:
+		retcode = -EINVAL;
+		break;
+	}
+
+	return retcode;
+}
+
+/* Implement basically the same security restrictions as hardware does
+ * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
+ *
+ * Most of the calculations below involve calculating the size of a
+ * particular instruction.  It's important to get the size right as
+ * that tells us where the next instruction to check is.  Any illegal
+ * instruction detected will be given a size of zero, which is a
+ * signal to abort the rest of the buffer.
+ */
+static int do_validate_cmd(int cmd)
+{
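+	/* Bits 31:29 of the dword select the command client: 0 = MI,
+	 * 2 = 2D (blitter), 3 = 3D/media; everything else is rejected.
+	 */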
+	switch (((cmd >> 29) & 0x7)) {
+	case 0x0:
+		switch ((cmd >> 23) & 0x3f) {
+		case 0x0:
+			return 1;	/* MI_NOOP */
+		case 0x4:
+			return 1;	/* MI_FLUSH */
+		default:
+			return 0;	/* disallow everything else */
+		}
+		break;
+	case 0x1:
+		return 0;	/* reserved */
+	case 0x2:
+		return (cmd & 0xff) + 2;	/* 2d commands */
+	case 0x3:
+		if (((cmd >> 24) & 0x1f) <= 0x18)
+			return 1;
+
+		switch ((cmd >> 24) & 0x1f) {
+		case 0x1c:
+			return 1;
+		case 0x1d:
+			switch ((cmd >> 16) & 0xff) {
+			case 0x3:
+				return (cmd & 0x1f) + 2;
+			case 0x4:
+				return (cmd & 0xf) + 2;
+			default:
+				return (cmd & 0xffff) + 2;
+			}
+		case 0x1e:
+			if (cmd & (1 << 23))
+				return (cmd & 0xffff) + 1;
+			else
+				return 1;
+		case 0x1f:
+			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
+				return (cmd & 0x1ffff) + 2;
+			else if (cmd & (1 << 17))	/* indirect random */
+				if ((cmd & 0xffff) == 0)
+					return 0;	/* unknown length, too hard */
+				else
+					return (((cmd & 0xffff) + 1) / 2) + 1;
+			else
+				return 2;	/* indirect sequential */
+		default:
+			return 0;
+		}
+	default:
+		return 0;
+	}
+
+	return 0;
+}
+
+static int validate_cmd(int cmd)
+{
+	int ret = do_validate_cmd(cmd);
+
+/*	printk("validate_cmd( %x ): %d\n", cmd, ret); */
+
+	return ret;
+}
+
+static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int i;
+	RING_LOCALS;
+
+	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
+		return -EINVAL;
+
+	BEGIN_LP_RING((dwords+1)&~1);
+
+	for (i = 0; i < dwords;) {
+		int cmd, sz;
+
+		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
+			return -EINVAL;
+
+		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
+			return -EINVAL;
+
+		OUT_RING(cmd);
+
+		while (++i, --sz) {
+			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
+							 sizeof(cmd))) {
+				return -EINVAL;
+			}
+			OUT_RING(cmd);
+		}
+	}
+
+	if (dwords & 1)
+		OUT_RING(0);
+
+	ADVANCE_LP_RING();
+
+	return 0;
+}
+
+static int i915_emit_box(struct drm_device * dev,
+			 struct drm_clip_rect __user * boxes,
+			 int i, int DR1, int DR4)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_clip_rect box;
+	RING_LOCALS;
+
+	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
+		return -EFAULT;
+	}
+
+	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
+		DRM_ERROR("Bad box %d,%d..%d,%d\n",
+			  box.x1, box.y1, box.x2, box.y2);
+		return -EINVAL;
+	}
+
+	if (IS_I965G(dev)) {
+		BEGIN_LP_RING(4);
+		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
+		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
+		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+		OUT_RING(DR4);
+		ADVANCE_LP_RING();
+	} else {
+		BEGIN_LP_RING(6);
+		OUT_RING(GFX_OP_DRAWRECT_INFO);
+		OUT_RING(DR1);
+		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
+		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+		OUT_RING(DR4);
+		OUT_RING(0);
+		ADVANCE_LP_RING();
+	}
+
+	return 0;
+}
+
+/* XXX: Emitting the counter should really be moved to part of the IRQ
+ * emit. For now, do it in both places:
+ */
+
+static void i915_emit_breadcrumb(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+
+	dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
+
+	if (dev_priv->counter > 0x7FFFFFFFUL)
+		dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
+
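+	/* Store the new counter at byte offset 20 (dword 5) of the hardware
+	 * status page; i915_batchbuffer()/i915_cmdbuffer() read it back as
+	 * hw_status[5] for sarea_priv->last_dispatch.
+	 */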
+	BEGIN_LP_RING(4);
+	OUT_RING(CMD_STORE_DWORD_IDX);
+	OUT_RING(20);
+	OUT_RING(dev_priv->counter);
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+}
+
+static int i915_dispatch_cmdbuffer(struct drm_device * dev,
+				   drm_i915_cmdbuffer_t * cmd)
+{
+	int nbox = cmd->num_cliprects;
+	int i = 0, count, ret;
+
+	if (cmd->sz & 0x3) {
+		DRM_ERROR("alignment");
+		return -EINVAL;
+	}
+
+	i915_kernel_lost_context(dev);
+
+	count = nbox ? nbox : 1;
+
+	for (i = 0; i < count; i++) {
+		if (i < nbox) {
+			ret = i915_emit_box(dev, cmd->cliprects, i,
+					    cmd->DR1, cmd->DR4);
+			if (ret)
+				return ret;
+		}
+
+		ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
+		if (ret)
+			return ret;
+	}
+
+	i915_emit_breadcrumb(dev);
+	return 0;
+}
+
+static int i915_dispatch_batchbuffer(struct drm_device * dev,
+				     drm_i915_batchbuffer_t * batch)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_clip_rect __user *boxes = batch->cliprects;
+	int nbox = batch->num_cliprects;
+	int i = 0, count;
+	RING_LOCALS;
+
+	if ((batch->start | batch->used) & 0x7) {
+		DRM_ERROR("alignment");
+		return -EINVAL;
+	}
+
+	i915_kernel_lost_context(dev);
+
+	count = nbox ? nbox : 1;
+
+	for (i = 0; i < count; i++) {
+		if (i < nbox) {
+			int ret = i915_emit_box(dev, boxes, i,
+						batch->DR1, batch->DR4);
+			if (ret)
+				return ret;
+		}
+
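+		/* Either chain to the batch with MI_BATCH_BUFFER_START, or
+		 * fall back to an inline MI_BATCH_BUFFER that spells out the
+		 * start and end addresses.
+		 */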
+		if (dev_priv->use_mi_batchbuffer_start) {
+			BEGIN_LP_RING(2);
+			if (IS_I965G(dev)) {
+				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
+				OUT_RING(batch->start);
+			} else {
+				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
+				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
+			}
+			ADVANCE_LP_RING();
+		} else {
+			BEGIN_LP_RING(4);
+			OUT_RING(MI_BATCH_BUFFER);
+			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
+			OUT_RING(batch->start + batch->used - 4);
+			OUT_RING(0);
+			ADVANCE_LP_RING();
+		}
+	}
+
+	i915_emit_breadcrumb(dev);
+
+	return 0;
+}
+
+static int i915_dispatch_flip(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+
+	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
+		  __func__,
+		  dev_priv->current_page,
+		  dev_priv->sarea_priv->pf_current_page);
+
+	i915_kernel_lost_context(dev);
+
+	BEGIN_LP_RING(2);
+	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+
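+	/* Queue an asynchronous flip to whichever buffer is not currently
+	 * displayed, and toggle current_page to match.
+	 */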
+	BEGIN_LP_RING(6);
+	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
+	OUT_RING(0);
+	if (dev_priv->current_page == 0) {
+		OUT_RING(dev_priv->back_offset);
+		dev_priv->current_page = 1;
+	} else {
+		OUT_RING(dev_priv->front_offset);
+		dev_priv->current_page = 0;
+	}
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+
+	BEGIN_LP_RING(2);
+	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+
+	dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;
+
+	BEGIN_LP_RING(4);
+	OUT_RING(CMD_STORE_DWORD_IDX);
+	OUT_RING(20);
+	OUT_RING(dev_priv->counter);
+	OUT_RING(0);
+	ADVANCE_LP_RING();
+
+	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
+	return 0;
+}
+
+static int i915_quiescent(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	i915_kernel_lost_context(dev);
+	return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
+}
+
+static int i915_flush_ioctl(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv)
+{
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	return i915_quiescent(dev);
+}
+
+static int i915_batchbuffer(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 *hw_status = dev_priv->hw_status_page;
+	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
+	    dev_priv->sarea_priv;
+	drm_i915_batchbuffer_t *batch = data;
+	int ret;
+
+	if (!dev_priv->allow_batchbuffer) {
+		DRM_ERROR("Batchbuffer ioctl disabled\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
+		  batch->start, batch->used, batch->num_cliprects);
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
+						       batch->num_cliprects *
+						       sizeof(struct drm_clip_rect)))
+		return -EFAULT;
+
+	ret = i915_dispatch_batchbuffer(dev, batch);
+
+	sarea_priv->last_dispatch = (int)hw_status[5];
+	return ret;
+}
+
+static int i915_cmdbuffer(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 *hw_status = dev_priv->hw_status_page;
+	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
+	    dev_priv->sarea_priv;
+	drm_i915_cmdbuffer_t *cmdbuf = data;
+	int ret;
+
+	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
+		  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (cmdbuf->num_cliprects &&
+	    DRM_VERIFYAREA_READ(cmdbuf->cliprects,
+				cmdbuf->num_cliprects *
+				sizeof(struct drm_clip_rect))) {
+		DRM_ERROR("Fault accessing cliprects\n");
+		return -EFAULT;
+	}
+
+	ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
+	if (ret) {
+		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
+		return ret;
+	}
+
+	sarea_priv->last_dispatch = (int)hw_status[5];
+	return 0;
+}
+
+static int i915_flip_bufs(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	DRM_DEBUG("%s\n", __func__);
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	return i915_dispatch_flip(dev);
+}
+
+static int i915_getparam(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_getparam_t *param = data;
+	int value;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	switch (param->param) {
+	case I915_PARAM_IRQ_ACTIVE:
+		value = dev->irq ? 1 : 0;
+		break;
+	case I915_PARAM_ALLOW_BATCHBUFFER:
+		value = dev_priv->allow_batchbuffer ? 1 : 0;
+		break;
+	case I915_PARAM_LAST_DISPATCH:
+		value = READ_BREADCRUMB(dev_priv);
+		break;
+	default:
+		DRM_ERROR("Unknown parameter %d\n", param->param);
+		return -EINVAL;
+	}
+
+	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
+		DRM_ERROR("DRM_COPY_TO_USER failed\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int i915_setparam(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_setparam_t *param = data;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	switch (param->param) {
+	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
+		if (!IS_I965G(dev))
+			dev_priv->use_mi_batchbuffer_start = param->value;
+		break;
+	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
+		dev_priv->tex_lru_log_granularity = param->value;
+		break;
+	case I915_SETPARAM_ALLOW_BATCHBUFFER:
+		dev_priv->allow_batchbuffer = param->value;
+		break;
+	default:
+		DRM_ERROR("unknown parameter %d\n", param->param);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int i915_set_status_page(struct drm_device *dev, void *data,
+				struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_hws_addr_t *hws = data;
+
+	if (!I915_NEED_GFX_HWS(dev))
+		return -EINVAL;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
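+	/* Chips that need a GFX HWS keep the status page in graphics memory,
+	 * so map it through the AGP aperture at the offset userspace chose
+	 * rather than using a PCI-consistent allocation.
+	 */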
+	printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws->addr);
+
+	dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
+
+	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
+	dev_priv->hws_map.size = 4*1024;
+	dev_priv->hws_map.type = 0;
+	dev_priv->hws_map.flags = 0;
+	dev_priv->hws_map.mtrr = 0;
+
+	drm_core_ioremap(&dev_priv->hws_map, dev);
+	if (dev_priv->hws_map.handle == NULL) {
+		i915_dma_cleanup(dev);
+		dev_priv->status_gfx_addr = 0;
+		DRM_ERROR("can not ioremap virtual address for"
+				" G33 hw status page\n");
+		return -ENOMEM;
+	}
+	dev_priv->hw_status_page = dev_priv->hws_map.handle;
+
+	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+	I915_WRITE(0x02080, dev_priv->status_gfx_addr);
+	DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
+			dev_priv->status_gfx_addr);
+	DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
+	return 0;
+}
+
+int i915_driver_load(struct drm_device *dev, unsigned long flags)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long base, size;
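+	/* The register BAR moved between generations: i9xx parts expose the
+	 * MMIO registers through BAR 0, older chips through BAR 1.
+	 */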
+	int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
+
+	/* i915 has 4 more counters */
+	dev->counters += 4;
+	dev->types[6] = _DRM_STAT_IRQ;
+	dev->types[7] = _DRM_STAT_PRIMARY;
+	dev->types[8] = _DRM_STAT_SECONDARY;
+	dev->types[9] = _DRM_STAT_DMA;
+
+	dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
+	if (dev_priv == NULL)
+		return -ENOMEM;
+
+	memset(dev_priv, 0, sizeof(drm_i915_private_t));
+
+	dev->dev_private = (void *)dev_priv;
+
+	/* Add register map (needed for suspend/resume) */
+	base = drm_get_resource_start(dev, mmio_bar);
+	size = drm_get_resource_len(dev, mmio_bar);
+
+	ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
+			 _DRM_KERNEL | _DRM_DRIVER,
+			 &dev_priv->mmio_map);
+	return ret;
+}
+
+int i915_driver_unload(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->mmio_map)
+		drm_rmmap(dev, dev_priv->mmio_map);
+
+	drm_free(dev->dev_private, sizeof(drm_i915_private_t),
+		 DRM_MEM_DRIVER);
+
+	return 0;
+}
+
+void i915_driver_lastclose(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (!dev_priv)
+		return;
+
+	if (dev_priv->agp_heap)
+		i915_mem_takedown(&(dev_priv->agp_heap));
+
+	i915_dma_cleanup(dev);
+}
+
+void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	i915_mem_release(dev, file_priv, dev_priv->agp_heap);
+}
+
+struct drm_ioctl_desc i915_ioctls[] = {
+	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP,  i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
+	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
+	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
+	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
+};
+
+int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
+
+/**
+ * Determine if the device really is AGP or not.
+ *
+ * All Intel graphics chipsets are treated as AGP, even if they are really
+ * PCI-e.
+ *
+ * \param dev   The device to be tested.
+ *
+ * \returns
+ * A value of 1 is always returned to indicate every i9x5 is AGP.
+ */
+int i915_driver_device_is_agp(struct drm_device * dev)
+{
+	return 1;
+}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
new file mode 100644
index 000000000000..93aed1c38bd2
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -0,0 +1,605 @@
+/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
+ */
+/*
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+#include "drm_pciids.h"
+
+static struct pci_device_id pciidlist[] = {
+	i915_PCI_IDS
+};
+
+enum pipe {
+    PIPE_A = 0,
+    PIPE_B,
+};
+
+static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (pipe == PIPE_A)
+		return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
+	else
+		return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
+}
+
+static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
+	u32 *array;
+	int i;
+
+	if (!i915_pipe_enabled(dev, pipe))
+		return;
+
+	if (pipe == PIPE_A)
+		array = dev_priv->save_palette_a;
+	else
+		array = dev_priv->save_palette_b;
+
+	for(i = 0; i < 256; i++)
+		array[i] = I915_READ(reg + (i << 2));
+}
+
+static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
+	u32 *array;
+	int i;
+
+	if (!i915_pipe_enabled(dev, pipe))
+		return;
+
+	if (pipe == PIPE_A)
+		array = dev_priv->save_palette_a;
+	else
+		array = dev_priv->save_palette_b;
+
+	for(i = 0; i < 256; i++)
+		I915_WRITE(reg + (i << 2), array[i]);
+}
+
+static u8 i915_read_indexed(u16 index_port, u16 data_port, u8 reg)
+{
+	outb(reg, index_port);
+	return inb(data_port);
+}
+
+static u8 i915_read_ar(u16 st01, u8 reg, u16 palette_enable)
+{
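+	/* Reading ST01 resets the attribute controller index/data flip-flop,
+	 * so the following write to VGA_AR_INDEX is taken as an index.
+	 */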
+	inb(st01);
+	outb(palette_enable | reg, VGA_AR_INDEX);
+	return inb(VGA_AR_DATA_READ);
+}
+
+static void i915_write_ar(u16 st01, u8 reg, u8 val, u16 palette_enable)
+{
+	inb(st01);
+	outb(palette_enable | reg, VGA_AR_INDEX);
+	outb(val, VGA_AR_DATA_WRITE);
+}
+
+static void i915_write_indexed(u16 index_port, u16 data_port, u8 reg, u8 val)
+{
+	outb(reg, index_port);
+	outb(val, data_port);
+}
+
+static void i915_save_vga(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+	u16 cr_index, cr_data, st01;
+
+	/* VGA color palette registers */
+	dev_priv->saveDACMASK = inb(VGA_DACMASK);
+	/* DACRX automatically increments during read */
+	outb(0, VGA_DACRX);
+	/* Read 3 bytes of color data from each index */
+	for (i = 0; i < 256 * 3; i++)
+		dev_priv->saveDACDATA[i] = inb(VGA_DACDATA);
+
+	/* MSR bits */
+	dev_priv->saveMSR = inb(VGA_MSR_READ);
+	if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+		cr_index = VGA_CR_INDEX_CGA;
+		cr_data = VGA_CR_DATA_CGA;
+		st01 = VGA_ST01_CGA;
+	} else {
+		cr_index = VGA_CR_INDEX_MDA;
+		cr_data = VGA_CR_DATA_MDA;
+		st01 = VGA_ST01_MDA;
+	}
+
+	/* CRT controller regs */
+	i915_write_indexed(cr_index, cr_data, 0x11,
+			   i915_read_indexed(cr_index, cr_data, 0x11) &
+			   (~0x80));
+	for (i = 0; i <= 0x24; i++)
+		dev_priv->saveCR[i] =
+			i915_read_indexed(cr_index, cr_data, i);
+	/* Make sure we don't turn off CR group 0 writes */
+	dev_priv->saveCR[0x11] &= ~0x80;
+
+	/* Attribute controller registers */
+	inb(st01);
+	dev_priv->saveAR_INDEX = inb(VGA_AR_INDEX);
+	for (i = 0; i <= 0x14; i++)
+		dev_priv->saveAR[i] = i915_read_ar(st01, i, 0);
+	inb(st01);
+	outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX);
+	inb(st01);
+
+	/* Graphics controller registers */
+	for (i = 0; i < 9; i++)
+		dev_priv->saveGR[i] =
+			i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, i);
+
+	dev_priv->saveGR[0x10] =
+		i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10);
+	dev_priv->saveGR[0x11] =
+		i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11);
+	dev_priv->saveGR[0x18] =
+		i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18);
+
+	/* Sequencer registers */
+	for (i = 0; i < 8; i++)
+		dev_priv->saveSR[i] =
+			i915_read_indexed(VGA_SR_INDEX, VGA_SR_DATA, i);
+}
+
+static void i915_restore_vga(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+	u16 cr_index, cr_data, st01;
+
+	/* MSR bits */
+	outb(dev_priv->saveMSR, VGA_MSR_WRITE);
+	if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+		cr_index = VGA_CR_INDEX_CGA;
+		cr_data = VGA_CR_DATA_CGA;
+		st01 = VGA_ST01_CGA;
+	} else {
+		cr_index = VGA_CR_INDEX_MDA;
+		cr_data = VGA_CR_DATA_MDA;
+		st01 = VGA_ST01_MDA;
+	}
+
+	/* Sequencer registers, don't write SR07 */
+	for (i = 0; i < 7; i++)
+		i915_write_indexed(VGA_SR_INDEX, VGA_SR_DATA, i,
+				   dev_priv->saveSR[i]);
+
+	/* CRT controller regs */
+	/* Enable CR group 0 writes */
+	i915_write_indexed(cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
+	for (i = 0; i <= 0x24; i++)
+		i915_write_indexed(cr_index, cr_data, i, dev_priv->saveCR[i]);
+
+	/* Graphics controller regs */
+	for (i = 0; i < 9; i++)
+		i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, i,
+				   dev_priv->saveGR[i]);
+
+	i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10,
+			   dev_priv->saveGR[0x10]);
+	i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11,
+			   dev_priv->saveGR[0x11]);
+	i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18,
+			   dev_priv->saveGR[0x18]);
+
+	/* Attribute controller registers */
+	inb(st01);
+	for (i = 0; i <= 0x14; i++)
+		i915_write_ar(st01, i, dev_priv->saveAR[i], 0);
+	inb(st01); /* switch back to index mode */
+	outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX);
+	inb(st01);
+
+	/* VGA color palette registers */
+	outb(dev_priv->saveDACMASK, VGA_DACMASK);
+	/* DACWX automatically increments during write */
+	outb(0, VGA_DACWX);
+	/* Write 3 bytes of color data to each index */
+	for (i = 0; i < 256 * 3; i++)
+		outb(dev_priv->saveDACDATA[i], VGA_DACDATA);
+
+}
+
+static int i915_suspend(struct drm_device *dev, pm_message_t state)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	if (!dev || !dev_priv) {
+		printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv);
+		printk(KERN_ERR "DRM not initialized, aborting suspend.\n");
+		return -ENODEV;
+	}
+
+	if (state.event == PM_EVENT_PRETHAW)
+		return 0;
+
+	pci_save_state(dev->pdev);
+	pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
+
+	/* Display arbitration control */
+	dev_priv->saveDSPARB = I915_READ(DSPARB);
+
+	/* Pipe & plane A info */
+	dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
+	dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
+	dev_priv->saveFPA0 = I915_READ(FPA0);
+	dev_priv->saveFPA1 = I915_READ(FPA1);
+	dev_priv->saveDPLL_A = I915_READ(DPLL_A);
+	if (IS_I965G(dev))
+		dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
+	dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
+	dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
+	dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
+	dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
+	dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
+	dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
+	dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
+
+	dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
+	dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
+	dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
+	dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
+	dev_priv->saveDSPABASE = I915_READ(DSPABASE);
+	if (IS_I965G(dev)) {
+		dev_priv->saveDSPASURF = I915_READ(DSPASURF);
+		dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
+	}
+	i915_save_palette(dev, PIPE_A);
+	dev_priv->savePIPEASTAT = I915_READ(I915REG_PIPEASTAT);
+
+	/* Pipe & plane B info */
+	dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
+	dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
+	dev_priv->saveFPB0 = I915_READ(FPB0);
+	dev_priv->saveFPB1 = I915_READ(FPB1);
+	dev_priv->saveDPLL_B = I915_READ(DPLL_B);
+	if (IS_I965G(dev))
+		dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
+	dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
+	dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
+	dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
+	dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
+	dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
+	dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
+	dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
+
+	dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
+	dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
+	dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
+	dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
+	dev_priv->saveDSPBBASE = I915_READ(DSPBBASE);
+	if (IS_I965GM(dev) || IS_IGD_GM(dev)) {
+		dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
+		dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
+	}
+	i915_save_palette(dev, PIPE_B);
+	dev_priv->savePIPEBSTAT = I915_READ(I915REG_PIPEBSTAT);
+
+	/* CRT state */
+	dev_priv->saveADPA = I915_READ(ADPA);
+
+	/* LVDS state */
+	dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
+	dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+	dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+	if (IS_I965G(dev))
+		dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+	if (IS_MOBILE(dev) && !IS_I830(dev))
+		dev_priv->saveLVDS = I915_READ(LVDS);
+	if (!IS_I830(dev) && !IS_845G(dev))
+		dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+	dev_priv->saveLVDSPP_ON = I915_READ(LVDSPP_ON);
+	dev_priv->saveLVDSPP_OFF = I915_READ(LVDSPP_OFF);
+	dev_priv->savePP_CYCLE = I915_READ(PP_CYCLE);
+
+	/* FIXME: save TV & SDVO state */
+
+	/* FBC state */
+	dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+	dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+	dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+	dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+
+	/* Interrupt state */
+	dev_priv->saveIIR = I915_READ(I915REG_INT_IDENTITY_R);
+	dev_priv->saveIER = I915_READ(I915REG_INT_ENABLE_R);
+	dev_priv->saveIMR = I915_READ(I915REG_INT_MASK_R);
+
+	/* VGA state */
+	dev_priv->saveVCLK_DIVISOR_VGA0 = I915_READ(VCLK_DIVISOR_VGA0);
+	dev_priv->saveVCLK_DIVISOR_VGA1 = I915_READ(VCLK_DIVISOR_VGA1);
+	dev_priv->saveVCLK_POST_DIV = I915_READ(VCLK_POST_DIV);
+	dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
+
+	/* Clock gating state */
+	dev_priv->saveD_STATE = I915_READ(D_STATE);
+	dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D);
+
+	/* Cache mode state */
+	dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+
+	/* Memory Arbitration state */
+	dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
+
+	/* Scratch space */
+	for (i = 0; i < 16; i++) {
+		dev_priv->saveSWF0[i] = I915_READ(SWF0 + (i << 2));
+		dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
+	}
+	for (i = 0; i < 3; i++)
+		dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
+
+	i915_save_vga(dev);
+
+	if (state.event == PM_EVENT_SUSPEND) {
+		/* Shut down the device */
+		pci_disable_device(dev->pdev);
+		pci_set_power_state(dev->pdev, PCI_D3hot);
+	}
+
+	return 0;
+}
+
+static int i915_resume(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	pci_set_power_state(dev->pdev, PCI_D0);
+	pci_restore_state(dev->pdev);
+	if (pci_enable_device(dev->pdev))
+		return -1;
+	pci_set_master(dev->pdev);
+
+	pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
+
+	I915_WRITE(DSPARB, dev_priv->saveDSPARB);
+
+	/* Pipe & plane A info */
+	/* Prime the clock */
+	if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
+		I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
+			   ~DPLL_VCO_ENABLE);
+		udelay(150);
+	}
+	I915_WRITE(FPA0, dev_priv->saveFPA0);
+	I915_WRITE(FPA1, dev_priv->saveFPA1);
+	/* Actually enable it */
+	I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
+	udelay(150);
+	if (IS_I965G(dev))
+		I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
+	udelay(150);
+
+	/* Restore mode */
+	I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
+	I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
+	I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
+	I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
+	I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
+	I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
+	I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
+
+	/* Restore plane info */
+	I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
+	I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
+	I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
+	I915_WRITE(DSPABASE, dev_priv->saveDSPABASE);
+	I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
+	if (IS_I965G(dev)) {
+		I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
+		I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
+	}
+
+	I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
+
+	i915_restore_palette(dev, PIPE_A);
+	/* Enable the plane */
+	I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
+	I915_WRITE(DSPABASE, I915_READ(DSPABASE));
+
+	/* Pipe & plane B info */
+	if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
+		I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
+			   ~DPLL_VCO_ENABLE);
+		udelay(150);
+	}
+	I915_WRITE(FPB0, dev_priv->saveFPB0);
+	I915_WRITE(FPB1, dev_priv->saveFPB1);
+	/* Actually enable it */
+	I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
+	udelay(150);
+	if (IS_I965G(dev))
+		I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
+	udelay(150);
+
+	/* Restore mode */
+	I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
+	I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
+	I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
+	I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
+	I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
+	I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
+	I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
+
+	/* Restore plane info */
+	I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
+	I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
+	I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
+	I915_WRITE(DSPBBASE, dev_priv->saveDSPBBASE);
+	I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
+	if (IS_I965G(dev)) {
+		I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
+		I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
+	}
+
+	I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
+
+	i915_restore_palette(dev, PIPE_B);
+	/* Enable the plane */
+	I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
+	I915_WRITE(DSPBBASE, I915_READ(DSPBBASE));
+
+	/* CRT state */
+	I915_WRITE(ADPA, dev_priv->saveADPA);
+
+	/* LVDS state */
+	if (IS_I965G(dev))
+		I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
+	if (IS_MOBILE(dev) && !IS_I830(dev))
+		I915_WRITE(LVDS, dev_priv->saveLVDS);
+	if (!IS_I830(dev) && !IS_845G(dev))
+		I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
+
+	I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
+	I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
+	I915_WRITE(LVDSPP_ON, dev_priv->saveLVDSPP_ON);
+	I915_WRITE(LVDSPP_OFF, dev_priv->saveLVDSPP_OFF);
+	I915_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
+	I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
+
+	/* FIXME: restore TV & SDVO state */
+
+	/* FBC info */
+	I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
+	I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
+	I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
+	I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+
+	/* VGA state */
+	I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
+	I915_WRITE(VCLK_DIVISOR_VGA0, dev_priv->saveVCLK_DIVISOR_VGA0);
+	I915_WRITE(VCLK_DIVISOR_VGA1, dev_priv->saveVCLK_DIVISOR_VGA1);
+	I915_WRITE(VCLK_POST_DIV, dev_priv->saveVCLK_POST_DIV);
+	udelay(150);
+
+	/* Clock gating state */
+	I915_WRITE (D_STATE, dev_priv->saveD_STATE);
+	I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
+
+	/* Cache mode state */
+	I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
+
+	/* Memory arbitration state */
+	I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
+
+	for (i = 0; i < 16; i++) {
+		I915_WRITE(SWF0 + (i << 2), dev_priv->saveSWF0[i]);
+		I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
+	}
+	for (i = 0; i < 3; i++)
+		I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
+
+	i915_restore_vga(dev);
+
+	return 0;
+}
+
+static struct drm_driver driver = {
+	/* Don't use MTRRs here; the X server or a user-space app should
+	 * deal with them for Intel hardware.
+	 */
+	.driver_features =
+	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
+	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL |
+	    DRIVER_IRQ_VBL2,
+	.load = i915_driver_load,
+	.unload = i915_driver_unload,
+	.lastclose = i915_driver_lastclose,
+	.preclose = i915_driver_preclose,
+	.suspend = i915_suspend,
+	.resume = i915_resume,
+	.device_is_agp = i915_driver_device_is_agp,
+	.vblank_wait = i915_driver_vblank_wait,
+	.vblank_wait2 = i915_driver_vblank_wait2,
+	.irq_preinstall = i915_driver_irq_preinstall,
+	.irq_postinstall = i915_driver_irq_postinstall,
+	.irq_uninstall = i915_driver_irq_uninstall,
+	.irq_handler = i915_driver_irq_handler,
+	.reclaim_buffers = drm_core_reclaim_buffers,
+	.get_map_ofs = drm_core_get_map_ofs,
+	.get_reg_ofs = drm_core_get_reg_ofs,
+	.ioctls = i915_ioctls,
+	.fops = {
+		 .owner = THIS_MODULE,
+		 .open = drm_open,
+		 .release = drm_release,
+		 .ioctl = drm_ioctl,
+		 .mmap = drm_mmap,
+		 .poll = drm_poll,
+		 .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+		 .compat_ioctl = i915_compat_ioctl,
+#endif
+	},
+
+	.pci_driver = {
+		 .name = DRIVER_NAME,
+		 .id_table = pciidlist,
+	},
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static int __init i915_init(void)
+{
+	driver.num_ioctls = i915_max_ioctl;
+	return drm_init(&driver);
+}
+
+static void __exit i915_exit(void)
+{
+	drm_exit(&driver);
+}
+
+module_init(i915_init);
+module_exit(i915_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
new file mode 100644
index 000000000000..d7326d92a237
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -0,0 +1,1142 @@
+/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
+ */
+/*
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _I915_DRV_H_
+#define _I915_DRV_H_
+
+/* General customization:
+ */
+
+#define DRIVER_AUTHOR		"Tungsten Graphics, Inc."
+
+#define DRIVER_NAME		"i915"
+#define DRIVER_DESC		"Intel Graphics"
+#define DRIVER_DATE		"20060119"
+
+/* Interface history:
+ *
+ * 1.1: Original.
+ * 1.2: Add Power Management
+ * 1.3: Add vblank support
+ * 1.4: Fix cmdbuffer path, add heap destroy
+ * 1.5: Add vblank pipe configuration
+ * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
+ *      - Support vertical blank on secondary display pipe
+ */
+#define DRIVER_MAJOR		1
+#define DRIVER_MINOR		6
+#define DRIVER_PATCHLEVEL	0
+
+typedef struct _drm_i915_ring_buffer {
+	int tail_mask;
+	unsigned long Start;
+	unsigned long End;
+	unsigned long Size;
+	u8 *virtual_start;
+	int head;
+	int tail;
+	int space;
+	drm_local_map_t map;
+} drm_i915_ring_buffer_t;
+
+struct mem_block {
+	struct mem_block *next;
+	struct mem_block *prev;
+	int start;
+	int size;
+	struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
+};
+
+typedef struct _drm_i915_vbl_swap {
+	struct list_head head;
+	drm_drawable_t drw_id;
+	unsigned int pipe;
+	unsigned int sequence;
+} drm_i915_vbl_swap_t;
+
+typedef struct drm_i915_private {
+	drm_local_map_t *sarea;
+	drm_local_map_t *mmio_map;
+
+	drm_i915_sarea_t *sarea_priv;
+	drm_i915_ring_buffer_t ring;
+
+	drm_dma_handle_t *status_page_dmah;
+	void *hw_status_page;
+	dma_addr_t dma_status_page;
+	unsigned long counter;
+	unsigned int status_gfx_addr;
+	drm_local_map_t hws_map;
+
+	unsigned int cpp;
+	int back_offset;
+	int front_offset;
+	int current_page;
+	int page_flipping;
+	int use_mi_batchbuffer_start;
+
+	wait_queue_head_t irq_queue;
+	atomic_t irq_received;
+	atomic_t irq_emitted;
+
+	int tex_lru_log_granularity;
+	int allow_batchbuffer;
+	struct mem_block *agp_heap;
+	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
+	int vblank_pipe;
+
+	spinlock_t swaps_lock;
+	drm_i915_vbl_swap_t vbl_swaps;
+	unsigned int swaps_pending;
+
+	/* Register state */
+	u8 saveLBB;
+	u32 saveDSPACNTR;
+	u32 saveDSPBCNTR;
+	u32 saveDSPARB;
+	u32 savePIPEACONF;
+	u32 savePIPEBCONF;
+	u32 savePIPEASRC;
+	u32 savePIPEBSRC;
+	u32 saveFPA0;
+	u32 saveFPA1;
+	u32 saveDPLL_A;
+	u32 saveDPLL_A_MD;
+	u32 saveHTOTAL_A;
+	u32 saveHBLANK_A;
+	u32 saveHSYNC_A;
+	u32 saveVTOTAL_A;
+	u32 saveVBLANK_A;
+	u32 saveVSYNC_A;
+	u32 saveBCLRPAT_A;
+	u32 savePIPEASTAT;
+	u32 saveDSPASTRIDE;
+	u32 saveDSPASIZE;
+	u32 saveDSPAPOS;
+	u32 saveDSPABASE;
+	u32 saveDSPASURF;
+	u32 saveDSPATILEOFF;
+	u32 savePFIT_PGM_RATIOS;
+	u32 saveBLC_PWM_CTL;
+	u32 saveBLC_PWM_CTL2;
+	u32 saveFPB0;
+	u32 saveFPB1;
+	u32 saveDPLL_B;
+	u32 saveDPLL_B_MD;
+	u32 saveHTOTAL_B;
+	u32 saveHBLANK_B;
+	u32 saveHSYNC_B;
+	u32 saveVTOTAL_B;
+	u32 saveVBLANK_B;
+	u32 saveVSYNC_B;
+	u32 saveBCLRPAT_B;
+	u32 savePIPEBSTAT;
+	u32 saveDSPBSTRIDE;
+	u32 saveDSPBSIZE;
+	u32 saveDSPBPOS;
+	u32 saveDSPBBASE;
+	u32 saveDSPBSURF;
+	u32 saveDSPBTILEOFF;
+	u32 saveVCLK_DIVISOR_VGA0;
+	u32 saveVCLK_DIVISOR_VGA1;
+	u32 saveVCLK_POST_DIV;
+	u32 saveVGACNTRL;
+	u32 saveADPA;
+	u32 saveLVDS;
+	u32 saveLVDSPP_ON;
+	u32 saveLVDSPP_OFF;
+	u32 saveDVOA;
+	u32 saveDVOB;
+	u32 saveDVOC;
+	u32 savePP_ON;
+	u32 savePP_OFF;
+	u32 savePP_CONTROL;
+	u32 savePP_CYCLE;
+	u32 savePFIT_CONTROL;
+	u32 save_palette_a[256];
+	u32 save_palette_b[256];
+	u32 saveFBC_CFB_BASE;
+	u32 saveFBC_LL_BASE;
+	u32 saveFBC_CONTROL;
+	u32 saveFBC_CONTROL2;
+	u32 saveIER;
+	u32 saveIIR;
+	u32 saveIMR;
+	u32 saveCACHE_MODE_0;
+	u32 saveD_STATE;
+	u32 saveDSPCLK_GATE_D;
+	u32 saveMI_ARB_STATE;
+	u32 saveSWF0[16];
+	u32 saveSWF1[16];
+	u32 saveSWF2[3];
+	u8 saveMSR;
+	u8 saveSR[8];
+	u8 saveGR[25];
+	u8 saveAR_INDEX;
+	u8 saveAR[21];
+	u8 saveDACMASK;
+	u8 saveDACDATA[256*3]; /* 256 3-byte colors */
+	u8 saveCR[37];
+} drm_i915_private_t;
+
+extern struct drm_ioctl_desc i915_ioctls[];
+extern int i915_max_ioctl;
+
+				/* i915_dma.c */
+extern void i915_kernel_lost_context(struct drm_device * dev);
+extern int i915_driver_load(struct drm_device *, unsigned long flags);
+extern int i915_driver_unload(struct drm_device *);
+extern void i915_driver_lastclose(struct drm_device * dev);
+extern void i915_driver_preclose(struct drm_device *dev,
+				 struct drm_file *file_priv);
+extern int i915_driver_device_is_agp(struct drm_device * dev);
+extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
+			      unsigned long arg);
+
+/* i915_irq.c */
+extern int i915_irq_emit(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+extern int i915_irq_wait(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+
+extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
+extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
+extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
+extern void i915_driver_irq_preinstall(struct drm_device * dev);
+extern void i915_driver_irq_postinstall(struct drm_device * dev);
+extern void i915_driver_irq_uninstall(struct drm_device * dev);
+extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
+extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
+				struct drm_file *file_priv);
+extern int i915_vblank_swap(struct drm_device *dev, void *data,
+			    struct drm_file *file_priv);
+
+/* i915_mem.c */
+extern int i915_mem_alloc(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv);
+extern int i915_mem_free(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+extern int i915_mem_init_heap(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv);
+extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
+				 struct drm_file *file_priv);
+extern void i915_mem_takedown(struct mem_block **heap);
+extern void i915_mem_release(struct drm_device * dev,
+			     struct drm_file *file_priv, struct mem_block *heap);
+
+#define I915_READ(reg)          DRM_READ32(dev_priv->mmio_map, (reg))
+#define I915_WRITE(reg,val)     DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
+#define I915_READ16(reg)	DRM_READ16(dev_priv->mmio_map, (reg))
+#define I915_WRITE16(reg,val)	DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
+
+#define I915_VERBOSE 0
+
+#define RING_LOCALS	unsigned int outring, ringmask, outcount; \
+                        volatile char *virt;
+
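+/* Ring emission protocol: BEGIN_LP_RING() reserves space for n dwords,
+ * spinning in i915_wait_ring() if the ring is too full, OUT_RING() writes
+ * one dword at the software tail, and ADVANCE_LP_RING() publishes the new
+ * tail to the hardware RING_TAIL register.
+ */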
+#define BEGIN_LP_RING(n) do {				\
+	if (I915_VERBOSE)				\
+		DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n));	\
+	if (dev_priv->ring.space < (n)*4)		\
+		i915_wait_ring(dev, (n)*4, __func__);		\
+	outcount = 0;					\
+	outring = dev_priv->ring.tail;			\
+	ringmask = dev_priv->ring.tail_mask;		\
+	virt = dev_priv->ring.virtual_start;		\
+} while (0)
+
+#define OUT_RING(n) do {					\
+	if (I915_VERBOSE) DRM_DEBUG("   OUT_RING %x\n", (int)(n));	\
+	*(volatile unsigned int *)(virt + outring) = (n);	\
+        outcount++;						\
+	outring += 4;						\
+	outring &= ringmask;					\
+} while (0)
+
+#define ADVANCE_LP_RING() do {						\
+	if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring);	\
+	dev_priv->ring.tail = outring;					\
+	dev_priv->ring.space -= outcount * 4;				\
+	I915_WRITE(LP_RING + RING_TAIL, outring);			\
+} while(0)
+
+extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
+
+/* Extended config space */
+#define LBB 0xf4
+
+/* VGA stuff */
+
+#define VGA_ST01_MDA 0x3ba
+#define VGA_ST01_CGA 0x3da
+
+#define VGA_MSR_WRITE 0x3c2
+#define VGA_MSR_READ 0x3cc
+#define   VGA_MSR_MEM_EN (1<<1)
+#define   VGA_MSR_CGA_MODE (1<<0)
+
+#define VGA_SR_INDEX 0x3c4
+#define VGA_SR_DATA 0x3c5
+
+#define VGA_AR_INDEX 0x3c0
+#define   VGA_AR_VID_EN (1<<5)
+#define VGA_AR_DATA_WRITE 0x3c0
+#define VGA_AR_DATA_READ 0x3c1
+
+#define VGA_GR_INDEX 0x3ce
+#define VGA_GR_DATA 0x3cf
+/* GR05 */
+#define   VGA_GR_MEM_READ_MODE_SHIFT 3
+#define     VGA_GR_MEM_READ_MODE_PLANE 1
+/* GR06 */
+#define   VGA_GR_MEM_MODE_MASK 0xc
+#define   VGA_GR_MEM_MODE_SHIFT 2
+#define   VGA_GR_MEM_A0000_AFFFF 0
+#define   VGA_GR_MEM_A0000_BFFFF 1
+#define   VGA_GR_MEM_B0000_B7FFF 2
+#define   VGA_GR_MEM_B0000_BFFFF 3
+
+#define VGA_DACMASK 0x3c6
+#define VGA_DACRX 0x3c7
+#define VGA_DACWX 0x3c8
+#define VGA_DACDATA 0x3c9
+
+#define VGA_CR_INDEX_MDA 0x3b4
+#define VGA_CR_DATA_MDA 0x3b5
+#define VGA_CR_INDEX_CGA 0x3d4
+#define VGA_CR_DATA_CGA 0x3d5
+
+#define GFX_OP_USER_INTERRUPT		((0<<29)|(2<<23))
+#define GFX_OP_BREAKPOINT_INTERRUPT	((0<<29)|(1<<23))
+#define CMD_REPORT_HEAD			(7<<23)
+#define CMD_STORE_DWORD_IDX		((0x21<<23) | 0x1)
+#define CMD_OP_BATCH_BUFFER  ((0x0<<29)|(0x30<<23)|0x1)
+
+#define INST_PARSER_CLIENT   0x00000000
+#define INST_OP_FLUSH        0x02000000
+#define INST_FLUSH_MAP_CACHE 0x00000001
+
+#define BB1_START_ADDR_MASK   (~0x7)
+#define BB1_PROTECTED         (1<<0)
+#define BB1_UNPROTECTED       (0<<0)
+#define BB2_END_ADDR_MASK     (~0x7)
+
+/* Framebuffer compression */
+#define FBC_CFB_BASE		0x03200 /* 4k page aligned */
+#define FBC_LL_BASE		0x03204 /* 4k page aligned */
+#define FBC_CONTROL		0x03208
+#define   FBC_CTL_EN		(1<<31)
+#define   FBC_CTL_PERIODIC	(1<<30)
+#define   FBC_CTL_INTERVAL_SHIFT (16)
+#define   FBC_CTL_UNCOMPRESSIBLE (1<<14)
+#define   FBC_CTL_STRIDE_SHIFT	(5)
+#define   FBC_CTL_FENCENO	(1<<0)
+#define FBC_COMMAND		0x0320c
+#define   FBC_CMD_COMPRESS	(1<<0)
+#define FBC_STATUS		0x03210
+#define   FBC_STAT_COMPRESSING	(1<<31)
+#define   FBC_STAT_COMPRESSED	(1<<30)
+#define   FBC_STAT_MODIFIED	(1<<29)
+#define   FBC_STAT_CURRENT_LINE	(1<<0)
+#define FBC_CONTROL2		0x03214
+#define   FBC_CTL_FENCE_DBL	(0<<4)
+#define   FBC_CTL_IDLE_IMM	(0<<2)
+#define   FBC_CTL_IDLE_FULL	(1<<2)
+#define   FBC_CTL_IDLE_LINE	(2<<2)
+#define   FBC_CTL_IDLE_DEBUG	(3<<2)
+#define   FBC_CTL_CPU_FENCE	(1<<1)
+#define   FBC_CTL_PLANEA	(0<<0)
+#define   FBC_CTL_PLANEB	(1<<0)
+#define FBC_FENCE_OFF		0x0321b
+
+#define FBC_LL_SIZE		(1536)
+#define FBC_LL_PAD		(32)
+
+/* Interrupt bits:
+ */
+#define USER_INT_FLAG    (1<<1)
+#define VSYNC_PIPEB_FLAG (1<<5)
+#define VSYNC_PIPEA_FLAG (1<<7)
+#define HWB_OOM_FLAG     (1<<13) /* binner out of memory */
+
+#define I915REG_HWSTAM		0x02098
+#define I915REG_INT_IDENTITY_R	0x020a4
+#define I915REG_INT_MASK_R	0x020a8
+#define I915REG_INT_ENABLE_R	0x020a0
+
+#define I915REG_PIPEASTAT	0x70024
+#define I915REG_PIPEBSTAT	0x71024
+
+#define I915_VBLANK_INTERRUPT_ENABLE	(1UL<<17)
+#define I915_VBLANK_CLEAR		(1UL<<1)
+
+#define SRX_INDEX		0x3c4
+#define SRX_DATA		0x3c5
+#define SR01			1
+#define SR01_SCREEN_OFF		(1<<5)
+
+#define PPCR			0x61204
+#define PPCR_ON			(1<<0)
+
+#define DVOB			0x61140
+#define DVOB_ON			(1<<31)
+#define DVOC			0x61160
+#define DVOC_ON			(1<<31)
+#define LVDS			0x61180
+#define LVDS_ON			(1<<31)
+
+#define ADPA			0x61100
+#define ADPA_DPMS_MASK		(~(3<<10))
+#define ADPA_DPMS_ON		(0<<10)
+#define ADPA_DPMS_SUSPEND	(1<<10)
+#define ADPA_DPMS_STANDBY	(2<<10)
+#define ADPA_DPMS_OFF		(3<<10)
+
+#define NOPID                   0x2094
+#define LP_RING			0x2030
+#define HP_RING			0x2040
+/* The binner has its own ring buffer:
+ */
+#define HWB_RING		0x2400
+
+#define RING_TAIL		0x00
+#define TAIL_ADDR		0x001FFFF8
+#define RING_HEAD		0x04
+#define HEAD_WRAP_COUNT		0xFFE00000
+#define HEAD_WRAP_ONE		0x00200000
+#define HEAD_ADDR		0x001FFFFC
+#define RING_START		0x08
+#define START_ADDR		0xFFFFF000
+#define RING_LEN		0x0C
+#define RING_NR_PAGES		0x001FF000
+#define RING_REPORT_MASK	0x00000006
+#define RING_REPORT_64K		0x00000002
+#define RING_REPORT_128K	0x00000004
+#define RING_NO_REPORT		0x00000000
+#define RING_VALID_MASK		0x00000001
+#define RING_VALID		0x00000001
+#define RING_INVALID		0x00000000
+
+/* Instruction parser error reg:
+ */
+#define IPEIR			0x2088
+
+/* Scratch pad debug 0 reg:
+ */
+#define SCPD0			0x209c
+
+/* Error status reg:
+ */
+#define ESR			0x20b8
+
+/* Secondary DMA fetch address debug reg:
+ */
+#define DMA_FADD_S		0x20d4
+
+/* Memory Interface Arbitration State
+ */
+#define MI_ARB_STATE		0x20e4
+
+/* Cache mode 0 reg.
+ *  - Manipulating render cache behaviour is central
+ *    to the concept of zone rendering; tuning this reg can help avoid
+ *    unnecessary render cache reads and even writes (for z/stencil)
+ *    at beginning and end of scene.
+ *
+ *  - To change a bit, write to this reg with the corresponding mask bit
+ *    set and the bit of interest either set or cleared, e.g.
+ *    (BIT<<16) | BIT to set.
+ */
+#define Cache_Mode_0		0x2120
+#define CACHE_MODE_0		0x2120
+#define CM0_MASK_SHIFT          16
+#define CM0_IZ_OPT_DISABLE      (1<<6)
+#define CM0_ZR_OPT_DISABLE      (1<<5)
+#define CM0_DEPTH_EVICT_DISABLE (1<<4)
+#define CM0_COLOR_EVICT_DISABLE (1<<3)
+#define CM0_DEPTH_WRITE_DISABLE (1<<1)
+#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
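+
+/* A minimal sketch of the masked-write idiom described above, assuming the
+ * driver's I915_WRITE() MMIO helper (which picks up the dev_priv in scope);
+ * the function name is illustrative only.  The high 16 bits carry the
+ * write-enable mask, the low 16 bits the new value, so untouched bits keep
+ * their current setting.
+ */
+static inline void i915_cache_mode_0_rmw_sketch(drm_i915_private_t *dev_priv,
+						u32 bit, int set)
+{
+	I915_WRITE(CACHE_MODE_0, (bit << CM0_MASK_SHIFT) | (set ? bit : 0));
+}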
+
+
+/* Graphics flush control.  A CPU write flushes the GWB of all writes.
+ * The data is discarded.
+ */
+#define GFX_FLSH_CNTL		0x2170
+
+/* Binner control.  Defines the location of the bin pointer list:
+ */
+#define BINCTL			0x2420
+#define BC_MASK			(1 << 9)
+
+/* Binned scene info.
+ */
+#define BINSCENE		0x2428
+#define BS_OP_LOAD		(1 << 8)
+#define BS_MASK			(1 << 22)
+
+/* Bin command parser debug reg:
+ */
+#define BCPD			0x2480
+
+/* Bin memory control debug reg:
+ */
+#define BMCD			0x2484
+
+/* Bin data cache debug reg:
+ */
+#define BDCD			0x2488
+
+/* Binner pointer cache debug reg:
+ */
+#define BPCD			0x248c
+
+/* Binner scratch pad debug reg:
+ */
+#define BINSKPD			0x24f0
+
+/* HWB scratch pad debug reg:
+ */
+#define HWBSKPD			0x24f4
+
+/* Binner memory pool reg:
+ */
+#define BMP_BUFFER		0x2430
+#define BMP_PAGE_SIZE_4K	(0 << 10)
+#define BMP_BUFFER_SIZE_SHIFT	1
+#define BMP_ENABLE		(1 << 0)
+
+/* Get/put memory from the binner memory pool:
+ */
+#define BMP_GET			0x2438
+#define BMP_PUT			0x2440
+#define BMP_OFFSET_SHIFT	5
+
+/* 3D state packets:
+ */
+#define GFX_OP_RASTER_RULES    ((0x3<<29)|(0x7<<24))
+
+#define GFX_OP_SCISSOR         ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define SC_UPDATE_SCISSOR       (0x1<<1)
+#define SC_ENABLE_MASK          (0x1<<0)
+#define SC_ENABLE               (0x1<<0)
+
+#define GFX_OP_LOAD_INDIRECT   ((0x3<<29)|(0x1d<<24)|(0x7<<16))
+
+#define GFX_OP_SCISSOR_INFO    ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
+#define SCI_YMIN_MASK      (0xffff<<16)
+#define SCI_XMIN_MASK      (0xffff<<0)
+#define SCI_YMAX_MASK      (0xffff<<16)
+#define SCI_XMAX_MASK      (0xffff<<0)
+
+#define GFX_OP_SCISSOR_ENABLE	 ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define GFX_OP_SCISSOR_RECT	 ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
+#define GFX_OP_COLOR_FACTOR      ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
+#define GFX_OP_STIPPLE           ((0x3<<29)|(0x1d<<24)|(0x83<<16))
+#define GFX_OP_MAP_INFO          ((0x3<<29)|(0x1d<<24)|0x4)
+#define GFX_OP_DESTBUFFER_VARS   ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
+#define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
+
+#define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2)
+
+#define SRC_COPY_BLT_CMD                ((2<<29)|(0x43<<22)|4)
+#define XY_SRC_COPY_BLT_CMD		((2<<29)|(0x53<<22)|6)
+#define XY_SRC_COPY_BLT_WRITE_ALPHA	(1<<21)
+#define XY_SRC_COPY_BLT_WRITE_RGB	(1<<20)
+#define XY_SRC_COPY_BLT_SRC_TILED	(1<<15)
+#define XY_SRC_COPY_BLT_DST_TILED	(1<<11)
+
+#define MI_BATCH_BUFFER		((0x30<<23)|1)
+#define MI_BATCH_BUFFER_START	(0x31<<23)
+#define MI_BATCH_BUFFER_END	(0xA<<23)
+#define MI_BATCH_NON_SECURE	(1)
+#define MI_BATCH_NON_SECURE_I965 (1<<8)
+
+#define MI_WAIT_FOR_EVENT       ((0x3<<23))
+#define MI_WAIT_FOR_PLANE_B_FLIP      (1<<6)
+#define MI_WAIT_FOR_PLANE_A_FLIP      (1<<2)
+#define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
+
+#define MI_LOAD_SCAN_LINES_INCL  ((0x12<<23))
+
+#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
+#define ASYNC_FLIP                (1<<22)
+#define DISPLAY_PLANE_A           (0<<20)
+#define DISPLAY_PLANE_B           (1<<20)
+
+/* Display regs */
+#define DSPACNTR                0x70180
+#define DSPBCNTR                0x71180
+#define DISPPLANE_SEL_PIPE_MASK                 (1<<24)
+
+/* Define the region of interest for the binner:
+ */
+#define CMD_OP_BIN_CONTROL	 ((0x3<<29)|(0x1d<<24)|(0x84<<16)|4)
+
+#define CMD_OP_DESTBUFFER_INFO	 ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
+
+#define CMD_MI_FLUSH         (0x04 << 23)
+#define MI_NO_WRITE_FLUSH    (1 << 2)
+#define MI_READ_FLUSH        (1 << 0)
+#define MI_EXE_FLUSH         (1 << 1)
+#define MI_END_SCENE         (1 << 4) /* flush binner and incr scene count */
+#define MI_SCENE_COUNT       (1 << 3) /* just increment scene count */
+
+#define BREADCRUMB_BITS 31
+#define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1)
+
+#define READ_BREADCRUMB(dev_priv)  (((volatile u32*)(dev_priv->hw_status_page))[5])
+#define READ_HWSP(dev_priv, reg)  (((volatile u32*)(dev_priv->hw_status_page))[reg])
+
+#define BLC_PWM_CTL		0x61254
+#define BACKLIGHT_MODULATION_FREQ_SHIFT		(17)
+
+#define BLC_PWM_CTL2		0x61250
+/**
+ * This is the most significant 15 bits of the number of backlight cycles in a
+ * complete cycle of the modulated backlight control.
+ *
+ * The actual value is this field multiplied by two.
+ */
+#define BACKLIGHT_MODULATION_FREQ_MASK		(0x7fff << 17)
+#define BLM_LEGACY_MODE				(1 << 16)
+/**
+ * This is the number of cycles out of the backlight modulation cycle for which
+ * the backlight is on.
+ *
+ * This field must be no greater than the number of cycles in the complete
+ * backlight modulation cycle.
+ */
+#define BACKLIGHT_DUTY_CYCLE_SHIFT		(0)
+#define BACKLIGHT_DUTY_CYCLE_MASK		(0xffff)
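+
+/* A minimal sketch of setting a backlight level with the fields above,
+ * assuming they live in BLC_PWM_CTL (as on 9xx parts) and that the driver's
+ * I915_READ()/I915_WRITE() helpers are in scope; the function name and the
+ * percentage parameter are illustrative only.  The frequency field holds half
+ * the full cycle count, and the duty cycle must not exceed that count.
+ */
+static inline void i915_set_backlight_sketch(drm_i915_private_t *dev_priv,
+					     u32 level_pct)
+{
+	u32 ctl = I915_READ(BLC_PWM_CTL);
+	u32 max = ((ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
+		   BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+	u32 duty = max * level_pct / 100;	/* caller keeps level_pct <= 100 */
+
+	ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+	I915_WRITE(BLC_PWM_CTL, ctl | (duty << BACKLIGHT_DUTY_CYCLE_SHIFT));
+}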
+
+#define I915_GCFGC			0xf0
+#define I915_LOW_FREQUENCY_ENABLE		(1 << 7)
+#define I915_DISPLAY_CLOCK_190_200_MHZ		(0 << 4)
+#define I915_DISPLAY_CLOCK_333_MHZ		(4 << 4)
+#define I915_DISPLAY_CLOCK_MASK			(7 << 4)
+
+#define I855_HPLLCC			0xc0
+#define I855_CLOCK_CONTROL_MASK			(3 << 0)
+#define I855_CLOCK_133_200			(0 << 0)
+#define I855_CLOCK_100_200			(1 << 0)
+#define I855_CLOCK_100_133			(2 << 0)
+#define I855_CLOCK_166_250			(3 << 0)
+
+/* p317, 319
+ */
+#define VCLK2_VCO_M        0x6008 /* treat as 16 bit? (includes msbs) */
+#define VCLK2_VCO_N        0x600a
+#define VCLK2_VCO_DIV_SEL  0x6012
+
+#define VCLK_DIVISOR_VGA0   0x6000
+#define VCLK_DIVISOR_VGA1   0x6004
+#define VCLK_POST_DIV	    0x6010
+/** Selects a post divisor of 4 instead of 2. */
+# define VGA1_PD_P2_DIV_4	(1 << 15)
+/** Overrides the p2 post divisor field */
+# define VGA1_PD_P1_DIV_2	(1 << 13)
+# define VGA1_PD_P1_SHIFT	8
+/** P1 value is 2 greater than this field */
+# define VGA1_PD_P1_MASK	(0x1f << 8)
+/** Selects a post divisor of 4 instead of 2. */
+# define VGA0_PD_P2_DIV_4	(1 << 7)
+/** Overrides the p2 post divisor field */
+# define VGA0_PD_P1_DIV_2	(1 << 5)
+# define VGA0_PD_P1_SHIFT	0
+/** P1 value is 2 greater than this field */
+# define VGA0_PD_P1_MASK	(0x1f << 0)
+
+/* PCI D state control register */
+#define D_STATE		0x6104
+#define DSPCLK_GATE_D	0x6200
+
+/* I830 CRTC registers */
+#define HTOTAL_A	0x60000
+#define HBLANK_A	0x60004
+#define HSYNC_A		0x60008
+#define VTOTAL_A	0x6000c
+#define VBLANK_A	0x60010
+#define VSYNC_A		0x60014
+#define PIPEASRC	0x6001c
+#define BCLRPAT_A	0x60020
+#define VSYNCSHIFT_A	0x60028
+
+#define HTOTAL_B	0x61000
+#define HBLANK_B	0x61004
+#define HSYNC_B		0x61008
+#define VTOTAL_B	0x6100c
+#define VBLANK_B	0x61010
+#define VSYNC_B		0x61014
+#define PIPEBSRC	0x6101c
+#define BCLRPAT_B	0x61020
+#define VSYNCSHIFT_B	0x61028
+
+#define PP_STATUS	0x61200
+# define PP_ON					(1 << 31)
+/**
+ * Indicates that all dependencies of the panel are on:
+ *
+ * - PLL enabled
+ * - pipe enabled
+ * - LVDS/DVOB/DVOC on
+ */
+# define PP_READY				(1 << 30)
+# define PP_SEQUENCE_NONE			(0 << 28)
+# define PP_SEQUENCE_ON				(1 << 28)
+# define PP_SEQUENCE_OFF			(2 << 28)
+# define PP_SEQUENCE_MASK			0x30000000
+#define PP_CONTROL	0x61204
+# define POWER_TARGET_ON			(1 << 0)
+
+#define LVDSPP_ON       0x61208
+#define LVDSPP_OFF      0x6120c
+#define PP_CYCLE        0x61210
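+
+/* A minimal sketch of the panel power-up handshake implied by the bits above,
+ * assuming the driver's I915_READ()/I915_WRITE() helpers; the function name
+ * is illustrative only and a real implementation would bound the wait.
+ */
+static inline void i915_panel_on_sketch(drm_i915_private_t *dev_priv)
+{
+	I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | POWER_TARGET_ON);
+	while ((I915_READ(PP_STATUS) & PP_ON) == 0)
+		;	/* wait for the power sequencer to report the panel on */
+}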
+
+#define PFIT_CONTROL	0x61230
+# define PFIT_ENABLE				(1 << 31)
+# define PFIT_PIPE_MASK				(3 << 29)
+# define PFIT_PIPE_SHIFT			29
+# define VERT_INTERP_DISABLE			(0 << 10)
+# define VERT_INTERP_BILINEAR			(1 << 10)
+# define VERT_INTERP_MASK			(3 << 10)
+# define VERT_AUTO_SCALE			(1 << 9)
+# define HORIZ_INTERP_DISABLE			(0 << 6)
+# define HORIZ_INTERP_BILINEAR			(1 << 6)
+# define HORIZ_INTERP_MASK			(3 << 6)
+# define HORIZ_AUTO_SCALE			(1 << 5)
+# define PANEL_8TO6_DITHER_ENABLE		(1 << 3)
+
+#define PFIT_PGM_RATIOS	0x61234
+# define PFIT_VERT_SCALE_MASK			0xfff00000
+# define PFIT_HORIZ_SCALE_MASK			0x0000fff0
+
+#define PFIT_AUTO_RATIOS	0x61238
+
+
+#define DPLL_A		0x06014
+#define DPLL_B		0x06018
+# define DPLL_VCO_ENABLE			(1 << 31)
+# define DPLL_DVO_HIGH_SPEED			(1 << 30)
+# define DPLL_SYNCLOCK_ENABLE			(1 << 29)
+# define DPLL_VGA_MODE_DIS			(1 << 28)
+# define DPLLB_MODE_DAC_SERIAL			(1 << 26) /* i915 */
+# define DPLLB_MODE_LVDS			(2 << 26) /* i915 */
+# define DPLL_MODE_MASK				(3 << 26)
+# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10	(0 << 24) /* i915 */
+# define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5		(1 << 24) /* i915 */
+# define DPLLB_LVDS_P2_CLOCK_DIV_14		(0 << 24) /* i915 */
+# define DPLLB_LVDS_P2_CLOCK_DIV_7		(1 << 24) /* i915 */
+# define DPLL_P2_CLOCK_DIV_MASK			0x03000000 /* i915 */
+# define DPLL_FPA01_P1_POST_DIV_MASK		0x00ff0000 /* i915 */
+/**
+ *  The i830 generation, in DAC/serial mode, defines p1 as two plus this
+ * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set.
+ */
+# define DPLL_FPA01_P1_POST_DIV_MASK_I830	0x001f0000
+/**
+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
+ * this field (only one bit may be set).
+ */
+# define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS	0x003f0000
+# define DPLL_FPA01_P1_POST_DIV_SHIFT		16
+# define PLL_P2_DIVIDE_BY_4			(1 << 23) /* i830, required in DVO non-gang */
+# define PLL_P1_DIVIDE_BY_TWO			(1 << 21) /* i830 */
+# define PLL_REF_INPUT_DREFCLK			(0 << 13)
+# define PLL_REF_INPUT_TVCLKINA			(1 << 13) /* i830 */
+# define PLL_REF_INPUT_TVCLKINBC		(2 << 13) /* SDVO TVCLKIN */
+# define PLLB_REF_INPUT_SPREADSPECTRUMIN	(3 << 13)
+# define PLL_REF_INPUT_MASK			(3 << 13)
+# define PLL_LOAD_PULSE_PHASE_SHIFT		9
+/*
+ * Parallel to Serial Load Pulse phase selection.
+ * Selects the phase for the 10X DPLL clock for the PCIe
+ * digital display port. The range is 4 to 13; 10 or more
+ * is just a flip delay. The default is 6.
+ */
+# define PLL_LOAD_PULSE_PHASE_MASK		(0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
+# define DISPLAY_RATE_SELECT_FPA1		(1 << 8)
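+
+/* A minimal sketch of recovering P1 on i830-class parts from the two
+ * encodings described above: field value plus two in DAC/serial mode (or
+ * just 2 when PLL_P1_DIVIDE_BY_TWO is set), and the bit position set within
+ * the field in LVDS mode.  ffs() is the kernel's find-first-set helper; the
+ * function name is illustrative only.
+ */
+static inline int i830_dpll_p1_sketch(u32 dpll, int is_lvds)
+{
+	if (is_lvds)
+		return ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+			   DPLL_FPA01_P1_POST_DIV_SHIFT);
+	if (dpll & PLL_P1_DIVIDE_BY_TWO)
+		return 2;
+	return ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+		DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+}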
+
+/**
+ * SDVO multiplier for 945G/GM. Not used on 965.
+ *
+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+# define SDVO_MULTIPLIER_MASK			0x000000ff
+# define SDVO_MULTIPLIER_SHIFT_HIRES		4
+# define SDVO_MULTIPLIER_SHIFT_VGA		0
+
+/** @defgroup DPLL_MD
+ * @{
+ */
+/** Pipe A SDVO/UDI clock multiplier/divider register for G965. */
+#define DPLL_A_MD		0x0601c
+/** Pipe B SDVO/UDI clock multiplier/divider register for G965. */
+#define DPLL_B_MD		0x06020
+/**
+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
+ *
+ * Value is pixels minus 1.  Must be set to 1 pixel for SDVO.
+ */
+# define DPLL_MD_UDI_DIVIDER_MASK		0x3f000000
+# define DPLL_MD_UDI_DIVIDER_SHIFT		24
+/** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
+# define DPLL_MD_VGA_UDI_DIVIDER_MASK		0x003f0000
+# define DPLL_MD_VGA_UDI_DIVIDER_SHIFT		16
+/**
+ * SDVO/UDI pixel multiplier.
+ *
+ * SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus
+ * clock rate is 10 times the DPLL clock.  At low resolution/refresh rate
+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
+ * dummy bytes in the datastream at an increased clock rate, with both sides of
+ * the link knowing how many bytes are fill.
+ *
+ * So, for a mode with a dotclock of 65 MHz, we would want to double the clock
+ * rate to 130 MHz to get a bus rate of 1.3 GHz.  The DPLL clock rate would be
+ * set to 130 MHz, and the SDVO multiplier set to 2x in this register and
+ * through an SDVO command.
+ *
+ * This register field has values of multiplication factor minus 1, with
+ * a maximum multiplier of 5 for SDVO.
+ */
+# define DPLL_MD_UDI_MULTIPLIER_MASK		0x00003f00
+# define DPLL_MD_UDI_MULTIPLIER_SHIFT		8
+/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
+ * This should be set to the default value (3) or the CRT won't work. No,
+ * I don't entirely understand what this does...
+ */
+# define DPLL_MD_VGA_UDI_MULTIPLIER_MASK	0x0000003f
+# define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT	0
+/** @} */
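+
+/* A minimal sketch of choosing the SDVO/UDI multiplier described above: scale
+ * the dotclock until the 10x bus clock reaches the 1-2 GHz window, then
+ * program (multiplier - 1) into the DPLL_MD_UDI_MULTIPLIER field.  The
+ * function name and kHz parameter are illustrative only.
+ */
+static inline int i915_sdvo_multiplier_sketch(int dotclock_khz)
+{
+	int mult;
+
+	for (mult = 1; mult < 5; mult++)
+		if (dotclock_khz * mult * 10 >= 1000000)	/* 1 GHz in kHz */
+			break;
+	return mult;
+}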
+
+#define DPLL_TEST		0x606c
+# define DPLLB_TEST_SDVO_DIV_1			(0 << 22)
+# define DPLLB_TEST_SDVO_DIV_2			(1 << 22)
+# define DPLLB_TEST_SDVO_DIV_4			(2 << 22)
+# define DPLLB_TEST_SDVO_DIV_MASK		(3 << 22)
+# define DPLLB_TEST_N_BYPASS			(1 << 19)
+# define DPLLB_TEST_M_BYPASS			(1 << 18)
+# define DPLLB_INPUT_BUFFER_ENABLE		(1 << 16)
+# define DPLLA_TEST_N_BYPASS			(1 << 3)
+# define DPLLA_TEST_M_BYPASS			(1 << 2)
+# define DPLLA_INPUT_BUFFER_ENABLE		(1 << 0)
+
+#define ADPA			0x61100
+#define ADPA_DAC_ENABLE		(1<<31)
+#define ADPA_DAC_DISABLE	0
+#define ADPA_PIPE_SELECT_MASK	(1<<30)
+#define ADPA_PIPE_A_SELECT	0
+#define ADPA_PIPE_B_SELECT	(1<<30)
+#define ADPA_USE_VGA_HVPOLARITY (1<<15)
+#define ADPA_SETS_HVPOLARITY	0
+#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
+#define ADPA_VSYNC_CNTL_ENABLE	0
+#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
+#define ADPA_HSYNC_CNTL_ENABLE	0
+#define ADPA_VSYNC_ACTIVE_HIGH	(1<<4)
+#define ADPA_VSYNC_ACTIVE_LOW	0
+#define ADPA_HSYNC_ACTIVE_HIGH	(1<<3)
+#define ADPA_HSYNC_ACTIVE_LOW	0
+
+#define FPA0		0x06040
+#define FPA1		0x06044
+#define FPB0		0x06048
+#define FPB1		0x0604c
+# define FP_N_DIV_MASK				0x003f0000
+# define FP_N_DIV_SHIFT				16
+# define FP_M1_DIV_MASK				0x00003f00
+# define FP_M1_DIV_SHIFT			8
+# define FP_M2_DIV_MASK				0x0000003f
+# define FP_M2_DIV_SHIFT			0
+
+
+#define PORT_HOTPLUG_EN		0x61110
+# define SDVOB_HOTPLUG_INT_EN			(1 << 26)
+# define SDVOC_HOTPLUG_INT_EN			(1 << 25)
+# define TV_HOTPLUG_INT_EN			(1 << 18)
+# define CRT_HOTPLUG_INT_EN			(1 << 9)
+# define CRT_HOTPLUG_FORCE_DETECT		(1 << 3)
+
+#define PORT_HOTPLUG_STAT	0x61114
+# define CRT_HOTPLUG_INT_STATUS			(1 << 11)
+# define TV_HOTPLUG_INT_STATUS			(1 << 10)
+# define CRT_HOTPLUG_MONITOR_MASK		(3 << 8)
+# define CRT_HOTPLUG_MONITOR_COLOR		(3 << 8)
+# define CRT_HOTPLUG_MONITOR_MONO		(2 << 8)
+# define CRT_HOTPLUG_MONITOR_NONE		(0 << 8)
+# define SDVOC_HOTPLUG_INT_STATUS		(1 << 7)
+# define SDVOB_HOTPLUG_INT_STATUS		(1 << 6)
+
+#define SDVOB			0x61140
+#define SDVOC			0x61160
+#define SDVO_ENABLE				(1 << 31)
+#define SDVO_PIPE_B_SELECT			(1 << 30)
+#define SDVO_STALL_SELECT			(1 << 29)
+#define SDVO_INTERRUPT_ENABLE			(1 << 26)
+/**
+ * 915G/GM SDVO pixel multiplier.
+ *
+ * Programmed value is multiplier - 1, up to 5x.
+ *
+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define SDVO_PORT_MULTIPLY_MASK			(7 << 23)
+#define SDVO_PORT_MULTIPLY_SHIFT		23
+#define SDVO_PHASE_SELECT_MASK			(15 << 19)
+#define SDVO_PHASE_SELECT_DEFAULT		(6 << 19)
+#define SDVO_CLOCK_OUTPUT_INVERT		(1 << 18)
+#define SDVOC_GANG_MODE				(1 << 16)
+#define SDVO_BORDER_ENABLE			(1 << 7)
+#define SDVOB_PCIE_CONCURRENCY			(1 << 3)
+#define SDVO_DETECTED				(1 << 2)
+/* Bits to be preserved when writing */
+#define SDVOB_PRESERVE_MASK			((1 << 17) | (1 << 16) | (1 << 14))
+#define SDVOC_PRESERVE_MASK			(1 << 17)
+
+/** @defgroup LVDS
+ * @{
+ */
+/**
+ * This register controls the LVDS output enable, pipe selection, and data
+ * format selection.
+ *
+ * All of the clock/data pairs are forcibly powered down by power sequencing.
+ */
+#define LVDS			0x61180
+/**
+ * Enables the LVDS port.  This bit must be set before DPLLs are enabled, as
+ * the DPLL semantics change when the LVDS is assigned to that pipe.
+ */
+# define LVDS_PORT_EN			(1 << 31)
+/** Selects pipe B for LVDS data.  Must be set on pre-965. */
+# define LVDS_PIPEB_SELECT		(1 << 30)
+
+/**
+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
+ * pixel.
+ */
+# define LVDS_A0A2_CLKA_POWER_MASK	(3 << 8)
+# define LVDS_A0A2_CLKA_POWER_DOWN	(0 << 8)
+# define LVDS_A0A2_CLKA_POWER_UP	(3 << 8)
+/**
+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
+ * mode.  Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
+ * on.
+ */
+# define LVDS_A3_POWER_MASK		(3 << 6)
+# define LVDS_A3_POWER_DOWN		(0 << 6)
+# define LVDS_A3_POWER_UP		(3 << 6)
+/**
+ * Controls the CLKB pair.  This should only be set when LVDS_B0B3_POWER_UP
+ * is set.
+ */
+# define LVDS_CLKB_POWER_MASK		(3 << 4)
+# define LVDS_CLKB_POWER_DOWN		(0 << 4)
+# define LVDS_CLKB_POWER_UP		(3 << 4)
+
+/**
+ * Controls the B0-B3 data pairs.  This must be set to match the DPLL p2
+ * setting for whether we are in dual-channel mode.  The B3 pair will
+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
+ */
+# define LVDS_B0B3_POWER_MASK		(3 << 2)
+# define LVDS_B0B3_POWER_DOWN		(0 << 2)
+# define LVDS_B0B3_POWER_UP		(3 << 2)
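+
+/* A minimal sketch of the basic 18-bit, single-channel LVDS enable on pipe B
+ * described above, assuming the driver's I915_WRITE() helper; the function
+ * name is illustrative only.  Per the LVDS_PORT_EN note, this is done before
+ * the DPLL is enabled.
+ */
+static inline void i915_lvds_enable_sketch(drm_i915_private_t *dev_priv)
+{
+	I915_WRITE(LVDS, LVDS_PORT_EN | LVDS_PIPEB_SELECT |
+			 LVDS_A0A2_CLKA_POWER_UP);
+}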
+
+#define PIPEACONF 0x70008
+#define PIPEACONF_ENABLE	(1<<31)
+#define PIPEACONF_DISABLE	0
+#define PIPEACONF_DOUBLE_WIDE	(1<<30)
+#define I965_PIPECONF_ACTIVE	(1<<30)
+#define PIPEACONF_SINGLE_WIDE	0
+#define PIPEACONF_PIPE_UNLOCKED 0
+#define PIPEACONF_PIPE_LOCKED	(1<<25)
+#define PIPEACONF_PALETTE	0
+#define PIPEACONF_GAMMA		(1<<24)
+#define PIPECONF_FORCE_BORDER	(1<<25)
+#define PIPECONF_PROGRESSIVE	(0 << 21)
+#define PIPECONF_INTERLACE_W_FIELD_INDICATION	(6 << 21)
+#define PIPECONF_INTERLACE_FIELD_0_ONLY		(7 << 21)
+
+#define DSPARB	  0x70030
+#define DSPARB_CSTART_MASK	(0x7f << 7)
+#define DSPARB_CSTART_SHIFT	7
+#define DSPARB_BSTART_MASK	(0x7f)
+#define DSPARB_BSTART_SHIFT	0
+
+#define PIPEBCONF 0x71008
+#define PIPEBCONF_ENABLE	(1<<31)
+#define PIPEBCONF_DISABLE	0
+#define PIPEBCONF_DOUBLE_WIDE	(1<<30)
+#define PIPEBCONF_DISABLE	0
+#define PIPEBCONF_GAMMA		(1<<24)
+#define PIPEBCONF_PALETTE	0
+
+#define PIPEBGCMAXRED		0x71010
+#define PIPEBGCMAXGREEN		0x71014
+#define PIPEBGCMAXBLUE		0x71018
+#define PIPEBSTAT		0x71024
+#define PIPEBFRAMEHIGH		0x71040
+#define PIPEBFRAMEPIXEL		0x71044
+
+#define DSPACNTR		0x70180
+#define DSPBCNTR		0x71180
+#define DISPLAY_PLANE_ENABLE			(1<<31)
+#define DISPLAY_PLANE_DISABLE			0
+#define DISPPLANE_GAMMA_ENABLE			(1<<30)
+#define DISPPLANE_GAMMA_DISABLE			0
+#define DISPPLANE_PIXFORMAT_MASK		(0xf<<26)
+#define DISPPLANE_8BPP				(0x2<<26)
+#define DISPPLANE_15_16BPP			(0x4<<26)
+#define DISPPLANE_16BPP				(0x5<<26)
+#define DISPPLANE_32BPP_NO_ALPHA		(0x6<<26)
+#define DISPPLANE_32BPP				(0x7<<26)
+#define DISPPLANE_STEREO_ENABLE			(1<<25)
+#define DISPPLANE_STEREO_DISABLE		0
+#define DISPPLANE_SEL_PIPE_MASK			(1<<24)
+#define DISPPLANE_SEL_PIPE_A			0
+#define DISPPLANE_SEL_PIPE_B			(1<<24)
+#define DISPPLANE_SRC_KEY_ENABLE		(1<<22)
+#define DISPPLANE_SRC_KEY_DISABLE		0
+#define DISPPLANE_LINE_DOUBLE			(1<<20)
+#define DISPPLANE_NO_LINE_DOUBLE		0
+#define DISPPLANE_STEREO_POLARITY_FIRST		0
+#define DISPPLANE_STEREO_POLARITY_SECOND	(1<<18)
+/* plane B only */
+#define DISPPLANE_ALPHA_TRANS_ENABLE		(1<<15)
+#define DISPPLANE_ALPHA_TRANS_DISABLE		0
+#define DISPPLANE_SPRITE_ABOVE_DISPLAYA		0
+#define DISPPLANE_SPRITE_ABOVE_OVERLAY		(1)
+
+#define DSPABASE		0x70184
+#define DSPASTRIDE		0x70188
+
+#define DSPBBASE		0x71184
+#define DSPBADDR		DSPBBASE
+#define DSPBSTRIDE		0x71188
+
+#define DSPAKEYVAL		0x70194
+#define DSPAKEYMASK		0x70198
+
+#define DSPAPOS			0x7018C /* reserved */
+#define DSPASIZE		0x70190
+#define DSPBPOS			0x7118C
+#define DSPBSIZE		0x71190
+
+#define DSPASURF		0x7019C
+#define DSPATILEOFF		0x701A4
+
+#define DSPBSURF		0x7119C
+#define DSPBTILEOFF		0x711A4
+
+#define VGACNTRL		0x71400
+# define VGA_DISP_DISABLE			(1 << 31)
+# define VGA_2X_MODE				(1 << 30)
+# define VGA_PIPE_B_SELECT			(1 << 29)
+
+/*
+ * Some BIOS scratch area registers.  The 845 (and 830?) store the amount
+ * of video memory available to the BIOS in SWF1.
+ */
+
+#define SWF0			0x71410
+
+/*
+ * 855 scratch registers.
+ */
+#define SWF10			0x70410
+
+#define SWF30			0x72414
+
+/*
+ * Overlay registers.  These are overlay registers accessed via MMIO.
+ * Those loaded via the overlay register page are defined in i830_video.c.
+ */
+#define OVADD			0x30000
+
+#define DOVSTA			0x30008
+#define OC_BUF			(0x3<<20)
+
+#define OGAMC5			0x30010
+#define OGAMC4			0x30014
+#define OGAMC3			0x30018
+#define OGAMC2			0x3001c
+#define OGAMC1			0x30020
+#define OGAMC0			0x30024
+/*
+ * Palette registers
+ */
+#define PALETTE_A		0x0a000
+#define PALETTE_B		0x0a800
+
+#define IS_I830(dev) ((dev)->pci_device == 0x3577)
+#define IS_845G(dev) ((dev)->pci_device == 0x2562)
+#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
+#define IS_I855(dev) ((dev)->pci_device == 0x3582)
+#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+
+#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
+#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
+#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
+#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\
+		        (dev)->pci_device == 0x27AE)
+#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
+		       (dev)->pci_device == 0x2982 || \
+		       (dev)->pci_device == 0x2992 || \
+		       (dev)->pci_device == 0x29A2 || \
+		       (dev)->pci_device == 0x2A02 || \
+		       (dev)->pci_device == 0x2A12 || \
+		       (dev)->pci_device == 0x2A42 || \
+		       (dev)->pci_device == 0x2E02 || \
+		       (dev)->pci_device == 0x2E12 || \
+		       (dev)->pci_device == 0x2E22)
+
+#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
+
+#define IS_IGD_GM(dev) ((dev)->pci_device == 0x2A42)
+
+#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
+		     (dev)->pci_device == 0x2E12 || \
+		     (dev)->pci_device == 0x2E22)
+
+#define IS_G33(dev)    ((dev)->pci_device == 0x29C2 ||	\
+			(dev)->pci_device == 0x29B2 ||	\
+			(dev)->pci_device == 0x29D2)
+
+#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
+		      IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
+
+#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
+			IS_I945GM(dev) || IS_I965GM(dev) || IS_IGD_GM(dev))
+
+#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_IGD_GM(dev) || IS_G4X(dev))
+
+#define PRIMARY_RINGBUFFER_SIZE         (128*1024)
+
+#endif
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
new file mode 100644
index 000000000000..1fe68a251b75
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -0,0 +1,222 @@
+/**
+ * \file i915_ioc32.c
+ *
+ * 32-bit ioctl compatibility routines for the i915 DRM.
+ *
+ * \author Alan Hourihane <alanh@fairlite.demon.co.uk>
+ *
+ *
+ * Copyright (C) Paul Mackerras 2005
+ * Copyright (C) Alan Hourihane 2005
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <linux/compat.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+
+typedef struct _drm_i915_batchbuffer32 {
+	int start;		/* agp offset */
+	int used;		/* nr bytes in use */
+	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
+	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
+	int num_cliprects;	/* multipass with multiple cliprects? */
+	u32 cliprects;		/* pointer to userspace cliprects */
+} drm_i915_batchbuffer32_t;
+
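+/* Each compat handler below follows the same pattern: copy the packed 32-bit
+ * layout in from userspace, rebuild the native structure in a buffer obtained
+ * from compat_alloc_user_space(), and hand that to the regular ioctl path.
+ */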
+static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
+				   unsigned long arg)
+{
+	drm_i915_batchbuffer32_t batchbuffer32;
+	drm_i915_batchbuffer_t __user *batchbuffer;
+
+	if (copy_from_user
+	    (&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32)))
+		return -EFAULT;
+
+	batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer));
+	if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer))
+	    || __put_user(batchbuffer32.start, &batchbuffer->start)
+	    || __put_user(batchbuffer32.used, &batchbuffer->used)
+	    || __put_user(batchbuffer32.DR1, &batchbuffer->DR1)
+	    || __put_user(batchbuffer32.DR4, &batchbuffer->DR4)
+	    || __put_user(batchbuffer32.num_cliprects,
+			  &batchbuffer->num_cliprects)
+	    || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
+			  &batchbuffer->cliprects))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_I915_BATCHBUFFER,
+			 (unsigned long)batchbuffer);
+}
+
+typedef struct _drm_i915_cmdbuffer32 {
+	u32 buf;		/* pointer to userspace command buffer */
+	int sz;			/* nr bytes in buf */
+	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
+	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
+	int num_cliprects;	/* multipass with multiple cliprects? */
+	u32 cliprects;		/* pointer to userspace cliprects */
+} drm_i915_cmdbuffer32_t;
+
+static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
+				 unsigned long arg)
+{
+	drm_i915_cmdbuffer32_t cmdbuffer32;
+	drm_i915_cmdbuffer_t __user *cmdbuffer;
+
+	if (copy_from_user
+	    (&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32)))
+		return -EFAULT;
+
+	cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
+	if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
+	    || __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
+			  &cmdbuffer->buf)
+	    || __put_user(cmdbuffer32.sz, &cmdbuffer->sz)
+	    || __put_user(cmdbuffer32.DR1, &cmdbuffer->DR1)
+	    || __put_user(cmdbuffer32.DR4, &cmdbuffer->DR4)
+	    || __put_user(cmdbuffer32.num_cliprects, &cmdbuffer->num_cliprects)
+	    || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
+			  &cmdbuffer->cliprects))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_I915_CMDBUFFER, (unsigned long)cmdbuffer);
+}
+
+typedef struct drm_i915_irq_emit32 {
+	u32 irq_seq;
+} drm_i915_irq_emit32_t;
+
+static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	drm_i915_irq_emit32_t req32;
+	drm_i915_irq_emit_t __user *request;
+
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user((int __user *)(unsigned long)req32.irq_seq,
+			  &request->irq_seq))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_I915_IRQ_EMIT, (unsigned long)request);
+}
+
+typedef struct drm_i915_getparam32 {
+	int param;
+	u32 value;
+} drm_i915_getparam32_t;
+
+static int compat_i915_getparam(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	drm_i915_getparam32_t req32;
+	drm_i915_getparam_t __user *request;
+
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user(req32.param, &request->param)
+	    || __put_user((void __user *)(unsigned long)req32.value,
+			  &request->value))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_I915_GETPARAM, (unsigned long)request);
+}
+
+typedef struct drm_i915_mem_alloc32 {
+	int region;
+	int alignment;
+	int size;
+	u32 region_offset;	/* offset from start of fb or agp */
+} drm_i915_mem_alloc32_t;
+
+static int compat_i915_alloc(struct file *file, unsigned int cmd,
+			     unsigned long arg)
+{
+	drm_i915_mem_alloc32_t req32;
+	drm_i915_mem_alloc_t __user *request;
+
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user(req32.region, &request->region)
+	    || __put_user(req32.alignment, &request->alignment)
+	    || __put_user(req32.size, &request->size)
+	    || __put_user((void __user *)(unsigned long)req32.region_offset,
+			  &request->region_offset))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_I915_ALLOC, (unsigned long)request);
+}
+
+drm_ioctl_compat_t *i915_compat_ioctls[] = {
+	[DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
+	[DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
+	[DRM_I915_GETPARAM] = compat_i915_getparam,
+	[DRM_I915_IRQ_EMIT] = compat_i915_irq_emit,
+	[DRM_I915_ALLOC] = compat_i915_alloc
+};
+
+/**
+ * Called whenever a 32-bit process running under a 64-bit kernel
+ * performs an ioctl on /dev/dri/card<n>.
+ *
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ */
+long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+	drm_ioctl_compat_t *fn = NULL;
+	int ret;
+
+	if (nr < DRM_COMMAND_BASE)
+		return drm_compat_ioctl(filp, cmd, arg);
+
+	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
+		fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
+
+	lock_kernel();		/* XXX for now */
+	if (fn != NULL)
+		ret = (*fn) (filp, cmd, arg);
+	else
+		ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
+	unlock_kernel();
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
new file mode 100644
index 000000000000..df036118b8b1
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -0,0 +1,623 @@
+/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
+ */
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+#define USER_INT_FLAG (1<<1)
+#define VSYNC_PIPEB_FLAG (1<<5)
+#define VSYNC_PIPEA_FLAG (1<<7)
+
+#define MAX_NOPID ((u32)~0)
+
+/**
+ * Emit blits for scheduled buffer swaps.
+ *
+ * This function will be called with the HW lock held.
+ */
+static void i915_vblank_tasklet(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	unsigned long irqflags;
+	struct list_head *list, *tmp, hits, *hit;
+	int nhits, nrects, slice[2], upper[2], lower[2], i;
+	unsigned counter[2] = { atomic_read(&dev->vbl_received),
+				atomic_read(&dev->vbl_received2) };
+	struct drm_drawable_info *drw;
+	drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	u32 cpp = dev_priv->cpp;
+	u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
+				XY_SRC_COPY_BLT_WRITE_ALPHA |
+				XY_SRC_COPY_BLT_WRITE_RGB)
+			     : XY_SRC_COPY_BLT_CMD;
+	u32 src_pitch = sarea_priv->pitch * cpp;
+	u32 dst_pitch = sarea_priv->pitch * cpp;
+	u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
+	RING_LOCALS;
+
+	if (IS_I965G(dev) && sarea_priv->front_tiled) {
+		cmd |= XY_SRC_COPY_BLT_DST_TILED;
+		dst_pitch >>= 2;
+	}
+	if (IS_I965G(dev) && sarea_priv->back_tiled) {
+		cmd |= XY_SRC_COPY_BLT_SRC_TILED;
+		src_pitch >>= 2;
+	}
+
+	DRM_DEBUG("\n");
+
+	INIT_LIST_HEAD(&hits);
+
+	nhits = nrects = 0;
+
+	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
+
+	/* Find buffer swaps scheduled for this vertical blank */
+	list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
+		drm_i915_vbl_swap_t *vbl_swap =
+			list_entry(list, drm_i915_vbl_swap_t, head);
+
+		if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23))
+			continue;
+
+		list_del(list);
+		dev_priv->swaps_pending--;
+
+		spin_unlock(&dev_priv->swaps_lock);
+		spin_lock(&dev->drw_lock);
+
+		drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
+
+		if (!drw) {
+			spin_unlock(&dev->drw_lock);
+			drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
+			spin_lock(&dev_priv->swaps_lock);
+			continue;
+		}
+
+		list_for_each(hit, &hits) {
+			drm_i915_vbl_swap_t *swap_cmp =
+				list_entry(hit, drm_i915_vbl_swap_t, head);
+			struct drm_drawable_info *drw_cmp =
+				drm_get_drawable_info(dev, swap_cmp->drw_id);
+
+			if (drw_cmp &&
+			    drw_cmp->rects[0].y1 > drw->rects[0].y1) {
+				list_add_tail(list, hit);
+				break;
+			}
+		}
+
+		spin_unlock(&dev->drw_lock);
+
+		/* List of hits was empty, or we reached the end of it */
+		if (hit == &hits)
+			list_add_tail(list, hits.prev);
+
+		nhits++;
+
+		spin_lock(&dev_priv->swaps_lock);
+	}
+
+	if (nhits == 0) {
+		spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
+		return;
+	}
+
+	spin_unlock(&dev_priv->swaps_lock);
+
+	i915_kernel_lost_context(dev);
+
+	if (IS_I965G(dev)) {
+		BEGIN_LP_RING(4);
+
+		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
+		OUT_RING(0);
+		OUT_RING(((sarea_priv->width - 1) & 0xffff) | ((sarea_priv->height - 1) << 16));
+		OUT_RING(0);
+		ADVANCE_LP_RING();
+	} else {
+		BEGIN_LP_RING(6);
+
+		OUT_RING(GFX_OP_DRAWRECT_INFO);
+		OUT_RING(0);
+		OUT_RING(0);
+		OUT_RING(sarea_priv->width | sarea_priv->height << 16);
+		OUT_RING(sarea_priv->width | sarea_priv->height << 16);
+		OUT_RING(0);
+
+		ADVANCE_LP_RING();
+	}
+
+	sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
+
+	upper[0] = upper[1] = 0;
+	slice[0] = max(sarea_priv->pipeA_h / nhits, 1);
+	slice[1] = max(sarea_priv->pipeB_h / nhits, 1);
+	lower[0] = sarea_priv->pipeA_y + slice[0];
+	lower[1] = sarea_priv->pipeB_y + slice[1];
+
+	spin_lock(&dev->drw_lock);
+
+	/* Emit blits for buffer swaps, partitioning both outputs into as many
+	 * slices as there are buffer swaps scheduled in order to avoid tearing
+	 * (based on the assumption that a single buffer swap would always
+	 * complete before scanout starts).
+	 */
+	for (i = 0; i++ < nhits;
+	     upper[0] = lower[0], lower[0] += slice[0],
+	     upper[1] = lower[1], lower[1] += slice[1]) {
+		if (i == nhits)
+			lower[0] = lower[1] = sarea_priv->height;
+
+		list_for_each(hit, &hits) {
+			drm_i915_vbl_swap_t *swap_hit =
+				list_entry(hit, drm_i915_vbl_swap_t, head);
+			struct drm_clip_rect *rect;
+			int num_rects, pipe;
+			unsigned short top, bottom;
+
+			drw = drm_get_drawable_info(dev, swap_hit->drw_id);
+
+			if (!drw)
+				continue;
+
+			rect = drw->rects;
+			pipe = swap_hit->pipe;
+			top = upper[pipe];
+			bottom = lower[pipe];
+
+			for (num_rects = drw->num_rects; num_rects--; rect++) {
+				int y1 = max(rect->y1, top);
+				int y2 = min(rect->y2, bottom);
+
+				if (y1 >= y2)
+					continue;
+
+				BEGIN_LP_RING(8);
+
+				OUT_RING(cmd);
+				OUT_RING(ropcpp | dst_pitch);
+				OUT_RING((y1 << 16) | rect->x1);
+				OUT_RING((y2 << 16) | rect->x2);
+				OUT_RING(sarea_priv->front_offset);
+				OUT_RING((y1 << 16) | rect->x1);
+				OUT_RING(src_pitch);
+				OUT_RING(sarea_priv->back_offset);
+
+				ADVANCE_LP_RING();
+			}
+		}
+	}
+
+	spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+
+	list_for_each_safe(hit, tmp, &hits) {
+		drm_i915_vbl_swap_t *swap_hit =
+			list_entry(hit, drm_i915_vbl_swap_t, head);
+
+		list_del(hit);
+
+		drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
+	}
+}
+
+irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u16 temp;
+	u32 pipea_stats, pipeb_stats;
+
+	pipea_stats = I915_READ(I915REG_PIPEASTAT);
+	pipeb_stats = I915_READ(I915REG_PIPEBSTAT);
+
+	temp = I915_READ16(I915REG_INT_IDENTITY_R);
+
+	temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG);
+
+	DRM_DEBUG("%s flag=%08x\n", __func__, temp);
+
+	if (temp == 0)
+		return IRQ_NONE;
+
+	I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
+	(void) I915_READ16(I915REG_INT_IDENTITY_R);
+	DRM_READMEMORYBARRIER();
+
+	dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+
+	if (temp & USER_INT_FLAG)
+		DRM_WAKEUP(&dev_priv->irq_queue);
+
+	if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
+		int vblank_pipe = dev_priv->vblank_pipe;
+
+		if ((vblank_pipe &
+		     (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
+		    == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
+			if (temp & VSYNC_PIPEA_FLAG)
+				atomic_inc(&dev->vbl_received);
+			if (temp & VSYNC_PIPEB_FLAG)
+				atomic_inc(&dev->vbl_received2);
+		} else if (((temp & VSYNC_PIPEA_FLAG) &&
+			    (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
+			   ((temp & VSYNC_PIPEB_FLAG) &&
+			    (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
+			atomic_inc(&dev->vbl_received);
+
+		DRM_WAKEUP(&dev->vbl_queue);
+		drm_vbl_send_signals(dev);
+
+		if (dev_priv->swaps_pending > 0)
+			drm_locked_tasklet(dev, i915_vblank_tasklet);
+		I915_WRITE(I915REG_PIPEASTAT,
+			pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|
+			I915_VBLANK_CLEAR);
+		I915_WRITE(I915REG_PIPEBSTAT,
+			pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE|
+			I915_VBLANK_CLEAR);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int i915_emit_irq(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+
+	i915_kernel_lost_context(dev);
+
+	DRM_DEBUG("\n");
+
+	dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
+
+	if (dev_priv->counter > 0x7FFFFFFFUL)
+		dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
+
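+	/* Store the new counter into the hardware status page at byte offset
+	 * 20 (dword 5, the slot READ_BREADCRUMB() polls), then raise a user
+	 * interrupt so waiters in i915_wait_irq() are woken.
+	 */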
+	BEGIN_LP_RING(6);
+	OUT_RING(CMD_STORE_DWORD_IDX);
+	OUT_RING(20);
+	OUT_RING(dev_priv->counter);
+	OUT_RING(0);
+	OUT_RING(0);
+	OUT_RING(GFX_OP_USER_INTERRUPT);
+	ADVANCE_LP_RING();
+
+	return dev_priv->counter;
+}
+
+static int i915_wait_irq(struct drm_device * dev, int irq_nr)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int ret = 0;
+
+	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
+		  READ_BREADCRUMB(dev_priv));
+
+	if (READ_BREADCRUMB(dev_priv) >= irq_nr)
+		return 0;
+
+	dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+
+	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
+		    READ_BREADCRUMB(dev_priv) >= irq_nr);
+
+	if (ret == -EBUSY) {
+		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
+			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
+	}
+
+	dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+	return ret;
+}
+
+static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
+				      atomic_t *counter)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned int cur_vblank;
+	int ret = 0;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
+		    (((cur_vblank = atomic_read(counter))
+			- *sequence) <= (1<<23)));
+
+	*sequence = cur_vblank;
+
+	return ret;
+}
+
+
+int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
+{
+	return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
+}
+
+int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
+{
+	return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
+}
+
+/* Needs the lock as it touches the ring.
+ */
+int i915_irq_emit(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_irq_emit_t *emit = data;
+	int result;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	result = i915_emit_irq(dev);
+
+	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
+		DRM_ERROR("copy_to_user\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* Doesn't need the hardware lock.
+ */
+int i915_irq_wait(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_irq_wait_t *irqwait = data;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	return i915_wait_irq(dev, irqwait->irq_seq);
+}
+
+static void i915_enable_interrupt(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u16 flag;
+
+	flag = 0;
+	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
+		flag |= VSYNC_PIPEA_FLAG;
+	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
+		flag |= VSYNC_PIPEB_FLAG;
+
+	I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag);
+}
+
+/* Set the vblank monitor pipe
+ */
+int i915_vblank_pipe_set(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_vblank_pipe_t *pipe = data;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
+		DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe);
+		return -EINVAL;
+	}
+
+	dev_priv->vblank_pipe = pipe->pipe;
+
+	i915_enable_interrupt(dev);
+
+	return 0;
+}
+
+int i915_vblank_pipe_get(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_vblank_pipe_t *pipe = data;
+	u16 flag;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	flag = I915_READ(I915REG_INT_ENABLE_R);
+	pipe->pipe = 0;
+	if (flag & VSYNC_PIPEA_FLAG)
+		pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
+	if (flag & VSYNC_PIPEB_FLAG)
+		pipe->pipe |= DRM_I915_VBLANK_PIPE_B;
+
+	return 0;
+}
+
+/**
+ * Schedule buffer swap at given vertical blank.
+ */
+int i915_vblank_swap(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_vblank_swap_t *swap = data;
+	drm_i915_vbl_swap_t *vbl_swap;
+	unsigned int pipe, seqtype, curseq;
+	unsigned long irqflags;
+	struct list_head *list;
+
+	if (!dev_priv) {
+		DRM_ERROR("%s called with no initialization\n", __func__);
+		return -EINVAL;
+	}
+
+	if (dev_priv->sarea_priv->rotation) {
+		DRM_DEBUG("Rotation not supported\n");
+		return -EINVAL;
+	}
+
+	if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
+			     _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
+		DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
+		return -EINVAL;
+	}
+
+	pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
+
+	seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
+
+	if (!(dev_priv->vblank_pipe & (1 << pipe))) {
+		DRM_ERROR("Invalid pipe %d\n", pipe);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&dev->drw_lock, irqflags);
+
+	if (!drm_get_drawable_info(dev, swap->drawable)) {
+		spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+		DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
+		return -EINVAL;
+	}
+
+	spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+
+	curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);
+
+	if (seqtype == _DRM_VBLANK_RELATIVE)
+		swap->sequence += curseq;
+
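+	/* The target vblank has already passed (the unsigned difference is
+	 * within the 2^23 wrap window): bump to the next vblank if NEXTONMISS
+	 * was requested, otherwise report the miss.
+	 */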
+	if ((curseq - swap->sequence) <= (1<<23)) {
+		if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) {
+			swap->sequence = curseq + 1;
+		} else {
+			DRM_DEBUG("Missed target sequence\n");
+			return -EINVAL;
+		}
+	}
+
+	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
+
+	list_for_each(list, &dev_priv->vbl_swaps.head) {
+		vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
+
+		if (vbl_swap->drw_id == swap->drawable &&
+		    vbl_swap->pipe == pipe &&
+		    vbl_swap->sequence == swap->sequence) {
+			spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
+			DRM_DEBUG("Already scheduled\n");
+			return 0;
+		}
+	}
+
+	spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
+
+	if (dev_priv->swaps_pending >= 100) {
+		DRM_DEBUG("Too many swaps queued\n");
+		return -EBUSY;
+	}
+
+	vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
+
+	if (!vbl_swap) {
+		DRM_ERROR("Failed to allocate memory to queue swap\n");
+		return -ENOMEM;
+	}
+
+	DRM_DEBUG("\n");
+
+	vbl_swap->drw_id = swap->drawable;
+	vbl_swap->pipe = pipe;
+	vbl_swap->sequence = swap->sequence;
+
+	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
+
+	list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
+	dev_priv->swaps_pending++;
+
+	spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
+
+	return 0;
+}
+
+/* drm_dma.h hooks
+*/
+void i915_driver_irq_preinstall(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	I915_WRITE16(I915REG_HWSTAM, 0xfffe);
+	I915_WRITE16(I915REG_INT_MASK_R, 0x0);
+	I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
+}
+
+void i915_driver_irq_postinstall(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	spin_lock_init(&dev_priv->swaps_lock);
+	INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
+	dev_priv->swaps_pending = 0;
+
+	if (!dev_priv->vblank_pipe)
+		dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
+	i915_enable_interrupt(dev);
+	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
+}
+
+void i915_driver_irq_uninstall(struct drm_device * dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u16 temp;
+
+	if (!dev_priv)
+		return;
+
+	I915_WRITE16(I915REG_HWSTAM, 0xffff);
+	I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
+	I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
+
+	temp = I915_READ16(I915REG_INT_IDENTITY_R);
+	I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
+}
diff --git a/drivers/gpu/drm/i915/i915_mem.c b/drivers/gpu/drm/i915/i915_mem.c
new file mode 100644
index 000000000000..6126a60dc9cb
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_mem.c
@@ -0,0 +1,386 @@
+/* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
+ */
+/*
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/* This memory manager is integrated into the global/local lru
+ * mechanisms used by the clients.  Specifically, it operates by
+ * setting the 'in_use' fields of the global LRU to indicate whether
+ * this region is privately allocated to a client.
+ *
+ * This does require the client to actually respect that field.
+ *
+ * Currently no effort is made to allocate 'private' memory in any
+ * clever way - the LRU information isn't used to determine which
+ * block to allocate, and the ring is drained prior to allocations --
+ * in other words allocation is expensive.
+ */
+static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	struct drm_tex_region *list;
+	unsigned shift, nr;
+	unsigned start;
+	unsigned end;
+	unsigned i;
+	int age;
+
+	shift = dev_priv->tex_lru_log_granularity;
+	nr = I915_NR_TEX_REGIONS;
+
+	start = p->start >> shift;
+	end = (p->start + p->size - 1) >> shift;
+
+	age = ++sarea_priv->texAge;
+	list = sarea_priv->texList;
+
+	/* Mark the regions with the new flag and update their age.  Move
+	 * them to head of list to preserve LRU semantics.
+	 */
+	for (i = start; i <= end; i++) {
+		list[i].in_use = in_use;
+		list[i].age = age;
+
+		/* remove_from_list(i)
+		 */
+		list[(unsigned)list[i].next].prev = list[i].prev;
+		list[(unsigned)list[i].prev].next = list[i].next;
+
+		/* insert_at_head(list, i)
+		 */
+		list[i].prev = nr;
+		list[i].next = list[nr].next;
+		list[(unsigned)list[nr].next].prev = i;
+		list[nr].next = i;
+	}
+}
+
+/* Very simple allocator for agp memory, working on a static range
+ * already mapped into each client's address space.
+ */
+
+static struct mem_block *split_block(struct mem_block *p, int start, int size,
+				     struct drm_file *file_priv)
+{
+	/* Maybe cut off the start of an existing block */
+	if (start > p->start) {
+		struct mem_block *newblock =
+		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
+		if (!newblock)
+			goto out;
+		newblock->start = start;
+		newblock->size = p->size - (start - p->start);
+		newblock->file_priv = NULL;
+		newblock->next = p->next;
+		newblock->prev = p;
+		p->next->prev = newblock;
+		p->next = newblock;
+		p->size -= newblock->size;
+		p = newblock;
+	}
+
+	/* Maybe cut off the end of an existing block */
+	if (size < p->size) {
+		struct mem_block *newblock =
+		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFLISTS);
+		if (!newblock)
+			goto out;
+		newblock->start = start + size;
+		newblock->size = p->size - size;
+		newblock->file_priv = NULL;
+		newblock->next = p->next;
+		newblock->prev = p;
+		p->next->prev = newblock;
+		p->next = newblock;
+		p->size = size;
+	}
+
+      out:
+	/* Our block is in the middle */
+	p->file_priv = file_priv;
+	return p;
+}
+
+static struct mem_block *alloc_block(struct mem_block *heap, int size,
+				     int align2, struct drm_file *file_priv)
+{
+	struct mem_block *p;
+	int mask = (1 << align2) - 1;
+
+	for (p = heap->next; p != heap; p = p->next) {
+		int start = (p->start + mask) & ~mask;
+		if (p->file_priv == NULL && start + size <= p->start + p->size)
+			return split_block(p, start, size, file_priv);
+	}
+
+	return NULL;
+}
+
+static struct mem_block *find_block(struct mem_block *heap, int start)
+{
+	struct mem_block *p;
+
+	for (p = heap->next; p != heap; p = p->next)
+		if (p->start == start)
+			return p;
+
+	return NULL;
+}
+
+static void free_block(struct mem_block *p)
+{
+	p->file_priv = NULL;
+
+	/* Assumes a single contiguous range.  Needs a special file_priv in
+	 * 'heap' to stop it being subsumed.
+	 */
+	if (p->next->file_priv == NULL) {
+		struct mem_block *q = p->next;
+		p->size += q->size;
+		p->next = q->next;
+		p->next->prev = p;
+		drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
+	}
+
+	if (p->prev->file_priv == NULL) {
+		struct mem_block *q = p->prev;
+		q->size += p->size;
+		q->next = p->next;
+		q->next->prev = q;
+		drm_free(p, sizeof(*q), DRM_MEM_BUFLISTS);
+	}
+}
+
+/* Initialize.  How to check for an uninitialized heap?
+ */
+static int init_heap(struct mem_block **heap, int start, int size)
+{
+	struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFLISTS);
+
+	if (!blocks)
+		return -ENOMEM;
+
+	*heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFLISTS);
+	if (!*heap) {
+		drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFLISTS);
+		return -ENOMEM;
+	}
+
+	blocks->start = start;
+	blocks->size = size;
+	blocks->file_priv = NULL;
+	blocks->next = blocks->prev = *heap;
+
+	memset(*heap, 0, sizeof(**heap));
+	(*heap)->file_priv = (struct drm_file *) - 1;
+	(*heap)->next = (*heap)->prev = blocks;
+	return 0;
+}
+
+/* Free all blocks associated with the releasing file.
+ */
+void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv,
+		      struct mem_block *heap)
+{
+	struct mem_block *p;
+
+	if (!heap || !heap->next)
+		return;
+
+	for (p = heap->next; p != heap; p = p->next) {
+		if (p->file_priv == file_priv) {
+			p->file_priv = NULL;
+			mark_block(dev, p, 0);
+		}
+	}
+
+	/* Assumes a single contiguous range.  Needs a special file_priv in
+	 * 'heap' to stop it being subsumed.
+	 */
+	for (p = heap->next; p != heap; p = p->next) {
+		while (p->file_priv == NULL && p->next->file_priv == NULL) {
+			struct mem_block *q = p->next;
+			p->size += q->size;
+			p->next = q->next;
+			p->next->prev = p;
+			drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
+		}
+	}
+}
+
+/* Shutdown.
+ */
+void i915_mem_takedown(struct mem_block **heap)
+{
+	struct mem_block *p;
+
+	if (!*heap)
+		return;
+
+	for (p = (*heap)->next; p != *heap;) {
+		struct mem_block *q = p;
+		p = p->next;
+		drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
+	}
+
+	drm_free(*heap, sizeof(**heap), DRM_MEM_BUFLISTS);
+	*heap = NULL;
+}
+
+static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region)
+{
+	switch (region) {
+	case I915_MEM_REGION_AGP:
+		return &dev_priv->agp_heap;
+	default:
+		return NULL;
+	}
+}
+
+/* IOCTL HANDLERS */
+
+int i915_mem_alloc(struct drm_device *dev, void *data,
+		   struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_mem_alloc_t *alloc = data;
+	struct mem_block *block, **heap;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	heap = get_heap(dev_priv, alloc->region);
+	if (!heap || !*heap)
+		return -EFAULT;
+
+	/* Make things easier on ourselves: all allocations at least
+	 * 4k aligned.
+	 */
+	if (alloc->alignment < 12)
+		alloc->alignment = 12;
+
+	block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
+
+	if (!block)
+		return -ENOMEM;
+
+	mark_block(dev, block, 1);
+
+	if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
+			     sizeof(int))) {
+		DRM_ERROR("copy_to_user\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+int i915_mem_free(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_mem_free_t *memfree = data;
+	struct mem_block *block, **heap;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	heap = get_heap(dev_priv, memfree->region);
+	if (!heap || !*heap)
+		return -EFAULT;
+
+	block = find_block(*heap, memfree->region_offset);
+	if (!block)
+		return -EFAULT;
+
+	if (block->file_priv != file_priv)
+		return -EPERM;
+
+	mark_block(dev, block, 0);
+	free_block(block);
+	return 0;
+}
+
+int i915_mem_init_heap(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_mem_init_heap_t *initheap = data;
+	struct mem_block **heap;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	heap = get_heap(dev_priv, initheap->region);
+	if (!heap)
+		return -EFAULT;
+
+	if (*heap) {
+		DRM_ERROR("heap already initialized?");
+		return -EFAULT;
+	}
+
+	return init_heap(heap, initheap->start, initheap->size);
+}
+
+int i915_mem_destroy_heap( struct drm_device *dev, void *data,
+			   struct drm_file *file_priv )
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_mem_destroy_heap_t *destroyheap = data;
+	struct mem_block **heap;
+
+	if ( !dev_priv ) {
+		DRM_ERROR( "called with no initialization\n" );
+		return -EINVAL;
+	}
+
+	heap = get_heap( dev_priv, destroyheap->region );
+	if (!heap) {
+		DRM_ERROR("get_heap failed");
+		return -EFAULT;
+	}
+
+	if (!*heap) {
+		DRM_ERROR("heap not initialized?");
+		return -EFAULT;
+	}
+
+	i915_mem_takedown( heap );
+	return 0;
+}
diff --git a/drivers/gpu/drm/mga/Makefile b/drivers/gpu/drm/mga/Makefile
new file mode 100644
index 000000000000..60684785c203
--- /dev/null
+++ b/drivers/gpu/drm/mga/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+mga-y := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
+
+mga-$(CONFIG_COMPAT) += mga_ioc32.o
+
+obj-$(CONFIG_DRM_MGA)	+= mga.o
+
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
new file mode 100644
index 000000000000..c1d12dbfa8d8
--- /dev/null
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -0,0 +1,1162 @@
+/* mga_dma.c -- DMA support for mga g200/g400 -*- linux-c -*-
+ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file mga_dma.c
+ * DMA support for MGA G200 / G400.
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Jeff Hartmann <jhartmann@valinux.com>
+ * \author Keith Whitwell <keith@tungstengraphics.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_sarea.h"
+#include "mga_drm.h"
+#include "mga_drv.h"
+
+#define MGA_DEFAULT_USEC_TIMEOUT	10000
+#define MGA_FREELIST_DEBUG		0
+
+#define MINIMAL_CLEANUP 0
+#define FULL_CLEANUP 1
+static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup);
+
+/* ================================================================
+ * Engine control
+ */
+
+int mga_do_wait_for_idle(drm_mga_private_t * dev_priv)
+{
+	u32 status = 0;
+	int i;
+	DRM_DEBUG("\n");
+
+	for (i = 0; i < dev_priv->usec_timeout; i++) {
+		status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
+		if (status == MGA_ENDPRDMASTS) {
+			MGA_WRITE8(MGA_CRTC_INDEX, 0);
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+
+#if MGA_DMA_DEBUG
+	DRM_ERROR("failed!\n");
+	DRM_INFO("   status=0x%08x\n", status);
+#endif
+	return -EBUSY;
+}
+
+static int mga_do_dma_reset(drm_mga_private_t * dev_priv)
+{
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_mga_primary_buffer_t *primary = &dev_priv->prim;
+
+	DRM_DEBUG("\n");
+
+	/* The primary DMA stream should look like new right about now.
+	 */
+	primary->tail = 0;
+	primary->space = primary->size;
+	primary->last_flush = 0;
+
+	sarea_priv->last_wrap = 0;
+
+	/* FIXME: Reset counters, buffer ages etc...
+	 */
+
+	/* FIXME: What else do we need to reinitialize?  WARP stuff?
+	 */
+
+	return 0;
+}
+
+/* ================================================================
+ * Primary DMA stream
+ */
+
+void mga_do_dma_flush(drm_mga_private_t * dev_priv)
+{
+	drm_mga_primary_buffer_t *primary = &dev_priv->prim;
+	u32 head, tail;
+	u32 status = 0;
+	int i;
+	DMA_LOCALS;
+	DRM_DEBUG("\n");
+
+	/* We need to wait so that we can do a safe flush */
+	for (i = 0; i < dev_priv->usec_timeout; i++) {
+		status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
+		if (status == MGA_ENDPRDMASTS)
+			break;
+		DRM_UDELAY(1);
+	}
+
+	if (primary->tail == primary->last_flush) {
+		DRM_DEBUG("   bailing out...\n");
+		return;
+	}
+
+	tail = primary->tail + dev_priv->primary->offset;
+
+	/* We need to pad the stream between flushes, as the card
+	 * actually (partially?) reads the first of these commands.
+	 * See page 4-16 in the G400 manual, middle of the page or so.
+	 */
+	BEGIN_DMA(1);
+
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
+
+	ADVANCE_DMA();
+
+	primary->last_flush = primary->tail;
+
+	head = MGA_READ(MGA_PRIMADDRESS);
+
+	if (head <= tail) {
+		primary->space = primary->size - primary->tail;
+	} else {
+		primary->space = head - tail;
+	}
+
+	DRM_DEBUG("   head = 0x%06lx\n", head - dev_priv->primary->offset);
+	DRM_DEBUG("   tail = 0x%06lx\n", tail - dev_priv->primary->offset);
+	DRM_DEBUG("  space = 0x%06x\n", primary->space);
+
+	mga_flush_write_combine();
+	MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);
+
+	DRM_DEBUG("done.\n");
+}
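+
+/* A worked example of the space computation above (numbers are
+ * illustrative): with a 1 MB primary ring at offset 0x100000 and
+ * primary->tail = 0xc0000, the bus-address tail is 0x1c0000.  A hardware
+ * head of 0x140000 satisfies head <= tail, so space = size - primary->tail
+ * = 0x100000 - 0xc0000 = 0x40000: only the bytes up to the end of the ring
+ * count until the next wrap.  If instead head > tail, the free space is
+ * simply head - tail.
+ */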
+
+void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv)
+{
+	drm_mga_primary_buffer_t *primary = &dev_priv->prim;
+	u32 head, tail;
+	DMA_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_DMA_WRAP();
+
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
+
+	ADVANCE_DMA();
+
+	tail = primary->tail + dev_priv->primary->offset;
+
+	primary->tail = 0;
+	primary->last_flush = 0;
+	primary->last_wrap++;
+
+	head = MGA_READ(MGA_PRIMADDRESS);
+
+	if (head == dev_priv->primary->offset) {
+		primary->space = primary->size;
+	} else {
+		primary->space = head - dev_priv->primary->offset;
+	}
+
+	DRM_DEBUG("   head = 0x%06lx\n", head - dev_priv->primary->offset);
+	DRM_DEBUG("   tail = 0x%06x\n", primary->tail);
+	DRM_DEBUG("   wrap = %d\n", primary->last_wrap);
+	DRM_DEBUG("  space = 0x%06x\n", primary->space);
+
+	mga_flush_write_combine();
+	MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);
+
+	set_bit(0, &primary->wrapped);
+	DRM_DEBUG("done.\n");
+}
+
+void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv)
+{
+	drm_mga_primary_buffer_t *primary = &dev_priv->prim;
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	u32 head = dev_priv->primary->offset;
+	DRM_DEBUG("\n");
+
+	sarea_priv->last_wrap++;
+	DRM_DEBUG("   wrap = %d\n", sarea_priv->last_wrap);
+
+	mga_flush_write_combine();
+	MGA_WRITE(MGA_PRIMADDRESS, head | MGA_DMA_GENERAL);
+
+	clear_bit(0, &primary->wrapped);
+	DRM_DEBUG("done.\n");
+}
+
+/* ================================================================
+ * Freelist management
+ */
+
+#define MGA_BUFFER_USED		~0
+#define MGA_BUFFER_FREE		0
+
+#if MGA_FREELIST_DEBUG
+static void mga_freelist_print(struct drm_device * dev)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	drm_mga_freelist_t *entry;
+
+	DRM_INFO("\n");
+	DRM_INFO("current dispatch: last=0x%x done=0x%x\n",
+		 dev_priv->sarea_priv->last_dispatch,
+		 (unsigned int)(MGA_READ(MGA_PRIMADDRESS) -
+				dev_priv->primary->offset));
+	DRM_INFO("current freelist:\n");
+
+	for (entry = dev_priv->head->next; entry; entry = entry->next) {
+		DRM_INFO("   %p   idx=%2d  age=0x%x 0x%06lx\n",
+			 entry, entry->buf->idx, entry->age.head,
+			 entry->age.head - dev_priv->primary->offset);
+	}
+	DRM_INFO("\n");
+}
+#endif
+
+static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_mga_buf_priv_t *buf_priv;
+	drm_mga_freelist_t *entry;
+	int i;
+	DRM_DEBUG("count=%d\n", dma->buf_count);
+
+	dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
+	if (dev_priv->head == NULL)
+		return -ENOMEM;
+
+	memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t));
+	SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0);
+
+	for (i = 0; i < dma->buf_count; i++) {
+		buf = dma->buflist[i];
+		buf_priv = buf->dev_private;
+
+		entry = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
+		if (entry == NULL)
+			return -ENOMEM;
+
+		memset(entry, 0, sizeof(drm_mga_freelist_t));
+
+		entry->next = dev_priv->head->next;
+		entry->prev = dev_priv->head;
+		SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
+		entry->buf = buf;
+
+		if (dev_priv->head->next != NULL)
+			dev_priv->head->next->prev = entry;
+		if (entry->next == NULL)
+			dev_priv->tail = entry;
+
+		buf_priv->list_entry = entry;
+		buf_priv->discard = 0;
+		buf_priv->dispatched = 0;
+
+		dev_priv->head->next = entry;
+	}
+
+	return 0;
+}
+
+static void mga_freelist_cleanup(struct drm_device * dev)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	drm_mga_freelist_t *entry;
+	drm_mga_freelist_t *next;
+	DRM_DEBUG("\n");
+
+	entry = dev_priv->head;
+	while (entry) {
+		next = entry->next;
+		drm_free(entry, sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
+		entry = next;
+	}
+
+	dev_priv->head = dev_priv->tail = NULL;
+}
+
+#if 0
+/* FIXME: Still needed?
+ */
+static void mga_freelist_reset(struct drm_device * dev)
+{
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_mga_buf_priv_t *buf_priv;
+	int i;
+
+	for (i = 0; i < dma->buf_count; i++) {
+		buf = dma->buflist[i];
+		buf_priv = buf->dev_private;
+		SET_AGE(&buf_priv->list_entry->age, MGA_BUFFER_FREE, 0);
+	}
+}
+#endif
+
+static struct drm_buf *mga_freelist_get(struct drm_device * dev)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	drm_mga_freelist_t *next;
+	drm_mga_freelist_t *prev;
+	drm_mga_freelist_t *tail = dev_priv->tail;
+	u32 head, wrap;
+	DRM_DEBUG("\n");
+
+	head = MGA_READ(MGA_PRIMADDRESS);
+	wrap = dev_priv->sarea_priv->last_wrap;
+
+	DRM_DEBUG("   tail=0x%06lx %d\n",
+		  tail->age.head ?
+		  tail->age.head - dev_priv->primary->offset : 0,
+		  tail->age.wrap);
+	DRM_DEBUG("   head=0x%06lx %d\n",
+		  head - dev_priv->primary->offset, wrap);
+
+	if (TEST_AGE(&tail->age, head, wrap)) {
+		prev = dev_priv->tail->prev;
+		next = dev_priv->tail;
+		prev->next = NULL;
+		next->prev = next->next = NULL;
+		dev_priv->tail = prev;
+		SET_AGE(&next->age, MGA_BUFFER_USED, 0);
+		return next->buf;
+	}
+
+	DRM_DEBUG("returning NULL!\n");
+	return NULL;
+}
+
+int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	drm_mga_buf_priv_t *buf_priv = buf->dev_private;
+	drm_mga_freelist_t *head, *entry, *prev;
+
+	DRM_DEBUG("age=0x%06lx wrap=%d\n",
+		  buf_priv->list_entry->age.head -
+		  dev_priv->primary->offset, buf_priv->list_entry->age.wrap);
+
+	entry = buf_priv->list_entry;
+	head = dev_priv->head;
+
+	if (buf_priv->list_entry->age.head == MGA_BUFFER_USED) {
+		SET_AGE(&entry->age, MGA_BUFFER_FREE, 0);
+		prev = dev_priv->tail;
+		prev->next = entry;
+		entry->prev = prev;
+		entry->next = NULL;
+	} else {
+		prev = head->next;
+		head->next = entry;
+		prev->prev = entry;
+		entry->prev = head;
+		entry->next = prev;
+	}
+
+	return 0;
+}
+
+/* ================================================================
+ * DMA initialization, cleanup
+ */
+
+int mga_driver_load(struct drm_device * dev, unsigned long flags)
+{
+	drm_mga_private_t *dev_priv;
+
+	dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
+	if (!dev_priv)
+		return -ENOMEM;
+
+	dev->dev_private = (void *)dev_priv;
+	memset(dev_priv, 0, sizeof(drm_mga_private_t));
+
+	dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
+	dev_priv->chipset = flags;
+
+	dev_priv->mmio_base = drm_get_resource_start(dev, 1);
+	dev_priv->mmio_size = drm_get_resource_len(dev, 1);
+
+	dev->counters += 3;
+	dev->types[6] = _DRM_STAT_IRQ;
+	dev->types[7] = _DRM_STAT_PRIMARY;
+	dev->types[8] = _DRM_STAT_SECONDARY;
+
+	return 0;
+}
+
+#if __OS_HAS_AGP
+/**
+ * Bootstrap the driver for AGP DMA.
+ *
+ * \todo
+ * Investigate whether there is any benefit to storing the WARP microcode in
+ * AGP memory.  If not, the microcode may as well always be put in PCI
+ * memory.
+ *
+ * \todo
+ * This routine needs to set dma_bs->agp_mode to the mode actually configured
+ * in the hardware.  Looking just at the Linux AGP driver code, I don't see
+ * an easy way to determine this.
+ *
+ * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap
+ */
+static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
+				    drm_mga_dma_bootstrap_t * dma_bs)
+{
+	drm_mga_private_t *const dev_priv =
+	    (drm_mga_private_t *) dev->dev_private;
+	unsigned int warp_size = mga_warp_microcode_size(dev_priv);
+	int err;
+	unsigned offset;
+	const unsigned secondary_size = dma_bs->secondary_bin_count
+	    * dma_bs->secondary_bin_size;
+	const unsigned agp_size = (dma_bs->agp_size << 20);
+	struct drm_buf_desc req;
+	struct drm_agp_mode mode;
+	struct drm_agp_info info;
+	struct drm_agp_buffer agp_req;
+	struct drm_agp_binding bind_req;
+
+	/* Acquire AGP. */
+	err = drm_agp_acquire(dev);
+	if (err) {
+		DRM_ERROR("Unable to acquire AGP: %d\n", err);
+		return err;
+	}
+
+	err = drm_agp_info(dev, &info);
+	if (err) {
+		DRM_ERROR("Unable to get AGP info: %d\n", err);
+		return err;
+	}
+
+	mode.mode = (info.mode & ~0x07) | dma_bs->agp_mode;
+	err = drm_agp_enable(dev, mode);
+	if (err) {
+		DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
+		return err;
+	}
+
+	/* In addition to the usual AGP mode configuration, the G200 AGP cards
+	 * need to have the AGP mode "manually" set.
+	 */
+
+	if (dev_priv->chipset == MGA_CARD_TYPE_G200) {
+		if (mode.mode & 0x02) {
+			MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE);
+		} else {
+			MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE);
+		}
+	}
+
+	/* Allocate and bind AGP memory. */
+	agp_req.size = agp_size;
+	agp_req.type = 0;
+	err = drm_agp_alloc(dev, &agp_req);
+	if (err) {
+		dev_priv->agp_size = 0;
+		DRM_ERROR("Unable to allocate %uMB AGP memory\n",
+			  dma_bs->agp_size);
+		return err;
+	}
+
+	dev_priv->agp_size = agp_size;
+	dev_priv->agp_handle = agp_req.handle;
+
+	bind_req.handle = agp_req.handle;
+	bind_req.offset = 0;
+	err = drm_agp_bind(dev, &bind_req);
+	if (err) {
+		DRM_ERROR("Unable to bind AGP memory: %d\n", err);
+		return err;
+	}
+
+	/* Make drm_addbufs happy by not trying to create a mapping for less
+	 * than a page.
+	 */
+	if (warp_size < PAGE_SIZE)
+		warp_size = PAGE_SIZE;
+
+	offset = 0;
+	err = drm_addmap(dev, offset, warp_size,
+			 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp);
+	if (err) {
+		DRM_ERROR("Unable to map WARP microcode: %d\n", err);
+		return err;
+	}
+
+	offset += warp_size;
+	err = drm_addmap(dev, offset, dma_bs->primary_size,
+			 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary);
+	if (err) {
+		DRM_ERROR("Unable to map primary DMA region: %d\n", err);
+		return err;
+	}
+
+	offset += dma_bs->primary_size;
+	err = drm_addmap(dev, offset, secondary_size,
+			 _DRM_AGP, 0, &dev->agp_buffer_map);
+	if (err) {
+		DRM_ERROR("Unable to map secondary DMA region: %d\n", err);
+		return err;
+	}
+
+	(void)memset(&req, 0, sizeof(req));
+	req.count = dma_bs->secondary_bin_count;
+	req.size = dma_bs->secondary_bin_size;
+	req.flags = _DRM_AGP_BUFFER;
+	req.agp_start = offset;
+
+	err = drm_addbufs_agp(dev, &req);
+	if (err) {
+		DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
+		return err;
+	}
+
+	{
+		struct drm_map_list *_entry;
+		unsigned long agp_token = 0;
+
+		list_for_each_entry(_entry, &dev->maplist, head) {
+			if (_entry->map == dev->agp_buffer_map)
+				agp_token = _entry->user_token;
+		}
+		if (!agp_token)
+			return -EFAULT;
+
+		dev->agp_buffer_token = agp_token;
+	}
+
+	offset += secondary_size;
+	err = drm_addmap(dev, offset, agp_size - offset,
+			 _DRM_AGP, 0, &dev_priv->agp_textures);
+	if (err) {
+		DRM_ERROR("Unable to map AGP texture region %d\n", err);
+		return err;
+	}
+
+	drm_core_ioremap(dev_priv->warp, dev);
+	drm_core_ioremap(dev_priv->primary, dev);
+	drm_core_ioremap(dev->agp_buffer_map, dev);
+
+	if (!dev_priv->warp->handle ||
+	    !dev_priv->primary->handle || !dev->agp_buffer_map->handle) {
+		DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n",
+			  dev_priv->warp->handle, dev_priv->primary->handle,
+			  dev->agp_buffer_map->handle);
+		return -ENOMEM;
+	}
+
+	dev_priv->dma_access = MGA_PAGPXFER;
+	dev_priv->wagp_enable = MGA_WAGP_ENABLE;
+
+	DRM_INFO("Initialized card for AGP DMA.\n");
+	return 0;
+}
+#else
+static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
+				    drm_mga_dma_bootstrap_t * dma_bs)
+{
+	return -EINVAL;
+}
+#endif
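+
+/* The single AGP allocation made in mga_do_agp_dma_bootstrap() is carved
+ * up with a running offset (a sketch; the sizes come from the bootstrap
+ * request):
+ *
+ *   0 .................... WARP microcode (warp_size, at least one page)
+ *   + warp_size .......... primary DMA ring (primary_size)
+ *   + primary_size ....... secondary DMA buffers (bin_count * bin_size)
+ *   + secondary_size ..... AGP texture region (rest of agp_size)
+ */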
+
+/**
+ * Bootstrap the driver for PCI DMA.
+ *
+ * \todo
+ * The algorithm for decreasing the size of the primary DMA buffer could be
+ * better.  The size should be rounded up to the nearest page size, and the
+ * request size should then be decreased by a single page on each pass
+ * through the loop.
+ *
+ * \todo
+ * Determine whether the maximum address passed to drm_pci_alloc is correct.
+ * The same goes for drm_addbufs_pci.
+ *
+ * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
+ */
+static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
+				    drm_mga_dma_bootstrap_t * dma_bs)
+{
+	drm_mga_private_t *const dev_priv =
+	    (drm_mga_private_t *) dev->dev_private;
+	unsigned int warp_size = mga_warp_microcode_size(dev_priv);
+	unsigned int primary_size;
+	unsigned int bin_count;
+	int err;
+	struct drm_buf_desc req;
+
+	if (dev->dma == NULL) {
+		DRM_ERROR("dev->dma is NULL\n");
+		return -EFAULT;
+	}
+
+	/* Make drm_addbufs happy by not trying to create a mapping for less
+	 * than a page.
+	 */
+	if (warp_size < PAGE_SIZE)
+		warp_size = PAGE_SIZE;
+
+	/* The proper alignment is 0x100 for this mapping */
+	err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
+			 _DRM_READ_ONLY, &dev_priv->warp);
+	if (err != 0) {
+		DRM_ERROR("Unable to create mapping for WARP microcode: %d\n",
+			  err);
+		return err;
+	}
+
+	/* Other than the bottom two bits being used to encode other
+	 * information, there don't appear to be any restrictions on the
+	 * alignment of the primary or secondary DMA buffers.
+	 */
+
+	for (primary_size = dma_bs->primary_size; primary_size != 0;
+	     primary_size >>= 1) {
+		/* The proper alignment for this mapping is 0x04 */
+		err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
+				 _DRM_READ_ONLY, &dev_priv->primary);
+		if (!err)
+			break;
+	}
+
+	if (err != 0) {
+		DRM_ERROR("Unable to allocate primary DMA region: %d\n", err);
+		return -ENOMEM;
+	}
+
+	if (dev_priv->primary->size != dma_bs->primary_size) {
+		DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n",
+			 dma_bs->primary_size,
+			 (unsigned)dev_priv->primary->size);
+		dma_bs->primary_size = dev_priv->primary->size;
+	}
+
+	for (bin_count = dma_bs->secondary_bin_count; bin_count > 0;
+	     bin_count--) {
+		(void)memset(&req, 0, sizeof(req));
+		req.count = bin_count;
+		req.size = dma_bs->secondary_bin_size;
+
+		err = drm_addbufs_pci(dev, &req);
+		if (!err) {
+			break;
+		}
+	}
+
+	if (bin_count == 0) {
+		DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
+		return err;
+	}
+
+	if (bin_count != dma_bs->secondary_bin_count) {
+		DRM_INFO("Secondary PCI DMA buffer bin count reduced from %u "
+			 "to %u.\n", dma_bs->secondary_bin_count, bin_count);
+
+		dma_bs->secondary_bin_count = bin_count;
+	}
+
+	dev_priv->dma_access = 0;
+	dev_priv->wagp_enable = 0;
+
+	dma_bs->agp_mode = 0;
+
+	DRM_INFO("Initialized card for PCI DMA.\n");
+	return 0;
+}
+
+static int mga_do_dma_bootstrap(struct drm_device * dev,
+				drm_mga_dma_bootstrap_t * dma_bs)
+{
+	const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
+	int err = 0;
+	drm_mga_private_t *const dev_priv =
+	    (drm_mga_private_t *) dev->dev_private;
+
+	dev_priv->used_new_dma_init = 1;
+
+	/* The first steps are the same for both PCI and AGP based DMA.  Map
+	 * the card's MMIO registers and map a status page.
+	 */
+	err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size,
+			 _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio);
+	if (err) {
+		DRM_ERROR("Unable to map MMIO region: %d\n", err);
+		return err;
+	}
+
+	err = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
+			 _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
+			 &dev_priv->status);
+	if (err) {
+		DRM_ERROR("Unable to map status region: %d\n", err);
+		return err;
+	}
+
+	/* The DMA initialization procedure is slightly different for PCI and
+	 * AGP cards.  AGP cards just allocate a large block of AGP memory and
+	 * carve off portions of it for internal uses.  The remaining memory
+	 * is returned to user-mode to be used for AGP textures.
+	 */
+	if (is_agp) {
+		err = mga_do_agp_dma_bootstrap(dev, dma_bs);
+	}
+
+	/* If we attempted to initialize the card for AGP DMA but failed,
+	 * clean up any mess that may have been created.
+	 */
+
+	if (err) {
+		mga_do_cleanup_dma(dev, MINIMAL_CLEANUP);
+	}
+
+	/* Not only do we want to try to initialize PCI cards for PCI DMA,
+	 * but we also try to initialize AGP cards that could not be
+	 * initialized for AGP DMA.  This covers the case where we have an AGP
+	 * card in a system with an unsupported AGP chipset.  In that case the
+	 * card will be detected as AGP, but we won't be able to allocate any
+	 * AGP memory, etc.
+	 */
+
+	if (!is_agp || err) {
+		err = mga_do_pci_dma_bootstrap(dev, dma_bs);
+	}
+
+	return err;
+}
+
+int mga_dma_bootstrap(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	drm_mga_dma_bootstrap_t *bootstrap = data;
+	int err;
+	static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
+	const drm_mga_private_t *const dev_priv =
+		(drm_mga_private_t *) dev->dev_private;
+
+	err = mga_do_dma_bootstrap(dev, bootstrap);
+	if (err) {
+		mga_do_cleanup_dma(dev, FULL_CLEANUP);
+		return err;
+	}
+
+	if (dev_priv->agp_textures != NULL) {
+		bootstrap->texture_handle = dev_priv->agp_textures->offset;
+		bootstrap->texture_size = dev_priv->agp_textures->size;
+	} else {
+		bootstrap->texture_handle = 0;
+		bootstrap->texture_size = 0;
+	}
+
+	bootstrap->agp_mode = modes[bootstrap->agp_mode & 0x07];
+
+	return err;
+}
+
+static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
+{
+	drm_mga_private_t *dev_priv;
+	int ret;
+	DRM_DEBUG("\n");
+
+	dev_priv = dev->dev_private;
+
+	if (init->sgram) {
+		dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
+	} else {
+		dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
+	}
+	dev_priv->maccess = init->maccess;
+
+	dev_priv->fb_cpp = init->fb_cpp;
+	dev_priv->front_offset = init->front_offset;
+	dev_priv->front_pitch = init->front_pitch;
+	dev_priv->back_offset = init->back_offset;
+	dev_priv->back_pitch = init->back_pitch;
+
+	dev_priv->depth_cpp = init->depth_cpp;
+	dev_priv->depth_offset = init->depth_offset;
+	dev_priv->depth_pitch = init->depth_pitch;
+
+	/* FIXME: Need to support AGP textures...
+	 */
+	dev_priv->texture_offset = init->texture_offset[0];
+	dev_priv->texture_size = init->texture_size[0];
+
+	dev_priv->sarea = drm_getsarea(dev);
+	if (!dev_priv->sarea) {
+		DRM_ERROR("failed to find sarea!\n");
+		return -EINVAL;
+	}
+
+	if (!dev_priv->used_new_dma_init) {
+
+		dev_priv->dma_access = MGA_PAGPXFER;
+		dev_priv->wagp_enable = MGA_WAGP_ENABLE;
+
+		dev_priv->status = drm_core_findmap(dev, init->status_offset);
+		if (!dev_priv->status) {
+			DRM_ERROR("failed to find status page!\n");
+			return -EINVAL;
+		}
+		dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
+		if (!dev_priv->mmio) {
+			DRM_ERROR("failed to find mmio region!\n");
+			return -EINVAL;
+		}
+		dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
+		if (!dev_priv->warp) {
+			DRM_ERROR("failed to find warp microcode region!\n");
+			return -EINVAL;
+		}
+		dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
+		if (!dev_priv->primary) {
+			DRM_ERROR("failed to find primary dma region!\n");
+			return -EINVAL;
+		}
+		dev->agp_buffer_token = init->buffers_offset;
+		dev->agp_buffer_map =
+		    drm_core_findmap(dev, init->buffers_offset);
+		if (!dev->agp_buffer_map) {
+			DRM_ERROR("failed to find dma buffer region!\n");
+			return -EINVAL;
+		}
+
+		drm_core_ioremap(dev_priv->warp, dev);
+		drm_core_ioremap(dev_priv->primary, dev);
+		drm_core_ioremap(dev->agp_buffer_map, dev);
+	}
+
+	dev_priv->sarea_priv =
+	    (drm_mga_sarea_t *) ((u8 *) dev_priv->sarea->handle +
+				 init->sarea_priv_offset);
+
+	if (!dev_priv->warp->handle ||
+	    !dev_priv->primary->handle ||
+	    ((dev_priv->dma_access != 0) &&
+	     ((dev->agp_buffer_map == NULL) ||
+	      (dev->agp_buffer_map->handle == NULL)))) {
+		DRM_ERROR("failed to ioremap agp regions!\n");
+		return -ENOMEM;
+	}
+
+	ret = mga_warp_install_microcode(dev_priv);
+	if (ret < 0) {
+		DRM_ERROR("failed to install WARP ucode!: %d\n", ret);
+		return ret;
+	}
+
+	ret = mga_warp_init(dev_priv);
+	if (ret < 0) {
+		DRM_ERROR("failed to init WARP engine!: %d\n", ret);
+		return ret;
+	}
+
+	dev_priv->prim.status = (u32 *) dev_priv->status->handle;
+
+	mga_do_wait_for_idle(dev_priv);
+
+	/* Init the primary DMA registers.
+	 */
+	MGA_WRITE(MGA_PRIMADDRESS, dev_priv->primary->offset | MGA_DMA_GENERAL);
+#if 0
+	MGA_WRITE(MGA_PRIMPTR, virt_to_bus((void *)dev_priv->prim.status) | MGA_PRIMPTREN0 |	/* Soft trap, SECEND, SETUPEND */
+		  MGA_PRIMPTREN1);	/* DWGSYNC */
+#endif
+
+	dev_priv->prim.start = (u8 *) dev_priv->primary->handle;
+	dev_priv->prim.end = ((u8 *) dev_priv->primary->handle
+			      + dev_priv->primary->size);
+	dev_priv->prim.size = dev_priv->primary->size;
+
+	dev_priv->prim.tail = 0;
+	dev_priv->prim.space = dev_priv->prim.size;
+	dev_priv->prim.wrapped = 0;
+
+	dev_priv->prim.last_flush = 0;
+	dev_priv->prim.last_wrap = 0;
+
+	dev_priv->prim.high_mark = 256 * DMA_BLOCK_SIZE;
+
+	dev_priv->prim.status[0] = dev_priv->primary->offset;
+	dev_priv->prim.status[1] = 0;
+
+	dev_priv->sarea_priv->last_wrap = 0;
+	dev_priv->sarea_priv->last_frame.head = 0;
+	dev_priv->sarea_priv->last_frame.wrap = 0;
+
+	if (mga_freelist_init(dev, dev_priv) < 0) {
+		DRM_ERROR("could not initialize freelist\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
+{
+	int err = 0;
+	DRM_DEBUG("\n");
+
+	/* Make sure interrupts are disabled here because the uninstall ioctl
+	 * may not have been called from userspace, and once dev_private
+	 * has been freed it is too late.
+	 */
+	if (dev->irq_enabled)
+		drm_irq_uninstall(dev);
+
+	if (dev->dev_private) {
+		drm_mga_private_t *dev_priv = dev->dev_private;
+
+		if ((dev_priv->warp != NULL)
+		    && (dev_priv->warp->type != _DRM_CONSISTENT))
+			drm_core_ioremapfree(dev_priv->warp, dev);
+
+		if ((dev_priv->primary != NULL)
+		    && (dev_priv->primary->type != _DRM_CONSISTENT))
+			drm_core_ioremapfree(dev_priv->primary, dev);
+
+		if (dev->agp_buffer_map != NULL)
+			drm_core_ioremapfree(dev->agp_buffer_map, dev);
+
+		if (dev_priv->used_new_dma_init) {
+#if __OS_HAS_AGP
+			if (dev_priv->agp_handle != 0) {
+				struct drm_agp_binding unbind_req;
+				struct drm_agp_buffer free_req;
+
+				unbind_req.handle = dev_priv->agp_handle;
+				drm_agp_unbind(dev, &unbind_req);
+
+				free_req.handle = dev_priv->agp_handle;
+				drm_agp_free(dev, &free_req);
+
+				dev_priv->agp_textures = NULL;
+				dev_priv->agp_size = 0;
+				dev_priv->agp_handle = 0;
+			}
+
+			if ((dev->agp != NULL) && dev->agp->acquired) {
+				err = drm_agp_release(dev);
+			}
+#endif
+		}
+
+		dev_priv->warp = NULL;
+		dev_priv->primary = NULL;
+		dev_priv->sarea = NULL;
+		dev_priv->sarea_priv = NULL;
+		dev->agp_buffer_map = NULL;
+
+		if (full_cleanup) {
+			dev_priv->mmio = NULL;
+			dev_priv->status = NULL;
+			dev_priv->used_new_dma_init = 0;
+		}
+
+		memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
+		dev_priv->warp_pipe = 0;
+		memset(dev_priv->warp_pipe_phys, 0,
+		       sizeof(dev_priv->warp_pipe_phys));
+
+		if (dev_priv->head != NULL) {
+			mga_freelist_cleanup(dev);
+		}
+	}
+
+	return err;
+}
+
+int mga_dma_init(struct drm_device *dev, void *data,
+		 struct drm_file *file_priv)
+{
+	drm_mga_init_t *init = data;
+	int err;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	switch (init->func) {
+	case MGA_INIT_DMA:
+		err = mga_do_init_dma(dev, init);
+		if (err) {
+			(void)mga_do_cleanup_dma(dev, FULL_CLEANUP);
+		}
+		return err;
+	case MGA_CLEANUP_DMA:
+		return mga_do_cleanup_dma(dev, FULL_CLEANUP);
+	}
+
+	return -EINVAL;
+}
+
+/* ================================================================
+ * Primary DMA stream management
+ */
+
+int mga_dma_flush(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
+{
+	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
+	struct drm_lock *lock = data;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DRM_DEBUG("%s%s%s\n",
+		  (lock->flags & _DRM_LOCK_FLUSH) ? "flush, " : "",
+		  (lock->flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "",
+		  (lock->flags & _DRM_LOCK_QUIESCENT) ? "idle, " : "");
+
+	WRAP_WAIT_WITH_RETURN(dev_priv);
+
+	if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) {
+		mga_do_dma_flush(dev_priv);
+	}
+
+	if (lock->flags & _DRM_LOCK_QUIESCENT) {
+#if MGA_DMA_DEBUG
+		int ret = mga_do_wait_for_idle(dev_priv);
+		if (ret < 0)
+			DRM_INFO("-EBUSY\n");
+		return ret;
+#else
+		return mga_do_wait_for_idle(dev_priv);
+#endif
+	} else {
+		return 0;
+	}
+}
+
+int mga_dma_reset(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
+{
+	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	return mga_do_dma_reset(dev_priv);
+}
+
+/* ================================================================
+ * DMA buffer management
+ */
+
+static int mga_dma_get_buffers(struct drm_device * dev,
+			       struct drm_file *file_priv, struct drm_dma * d)
+{
+	struct drm_buf *buf;
+	int i;
+
+	for (i = d->granted_count; i < d->request_count; i++) {
+		buf = mga_freelist_get(dev);
+		if (!buf)
+			return -EAGAIN;
+
+		buf->file_priv = file_priv;
+
+		if (DRM_COPY_TO_USER(&d->request_indices[i],
+				     &buf->idx, sizeof(buf->idx)))
+			return -EFAULT;
+		if (DRM_COPY_TO_USER(&d->request_sizes[i],
+				     &buf->total, sizeof(buf->total)))
+			return -EFAULT;
+
+		d->granted_count++;
+	}
+	return 0;
+}
+
+int mga_dma_buffers(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
+	struct drm_dma *d = data;
+	int ret = 0;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	/* Please don't send us buffers.
+	 */
+	if (d->send_count != 0) {
+		DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
+			  DRM_CURRENTPID, d->send_count);
+		return -EINVAL;
+	}
+
+	/* We'll send you buffers.
+	 */
+	if (d->request_count < 0 || d->request_count > dma->buf_count) {
+		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
+			  DRM_CURRENTPID, d->request_count, dma->buf_count);
+		return -EINVAL;
+	}
+
+	WRAP_TEST_WITH_RETURN(dev_priv);
+
+	d->granted_count = 0;
+
+	if (d->request_count) {
+		ret = mga_dma_get_buffers(dev, file_priv, d);
+	}
+
+	return ret;
+}
+
+/**
+ * Called just before the module is unloaded.
+ */
+int mga_driver_unload(struct drm_device * dev)
+{
+	drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
+	dev->dev_private = NULL;
+
+	return 0;
+}
+
+/**
+ * Called when the last opener of the device is closed.
+ */
+void mga_driver_lastclose(struct drm_device * dev)
+{
+	mga_do_cleanup_dma(dev, FULL_CLEANUP);
+}
+
+int mga_driver_dma_quiescent(struct drm_device * dev)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	return mga_do_wait_for_idle(dev_priv);
+}
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
new file mode 100644
index 000000000000..5572939fc7d1
--- /dev/null
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -0,0 +1,141 @@
+/* mga_drv.c -- Matrox G200/G400 driver -*- linux-c -*-
+ * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Rickard E. (Rik) Faith <faith@valinux.com>
+ *    Gareth Hughes <gareth@valinux.com>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "mga_drm.h"
+#include "mga_drv.h"
+
+#include "drm_pciids.h"
+
+static int mga_driver_device_is_agp(struct drm_device * dev);
+
+static struct pci_device_id pciidlist[] = {
+	mga_PCI_IDS
+};
+
+static struct drm_driver driver = {
+	.driver_features =
+	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
+	    DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
+	    DRIVER_IRQ_VBL,
+	.dev_priv_size = sizeof(drm_mga_buf_priv_t),
+	.load = mga_driver_load,
+	.unload = mga_driver_unload,
+	.lastclose = mga_driver_lastclose,
+	.dma_quiescent = mga_driver_dma_quiescent,
+	.device_is_agp = mga_driver_device_is_agp,
+	.vblank_wait = mga_driver_vblank_wait,
+	.irq_preinstall = mga_driver_irq_preinstall,
+	.irq_postinstall = mga_driver_irq_postinstall,
+	.irq_uninstall = mga_driver_irq_uninstall,
+	.irq_handler = mga_driver_irq_handler,
+	.reclaim_buffers = drm_core_reclaim_buffers,
+	.get_map_ofs = drm_core_get_map_ofs,
+	.get_reg_ofs = drm_core_get_reg_ofs,
+	.ioctls = mga_ioctls,
+	.dma_ioctl = mga_dma_buffers,
+	.fops = {
+		 .owner = THIS_MODULE,
+		 .open = drm_open,
+		 .release = drm_release,
+		 .ioctl = drm_ioctl,
+		 .mmap = drm_mmap,
+		 .poll = drm_poll,
+		 .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+		 .compat_ioctl = mga_compat_ioctl,
+#endif
+		 },
+	.pci_driver = {
+		 .name = DRIVER_NAME,
+		 .id_table = pciidlist,
+	},
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static int __init mga_init(void)
+{
+	driver.num_ioctls = mga_max_ioctl;
+	return drm_init(&driver);
+}
+
+static void __exit mga_exit(void)
+{
+	drm_exit(&driver);
+}
+
+module_init(mga_init);
+module_exit(mga_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
+
+/**
+ * Determine if the device really is AGP or not.
+ *
+ * In addition to the usual tests performed by \c drm_device_is_agp, this
+ * function detects PCI G450 cards that appear to the system exactly like
+ * AGP G450 cards.
+ *
+ * \param dev   The device to be tested.
+ *
+ * \returns
+ * If the device is a PCI G450, zero is returned.  Otherwise 2 is returned.
+ */
+static int mga_driver_device_is_agp(struct drm_device * dev)
+{
+	const struct pci_dev *const pdev = dev->pdev;
+
+	/* There are PCI versions of the G450.  These cards have the
+	 * same PCI ID as the AGP G450, but have an additional PCI-to-PCI
+	 * bridge chip.  We detect these cards, which are not currently
+	 * supported by this driver, by looking at the device ID of the
+	 * bus the "card" is on.  If vendor is 0x3388 (Hint Corp) and the
+	 * device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the
+	 * device.
+	 */
+
+	if ((pdev->device == 0x0525) && pdev->bus->self
+	    && (pdev->bus->self->vendor == 0x3388)
+	    && (pdev->bus->self->device == 0x0021)) {
+		return 0;
+	}
+
+	return 2;
+}
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
new file mode 100644
index 000000000000..f6ebd24bd587
--- /dev/null
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -0,0 +1,687 @@
+/* mga_drv.h -- Private header for the Matrox G200/G400 driver -*- linux-c -*-
+ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Gareth Hughes <gareth@valinux.com>
+ */
+
+#ifndef __MGA_DRV_H__
+#define __MGA_DRV_H__
+
+/* General customization:
+ */
+
+#define DRIVER_AUTHOR		"Gareth Hughes, VA Linux Systems Inc."
+
+#define DRIVER_NAME		"mga"
+#define DRIVER_DESC		"Matrox G200/G400"
+#define DRIVER_DATE		"20051102"
+
+#define DRIVER_MAJOR		3
+#define DRIVER_MINOR		2
+#define DRIVER_PATCHLEVEL	1
+
+typedef struct drm_mga_primary_buffer {
+	u8 *start;
+	u8 *end;
+	int size;
+
+	u32 tail;
+	int space;
+	volatile long wrapped;
+
+	volatile u32 *status;
+
+	u32 last_flush;
+	u32 last_wrap;
+
+	u32 high_mark;
+} drm_mga_primary_buffer_t;
+
+typedef struct drm_mga_freelist {
+	struct drm_mga_freelist *next;
+	struct drm_mga_freelist *prev;
+	drm_mga_age_t age;
+	struct drm_buf *buf;
+} drm_mga_freelist_t;
+
+typedef struct {
+	drm_mga_freelist_t *list_entry;
+	int discard;
+	int dispatched;
+} drm_mga_buf_priv_t;
+
+typedef struct drm_mga_private {
+	drm_mga_primary_buffer_t prim;
+	drm_mga_sarea_t *sarea_priv;
+
+	drm_mga_freelist_t *head;
+	drm_mga_freelist_t *tail;
+
+	unsigned int warp_pipe;
+	unsigned long warp_pipe_phys[MGA_MAX_WARP_PIPES];
+
+	int chipset;
+	int usec_timeout;
+
+	/**
+	 * If set, the new DMA initialization sequence was used.  This is
+	 * primarily used to select how the driver should uninitialize its
+	 * internal DMA structures.
+	 */
+	int used_new_dma_init;
+
+	/**
+	 * If AGP memory is used for DMA buffers, this will be the value
+	 * \c MGA_PAGPXFER.  Otherwise, it will be zero (for a PCI transfer).
+	 */
+	u32 dma_access;
+
+	/**
+	 * If AGP memory is used for DMA buffers, this will be the value
+	 * \c MGA_WAGP_ENABLE.  Otherwise, it will be zero (for a PCI
+	 * transfer).
+	 */
+	u32 wagp_enable;
+
+	/**
+	 * \name MMIO region parameters.
+	 *
+	 * \sa drm_mga_private_t::mmio
+	 */
+	/*@{ */
+	u32 mmio_base;		   /**< Bus address of base of MMIO. */
+	u32 mmio_size;		   /**< Size of the MMIO region. */
+	/*@} */
+
+	u32 clear_cmd;
+	u32 maccess;
+
+	wait_queue_head_t fence_queue;
+	atomic_t last_fence_retired;
+	u32 next_fence_to_post;
+
+	unsigned int fb_cpp;
+	unsigned int front_offset;
+	unsigned int front_pitch;
+	unsigned int back_offset;
+	unsigned int back_pitch;
+
+	unsigned int depth_cpp;
+	unsigned int depth_offset;
+	unsigned int depth_pitch;
+
+	unsigned int texture_offset;
+	unsigned int texture_size;
+
+	drm_local_map_t *sarea;
+	drm_local_map_t *mmio;
+	drm_local_map_t *status;
+	drm_local_map_t *warp;
+	drm_local_map_t *primary;
+	drm_local_map_t *agp_textures;
+
+	unsigned long agp_handle;
+	unsigned int agp_size;
+} drm_mga_private_t;
+
+extern struct drm_ioctl_desc mga_ioctls[];
+extern int mga_max_ioctl;
+
+				/* mga_dma.c */
+extern int mga_dma_bootstrap(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv);
+extern int mga_dma_init(struct drm_device *dev, void *data,
+			struct drm_file *file_priv);
+extern int mga_dma_flush(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+extern int mga_dma_reset(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
+extern int mga_dma_buffers(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv);
+extern int mga_driver_load(struct drm_device *dev, unsigned long flags);
+extern int mga_driver_unload(struct drm_device * dev);
+extern void mga_driver_lastclose(struct drm_device * dev);
+extern int mga_driver_dma_quiescent(struct drm_device * dev);
+
+extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv);
+
+extern void mga_do_dma_flush(drm_mga_private_t * dev_priv);
+extern void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv);
+extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv);
+
+extern int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf);
+
+				/* mga_warp.c */
+extern unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv);
+extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv);
+extern int mga_warp_init(drm_mga_private_t * dev_priv);
+
+				/* mga_irq.c */
+extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence);
+extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
+extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
+extern void mga_driver_irq_preinstall(struct drm_device * dev);
+extern void mga_driver_irq_postinstall(struct drm_device * dev);
+extern void mga_driver_irq_uninstall(struct drm_device * dev);
+extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
+			     unsigned long arg);
+
+#define mga_flush_write_combine()	DRM_WRITEMEMORYBARRIER()
+
+#if defined(__linux__) && defined(__alpha__)
+#define MGA_BASE( reg )		((unsigned long)(dev_priv->mmio->handle))
+#define MGA_ADDR( reg )		(MGA_BASE(reg) + reg)
+
+#define MGA_DEREF( reg )	*(volatile u32 *)MGA_ADDR( reg )
+#define MGA_DEREF8( reg )	*(volatile u8 *)MGA_ADDR( reg )
+
+#define MGA_READ( reg )		(_MGA_READ((u32 *)MGA_ADDR(reg)))
+#define MGA_READ8( reg )	(_MGA_READ((u8 *)MGA_ADDR(reg)))
+#define MGA_WRITE( reg, val )	do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF( reg ) = val; } while (0)
+#define MGA_WRITE8( reg, val )  do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF8( reg ) = val; } while (0)
+
+static inline u32 _MGA_READ(u32 * addr)
+{
+	DRM_MEMORYBARRIER();
+	return *(volatile u32 *)addr;
+}
+#else
+#define MGA_READ8( reg )	DRM_READ8(dev_priv->mmio, (reg))
+#define MGA_READ( reg )		DRM_READ32(dev_priv->mmio, (reg))
+#define MGA_WRITE8( reg, val )  DRM_WRITE8(dev_priv->mmio, (reg), (val))
+#define MGA_WRITE( reg, val )	DRM_WRITE32(dev_priv->mmio, (reg), (val))
+#endif
+
+#define DWGREG0		0x1c00
+#define DWGREG0_END	0x1dff
+#define DWGREG1		0x2c00
+#define DWGREG1_END	0x2dff
+
+#define ISREG0(r)	(r >= DWGREG0 && r <= DWGREG0_END)
+#define DMAREG0(r)	(u8)((r - DWGREG0) >> 2)
+#define DMAREG1(r)	(u8)(((r - DWGREG1) >> 2) | 0x80)
+#define DMAREG(r)	(ISREG0(r) ? DMAREG0(r) : DMAREG1(r))
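+
+/* Example of the encoding above: MGA_DMAPAD (0x1c54) falls in the DWGREG0
+ * window, so DMAREG(MGA_DMAPAD) = (0x1c54 - 0x1c00) >> 2 = 0x15; registers
+ * in the 0x2c00 window get the same treatment with bit 7 set.
+ */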
+
+/* ================================================================
+ * Helper macros...
+ */
+
+#define MGA_EMIT_STATE( dev_priv, dirty )				\
+do {									\
+	if ( (dirty) & ~MGA_UPLOAD_CLIPRECTS ) {			\
+		if ( dev_priv->chipset >= MGA_CARD_TYPE_G400 ) {	\
+			mga_g400_emit_state( dev_priv );		\
+		} else {						\
+			mga_g200_emit_state( dev_priv );		\
+		}							\
+	}								\
+} while (0)
+
+#define WRAP_TEST_WITH_RETURN( dev_priv )				\
+do {									\
+	if ( test_bit( 0, &dev_priv->prim.wrapped ) ) {			\
+		if ( mga_is_idle( dev_priv ) ) {			\
+			mga_do_dma_wrap_end( dev_priv );		\
+		} else if ( dev_priv->prim.space <			\
+			    dev_priv->prim.high_mark ) {		\
+			if ( MGA_DMA_DEBUG )				\
+				DRM_INFO( "wrap...\n");		\
+			return -EBUSY;			\
+		}							\
+	}								\
+} while (0)
+
+#define WRAP_WAIT_WITH_RETURN( dev_priv )				\
+do {									\
+	if ( test_bit( 0, &dev_priv->prim.wrapped ) ) {			\
+		if ( mga_do_wait_for_idle( dev_priv ) < 0 ) {		\
+			if ( MGA_DMA_DEBUG )				\
+				DRM_INFO( "wrap...\n");		\
+			return -EBUSY;			\
+		}							\
+		mga_do_dma_wrap_end( dev_priv );			\
+	}								\
+} while (0)
+
+/* ================================================================
+ * Primary DMA command stream
+ */
+
+#define MGA_VERBOSE	0
+
+#define DMA_LOCALS	unsigned int write; volatile u8 *prim;
+
+#define DMA_BLOCK_SIZE	(5 * sizeof(u32))
+
+#define BEGIN_DMA( n )							\
+do {									\
+	if ( MGA_VERBOSE ) {						\
+		DRM_INFO( "BEGIN_DMA( %d )\n", (n) );		\
+		DRM_INFO( "   space=0x%x req=0x%Zx\n",			\
+			  dev_priv->prim.space, (n) * DMA_BLOCK_SIZE );	\
+	}								\
+	prim = dev_priv->prim.start;					\
+	write = dev_priv->prim.tail;					\
+} while (0)
+
+#define BEGIN_DMA_WRAP()						\
+do {									\
+	if ( MGA_VERBOSE ) {						\
+		DRM_INFO( "BEGIN_DMA()\n" );				\
+		DRM_INFO( "   space=0x%x\n", dev_priv->prim.space );	\
+	}								\
+	prim = dev_priv->prim.start;					\
+	write = dev_priv->prim.tail;					\
+} while (0)
+
+#define ADVANCE_DMA()							\
+do {									\
+	dev_priv->prim.tail = write;					\
+	if ( MGA_VERBOSE ) {						\
+		DRM_INFO( "ADVANCE_DMA() tail=0x%05x sp=0x%x\n",	\
+			  write, dev_priv->prim.space );		\
+	}								\
+} while (0)
+
+#define FLUSH_DMA()							\
+do {									\
+	if ( 0 ) {							\
+		DRM_INFO( "\n" );					\
+		DRM_INFO( "   tail=0x%06x head=0x%06lx\n",		\
+			  dev_priv->prim.tail,				\
+			  MGA_READ( MGA_PRIMADDRESS ) -			\
+			  dev_priv->primary->offset );			\
+	}								\
+	if ( !test_bit( 0, &dev_priv->prim.wrapped ) ) {		\
+		if ( dev_priv->prim.space <				\
+		     dev_priv->prim.high_mark ) {			\
+			mga_do_dma_wrap_start( dev_priv );		\
+		} else {						\
+			mga_do_dma_flush( dev_priv );			\
+		}							\
+	}								\
+} while (0)
+
+/* Never use this, always use DMA_BLOCK(...) for primary DMA output.
+ */
+#define DMA_WRITE( offset, val )					\
+do {									\
+	if ( MGA_VERBOSE ) {						\
+		DRM_INFO( "   DMA_WRITE( 0x%08x ) at 0x%04Zx\n",	\
+			  (u32)(val), write + (offset) * sizeof(u32) );	\
+	}								\
+	*(volatile u32 *)(prim + write + (offset) * sizeof(u32)) = val;	\
+} while (0)
+
+#define DMA_BLOCK( reg0, val0, reg1, val1, reg2, val2, reg3, val3 )	\
+do {									\
+	DMA_WRITE( 0, ((DMAREG( reg0 ) << 0) |				\
+		       (DMAREG( reg1 ) << 8) |				\
+		       (DMAREG( reg2 ) << 16) |				\
+		       (DMAREG( reg3 ) << 24)) );			\
+	DMA_WRITE( 1, val0 );						\
+	DMA_WRITE( 2, val1 );						\
+	DMA_WRITE( 3, val2 );						\
+	DMA_WRITE( 4, val3 );						\
+	write += DMA_BLOCK_SIZE;					\
+} while (0)
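+
+#if 0
+/* Illustrative only (never compiled): the canonical emit sequence built
+ * from the macros above, mirroring mga_do_dma_flush() in mga_dma.c.  The
+ * function name is made up for the sketch.
+ */
+static void mga_example_emit_pad(drm_mga_private_t * dev_priv)
+{
+	DMA_LOCALS;
+
+	BEGIN_DMA(1);
+
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
+
+	ADVANCE_DMA();
+	FLUSH_DMA();
+}
+#endif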
+
+/* Buffer aging via primary DMA stream head pointer.
+ */
+
+#define SET_AGE( age, h, w )						\
+do {									\
+	(age)->head = h;						\
+	(age)->wrap = w;						\
+} while (0)
+
+#define TEST_AGE( age, h, w )		( (age)->wrap < w ||		\
+					  ( (age)->wrap == w &&		\
+					    (age)->head < h ) )
+
+#define AGE_BUFFER( buf_priv )						\
+do {									\
+	drm_mga_freelist_t *entry = (buf_priv)->list_entry;		\
+	if ( (buf_priv)->dispatched ) {					\
+		entry->age.head = (dev_priv->prim.tail +		\
+				   dev_priv->primary->offset);		\
+		entry->age.wrap = dev_priv->sarea_priv->last_wrap;	\
+	} else {							\
+		entry->age.head = 0;					\
+		entry->age.wrap = 0;					\
+	}								\
+} while (0)
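+
+/* A sketch of how the aging macros are used by the freelist code in
+ * mga_dma.c: AGE_BUFFER() stamps a dispatched buffer with the current
+ * primary tail (as a bus address) and wrap count, and mga_freelist_get()
+ * only reclaims it once TEST_AGE(&entry->age, MGA_READ(MGA_PRIMADDRESS),
+ * sarea_priv->last_wrap) reports that the hardware head has moved past
+ * that stamp.
+ */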
+
+#define MGA_ENGINE_IDLE_MASK		(MGA_SOFTRAPEN |		\
+					 MGA_DWGENGSTS |		\
+					 MGA_ENDPRDMASTS)
+#define MGA_DMA_IDLE_MASK		(MGA_SOFTRAPEN |		\
+					 MGA_ENDPRDMASTS)
+
+#define MGA_DMA_DEBUG			0
+
+/* A reduced set of the mga registers.
+ */
+#define MGA_CRTC_INDEX			0x1fd4
+#define MGA_CRTC_DATA			0x1fd5
+
+/* CRTC11 */
+#define MGA_VINTCLR			(1 << 4)
+#define MGA_VINTEN			(1 << 5)
+
+#define MGA_ALPHACTRL			0x2c7c
+#define MGA_AR0				0x1c60
+#define MGA_AR1				0x1c64
+#define MGA_AR2				0x1c68
+#define MGA_AR3				0x1c6c
+#define MGA_AR4				0x1c70
+#define MGA_AR5				0x1c74
+#define MGA_AR6				0x1c78
+
+#define MGA_CXBNDRY			0x1c80
+#define MGA_CXLEFT			0x1ca0
+#define MGA_CXRIGHT			0x1ca4
+
+#define MGA_DMAPAD			0x1c54
+#define MGA_DSTORG			0x2cb8
+#define MGA_DWGCTL			0x1c00
+#	define MGA_OPCOD_MASK			(15 << 0)
+#	define MGA_OPCOD_TRAP			(4 << 0)
+#	define MGA_OPCOD_TEXTURE_TRAP		(6 << 0)
+#	define MGA_OPCOD_BITBLT			(8 << 0)
+#	define MGA_OPCOD_ILOAD			(9 << 0)
+#	define MGA_ATYPE_MASK			(7 << 4)
+#	define MGA_ATYPE_RPL			(0 << 4)
+#	define MGA_ATYPE_RSTR			(1 << 4)
+#	define MGA_ATYPE_ZI			(3 << 4)
+#	define MGA_ATYPE_BLK			(4 << 4)
+#	define MGA_ATYPE_I			(7 << 4)
+#	define MGA_LINEAR			(1 << 7)
+#	define MGA_ZMODE_MASK			(7 << 8)
+#	define MGA_ZMODE_NOZCMP			(0 << 8)
+#	define MGA_ZMODE_ZE			(2 << 8)
+#	define MGA_ZMODE_ZNE			(3 << 8)
+#	define MGA_ZMODE_ZLT			(4 << 8)
+#	define MGA_ZMODE_ZLTE			(5 << 8)
+#	define MGA_ZMODE_ZGT			(6 << 8)
+#	define MGA_ZMODE_ZGTE			(7 << 8)
+#	define MGA_SOLID			(1 << 11)
+#	define MGA_ARZERO			(1 << 12)
+#	define MGA_SGNZERO			(1 << 13)
+#	define MGA_SHIFTZERO			(1 << 14)
+#	define MGA_BOP_MASK			(15 << 16)
+#	define MGA_BOP_ZERO			(0 << 16)
+#	define MGA_BOP_DST			(10 << 16)
+#	define MGA_BOP_SRC			(12 << 16)
+#	define MGA_BOP_ONE			(15 << 16)
+#	define MGA_TRANS_SHIFT			20
+#	define MGA_TRANS_MASK			(15 << 20)
+#	define MGA_BLTMOD_MASK			(15 << 25)
+#	define MGA_BLTMOD_BMONOLEF		(0 << 25)
+#	define MGA_BLTMOD_BMONOWF		(4 << 25)
+#	define MGA_BLTMOD_PLAN			(1 << 25)
+#	define MGA_BLTMOD_BFCOL			(2 << 25)
+#	define MGA_BLTMOD_BU32BGR		(3 << 25)
+#	define MGA_BLTMOD_BU32RGB		(7 << 25)
+#	define MGA_BLTMOD_BU24BGR		(11 << 25)
+#	define MGA_BLTMOD_BU24RGB		(15 << 25)
+#	define MGA_PATTERN			(1 << 29)
+#	define MGA_TRANSC			(1 << 30)
+#	define MGA_CLIPDIS			(1 << 31)
+#define MGA_DWGSYNC			0x2c4c
+
+#define MGA_FCOL			0x1c24
+#define MGA_FIFOSTATUS			0x1e10
+#define MGA_FOGCOL			0x1cf4
+#define MGA_FXBNDRY			0x1c84
+#define MGA_FXLEFT			0x1ca8
+#define MGA_FXRIGHT			0x1cac
+
+#define MGA_ICLEAR			0x1e18
+#	define MGA_SOFTRAPICLR			(1 << 0)
+#	define MGA_VLINEICLR			(1 << 5)
+#define MGA_IEN				0x1e1c
+#	define MGA_SOFTRAPIEN			(1 << 0)
+#	define MGA_VLINEIEN			(1 << 5)
+
+#define MGA_LEN				0x1c5c
+
+#define MGA_MACCESS			0x1c04
+
+#define MGA_PITCH			0x1c8c
+#define MGA_PLNWT			0x1c1c
+#define MGA_PRIMADDRESS			0x1e58
+#	define MGA_DMA_GENERAL			(0 << 0)
+#	define MGA_DMA_BLIT			(1 << 0)
+#	define MGA_DMA_VECTOR			(2 << 0)
+#	define MGA_DMA_VERTEX			(3 << 0)
+#define MGA_PRIMEND			0x1e5c
+#	define MGA_PRIMNOSTART			(1 << 0)
+#	define MGA_PAGPXFER			(1 << 1)
+#define MGA_PRIMPTR			0x1e50
+#	define MGA_PRIMPTREN0			(1 << 0)
+#	define MGA_PRIMPTREN1			(1 << 1)
+
+#define MGA_RST				0x1e40
+#	define MGA_SOFTRESET			(1 << 0)
+#	define MGA_SOFTEXTRST			(1 << 1)
+
+#define MGA_SECADDRESS			0x2c40
+#define MGA_SECEND			0x2c44
+#define MGA_SETUPADDRESS		0x2cd0
+#define MGA_SETUPEND			0x2cd4
+#define MGA_SGN				0x1c58
+#define MGA_SOFTRAP			0x2c48
+#define MGA_SRCORG			0x2cb4
+#	define MGA_SRMMAP_MASK			(1 << 0)
+#	define MGA_SRCMAP_FB			(0 << 0)
+#	define MGA_SRCMAP_SYSMEM		(1 << 0)
+#	define MGA_SRCACC_MASK			(1 << 1)
+#	define MGA_SRCACC_PCI			(0 << 1)
+#	define MGA_SRCACC_AGP			(1 << 1)
+#define MGA_STATUS			0x1e14
+#	define MGA_SOFTRAPEN			(1 << 0)
+#	define MGA_VSYNCPEN			(1 << 4)
+#	define MGA_VLINEPEN			(1 << 5)
+#	define MGA_DWGENGSTS			(1 << 16)
+#	define MGA_ENDPRDMASTS			(1 << 17)
+#define MGA_STENCIL			0x2cc8
+#define MGA_STENCILCTL			0x2ccc
+
+#define MGA_TDUALSTAGE0			0x2cf8
+#define MGA_TDUALSTAGE1			0x2cfc
+#define MGA_TEXBORDERCOL		0x2c5c
+#define MGA_TEXCTL			0x2c30
+#define MGA_TEXCTL2			0x2c3c
+#	define MGA_DUALTEX			(1 << 7)
+#	define MGA_G400_TC2_MAGIC		(1 << 15)
+#	define MGA_MAP1_ENABLE			(1 << 31)
+#define MGA_TEXFILTER			0x2c58
+#define MGA_TEXHEIGHT			0x2c2c
+#define MGA_TEXORG			0x2c24
+#	define MGA_TEXORGMAP_MASK		(1 << 0)
+#	define MGA_TEXORGMAP_FB			(0 << 0)
+#	define MGA_TEXORGMAP_SYSMEM		(1 << 0)
+#	define MGA_TEXORGACC_MASK		(1 << 1)
+#	define MGA_TEXORGACC_PCI		(0 << 1)
+#	define MGA_TEXORGACC_AGP		(1 << 1)
+#define MGA_TEXORG1			0x2ca4
+#define MGA_TEXORG2			0x2ca8
+#define MGA_TEXORG3			0x2cac
+#define MGA_TEXORG4			0x2cb0
+#define MGA_TEXTRANS			0x2c34
+#define MGA_TEXTRANSHIGH		0x2c38
+#define MGA_TEXWIDTH			0x2c28
+
+#define MGA_WACCEPTSEQ			0x1dd4
+#define MGA_WCODEADDR			0x1e6c
+#define MGA_WFLAG			0x1dc4
+#define MGA_WFLAG1			0x1de0
+#define MGA_WFLAGNB			0x1e64
+#define MGA_WFLAGNB1			0x1e08
+#define MGA_WGETMSB			0x1dc8
+#define MGA_WIADDR			0x1dc0
+#define MGA_WIADDR2			0x1dd8
+#	define MGA_WMODE_SUSPEND		(0 << 0)
+#	define MGA_WMODE_RESUME			(1 << 0)
+#	define MGA_WMODE_JUMP			(2 << 0)
+#	define MGA_WMODE_START			(3 << 0)
+#	define MGA_WAGP_ENABLE			(1 << 2)
+#define MGA_WMISC			0x1e70
+#	define MGA_WUCODECACHE_ENABLE		(1 << 0)
+#	define MGA_WMASTER_ENABLE		(1 << 1)
+#	define MGA_WCACHEFLUSH_ENABLE		(1 << 3)
+#define MGA_WVRTXSZ			0x1dcc
+
+#define MGA_YBOT			0x1c9c
+#define MGA_YDST			0x1c90
+#define MGA_YDSTLEN			0x1c88
+#define MGA_YDSTORG			0x1c94
+#define MGA_YTOP			0x1c98
+
+#define MGA_ZORG			0x1c0c
+
+/* This finishes the current batch of commands
+ */
+#define MGA_EXEC			0x0100
+
+/* AGP PLL encoding (for G200 only).
+ */
+#define MGA_AGP_PLL			0x1e4c
+#	define MGA_AGP2XPLL_DISABLE		(0 << 0)
+#	define MGA_AGP2XPLL_ENABLE		(1 << 0)
+
+/* Warp registers
+ */
+#define MGA_WR0				0x2d00
+#define MGA_WR1				0x2d04
+#define MGA_WR2				0x2d08
+#define MGA_WR3				0x2d0c
+#define MGA_WR4				0x2d10
+#define MGA_WR5				0x2d14
+#define MGA_WR6				0x2d18
+#define MGA_WR7				0x2d1c
+#define MGA_WR8				0x2d20
+#define MGA_WR9				0x2d24
+#define MGA_WR10			0x2d28
+#define MGA_WR11			0x2d2c
+#define MGA_WR12			0x2d30
+#define MGA_WR13			0x2d34
+#define MGA_WR14			0x2d38
+#define MGA_WR15			0x2d3c
+#define MGA_WR16			0x2d40
+#define MGA_WR17			0x2d44
+#define MGA_WR18			0x2d48
+#define MGA_WR19			0x2d4c
+#define MGA_WR20			0x2d50
+#define MGA_WR21			0x2d54
+#define MGA_WR22			0x2d58
+#define MGA_WR23			0x2d5c
+#define MGA_WR24			0x2d60
+#define MGA_WR25			0x2d64
+#define MGA_WR26			0x2d68
+#define MGA_WR27			0x2d6c
+#define MGA_WR28			0x2d70
+#define MGA_WR29			0x2d74
+#define MGA_WR30			0x2d78
+#define MGA_WR31			0x2d7c
+#define MGA_WR32			0x2d80
+#define MGA_WR33			0x2d84
+#define MGA_WR34			0x2d88
+#define MGA_WR35			0x2d8c
+#define MGA_WR36			0x2d90
+#define MGA_WR37			0x2d94
+#define MGA_WR38			0x2d98
+#define MGA_WR39			0x2d9c
+#define MGA_WR40			0x2da0
+#define MGA_WR41			0x2da4
+#define MGA_WR42			0x2da8
+#define MGA_WR43			0x2dac
+#define MGA_WR44			0x2db0
+#define MGA_WR45			0x2db4
+#define MGA_WR46			0x2db8
+#define MGA_WR47			0x2dbc
+#define MGA_WR48			0x2dc0
+#define MGA_WR49			0x2dc4
+#define MGA_WR50			0x2dc8
+#define MGA_WR51			0x2dcc
+#define MGA_WR52			0x2dd0
+#define MGA_WR53			0x2dd4
+#define MGA_WR54			0x2dd8
+#define MGA_WR55			0x2ddc
+#define MGA_WR56			0x2de0
+#define MGA_WR57			0x2de4
+#define MGA_WR58			0x2de8
+#define MGA_WR59			0x2dec
+#define MGA_WR60			0x2df0
+#define MGA_WR61			0x2df4
+#define MGA_WR62			0x2df8
+#define MGA_WR63			0x2dfc
+#	define MGA_G400_WR_MAGIC		(1 << 6)
+#	define MGA_G400_WR56_MAGIC		0x46480000	/* 12800.0f */
+
+#define MGA_ILOAD_ALIGN		64
+#define MGA_ILOAD_MASK		(MGA_ILOAD_ALIGN - 1)
+
+#define MGA_DWGCTL_FLUSH	(MGA_OPCOD_TEXTURE_TRAP |		\
+				 MGA_ATYPE_I |				\
+				 MGA_ZMODE_NOZCMP |			\
+				 MGA_ARZERO |				\
+				 MGA_SGNZERO |				\
+				 MGA_BOP_SRC |				\
+				 (15 << MGA_TRANS_SHIFT))
+
+#define MGA_DWGCTL_CLEAR	(MGA_OPCOD_TRAP |			\
+				 MGA_ZMODE_NOZCMP |			\
+				 MGA_SOLID |				\
+				 MGA_ARZERO |				\
+				 MGA_SGNZERO |				\
+				 MGA_SHIFTZERO |			\
+				 MGA_BOP_SRC |				\
+				 (0 << MGA_TRANS_SHIFT) |		\
+				 MGA_BLTMOD_BMONOLEF |			\
+				 MGA_TRANSC |				\
+				 MGA_CLIPDIS)
+
+#define MGA_DWGCTL_COPY		(MGA_OPCOD_BITBLT |			\
+				 MGA_ATYPE_RPL |			\
+				 MGA_SGNZERO |				\
+				 MGA_SHIFTZERO |			\
+				 MGA_BOP_SRC |				\
+				 (0 << MGA_TRANS_SHIFT) |		\
+				 MGA_BLTMOD_BFCOL |			\
+				 MGA_CLIPDIS)
+
+/* Simple idle test.
+ */
+static __inline__ int mga_is_idle(drm_mga_private_t * dev_priv)
+{
+	u32 status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
+	return (status == MGA_ENDPRDMASTS);
+}
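+
+/* A minimal illustrative sketch of how a caller could poll mga_is_idle();
+ * the retry count, the use of cpu_relax() and the name mga_poll_for_idle
+ * are assumptions for the example, not values taken from the driver.
+ */
+static __inline__ int mga_poll_for_idle(drm_mga_private_t * dev_priv)
+{
+	int i;
+
+	for (i = 0; i < 10000; i++) {
+		if (mga_is_idle(dev_priv))
+			return 0;
+		cpu_relax();	/* spin politely between status reads */
+	}
+	return -EBUSY;	/* engine never reported idle */
+}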
+
+#endif
diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
new file mode 100644
index 000000000000..30d00478ddee
--- /dev/null
+++ b/drivers/gpu/drm/mga/mga_ioc32.c
@@ -0,0 +1,231 @@
+/**
+ * \file mga_ioc32.c
+ *
+ * 32-bit ioctl compatibility routines for the MGA DRM.
+ *
+ * \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich
+ *
+ *
+ * Copyright (C) Paul Mackerras 2005
+ * Copyright (C) Egbert Eich 2003,2004
+ * Copyright (C) Dave Airlie 2005
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <linux/compat.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "mga_drm.h"
+
+typedef struct drm32_mga_init {
+	int func;
+	u32 sarea_priv_offset;
+	int chipset;
+	int sgram;
+	unsigned int maccess;
+	unsigned int fb_cpp;
+	unsigned int front_offset, front_pitch;
+	unsigned int back_offset, back_pitch;
+	unsigned int depth_cpp;
+	unsigned int depth_offset, depth_pitch;
+	unsigned int texture_offset[MGA_NR_TEX_HEAPS];
+	unsigned int texture_size[MGA_NR_TEX_HEAPS];
+	u32 fb_offset;
+	u32 mmio_offset;
+	u32 status_offset;
+	u32 warp_offset;
+	u32 primary_offset;
+	u32 buffers_offset;
+} drm_mga_init32_t;
+
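+/* Each wrapper below follows the same three-step pattern: copy the packed
+ * 32-bit structure in from user space, rebuild the native layout in the
+ * area returned by compat_alloc_user_space(), then hand the rebuilt
+ * argument to the regular ioctl path via drm_ioctl().  The DMA bootstrap
+ * wrapper additionally copies the results back out to the 32-bit caller.
+ */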
+static int compat_mga_init(struct file *file, unsigned int cmd,
+			   unsigned long arg)
+{
+	drm_mga_init32_t init32;
+	drm_mga_init_t __user *init;
+	int err = 0, i;
+
+	if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
+		return -EFAULT;
+
+	init = compat_alloc_user_space(sizeof(*init));
+	if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
+	    || __put_user(init32.func, &init->func)
+	    || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
+	    || __put_user(init32.chipset, &init->chipset)
+	    || __put_user(init32.sgram, &init->sgram)
+	    || __put_user(init32.maccess, &init->maccess)
+	    || __put_user(init32.fb_cpp, &init->fb_cpp)
+	    || __put_user(init32.front_offset, &init->front_offset)
+	    || __put_user(init32.front_pitch, &init->front_pitch)
+	    || __put_user(init32.back_offset, &init->back_offset)
+	    || __put_user(init32.back_pitch, &init->back_pitch)
+	    || __put_user(init32.depth_cpp, &init->depth_cpp)
+	    || __put_user(init32.depth_offset, &init->depth_offset)
+	    || __put_user(init32.depth_pitch, &init->depth_pitch)
+	    || __put_user(init32.fb_offset, &init->fb_offset)
+	    || __put_user(init32.mmio_offset, &init->mmio_offset)
+	    || __put_user(init32.status_offset, &init->status_offset)
+	    || __put_user(init32.warp_offset, &init->warp_offset)
+	    || __put_user(init32.primary_offset, &init->primary_offset)
+	    || __put_user(init32.buffers_offset, &init->buffers_offset))
+		return -EFAULT;
+
+	for (i = 0; i < MGA_NR_TEX_HEAPS; i++) {
+		err |=
+		    __put_user(init32.texture_offset[i],
+			       &init->texture_offset[i]);
+		err |=
+		    __put_user(init32.texture_size[i], &init->texture_size[i]);
+	}
+	if (err)
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_MGA_INIT, (unsigned long)init);
+}
+
+typedef struct drm_mga_getparam32 {
+	int param;
+	u32 value;
+} drm_mga_getparam32_t;
+
+static int compat_mga_getparam(struct file *file, unsigned int cmd,
+			       unsigned long arg)
+{
+	drm_mga_getparam32_t getparam32;
+	drm_mga_getparam_t __user *getparam;
+
+	if (copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32)))
+		return -EFAULT;
+
+	getparam = compat_alloc_user_space(sizeof(*getparam));
+	if (!access_ok(VERIFY_WRITE, getparam, sizeof(*getparam))
+	    || __put_user(getparam32.param, &getparam->param)
+	    || __put_user((void __user *)(unsigned long)getparam32.value,
+			  &getparam->value))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
+}
+
+typedef struct drm_mga_dma_bootstrap32 {
+	u32 texture_handle;
+	u32 texture_size;
+	u32 primary_size;
+	u32 secondary_bin_count;
+	u32 secondary_bin_size;
+	u32 agp_mode;
+	u8 agp_size;
+} drm_mga_dma_bootstrap32_t;
+
+static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
+				    unsigned long arg)
+{
+	drm_mga_dma_bootstrap32_t dma_bootstrap32;
+	drm_mga_dma_bootstrap_t __user *dma_bootstrap;
+	int err;
+
+	if (copy_from_user(&dma_bootstrap32, (void __user *)arg,
+			   sizeof(dma_bootstrap32)))
+		return -EFAULT;
+
+	dma_bootstrap = compat_alloc_user_space(sizeof(*dma_bootstrap));
+	if (!access_ok(VERIFY_WRITE, dma_bootstrap, sizeof(*dma_bootstrap))
+	    || __put_user(dma_bootstrap32.texture_handle,
+			  &dma_bootstrap->texture_handle)
+	    || __put_user(dma_bootstrap32.texture_size,
+			  &dma_bootstrap->texture_size)
+	    || __put_user(dma_bootstrap32.primary_size,
+			  &dma_bootstrap->primary_size)
+	    || __put_user(dma_bootstrap32.secondary_bin_count,
+			  &dma_bootstrap->secondary_bin_count)
+	    || __put_user(dma_bootstrap32.secondary_bin_size,
+			  &dma_bootstrap->secondary_bin_size)
+	    || __put_user(dma_bootstrap32.agp_mode, &dma_bootstrap->agp_mode)
+	    || __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size))
+		return -EFAULT;
+
+	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+			DRM_IOCTL_MGA_DMA_BOOTSTRAP,
+			(unsigned long)dma_bootstrap);
+	if (err)
+		return err;
+
+	if (__get_user(dma_bootstrap32.texture_handle,
+		       &dma_bootstrap->texture_handle)
+	    || __get_user(dma_bootstrap32.texture_size,
+			  &dma_bootstrap->texture_size)
+	    || __get_user(dma_bootstrap32.primary_size,
+			  &dma_bootstrap->primary_size)
+	    || __get_user(dma_bootstrap32.secondary_bin_count,
+			  &dma_bootstrap->secondary_bin_count)
+	    || __get_user(dma_bootstrap32.secondary_bin_size,
+			  &dma_bootstrap->secondary_bin_size)
+	    || __get_user(dma_bootstrap32.agp_mode, &dma_bootstrap->agp_mode)
+	    || __get_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &dma_bootstrap32,
+			 sizeof(dma_bootstrap32)))
+		return -EFAULT;
+
+	return 0;
+}
+
+drm_ioctl_compat_t *mga_compat_ioctls[] = {
+	[DRM_MGA_INIT] = compat_mga_init,
+	[DRM_MGA_GETPARAM] = compat_mga_getparam,
+	[DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
+};
+
+/**
+ * Called whenever a 32-bit process running under a 64-bit kernel
+ * performs an ioctl on /dev/dri/card<n>.
+ *
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ */
+long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+	drm_ioctl_compat_t *fn = NULL;
+	int ret;
+
+	if (nr < DRM_COMMAND_BASE)
+		return drm_compat_ioctl(filp, cmd, arg);
+
+	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
+		fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
+
+	lock_kernel();		/* XXX for now */
+	if (fn != NULL)
+		ret = (*fn) (filp, cmd, arg);
+	else
+		ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
+	unlock_kernel();
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
new file mode 100644
index 000000000000..9302cb8f0f83
--- /dev/null
+++ b/drivers/gpu/drm/mga/mga_irq.c
@@ -0,0 +1,148 @@
+/* mga_irq.c -- IRQ handling for the MGA G200/G400 -*- linux-c -*-
+ *
+ * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Keith Whitwell <keith@tungstengraphics.com>
+ *    Eric Anholt <anholt@FreeBSD.org>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "mga_drm.h"
+#include "mga_drv.h"
+
+irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
+	int status;
+	int handled = 0;
+
+	status = MGA_READ(MGA_STATUS);
+
+	/* VBLANK interrupt */
+	if (status & MGA_VLINEPEN) {
+		MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
+		atomic_inc(&dev->vbl_received);
+		DRM_WAKEUP(&dev->vbl_queue);
+		drm_vbl_send_signals(dev);
+		handled = 1;
+	}
+
+	/* SOFTRAP interrupt */
+	if (status & MGA_SOFTRAPEN) {
+		const u32 prim_start = MGA_READ(MGA_PRIMADDRESS);
+		const u32 prim_end = MGA_READ(MGA_PRIMEND);
+
+		MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);
+
+		/* In addition to clearing the interrupt-pending bit, we
+		 * have to write to MGA_PRIMEND to re-start the DMA operation.
+		 */
+		if ((prim_start & ~0x03) != (prim_end & ~0x03)) {
+			MGA_WRITE(MGA_PRIMEND, prim_end);
+		}
+
+		atomic_inc(&dev_priv->last_fence_retired);
+		DRM_WAKEUP(&dev_priv->fence_queue);
+		handled = 1;
+	}
+
+	if (handled) {
+		return IRQ_HANDLED;
+	}
+	return IRQ_NONE;
+}
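+
+/* The SOFTRAP branch above is the driver's fence mechanism: mga_set_fence()
+ * (in mga_state.c) queues a write to the MGA_SOFTRAP register in the command
+ * stream, and when the engine reaches it this handler bumps
+ * last_fence_retired and wakes any sleeper in mga_driver_fence_wait() below.
+ */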
+
+int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
+{
+	unsigned int cur_vblank;
+	int ret = 0;
+
+	/* Assume that the user has missed the current sequence number
+	 * by about a day rather than wanting to wait for years'
+	 * worth of vertical blanks...
+	 */
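+	/* The comparison below is wrap-safe because the subtraction is done
+	 * in unsigned arithmetic: if the counter has wrapped, e.g.
+	 * *sequence == 0xfffffff0 and vbl_received == 0x00000010, then
+	 * (0x00000010 - 0xfffffff0) is 0x20, well under 1 << 23, and the
+	 * wait completes promptly.
+	 */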
+	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
+		    (((cur_vblank = atomic_read(&dev->vbl_received))
+		      - *sequence) <= (1 << 23)));
+
+	*sequence = cur_vblank;
+
+	return ret;
+}
+
+int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
+{
+	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
+	unsigned int cur_fence;
+	int ret = 0;
+
+	/* Assume that the user has missed the current sequence number
+	 * by about a day rather than wanting to wait for years'
+	 * worth of fences.
+	 */
+	DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
+		    (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
+		      - *sequence) <= (1 << 23)));
+
+	*sequence = cur_fence;
+
+	return ret;
+}
+
+void mga_driver_irq_preinstall(struct drm_device * dev)
+{
+	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
+
+	/* Disable *all* interrupts */
+	MGA_WRITE(MGA_IEN, 0);
+	/* Clear bits if they're already high */
+	MGA_WRITE(MGA_ICLEAR, ~0);
+}
+
+void mga_driver_irq_postinstall(struct drm_device * dev)
+{
+	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
+
+	DRM_INIT_WAITQUEUE(&dev_priv->fence_queue);
+
+	/* Turn on vertical blank interrupt and soft trap interrupt. */
+	MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
+}
+
+void mga_driver_irq_uninstall(struct drm_device * dev)
+{
+	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
+	if (!dev_priv)
+		return;
+
+	/* Disable *all* interrupts */
+	MGA_WRITE(MGA_IEN, 0);
+
+	dev->irq_enabled = 0;
+}
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
new file mode 100644
index 000000000000..d3f8aade07b3
--- /dev/null
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -0,0 +1,1104 @@
+/* mga_state.c -- State support for MGA G200/G400 -*- linux-c -*-
+ * Created: Thu Jan 27 02:53:43 2000 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Jeff Hartmann <jhartmann@valinux.com>
+ *    Keith Whitwell <keith@tungstengraphics.com>
+ *
+ * Rewritten by:
+ *    Gareth Hughes <gareth@valinux.com>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "mga_drm.h"
+#include "mga_drv.h"
+
+/* ================================================================
+ * DMA hardware state programming functions
+ */
+
+static void mga_emit_clip_rect(drm_mga_private_t * dev_priv,
+			       struct drm_clip_rect * box)
+{
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
+	unsigned int pitch = dev_priv->front_pitch;
+	DMA_LOCALS;
+
+	BEGIN_DMA(2);
+
+	/* Force reset of DWGCTL on G400 (eliminates clip disable bit).
+	 */
+	if (dev_priv->chipset >= MGA_CARD_TYPE_G400) {
+		DMA_BLOCK(MGA_DWGCTL, ctx->dwgctl,
+			  MGA_LEN + MGA_EXEC, 0x80000000,
+			  MGA_DWGCTL, ctx->dwgctl,
+			  MGA_LEN + MGA_EXEC, 0x80000000);
+	}
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_CXBNDRY, ((box->x2 - 1) << 16) | box->x1,
+		  MGA_YTOP, box->y1 * pitch, MGA_YBOT, (box->y2 - 1) * pitch);
+
+	ADVANCE_DMA();
+}
+
+static __inline__ void mga_g200_emit_context(drm_mga_private_t * dev_priv)
+{
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
+	DMA_LOCALS;
+
+	BEGIN_DMA(3);
+
+	DMA_BLOCK(MGA_DSTORG, ctx->dstorg,
+		  MGA_MACCESS, ctx->maccess,
+		  MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl);
+
+	DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl,
+		  MGA_FOGCOL, ctx->fogcolor,
+		  MGA_WFLAG, ctx->wflag, MGA_ZORG, dev_priv->depth_offset);
+
+	DMA_BLOCK(MGA_FCOL, ctx->fcol,
+		  MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
+
+	ADVANCE_DMA();
+}
+
+static __inline__ void mga_g400_emit_context(drm_mga_private_t * dev_priv)
+{
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
+	DMA_LOCALS;
+
+	BEGIN_DMA(4);
+
+	DMA_BLOCK(MGA_DSTORG, ctx->dstorg,
+		  MGA_MACCESS, ctx->maccess,
+		  MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl);
+
+	DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl,
+		  MGA_FOGCOL, ctx->fogcolor,
+		  MGA_WFLAG, ctx->wflag, MGA_ZORG, dev_priv->depth_offset);
+
+	DMA_BLOCK(MGA_WFLAG1, ctx->wflag,
+		  MGA_TDUALSTAGE0, ctx->tdualstage0,
+		  MGA_TDUALSTAGE1, ctx->tdualstage1, MGA_FCOL, ctx->fcol);
+
+	DMA_BLOCK(MGA_STENCIL, ctx->stencil,
+		  MGA_STENCILCTL, ctx->stencilctl,
+		  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
+
+	ADVANCE_DMA();
+}
+
+static __inline__ void mga_g200_emit_tex0(drm_mga_private_t * dev_priv)
+{
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
+	DMA_LOCALS;
+
+	BEGIN_DMA(4);
+
+	DMA_BLOCK(MGA_TEXCTL2, tex->texctl2,
+		  MGA_TEXCTL, tex->texctl,
+		  MGA_TEXFILTER, tex->texfilter,
+		  MGA_TEXBORDERCOL, tex->texbordercol);
+
+	DMA_BLOCK(MGA_TEXORG, tex->texorg,
+		  MGA_TEXORG1, tex->texorg1,
+		  MGA_TEXORG2, tex->texorg2, MGA_TEXORG3, tex->texorg3);
+
+	DMA_BLOCK(MGA_TEXORG4, tex->texorg4,
+		  MGA_TEXWIDTH, tex->texwidth,
+		  MGA_TEXHEIGHT, tex->texheight, MGA_WR24, tex->texwidth);
+
+	DMA_BLOCK(MGA_WR34, tex->texheight,
+		  MGA_TEXTRANS, 0x0000ffff,
+		  MGA_TEXTRANSHIGH, 0x0000ffff, MGA_DMAPAD, 0x00000000);
+
+	ADVANCE_DMA();
+}
+
+static __inline__ void mga_g400_emit_tex0(drm_mga_private_t * dev_priv)
+{
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
+	DMA_LOCALS;
+
+/*	printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */
+/*	       tex->texctl, tex->texctl2); */
+
+	BEGIN_DMA(6);
+
+	DMA_BLOCK(MGA_TEXCTL2, tex->texctl2 | MGA_G400_TC2_MAGIC,
+		  MGA_TEXCTL, tex->texctl,
+		  MGA_TEXFILTER, tex->texfilter,
+		  MGA_TEXBORDERCOL, tex->texbordercol);
+
+	DMA_BLOCK(MGA_TEXORG, tex->texorg,
+		  MGA_TEXORG1, tex->texorg1,
+		  MGA_TEXORG2, tex->texorg2, MGA_TEXORG3, tex->texorg3);
+
+	DMA_BLOCK(MGA_TEXORG4, tex->texorg4,
+		  MGA_TEXWIDTH, tex->texwidth,
+		  MGA_TEXHEIGHT, tex->texheight, MGA_WR49, 0x00000000);
+
+	DMA_BLOCK(MGA_WR57, 0x00000000,
+		  MGA_WR53, 0x00000000,
+		  MGA_WR61, 0x00000000, MGA_WR52, MGA_G400_WR_MAGIC);
+
+	DMA_BLOCK(MGA_WR60, MGA_G400_WR_MAGIC,
+		  MGA_WR54, tex->texwidth | MGA_G400_WR_MAGIC,
+		  MGA_WR62, tex->texheight | MGA_G400_WR_MAGIC,
+		  MGA_DMAPAD, 0x00000000);
+
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000,
+		  MGA_TEXTRANS, 0x0000ffff, MGA_TEXTRANSHIGH, 0x0000ffff);
+
+	ADVANCE_DMA();
+}
+
+static __inline__ void mga_g400_emit_tex1(drm_mga_private_t * dev_priv)
+{
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[1];
+	DMA_LOCALS;
+
+/*	printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg,  */
+/*	       tex->texctl, tex->texctl2); */
+
+	BEGIN_DMA(5);
+
+	DMA_BLOCK(MGA_TEXCTL2, (tex->texctl2 |
+				MGA_MAP1_ENABLE |
+				MGA_G400_TC2_MAGIC),
+		  MGA_TEXCTL, tex->texctl,
+		  MGA_TEXFILTER, tex->texfilter,
+		  MGA_TEXBORDERCOL, tex->texbordercol);
+
+	DMA_BLOCK(MGA_TEXORG, tex->texorg,
+		  MGA_TEXORG1, tex->texorg1,
+		  MGA_TEXORG2, tex->texorg2, MGA_TEXORG3, tex->texorg3);
+
+	DMA_BLOCK(MGA_TEXORG4, tex->texorg4,
+		  MGA_TEXWIDTH, tex->texwidth,
+		  MGA_TEXHEIGHT, tex->texheight, MGA_WR49, 0x00000000);
+
+	DMA_BLOCK(MGA_WR57, 0x00000000,
+		  MGA_WR53, 0x00000000,
+		  MGA_WR61, 0x00000000,
+		  MGA_WR52, tex->texwidth | MGA_G400_WR_MAGIC);
+
+	DMA_BLOCK(MGA_WR60, tex->texheight | MGA_G400_WR_MAGIC,
+		  MGA_TEXTRANS, 0x0000ffff,
+		  MGA_TEXTRANSHIGH, 0x0000ffff,
+		  MGA_TEXCTL2, tex->texctl2 | MGA_G400_TC2_MAGIC);
+
+	ADVANCE_DMA();
+}
+
+static __inline__ void mga_g200_emit_pipe(drm_mga_private_t * dev_priv)
+{
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	unsigned int pipe = sarea_priv->warp_pipe;
+	DMA_LOCALS;
+
+	BEGIN_DMA(3);
+
+	DMA_BLOCK(MGA_WIADDR, MGA_WMODE_SUSPEND,
+		  MGA_WVRTXSZ, 0x00000007,
+		  MGA_WFLAG, 0x00000000, MGA_WR24, 0x00000000);
+
+	DMA_BLOCK(MGA_WR25, 0x00000100,
+		  MGA_WR34, 0x00000000,
+		  MGA_WR42, 0x0000ffff, MGA_WR60, 0x0000ffff);
+
+	/* Padding required due to a hardware bug.
+	 */
+	DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
+		  MGA_DMAPAD, 0xffffffff,
+		  MGA_DMAPAD, 0xffffffff,
+		  MGA_WIADDR, (dev_priv->warp_pipe_phys[pipe] |
+			       MGA_WMODE_START | dev_priv->wagp_enable));
+
+	ADVANCE_DMA();
+}
+
+static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv)
+{
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	unsigned int pipe = sarea_priv->warp_pipe;
+	DMA_LOCALS;
+
+/*	printk("mga_g400_emit_pipe %x\n", pipe); */
+
+	BEGIN_DMA(10);
+
+	DMA_BLOCK(MGA_WIADDR2, MGA_WMODE_SUSPEND,
+		  MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
+
+	if (pipe & MGA_T2) {
+		DMA_BLOCK(MGA_WVRTXSZ, 0x00001e09,
+			  MGA_DMAPAD, 0x00000000,
+			  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
+
+		DMA_BLOCK(MGA_WACCEPTSEQ, 0x00000000,
+			  MGA_WACCEPTSEQ, 0x00000000,
+			  MGA_WACCEPTSEQ, 0x00000000,
+			  MGA_WACCEPTSEQ, 0x1e000000);
+	} else {
+		if (dev_priv->warp_pipe & MGA_T2) {
+			/* Flush the WARP pipe */
+			DMA_BLOCK(MGA_YDST, 0x00000000,
+				  MGA_FXLEFT, 0x00000000,
+				  MGA_FXRIGHT, 0x00000001,
+				  MGA_DWGCTL, MGA_DWGCTL_FLUSH);
+
+			DMA_BLOCK(MGA_LEN + MGA_EXEC, 0x00000001,
+				  MGA_DWGSYNC, 0x00007000,
+				  MGA_TEXCTL2, MGA_G400_TC2_MAGIC,
+				  MGA_LEN + MGA_EXEC, 0x00000000);
+
+			DMA_BLOCK(MGA_TEXCTL2, (MGA_DUALTEX |
+						MGA_G400_TC2_MAGIC),
+				  MGA_LEN + MGA_EXEC, 0x00000000,
+				  MGA_TEXCTL2, MGA_G400_TC2_MAGIC,
+				  MGA_DMAPAD, 0x00000000);
+		}
+
+		DMA_BLOCK(MGA_WVRTXSZ, 0x00001807,
+			  MGA_DMAPAD, 0x00000000,
+			  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);
+
+		DMA_BLOCK(MGA_WACCEPTSEQ, 0x00000000,
+			  MGA_WACCEPTSEQ, 0x00000000,
+			  MGA_WACCEPTSEQ, 0x00000000,
+			  MGA_WACCEPTSEQ, 0x18000000);
+	}
+
+	DMA_BLOCK(MGA_WFLAG, 0x00000000,
+		  MGA_WFLAG1, 0x00000000,
+		  MGA_WR56, MGA_G400_WR56_MAGIC, MGA_DMAPAD, 0x00000000);
+
+	DMA_BLOCK(MGA_WR49, 0x00000000,	/* tex0              */
+		  MGA_WR57, 0x00000000,	/* tex0              */
+		  MGA_WR53, 0x00000000,	/* tex1              */
+		  MGA_WR61, 0x00000000);	/* tex1              */
+
+	DMA_BLOCK(MGA_WR54, MGA_G400_WR_MAGIC,	/* tex0 width        */
+		  MGA_WR62, MGA_G400_WR_MAGIC,	/* tex0 height       */
+		  MGA_WR52, MGA_G400_WR_MAGIC,	/* tex1 width        */
+		  MGA_WR60, MGA_G400_WR_MAGIC);	/* tex1 height       */
+
+	/* Padding required due to a hardware bug */
+	DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
+		  MGA_DMAPAD, 0xffffffff,
+		  MGA_DMAPAD, 0xffffffff,
+		  MGA_WIADDR2, (dev_priv->warp_pipe_phys[pipe] |
+				MGA_WMODE_START | dev_priv->wagp_enable));
+
+	ADVANCE_DMA();
+}
+
+static void mga_g200_emit_state(drm_mga_private_t * dev_priv)
+{
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	unsigned int dirty = sarea_priv->dirty;
+
+	if (sarea_priv->warp_pipe != dev_priv->warp_pipe) {
+		mga_g200_emit_pipe(dev_priv);
+		dev_priv->warp_pipe = sarea_priv->warp_pipe;
+	}
+
+	if (dirty & MGA_UPLOAD_CONTEXT) {
+		mga_g200_emit_context(dev_priv);
+		sarea_priv->dirty &= ~MGA_UPLOAD_CONTEXT;
+	}
+
+	if (dirty & MGA_UPLOAD_TEX0) {
+		mga_g200_emit_tex0(dev_priv);
+		sarea_priv->dirty &= ~MGA_UPLOAD_TEX0;
+	}
+}
+
+static void mga_g400_emit_state(drm_mga_private_t * dev_priv)
+{
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	unsigned int dirty = sarea_priv->dirty;
+	int multitex = sarea_priv->warp_pipe & MGA_T2;
+
+	if (sarea_priv->warp_pipe != dev_priv->warp_pipe) {
+		mga_g400_emit_pipe(dev_priv);
+		dev_priv->warp_pipe = sarea_priv->warp_pipe;
+	}
+
+	if (dirty & MGA_UPLOAD_CONTEXT) {
+		mga_g400_emit_context(dev_priv);
+		sarea_priv->dirty &= ~MGA_UPLOAD_CONTEXT;
+	}
+
+	if (dirty & MGA_UPLOAD_TEX0) {
+		mga_g400_emit_tex0(dev_priv);
+		sarea_priv->dirty &= ~MGA_UPLOAD_TEX0;
+	}
+
+	if ((dirty & MGA_UPLOAD_TEX1) && multitex) {
+		mga_g400_emit_tex1(dev_priv);
+		sarea_priv->dirty &= ~MGA_UPLOAD_TEX1;
+	}
+}
+
+/* ================================================================
+ * SAREA state verification
+ */
+
+/* Disallow all write destinations except the front and backbuffer.
+ */
+static int mga_verify_context(drm_mga_private_t * dev_priv)
+{
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
+
+	if (ctx->dstorg != dev_priv->front_offset &&
+	    ctx->dstorg != dev_priv->back_offset) {
+		DRM_ERROR("*** bad DSTORG: %x (front %x, back %x)\n\n",
+			  ctx->dstorg, dev_priv->front_offset,
+			  dev_priv->back_offset);
+		ctx->dstorg = 0;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Disallow texture reads from PCI space.
+ */
+static int mga_verify_tex(drm_mga_private_t * dev_priv, int unit)
+{
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[unit];
+	unsigned int org;
+
+	org = tex->texorg & (MGA_TEXORGMAP_MASK | MGA_TEXORGACC_MASK);
+
+	if (org == (MGA_TEXORGMAP_SYSMEM | MGA_TEXORGACC_PCI)) {
+		DRM_ERROR("*** bad TEXORG: 0x%x, unit %d\n", tex->texorg, unit);
+		tex->texorg = 0;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int mga_verify_state(drm_mga_private_t * dev_priv)
+{
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	unsigned int dirty = sarea_priv->dirty;
+	int ret = 0;
+
+	if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
+		sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
+
+	if (dirty & MGA_UPLOAD_CONTEXT)
+		ret |= mga_verify_context(dev_priv);
+
+	if (dirty & MGA_UPLOAD_TEX0)
+		ret |= mga_verify_tex(dev_priv, 0);
+
+	if (dev_priv->chipset >= MGA_CARD_TYPE_G400) {
+		if (dirty & MGA_UPLOAD_TEX1)
+			ret |= mga_verify_tex(dev_priv, 1);
+
+		if (dirty & MGA_UPLOAD_PIPE)
+			ret |= (sarea_priv->warp_pipe > MGA_MAX_G400_PIPES);
+	} else {
+		if (dirty & MGA_UPLOAD_PIPE)
+			ret |= (sarea_priv->warp_pipe > MGA_MAX_G200_PIPES);
+	}
+
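+	/* Note the inverted convention: unlike the helpers above, which
+	 * return 0 on success, this returns nonzero when the state checks
+	 * out, so callers test !mga_verify_state().
+	 */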
+	return (ret == 0);
+}
+
+static int mga_verify_iload(drm_mga_private_t * dev_priv,
+			    unsigned int dstorg, unsigned int length)
+{
+	if (dstorg < dev_priv->texture_offset ||
+	    dstorg + length > (dev_priv->texture_offset +
+			       dev_priv->texture_size)) {
+		DRM_ERROR("*** bad iload DSTORG: 0x%x\n", dstorg);
+		return -EINVAL;
+	}
+
+	if (length & MGA_ILOAD_MASK) {
+		DRM_ERROR("*** bad iload length: 0x%x\n",
+			  length & MGA_ILOAD_MASK);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int mga_verify_blit(drm_mga_private_t * dev_priv,
+			   unsigned int srcorg, unsigned int dstorg)
+{
+	if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) ||
+	    (dstorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM)) {
+		DRM_ERROR("*** bad blit: src=0x%x dst=0x%x\n", srcorg, dstorg);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* ================================================================
+ *
+ */
+
+static void mga_dma_dispatch_clear(struct drm_device * dev, drm_mga_clear_t * clear)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
+	int nbox = sarea_priv->nbox;
+	int i;
+	DMA_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_DMA(1);
+
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000,
+		  MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000);
+
+	ADVANCE_DMA();
+
+	for (i = 0; i < nbox; i++) {
+		struct drm_clip_rect *box = &pbox[i];
+		u32 height = box->y2 - box->y1;
+
+		DRM_DEBUG("   from=%d,%d to=%d,%d\n",
+			  box->x1, box->y1, box->x2, box->y2);
+
+		if (clear->flags & MGA_FRONT) {
+			BEGIN_DMA(2);
+
+			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+				  MGA_PLNWT, clear->color_mask,
+				  MGA_YDSTLEN, (box->y1 << 16) | height,
+				  MGA_FXBNDRY, (box->x2 << 16) | box->x1);
+
+			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+				  MGA_FCOL, clear->clear_color,
+				  MGA_DSTORG, dev_priv->front_offset,
+				  MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd);
+
+			ADVANCE_DMA();
+		}
+
+		if (clear->flags & MGA_BACK) {
+			BEGIN_DMA(2);
+
+			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+				  MGA_PLNWT, clear->color_mask,
+				  MGA_YDSTLEN, (box->y1 << 16) | height,
+				  MGA_FXBNDRY, (box->x2 << 16) | box->x1);
+
+			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+				  MGA_FCOL, clear->clear_color,
+				  MGA_DSTORG, dev_priv->back_offset,
+				  MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd);
+
+			ADVANCE_DMA();
+		}
+
+		if (clear->flags & MGA_DEPTH) {
+			BEGIN_DMA(2);
+
+			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+				  MGA_PLNWT, clear->depth_mask,
+				  MGA_YDSTLEN, (box->y1 << 16) | height,
+				  MGA_FXBNDRY, (box->x2 << 16) | box->x1);
+
+			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+				  MGA_FCOL, clear->clear_depth,
+				  MGA_DSTORG, dev_priv->depth_offset,
+				  MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd);
+
+			ADVANCE_DMA();
+		}
+
+	}
+
+	BEGIN_DMA(1);
+
+	/* Force reset of DWGCTL */
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000,
+		  MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl);
+
+	ADVANCE_DMA();
+
+	FLUSH_DMA();
+}
+
+static void mga_dma_dispatch_swap(struct drm_device * dev)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
+	int nbox = sarea_priv->nbox;
+	int i;
+	DMA_LOCALS;
+	DRM_DEBUG("\n");
+
+	sarea_priv->last_frame.head = dev_priv->prim.tail;
+	sarea_priv->last_frame.wrap = dev_priv->prim.last_wrap;
+
+	BEGIN_DMA(4 + nbox);
+
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000,
+		  MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000);
+
+	DMA_BLOCK(MGA_DSTORG, dev_priv->front_offset,
+		  MGA_MACCESS, dev_priv->maccess,
+		  MGA_SRCORG, dev_priv->back_offset,
+		  MGA_AR5, dev_priv->front_pitch);
+
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000,
+		  MGA_PLNWT, 0xffffffff, MGA_DWGCTL, MGA_DWGCTL_COPY);
+
+	for (i = 0; i < nbox; i++) {
+		struct drm_clip_rect *box = &pbox[i];
+		u32 height = box->y2 - box->y1;
+		u32 start = box->y1 * dev_priv->front_pitch;
+
+		DRM_DEBUG("   from=%d,%d to=%d,%d\n",
+			  box->x1, box->y1, box->x2, box->y2);
+
+		DMA_BLOCK(MGA_AR0, start + box->x2 - 1,
+			  MGA_AR3, start + box->x1,
+			  MGA_FXBNDRY, ((box->x2 - 1) << 16) | box->x1,
+			  MGA_YDSTLEN + MGA_EXEC, (box->y1 << 16) | height);
+	}
+
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_PLNWT, ctx->plnwt,
+		  MGA_SRCORG, dev_priv->front_offset, MGA_DWGCTL, ctx->dwgctl);
+
+	ADVANCE_DMA();
+
+	FLUSH_DMA();
+
+	DRM_DEBUG("... done.\n");
+}
+
+static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	drm_mga_buf_priv_t *buf_priv = buf->dev_private;
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	u32 address = (u32) buf->bus_address;
+	u32 length = (u32) buf->used;
+	int i = 0;
+	DMA_LOCALS;
+	DRM_DEBUG("buf=%d used=%d\n", buf->idx, buf->used);
+
+	if (buf->used) {
+		buf_priv->dispatched = 1;
+
+		MGA_EMIT_STATE(dev_priv, sarea_priv->dirty);
+
+		do {
+			if (i < sarea_priv->nbox) {
+				mga_emit_clip_rect(dev_priv,
+						   &sarea_priv->boxes[i]);
+			}
+
+			BEGIN_DMA(1);
+
+			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+				  MGA_DMAPAD, 0x00000000,
+				  MGA_SECADDRESS, (address |
+						   MGA_DMA_VERTEX),
+				  MGA_SECEND, ((address + length) |
+					       dev_priv->dma_access));
+
+			ADVANCE_DMA();
+		} while (++i < sarea_priv->nbox);
+	}
+
+	if (buf_priv->discard) {
+		AGE_BUFFER(buf_priv);
+		buf->pending = 0;
+		buf->used = 0;
+		buf_priv->dispatched = 0;
+
+		mga_freelist_put(dev, buf);
+	}
+
+	FLUSH_DMA();
+}
+
+static void mga_dma_dispatch_indices(struct drm_device * dev, struct drm_buf * buf,
+				     unsigned int start, unsigned int end)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	drm_mga_buf_priv_t *buf_priv = buf->dev_private;
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	u32 address = (u32) buf->bus_address;
+	int i = 0;
+	DMA_LOCALS;
+	DRM_DEBUG("buf=%d start=%d end=%d\n", buf->idx, start, end);
+
+	if (start != end) {
+		buf_priv->dispatched = 1;
+
+		MGA_EMIT_STATE(dev_priv, sarea_priv->dirty);
+
+		do {
+			if (i < sarea_priv->nbox) {
+				mga_emit_clip_rect(dev_priv,
+						   &sarea_priv->boxes[i]);
+			}
+
+			BEGIN_DMA(1);
+
+			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+				  MGA_DMAPAD, 0x00000000,
+				  MGA_SETUPADDRESS, address + start,
+				  MGA_SETUPEND, ((address + end) |
+						 dev_priv->dma_access));
+
+			ADVANCE_DMA();
+		} while (++i < sarea_priv->nbox);
+	}
+
+	if (buf_priv->discard) {
+		AGE_BUFFER(buf_priv);
+		buf->pending = 0;
+		buf->used = 0;
+		buf_priv->dispatched = 0;
+
+		mga_freelist_put(dev, buf);
+	}
+
+	FLUSH_DMA();
+}
+
+/* This copies a 64 byte aligned AGP region to the framebuffer with a
+ * standard blit; the calling ioctl needs to do the checking.
+ */
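+/* The transfer is framed as a blit 64 bytes wide and (length / 64) rows
+ * tall: MGA_PITCH and MGA_AR5 are programmed to 64, the X extent runs from
+ * 0 to 63, and the Y length written with MGA_YDSTLEN is length / 64.  A
+ * 64 KiB upload, for example, becomes a 64 x 1024 transfer.
+ */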
+static void mga_dma_dispatch_iload(struct drm_device * dev, struct drm_buf * buf,
+				   unsigned int dstorg, unsigned int length)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	drm_mga_buf_priv_t *buf_priv = buf->dev_private;
+	drm_mga_context_regs_t *ctx = &dev_priv->sarea_priv->context_state;
+	u32 srcorg =
+	    buf->bus_address | dev_priv->dma_access | MGA_SRCMAP_SYSMEM;
+	u32 y2;
+	DMA_LOCALS;
+	DRM_DEBUG("buf=%d used=%d\n", buf->idx, buf->used);
+
+	y2 = length / 64;
+
+	BEGIN_DMA(5);
+
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000,
+		  MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000);
+
+	DMA_BLOCK(MGA_DSTORG, dstorg,
+		  MGA_MACCESS, 0x00000000, MGA_SRCORG, srcorg, MGA_AR5, 64);
+
+	DMA_BLOCK(MGA_PITCH, 64,
+		  MGA_PLNWT, 0xffffffff,
+		  MGA_DMAPAD, 0x00000000, MGA_DWGCTL, MGA_DWGCTL_COPY);
+
+	DMA_BLOCK(MGA_AR0, 63,
+		  MGA_AR3, 0,
+		  MGA_FXBNDRY, (63 << 16) | 0, MGA_YDSTLEN + MGA_EXEC, y2);
+
+	DMA_BLOCK(MGA_PLNWT, ctx->plnwt,
+		  MGA_SRCORG, dev_priv->front_offset,
+		  MGA_PITCH, dev_priv->front_pitch, MGA_DWGSYNC, 0x00007000);
+
+	ADVANCE_DMA();
+
+	AGE_BUFFER(buf_priv);
+
+	buf->pending = 0;
+	buf->used = 0;
+	buf_priv->dispatched = 0;
+
+	mga_freelist_put(dev, buf);
+
+	FLUSH_DMA();
+}
+
+static void mga_dma_dispatch_blit(struct drm_device * dev, drm_mga_blit_t * blit)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
+	int nbox = sarea_priv->nbox;
+	u32 scandir = 0, i;
+	DMA_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_DMA(4 + nbox);
+
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000,
+		  MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000);
+
+	DMA_BLOCK(MGA_DWGCTL, MGA_DWGCTL_COPY,
+		  MGA_PLNWT, blit->planemask,
+		  MGA_SRCORG, blit->srcorg, MGA_DSTORG, blit->dstorg);
+
+	DMA_BLOCK(MGA_SGN, scandir,
+		  MGA_MACCESS, dev_priv->maccess,
+		  MGA_AR5, blit->ydir * blit->src_pitch,
+		  MGA_PITCH, blit->dst_pitch);
+
+	for (i = 0; i < nbox; i++) {
+		int srcx = pbox[i].x1 + blit->delta_sx;
+		int srcy = pbox[i].y1 + blit->delta_sy;
+		int dstx = pbox[i].x1 + blit->delta_dx;
+		int dsty = pbox[i].y1 + blit->delta_dy;
+		int h = pbox[i].y2 - pbox[i].y1;
+		int w = pbox[i].x2 - pbox[i].x1 - 1;
+		int start;
+
+		if (blit->ydir == -1) {
+			srcy = blit->height - srcy - 1;
+		}
+
+		start = srcy * blit->src_pitch + srcx;
+
+		DMA_BLOCK(MGA_AR0, start + w,
+			  MGA_AR3, start,
+			  MGA_FXBNDRY, ((dstx + w) << 16) | (dstx & 0xffff),
+			  MGA_YDSTLEN + MGA_EXEC, (dsty << 16) | h);
+	}
+
+	/* Do something to flush AGP?
+	 */
+
+	/* Force reset of DWGCTL */
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_PLNWT, ctx->plnwt,
+		  MGA_PITCH, dev_priv->front_pitch, MGA_DWGCTL, ctx->dwgctl);
+
+	ADVANCE_DMA();
+}
+
+/* ================================================================
+ *
+ */
+
+static int mga_dma_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_mga_clear_t *clear = data;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
+		sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
+
+	WRAP_TEST_WITH_RETURN(dev_priv);
+
+	mga_dma_dispatch_clear(dev, clear);
+
+	/* Make sure we restore the 3D state next time.
+	 */
+	dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
+
+	return 0;
+}
+
+static int mga_dma_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
+		sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
+
+	WRAP_TEST_WITH_RETURN(dev_priv);
+
+	mga_dma_dispatch_swap(dev);
+
+	/* Make sure we restore the 3D state next time.
+	 */
+	dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
+
+	return 0;
+}
+
+static int mga_dma_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_mga_buf_priv_t *buf_priv;
+	drm_mga_vertex_t *vertex = data;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (vertex->idx < 0 || vertex->idx >= dma->buf_count)
+		return -EINVAL;
+	buf = dma->buflist[vertex->idx];
+	buf_priv = buf->dev_private;
+
+	buf->used = vertex->used;
+	buf_priv->discard = vertex->discard;
+
+	if (!mga_verify_state(dev_priv)) {
+		if (vertex->discard) {
+			if (buf_priv->dispatched == 1)
+				AGE_BUFFER(buf_priv);
+			buf_priv->dispatched = 0;
+			mga_freelist_put(dev, buf);
+		}
+		return -EINVAL;
+	}
+
+	WRAP_TEST_WITH_RETURN(dev_priv);
+
+	mga_dma_dispatch_vertex(dev, buf);
+
+	return 0;
+}
+
+static int mga_dma_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_mga_buf_priv_t *buf_priv;
+	drm_mga_indices_t *indices = data;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (indices->idx < 0 || indices->idx >= dma->buf_count)
+		return -EINVAL;
+
+	buf = dma->buflist[indices->idx];
+	buf_priv = buf->dev_private;
+
+	buf_priv->discard = indices->discard;
+
+	if (!mga_verify_state(dev_priv)) {
+		if (indices->discard) {
+			if (buf_priv->dispatched == 1)
+				AGE_BUFFER(buf_priv);
+			buf_priv->dispatched = 0;
+			mga_freelist_put(dev, buf);
+		}
+		return -EINVAL;
+	}
+
+	WRAP_TEST_WITH_RETURN(dev_priv);
+
+	mga_dma_dispatch_indices(dev, buf, indices->start, indices->end);
+
+	return 0;
+}
+
+static int mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	struct drm_buf *buf;
+	drm_mga_buf_priv_t *buf_priv;
+	drm_mga_iload_t *iload = data;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+#if 0
+	if (mga_do_wait_for_idle(dev_priv) < 0) {
+		if (MGA_DMA_DEBUG)
+			DRM_INFO("-EBUSY\n");
+		return -EBUSY;
+	}
+#endif
+	if (iload->idx < 0 || iload->idx >= dma->buf_count)
+		return -EINVAL;
+
+	buf = dma->buflist[iload->idx];
+	buf_priv = buf->dev_private;
+
+	if (mga_verify_iload(dev_priv, iload->dstorg, iload->length)) {
+		mga_freelist_put(dev, buf);
+		return -EINVAL;
+	}
+
+	WRAP_TEST_WITH_RETURN(dev_priv);
+
+	mga_dma_dispatch_iload(dev, buf, iload->dstorg, iload->length);
+
+	/* Make sure we restore the 3D state next time.
+	 */
+	dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
+
+	return 0;
+}
+
+static int mga_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_mga_blit_t *blit = data;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
+		sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
+
+	if (mga_verify_blit(dev_priv, blit->srcorg, blit->dstorg))
+		return -EINVAL;
+
+	WRAP_TEST_WITH_RETURN(dev_priv);
+
+	mga_dma_dispatch_blit(dev, blit);
+
+	/* Make sure we restore the 3D state next time.
+	 */
+	dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;
+
+	return 0;
+}
+
+static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	drm_mga_getparam_t *param = data;
+	int value;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
+
+	switch (param->param) {
+	case MGA_PARAM_IRQ_NR:
+		value = dev->irq;
+		break;
+	case MGA_PARAM_CARD_TYPE:
+		value = dev_priv->chipset;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
+		DRM_ERROR("copy_to_user\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int mga_set_fence(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	u32 *fence = data;
+	DMA_LOCALS;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
+
+	/* I would normally do this assignment in the declaration of fence,
+	 * but dev_priv may be NULL.
+	 */
+
+	*fence = dev_priv->next_fence_to_post;
+	dev_priv->next_fence_to_post++;
+
+	BEGIN_DMA(1);
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000, MGA_SOFTRAP, 0x00000000);
+	ADVANCE_DMA();
+
+	return 0;
+}
+
+static int mga_wait_fence(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	u32 *fence = data;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
+
+	mga_driver_fence_wait(dev, fence);
+	return 0;
+}
+
+struct drm_ioctl_desc mga_ioctls[] = {
+	DRM_IOCTL_DEF(DRM_MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_MGA_FLUSH, mga_dma_flush, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_MGA_RESET, mga_dma_reset, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_MGA_SWAP, mga_dma_swap, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_MGA_CLEAR, mga_dma_clear, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_MGA_INDICES, mga_dma_indices, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_MGA_ILOAD, mga_dma_iload, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_MGA_BLIT, mga_dma_blit, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_MGA_GETPARAM, mga_getparam, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+};
+
+int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
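+
+/* A minimal user-space sketch of driving the table above: querying the IRQ
+ * line through DRM_MGA_GETPARAM.  It assumes the uapi mga_drm.h definitions
+ * and omits DRM open/authentication details and error handling:
+ *
+ *	drm_mga_getparam_t gp;
+ *	int irq, fd = open("/dev/dri/card0", O_RDWR);
+ *
+ *	gp.param = MGA_PARAM_IRQ_NR;
+ *	gp.value = &irq;
+ *	if (ioctl(fd, DRM_IOCTL_MGA_GETPARAM, &gp) == 0)
+ *		printf("MGA IRQ line: %d\n", irq);
+ */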
diff --git a/drivers/gpu/drm/mga/mga_ucode.h b/drivers/gpu/drm/mga/mga_ucode.h
new file mode 100644
index 000000000000..b611e27470e1
--- /dev/null
+++ b/drivers/gpu/drm/mga/mga_ucode.h
@@ -0,0 +1,11645 @@
+/* mga_ucode.h -- Matrox G200/G400 WARP engine microcode -*- linux-c -*-
+ * Created: Thu Jan 11 21:20:43 2001 by gareth@valinux.com
+ *
+ * Copyright 1999 Matrox Graphics Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * MATROX GRAPHICS INC., OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+ * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Kernel-based WARP engine management:
+ *    Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * WARP pipes are named according to the functions they perform, where:
+ *
+ *   - T stands for computation of texture stage 0
+ *   - T2 stands for computation of both texture stage 0 and texture stage 1
+ *   - G stands for computation of triangle intensity (Gouraud interpolation)
+ *   - Z stands for computation of Z buffer interpolation
+ *   - S stands for computation of specular highlight
+ *   - A stands for computation of the alpha channel
+ *   - F stands for computation of vertex fog interpolation
+ */
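+
+/* The array names below encode those letters directly: warp_g200_tgz, for
+ * instance, is the G200 pipe covering texture stage 0, Gouraud intensity
+ * and Z, while warp_g200_tgza adds the alpha channel on top of that.
+ */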
+
+static unsigned char warp_g200_tgz[] = {
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x98, 0xA0, 0xE9,
+	0x40, 0x40, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x1F, 0xD7, 0x18, 0xBD,
+	0x3F, 0xD7, 0x22, 0xBD,
+
+	0x81, 0x04,
+	0x89, 0x04,
+	0x01, 0x04,
+	0x09, 0x04,
+
+	0xC9, 0x41, 0xC0, 0xEC,
+	0x11, 0x04,
+	0x00, 0xE0,
+
+	0x41, 0xCC, 0x41, 0xCD,
+	0x49, 0xCC, 0x49, 0xCD,
+
+	0xD1, 0x41, 0xC0, 0xEC,
+	0x51, 0xCC, 0x51, 0xCD,
+
+	0x80, 0x04,
+	0x10, 0x04,
+	0x08, 0x04,
+	0x00, 0xE0,
+
+	0x00, 0xCC, 0xC0, 0xCD,
+	0xD1, 0x49, 0xC0, 0xEC,
+
+	0x8A, 0x1F, 0x20, 0xE9,
+	0x8B, 0x3F, 0x20, 0xE9,
+
+	0x41, 0x3C, 0x41, 0xAD,
+	0x49, 0x3C, 0x49, 0xAD,
+
+	0x10, 0xCC, 0x10, 0xCD,
+	0x08, 0xCC, 0x08, 0xCD,
+
+	0xB9, 0x41, 0x49, 0xBB,
+	0x1F, 0xF0, 0x41, 0xCD,
+
+	0x51, 0x3C, 0x51, 0xAD,
+	0x00, 0x98, 0x80, 0xE9,
+
+	0x72, 0x80, 0x07, 0xEA,
+	0x24, 0x1F, 0x20, 0xE9,
+
+	0x15, 0x41, 0x49, 0xBD,
+	0x1D, 0x41, 0x51, 0xBD,
+
+	0x2E, 0x41, 0x2A, 0xB8,
+	0x34, 0x53, 0xA0, 0xE8,
+
+	0x15, 0x30,
+	0x1D, 0x30,
+	0x58, 0xE3,
+	0x00, 0xE0,
+
+	0xB5, 0x40, 0x48, 0xBD,
+	0x3D, 0x40, 0x50, 0xBD,
+
+	0x24, 0x43, 0xA0, 0xE8,
+	0x2C, 0x4B, 0xA0, 0xE8,
+
+	0x15, 0x72,
+	0x09, 0xE3,
+	0x00, 0xE0,
+	0x1D, 0x72,
+
+	0x35, 0x30,
+	0xB5, 0x30,
+	0xBD, 0x30,
+	0x3D, 0x30,
+
+	0x9C, 0x97, 0x57, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x6C, 0x64, 0xC8, 0xEC,
+	0x98, 0xE1,
+	0xB5, 0x05,
+
+	0xBD, 0x05,
+	0x2E, 0x30,
+	0x32, 0xC0, 0xA0, 0xE8,
+
+	0x33, 0xC0, 0xA0, 0xE8,
+	0x74, 0x64, 0xC8, 0xEC,
+
+	0x40, 0x3C, 0x40, 0xAD,
+	0x32, 0x6A,
+	0x2A, 0x30,
+
+	0x20, 0x73,
+	0x33, 0x6A,
+	0x00, 0xE0,
+	0x28, 0x73,
+
+	0x1C, 0x72,
+	0x83, 0xE2,
+	0x60, 0x80, 0x15, 0xEA,
+
+	0xB8, 0x3D, 0x28, 0xDF,
+	0x30, 0x35, 0x20, 0xDF,
+
+	0x40, 0x30,
+	0x00, 0xE0,
+	0xCC, 0xE2,
+	0x64, 0x72,
+
+	0x25, 0x42, 0x52, 0xBF,
+	0x2D, 0x42, 0x4A, 0xBF,
+
+	0x30, 0x2E, 0x30, 0xDF,
+	0x38, 0x2E, 0x38, 0xDF,
+
+	0x18, 0x1D, 0x45, 0xE9,
+	0x1E, 0x15, 0x45, 0xE9,
+
+	0x2B, 0x49, 0x51, 0xBD,
+	0x00, 0xE0,
+	0x1F, 0x73,
+
+	0x38, 0x38, 0x40, 0xAF,
+	0x30, 0x30, 0x40, 0xAF,
+
+	0x24, 0x1F, 0x24, 0xDF,
+	0x1D, 0x32, 0x20, 0xE9,
+
+	0x2C, 0x1F, 0x2C, 0xDF,
+	0x1A, 0x33, 0x20, 0xE9,
+
+	0xB0, 0x10,
+	0x08, 0xE3,
+	0x40, 0x10,
+	0xB8, 0x10,
+
+	0x26, 0xF0, 0x30, 0xCD,
+	0x2F, 0xF0, 0x38, 0xCD,
+
+	0x2B, 0x80, 0x20, 0xE9,
+	0x2A, 0x80, 0x20, 0xE9,
+
+	0xA6, 0x20,
+	0x88, 0xE2,
+	0x00, 0xE0,
+	0xAF, 0x20,
+
+	0x28, 0x2A, 0x26, 0xAF,
+	0x20, 0x2A, 0xC0, 0xAF,
+
+	0x34, 0x1F, 0x34, 0xDF,
+	0x46, 0x24, 0x46, 0xDF,
+
+	0x28, 0x30, 0x80, 0xBF,
+	0x20, 0x38, 0x80, 0xBF,
+
+	0x47, 0x24, 0x47, 0xDF,
+	0x4E, 0x2C, 0x4E, 0xDF,
+
+	0x4F, 0x2C, 0x4F, 0xDF,
+	0x56, 0x34, 0x56, 0xDF,
+
+	0x28, 0x15, 0x28, 0xDF,
+	0x20, 0x1D, 0x20, 0xDF,
+
+	0x57, 0x34, 0x57, 0xDF,
+	0x00, 0xE0,
+	0x1D, 0x05,
+
+	0x04, 0x80, 0x10, 0xEA,
+	0x89, 0xE2,
+	0x2B, 0x30,
+
+	0x3F, 0xC1, 0x1D, 0xBD,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xA0, 0x68,
+	0xBF, 0x25,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x20, 0xC0, 0x20, 0xAF,
+	0x28, 0x05,
+	0x97, 0x74,
+
+	0x00, 0xE0,
+	0x2A, 0x10,
+	0x16, 0xC0, 0x20, 0xE9,
+
+	0x04, 0x80, 0x10, 0xEA,
+	0x8C, 0xE2,
+	0x95, 0x05,
+
+	0x28, 0xC1, 0x28, 0xAD,
+	0x1F, 0xC1, 0x15, 0xBD,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xA8, 0x67,
+	0x9F, 0x6B,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x28, 0xC0, 0x28, 0xAD,
+	0x1D, 0x25,
+	0x20, 0x05,
+
+	0x28, 0x32, 0x80, 0xAD,
+	0x40, 0x2A, 0x40, 0xBD,
+
+	0x1C, 0x80, 0x20, 0xE9,
+	0x20, 0x33, 0x20, 0xAD,
+
+	0x20, 0x73,
+	0x00, 0xE0,
+	0xB6, 0x49, 0x51, 0xBB,
+
+	0x26, 0x2F, 0xB0, 0xE8,
+	0x19, 0x20, 0x20, 0xE9,
+
+	0x35, 0x20, 0x35, 0xDF,
+	0x3D, 0x20, 0x3D, 0xDF,
+
+	0x15, 0x20, 0x15, 0xDF,
+	0x1D, 0x20, 0x1D, 0xDF,
+
+	0x26, 0xD0, 0x26, 0xCD,
+	0x29, 0x49, 0x2A, 0xB8,
+
+	0x26, 0x40, 0x80, 0xBD,
+	0x3B, 0x48, 0x50, 0xBD,
+
+	0x3E, 0x54, 0x57, 0x9F,
+	0x00, 0xE0,
+	0x82, 0xE1,
+
+	0x1E, 0xAF, 0x59, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x26, 0x30,
+	0x29, 0x30,
+	0x48, 0x3C, 0x48, 0xAD,
+
+	0x2B, 0x72,
+	0xC2, 0xE1,
+	0x2C, 0xC0, 0x44, 0xC2,
+
+	0x05, 0x24, 0x34, 0xBF,
+	0x0D, 0x24, 0x2C, 0xBF,
+
+	0x2D, 0x46, 0x4E, 0xBF,
+	0x25, 0x46, 0x56, 0xBF,
+
+	0x20, 0x1D, 0x6F, 0x8F,
+	0x32, 0x3E, 0x5F, 0xE9,
+
+	0x3E, 0x50, 0x56, 0x9F,
+	0x00, 0xE0,
+	0x3B, 0x30,
+
+	0x1E, 0x8F, 0x51, 0x9F,
+	0x33, 0x1E, 0x5F, 0xE9,
+
+	0x05, 0x44, 0x54, 0xB2,
+	0x0D, 0x44, 0x4C, 0xB2,
+
+	0x19, 0xC0, 0xB0, 0xE8,
+	0x34, 0xC0, 0x44, 0xC4,
+
+	0x33, 0x73,
+	0x00, 0xE0,
+	0x3E, 0x62, 0x57, 0x9F,
+
+	0x1E, 0xAF, 0x59, 0x9F,
+	0x00, 0xE0,
+	0x0D, 0x20,
+
+	0x84, 0x3E, 0x58, 0xE9,
+	0x28, 0x1D, 0x6F, 0x8F,
+
+	0x05, 0x20,
+	0x00, 0xE0,
+	0x85, 0x1E, 0x58, 0xE9,
+
+	0x9B, 0x3B, 0x33, 0xDF,
+	0x20, 0x20, 0x42, 0xAF,
+
+	0x30, 0x42, 0x56, 0x9F,
+	0x80, 0x3E, 0x57, 0xE9,
+
+	0x3F, 0x8F, 0x51, 0x9F,
+	0x30, 0x80, 0x5F, 0xE9,
+
+	0x28, 0x28, 0x24, 0xAF,
+	0x81, 0x1E, 0x57, 0xE9,
+
+	0x05, 0x47, 0x57, 0xBF,
+	0x0D, 0x47, 0x4F, 0xBF,
+
+	0x88, 0x80, 0x58, 0xE9,
+	0x1B, 0x29, 0x1B, 0xDF,
+
+	0x30, 0x1D, 0x6F, 0x8F,
+	0x3A, 0x30, 0x4F, 0xE9,
+
+	0x1C, 0x30, 0x26, 0xDF,
+	0x09, 0xE3,
+	0x3B, 0x05,
+
+	0x3E, 0x50, 0x56, 0x9F,
+	0x3B, 0x3F, 0x4F, 0xE9,
+
+	0x1E, 0x8F, 0x51, 0x9F,
+	0x00, 0xE0,
+	0xAC, 0x20,
+
+	0x2D, 0x44, 0x4C, 0xB4,
+	0x2C, 0x1C, 0xC0, 0xAF,
+
+	0x25, 0x44, 0x54, 0xB4,
+	0x00, 0xE0,
+	0xC8, 0x30,
+
+	0x30, 0x46, 0x30, 0xAF,
+	0x1B, 0x1B, 0x48, 0xAF,
+
+	0x00, 0xE0,
+	0x25, 0x20,
+	0x38, 0x2C, 0x4F, 0xE9,
+
+	0x86, 0x80, 0x57, 0xE9,
+	0x38, 0x1D, 0x6F, 0x8F,
+
+	0x28, 0x74,
+	0x00, 0xE0,
+	0x0D, 0x44, 0x4C, 0xB0,
+
+	0x05, 0x44, 0x54, 0xB0,
+	0x2D, 0x20,
+	0x9B, 0x10,
+
+	0x82, 0x3E, 0x57, 0xE9,
+	0x32, 0xF0, 0x1B, 0xCD,
+
+	0x1E, 0xBD, 0x59, 0x9F,
+	0x83, 0x1E, 0x57, 0xE9,
+
+	0x38, 0x47, 0x38, 0xAF,
+	0x34, 0x20,
+	0x2A, 0x30,
+
+	0x00, 0xE0,
+	0x0D, 0x20,
+	0x32, 0x20,
+	0x05, 0x20,
+
+	0x87, 0x80, 0x57, 0xE9,
+	0x1F, 0x54, 0x57, 0x9F,
+
+	0x17, 0x42, 0x56, 0x9F,
+	0x00, 0xE0,
+	0x3B, 0x6A,
+
+	0x3F, 0x8F, 0x51, 0x9F,
+	0x37, 0x1E, 0x4F, 0xE9,
+
+	0x37, 0x32, 0x2A, 0xAF,
+	0x00, 0xE0,
+	0x32, 0x00,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x27, 0xC0, 0x44, 0xC0,
+
+	0x36, 0x1F, 0x4F, 0xE9,
+	0x1F, 0x1F, 0x26, 0xDF,
+
+	0x37, 0x1B, 0x37, 0xBF,
+	0x17, 0x26, 0x17, 0xDF,
+
+	0x3E, 0x17, 0x4F, 0xE9,
+	0x3F, 0x3F, 0x4F, 0xE9,
+
+	0x34, 0x1F, 0x34, 0xAF,
+	0x2B, 0x05,
+	0xA7, 0x20,
+
+	0x33, 0x2B, 0x37, 0xDF,
+	0x27, 0x17, 0xC0, 0xAF,
+
+	0x34, 0x80, 0x4F, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x03, 0x80, 0x0A, 0xEA,
+	0x17, 0xC1, 0x2B, 0xBD,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xB3, 0x68,
+	0x97, 0x25,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x33, 0xC0, 0x33, 0xAF,
+	0x3C, 0x27, 0x4F, 0xE9,
+
+	0x57, 0x39, 0x20, 0xE9,
+	0x28, 0x19, 0x60, 0xEC,
+
+	0x2B, 0x32, 0x20, 0xE9,
+	0x1D, 0x3B, 0x20, 0xE9,
+
+	0xB3, 0x05,
+	0x00, 0xE0,
+	0x16, 0x28, 0x20, 0xE9,
+
+	0x23, 0x3B, 0x33, 0xAD,
+	0x1E, 0x2B, 0x20, 0xE9,
+
+	0x1C, 0x80, 0x20, 0xE9,
+	0x57, 0x36, 0x20, 0xE9,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x40, 0x40, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x90, 0xE2,
+	0x00, 0xE0,
+
+	0x85, 0xFF, 0x20, 0xEA,
+	0x19, 0xC8, 0xC1, 0xCD,
+
+	0x1F, 0xD7, 0x18, 0xBD,
+	0x3F, 0xD7, 0x22, 0xBD,
+
+	0x9F, 0x41, 0x49, 0xBD,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x25, 0x41, 0x49, 0xBD,
+	0x2D, 0x41, 0x51, 0xBD,
+
+	0x0D, 0x80, 0x07, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x35, 0x40, 0x48, 0xBD,
+	0x3D, 0x40, 0x50, 0xBD,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x25, 0x30,
+	0x2D, 0x30,
+
+	0x35, 0x30,
+	0xB5, 0x30,
+	0xBD, 0x30,
+	0x3D, 0x30,
+
+	0x9C, 0xA7, 0x5B, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x84, 0xFF, 0x0A, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xC9, 0x41, 0xC8, 0xEC,
+	0x42, 0xE1,
+	0x00, 0xE0,
+
+	0x82, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xC8, 0x40, 0xC0, 0xEC,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x7F, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+};
+
+static unsigned char warp_g200_tgza[] = {
+
+	0x00, 0x98, 0xA0, 0xE9,
+	0x40, 0x40, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x1F, 0xD7, 0x18, 0xBD,
+	0x3F, 0xD7, 0x22, 0xBD,
+
+	0x81, 0x04,
+	0x89, 0x04,
+	0x01, 0x04,
+	0x09, 0x04,
+
+	0xC9, 0x41, 0xC0, 0xEC,
+	0x11, 0x04,
+	0x00, 0xE0,
+
+	0x41, 0xCC, 0x41, 0xCD,
+	0x49, 0xCC, 0x49, 0xCD,
+
+	0xD1, 0x41, 0xC0, 0xEC,
+	0x51, 0xCC, 0x51, 0xCD,
+
+	0x80, 0x04,
+	0x10, 0x04,
+	0x08, 0x04,
+	0x00, 0xE0,
+
+	0x00, 0xCC, 0xC0, 0xCD,
+	0xD1, 0x49, 0xC0, 0xEC,
+
+	0x8A, 0x1F, 0x20, 0xE9,
+	0x8B, 0x3F, 0x20, 0xE9,
+
+	0x41, 0x3C, 0x41, 0xAD,
+	0x49, 0x3C, 0x49, 0xAD,
+
+	0x10, 0xCC, 0x10, 0xCD,
+	0x08, 0xCC, 0x08, 0xCD,
+
+	0xB9, 0x41, 0x49, 0xBB,
+	0x1F, 0xF0, 0x41, 0xCD,
+
+	0x51, 0x3C, 0x51, 0xAD,
+	0x00, 0x98, 0x80, 0xE9,
+
+	0x7D, 0x80, 0x07, 0xEA,
+	0x24, 0x1F, 0x20, 0xE9,
+
+	0x15, 0x41, 0x49, 0xBD,
+	0x1D, 0x41, 0x51, 0xBD,
+
+	0x2E, 0x41, 0x2A, 0xB8,
+	0x34, 0x53, 0xA0, 0xE8,
+
+	0x15, 0x30,
+	0x1D, 0x30,
+	0x58, 0xE3,
+	0x00, 0xE0,
+
+	0xB5, 0x40, 0x48, 0xBD,
+	0x3D, 0x40, 0x50, 0xBD,
+
+	0x24, 0x43, 0xA0, 0xE8,
+	0x2C, 0x4B, 0xA0, 0xE8,
+
+	0x15, 0x72,
+	0x09, 0xE3,
+	0x00, 0xE0,
+	0x1D, 0x72,
+
+	0x35, 0x30,
+	0xB5, 0x30,
+	0xBD, 0x30,
+	0x3D, 0x30,
+
+	0x9C, 0x97, 0x57, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x6C, 0x64, 0xC8, 0xEC,
+	0x98, 0xE1,
+	0xB5, 0x05,
+
+	0xBD, 0x05,
+	0x2E, 0x30,
+	0x32, 0xC0, 0xA0, 0xE8,
+
+	0x33, 0xC0, 0xA0, 0xE8,
+	0x74, 0x64, 0xC8, 0xEC,
+
+	0x40, 0x3C, 0x40, 0xAD,
+	0x32, 0x6A,
+	0x2A, 0x30,
+
+	0x20, 0x73,
+	0x33, 0x6A,
+	0x00, 0xE0,
+	0x28, 0x73,
+
+	0x1C, 0x72,
+	0x83, 0xE2,
+	0x6B, 0x80, 0x15, 0xEA,
+
+	0xB8, 0x3D, 0x28, 0xDF,
+	0x30, 0x35, 0x20, 0xDF,
+
+	0x40, 0x30,
+	0x00, 0xE0,
+	0xCC, 0xE2,
+	0x64, 0x72,
+
+	0x25, 0x42, 0x52, 0xBF,
+	0x2D, 0x42, 0x4A, 0xBF,
+
+	0x30, 0x2E, 0x30, 0xDF,
+	0x38, 0x2E, 0x38, 0xDF,
+
+	0x18, 0x1D, 0x45, 0xE9,
+	0x1E, 0x15, 0x45, 0xE9,
+
+	0x2B, 0x49, 0x51, 0xBD,
+	0x00, 0xE0,
+	0x1F, 0x73,
+
+	0x38, 0x38, 0x40, 0xAF,
+	0x30, 0x30, 0x40, 0xAF,
+
+	0x24, 0x1F, 0x24, 0xDF,
+	0x1D, 0x32, 0x20, 0xE9,
+
+	0x2C, 0x1F, 0x2C, 0xDF,
+	0x1A, 0x33, 0x20, 0xE9,
+
+	0xB0, 0x10,
+	0x08, 0xE3,
+	0x40, 0x10,
+	0xB8, 0x10,
+
+	0x26, 0xF0, 0x30, 0xCD,
+	0x2F, 0xF0, 0x38, 0xCD,
+
+	0x2B, 0x80, 0x20, 0xE9,
+	0x2A, 0x80, 0x20, 0xE9,
+
+	0xA6, 0x20,
+	0x88, 0xE2,
+	0x00, 0xE0,
+	0xAF, 0x20,
+
+	0x28, 0x2A, 0x26, 0xAF,
+	0x20, 0x2A, 0xC0, 0xAF,
+
+	0x34, 0x1F, 0x34, 0xDF,
+	0x46, 0x24, 0x46, 0xDF,
+
+	0x28, 0x30, 0x80, 0xBF,
+	0x20, 0x38, 0x80, 0xBF,
+
+	0x47, 0x24, 0x47, 0xDF,
+	0x4E, 0x2C, 0x4E, 0xDF,
+
+	0x4F, 0x2C, 0x4F, 0xDF,
+	0x56, 0x34, 0x56, 0xDF,
+
+	0x28, 0x15, 0x28, 0xDF,
+	0x20, 0x1D, 0x20, 0xDF,
+
+	0x57, 0x34, 0x57, 0xDF,
+	0x00, 0xE0,
+	0x1D, 0x05,
+
+	0x04, 0x80, 0x10, 0xEA,
+	0x89, 0xE2,
+	0x2B, 0x30,
+
+	0x3F, 0xC1, 0x1D, 0xBD,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xA0, 0x68,
+	0xBF, 0x25,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x20, 0xC0, 0x20, 0xAF,
+	0x28, 0x05,
+	0x97, 0x74,
+
+	0x00, 0xE0,
+	0x2A, 0x10,
+	0x16, 0xC0, 0x20, 0xE9,
+
+	0x04, 0x80, 0x10, 0xEA,
+	0x8C, 0xE2,
+	0x95, 0x05,
+
+	0x28, 0xC1, 0x28, 0xAD,
+	0x1F, 0xC1, 0x15, 0xBD,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xA8, 0x67,
+	0x9F, 0x6B,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x28, 0xC0, 0x28, 0xAD,
+	0x1D, 0x25,
+	0x20, 0x05,
+
+	0x28, 0x32, 0x80, 0xAD,
+	0x40, 0x2A, 0x40, 0xBD,
+
+	0x1C, 0x80, 0x20, 0xE9,
+	0x20, 0x33, 0x20, 0xAD,
+
+	0x20, 0x73,
+	0x00, 0xE0,
+	0xB6, 0x49, 0x51, 0xBB,
+
+	0x26, 0x2F, 0xB0, 0xE8,
+	0x19, 0x20, 0x20, 0xE9,
+
+	0x35, 0x20, 0x35, 0xDF,
+	0x3D, 0x20, 0x3D, 0xDF,
+
+	0x15, 0x20, 0x15, 0xDF,
+	0x1D, 0x20, 0x1D, 0xDF,
+
+	0x26, 0xD0, 0x26, 0xCD,
+	0x29, 0x49, 0x2A, 0xB8,
+
+	0x26, 0x40, 0x80, 0xBD,
+	0x3B, 0x48, 0x50, 0xBD,
+
+	0x3E, 0x54, 0x57, 0x9F,
+	0x00, 0xE0,
+	0x82, 0xE1,
+
+	0x1E, 0xAF, 0x59, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x26, 0x30,
+	0x29, 0x30,
+	0x48, 0x3C, 0x48, 0xAD,
+
+	0x2B, 0x72,
+	0xC2, 0xE1,
+	0x2C, 0xC0, 0x44, 0xC2,
+
+	0x05, 0x24, 0x34, 0xBF,
+	0x0D, 0x24, 0x2C, 0xBF,
+
+	0x2D, 0x46, 0x4E, 0xBF,
+	0x25, 0x46, 0x56, 0xBF,
+
+	0x20, 0x1D, 0x6F, 0x8F,
+	0x32, 0x3E, 0x5F, 0xE9,
+
+	0x3E, 0x50, 0x56, 0x9F,
+	0x00, 0xE0,
+	0x3B, 0x30,
+
+	0x1E, 0x8F, 0x51, 0x9F,
+	0x33, 0x1E, 0x5F, 0xE9,
+
+	0x05, 0x44, 0x54, 0xB2,
+	0x0D, 0x44, 0x4C, 0xB2,
+
+	0x19, 0xC0, 0xB0, 0xE8,
+	0x34, 0xC0, 0x44, 0xC4,
+
+	0x33, 0x73,
+	0x00, 0xE0,
+	0x3E, 0x62, 0x57, 0x9F,
+
+	0x1E, 0xAF, 0x59, 0x9F,
+	0x00, 0xE0,
+	0x0D, 0x20,
+
+	0x84, 0x3E, 0x58, 0xE9,
+	0x28, 0x1D, 0x6F, 0x8F,
+
+	0x05, 0x20,
+	0x00, 0xE0,
+	0x85, 0x1E, 0x58, 0xE9,
+
+	0x9B, 0x3B, 0x33, 0xDF,
+	0x20, 0x20, 0x42, 0xAF,
+
+	0x30, 0x42, 0x56, 0x9F,
+	0x80, 0x3E, 0x57, 0xE9,
+
+	0x3F, 0x8F, 0x51, 0x9F,
+	0x30, 0x80, 0x5F, 0xE9,
+
+	0x28, 0x28, 0x24, 0xAF,
+	0x81, 0x1E, 0x57, 0xE9,
+
+	0x05, 0x47, 0x57, 0xBF,
+	0x0D, 0x47, 0x4F, 0xBF,
+
+	0x88, 0x80, 0x58, 0xE9,
+	0x1B, 0x29, 0x1B, 0xDF,
+
+	0x30, 0x1D, 0x6F, 0x8F,
+	0x3A, 0x30, 0x4F, 0xE9,
+
+	0x1C, 0x30, 0x26, 0xDF,
+	0x09, 0xE3,
+	0x3B, 0x05,
+
+	0x3E, 0x50, 0x56, 0x9F,
+	0x3B, 0x3F, 0x4F, 0xE9,
+
+	0x1E, 0x8F, 0x51, 0x9F,
+	0x00, 0xE0,
+	0xAC, 0x20,
+
+	0x2D, 0x44, 0x4C, 0xB4,
+	0x2C, 0x1C, 0xC0, 0xAF,
+
+	0x25, 0x44, 0x54, 0xB4,
+	0x00, 0xE0,
+	0xC8, 0x30,
+
+	0x30, 0x46, 0x30, 0xAF,
+	0x1B, 0x1B, 0x48, 0xAF,
+
+	0x00, 0xE0,
+	0x25, 0x20,
+	0x38, 0x2C, 0x4F, 0xE9,
+
+	0x86, 0x80, 0x57, 0xE9,
+	0x38, 0x1D, 0x6F, 0x8F,
+
+	0x28, 0x74,
+	0x00, 0xE0,
+	0x0D, 0x44, 0x4C, 0xB0,
+
+	0x05, 0x44, 0x54, 0xB0,
+	0x2D, 0x20,
+	0x9B, 0x10,
+
+	0x82, 0x3E, 0x57, 0xE9,
+	0x32, 0xF0, 0x1B, 0xCD,
+
+	0x1E, 0xBD, 0x59, 0x9F,
+	0x83, 0x1E, 0x57, 0xE9,
+
+	0x38, 0x47, 0x38, 0xAF,
+	0x34, 0x20,
+	0x2A, 0x30,
+
+	0x00, 0xE0,
+	0x0D, 0x20,
+	0x32, 0x20,
+	0x05, 0x20,
+
+	0x87, 0x80, 0x57, 0xE9,
+	0x1F, 0x54, 0x57, 0x9F,
+
+	0x17, 0x42, 0x56, 0x9F,
+	0x00, 0xE0,
+	0x3B, 0x6A,
+
+	0x3F, 0x8F, 0x51, 0x9F,
+	0x37, 0x1E, 0x4F, 0xE9,
+
+	0x37, 0x32, 0x2A, 0xAF,
+	0x00, 0xE0,
+	0x32, 0x00,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x27, 0xC0, 0x44, 0xC0,
+
+	0x36, 0x1F, 0x4F, 0xE9,
+	0x1F, 0x1F, 0x26, 0xDF,
+
+	0x37, 0x1B, 0x37, 0xBF,
+	0x17, 0x26, 0x17, 0xDF,
+
+	0x3E, 0x17, 0x4F, 0xE9,
+	0x3F, 0x3F, 0x4F, 0xE9,
+
+	0x34, 0x1F, 0x34, 0xAF,
+	0x2B, 0x05,
+	0xA7, 0x20,
+
+	0x33, 0x2B, 0x37, 0xDF,
+	0x27, 0x17, 0xC0, 0xAF,
+
+	0x34, 0x80, 0x4F, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x2D, 0x44, 0x4C, 0xB6,
+	0x25, 0x44, 0x54, 0xB6,
+
+	0x03, 0x80, 0x2A, 0xEA,
+	0x17, 0xC1, 0x2B, 0xBD,
+
+	0x2D, 0x20,
+	0x25, 0x20,
+	0x07, 0xC0, 0x44, 0xC6,
+
+	0xB3, 0x68,
+	0x97, 0x25,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x33, 0xC0, 0x33, 0xAF,
+	0x3C, 0x27, 0x4F, 0xE9,
+
+	0x1F, 0x62, 0x57, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x3F, 0x3D, 0x5D, 0x9F,
+	0x00, 0xE0,
+	0x07, 0x20,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x28, 0x19, 0x60, 0xEC,
+
+	0xB3, 0x05,
+	0x00, 0xE0,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x23, 0x3B, 0x33, 0xAD,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x1F, 0x26, 0x1F, 0xDF,
+	0x9D, 0x1F, 0x4F, 0xE9,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x9E, 0x3F, 0x4F, 0xE9,
+
+	0x07, 0x07, 0x1F, 0xAF,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x9C, 0x80, 0x4F, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x57, 0x39, 0x20, 0xE9,
+
+	0x16, 0x28, 0x20, 0xE9,
+	0x1D, 0x3B, 0x20, 0xE9,
+
+	0x1E, 0x2B, 0x20, 0xE9,
+	0x2B, 0x32, 0x20, 0xE9,
+
+	0x1C, 0x23, 0x20, 0xE9,
+	0x57, 0x36, 0x20, 0xE9,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x40, 0x40, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x90, 0xE2,
+	0x00, 0xE0,
+
+	0x7A, 0xFF, 0x20, 0xEA,
+	0x19, 0xC8, 0xC1, 0xCD,
+
+	0x1F, 0xD7, 0x18, 0xBD,
+	0x3F, 0xD7, 0x22, 0xBD,
+
+	0x9F, 0x41, 0x49, 0xBD,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x25, 0x41, 0x49, 0xBD,
+	0x2D, 0x41, 0x51, 0xBD,
+
+	0x0D, 0x80, 0x07, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x35, 0x40, 0x48, 0xBD,
+	0x3D, 0x40, 0x50, 0xBD,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x25, 0x30,
+	0x2D, 0x30,
+
+	0x35, 0x30,
+	0xB5, 0x30,
+	0xBD, 0x30,
+	0x3D, 0x30,
+
+	0x9C, 0xA7, 0x5B, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x79, 0xFF, 0x0A, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xC9, 0x41, 0xC8, 0xEC,
+	0x42, 0xE1,
+	0x00, 0xE0,
+
+	0x77, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xC8, 0x40, 0xC0, 0xEC,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x74, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+};
+
+static unsigned char warp_g200_tgzaf[] = {
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x98, 0xA0, 0xE9,
+	0x40, 0x40, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x1F, 0xD7, 0x18, 0xBD,
+	0x3F, 0xD7, 0x22, 0xBD,
+
+	0x81, 0x04,
+	0x89, 0x04,
+	0x01, 0x04,
+	0x09, 0x04,
+
+	0xC9, 0x41, 0xC0, 0xEC,
+	0x11, 0x04,
+	0x00, 0xE0,
+
+	0x41, 0xCC, 0x41, 0xCD,
+	0x49, 0xCC, 0x49, 0xCD,
+
+	0xD1, 0x41, 0xC0, 0xEC,
+	0x51, 0xCC, 0x51, 0xCD,
+
+	0x80, 0x04,
+	0x10, 0x04,
+	0x08, 0x04,
+	0x00, 0xE0,
+
+	0x00, 0xCC, 0xC0, 0xCD,
+	0xD1, 0x49, 0xC0, 0xEC,
+
+	0x8A, 0x1F, 0x20, 0xE9,
+	0x8B, 0x3F, 0x20, 0xE9,
+
+	0x41, 0x3C, 0x41, 0xAD,
+	0x49, 0x3C, 0x49, 0xAD,
+
+	0x10, 0xCC, 0x10, 0xCD,
+	0x08, 0xCC, 0x08, 0xCD,
+
+	0xB9, 0x41, 0x49, 0xBB,
+	0x1F, 0xF0, 0x41, 0xCD,
+
+	0x51, 0x3C, 0x51, 0xAD,
+	0x00, 0x98, 0x80, 0xE9,
+
+	0x83, 0x80, 0x07, 0xEA,
+	0x24, 0x1F, 0x20, 0xE9,
+
+	0x21, 0x45, 0x80, 0xE8,
+	0x1A, 0x4D, 0x80, 0xE8,
+
+	0x31, 0x55, 0x80, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x15, 0x41, 0x49, 0xBD,
+	0x1D, 0x41, 0x51, 0xBD,
+
+	0x2E, 0x41, 0x2A, 0xB8,
+	0x34, 0x53, 0xA0, 0xE8,
+
+	0x15, 0x30,
+	0x1D, 0x30,
+	0x58, 0xE3,
+	0x00, 0xE0,
+
+	0xB5, 0x40, 0x48, 0xBD,
+	0x3D, 0x40, 0x50, 0xBD,
+
+	0x24, 0x43, 0xA0, 0xE8,
+	0x2C, 0x4B, 0xA0, 0xE8,
+
+	0x15, 0x72,
+	0x09, 0xE3,
+	0x00, 0xE0,
+	0x1D, 0x72,
+
+	0x35, 0x30,
+	0xB5, 0x30,
+	0xBD, 0x30,
+	0x3D, 0x30,
+
+	0x9C, 0x97, 0x57, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x6C, 0x64, 0xC8, 0xEC,
+	0x98, 0xE1,
+	0xB5, 0x05,
+
+	0xBD, 0x05,
+	0x2E, 0x30,
+	0x32, 0xC0, 0xA0, 0xE8,
+
+	0x33, 0xC0, 0xA0, 0xE8,
+	0x74, 0x64, 0xC8, 0xEC,
+
+	0x40, 0x3C, 0x40, 0xAD,
+	0x32, 0x6A,
+	0x2A, 0x30,
+
+	0x20, 0x73,
+	0x33, 0x6A,
+	0x00, 0xE0,
+	0x28, 0x73,
+
+	0x1C, 0x72,
+	0x83, 0xE2,
+	0x6F, 0x80, 0x15, 0xEA,
+
+	0xB8, 0x3D, 0x28, 0xDF,
+	0x30, 0x35, 0x20, 0xDF,
+
+	0x40, 0x30,
+	0x00, 0xE0,
+	0xCC, 0xE2,
+	0x64, 0x72,
+
+	0x25, 0x42, 0x52, 0xBF,
+	0x2D, 0x42, 0x4A, 0xBF,
+
+	0x30, 0x2E, 0x30, 0xDF,
+	0x38, 0x2E, 0x38, 0xDF,
+
+	0x18, 0x1D, 0x45, 0xE9,
+	0x1E, 0x15, 0x45, 0xE9,
+
+	0x2B, 0x49, 0x51, 0xBD,
+	0x00, 0xE0,
+	0x1F, 0x73,
+
+	0x38, 0x38, 0x40, 0xAF,
+	0x30, 0x30, 0x40, 0xAF,
+
+	0x24, 0x1F, 0x24, 0xDF,
+	0x1D, 0x32, 0x20, 0xE9,
+
+	0x2C, 0x1F, 0x2C, 0xDF,
+	0x1A, 0x33, 0x20, 0xE9,
+
+	0xB0, 0x10,
+	0x08, 0xE3,
+	0x40, 0x10,
+	0xB8, 0x10,
+
+	0x26, 0xF0, 0x30, 0xCD,
+	0x2F, 0xF0, 0x38, 0xCD,
+
+	0x2B, 0x80, 0x20, 0xE9,
+	0x2A, 0x80, 0x20, 0xE9,
+
+	0xA6, 0x20,
+	0x88, 0xE2,
+	0x00, 0xE0,
+	0xAF, 0x20,
+
+	0x28, 0x2A, 0x26, 0xAF,
+	0x20, 0x2A, 0xC0, 0xAF,
+
+	0x34, 0x1F, 0x34, 0xDF,
+	0x46, 0x24, 0x46, 0xDF,
+
+	0x28, 0x30, 0x80, 0xBF,
+	0x20, 0x38, 0x80, 0xBF,
+
+	0x47, 0x24, 0x47, 0xDF,
+	0x4E, 0x2C, 0x4E, 0xDF,
+
+	0x4F, 0x2C, 0x4F, 0xDF,
+	0x56, 0x34, 0x56, 0xDF,
+
+	0x28, 0x15, 0x28, 0xDF,
+	0x20, 0x1D, 0x20, 0xDF,
+
+	0x57, 0x34, 0x57, 0xDF,
+	0x00, 0xE0,
+	0x1D, 0x05,
+
+	0x04, 0x80, 0x10, 0xEA,
+	0x89, 0xE2,
+	0x2B, 0x30,
+
+	0x3F, 0xC1, 0x1D, 0xBD,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xA0, 0x68,
+	0xBF, 0x25,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x20, 0xC0, 0x20, 0xAF,
+	0x28, 0x05,
+	0x97, 0x74,
+
+	0x00, 0xE0,
+	0x2A, 0x10,
+	0x16, 0xC0, 0x20, 0xE9,
+
+	0x04, 0x80, 0x10, 0xEA,
+	0x8C, 0xE2,
+	0x95, 0x05,
+
+	0x28, 0xC1, 0x28, 0xAD,
+	0x1F, 0xC1, 0x15, 0xBD,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xA8, 0x67,
+	0x9F, 0x6B,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x28, 0xC0, 0x28, 0xAD,
+	0x1D, 0x25,
+	0x20, 0x05,
+
+	0x28, 0x32, 0x80, 0xAD,
+	0x40, 0x2A, 0x40, 0xBD,
+
+	0x1C, 0x80, 0x20, 0xE9,
+	0x20, 0x33, 0x20, 0xAD,
+
+	0x20, 0x73,
+	0x00, 0xE0,
+	0xB6, 0x49, 0x51, 0xBB,
+
+	0x26, 0x2F, 0xB0, 0xE8,
+	0x19, 0x20, 0x20, 0xE9,
+
+	0x35, 0x20, 0x35, 0xDF,
+	0x3D, 0x20, 0x3D, 0xDF,
+
+	0x15, 0x20, 0x15, 0xDF,
+	0x1D, 0x20, 0x1D, 0xDF,
+
+	0x26, 0xD0, 0x26, 0xCD,
+	0x29, 0x49, 0x2A, 0xB8,
+
+	0x26, 0x40, 0x80, 0xBD,
+	0x3B, 0x48, 0x50, 0xBD,
+
+	0x3E, 0x54, 0x57, 0x9F,
+	0x00, 0xE0,
+	0x82, 0xE1,
+
+	0x1E, 0xAF, 0x59, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x26, 0x30,
+	0x29, 0x30,
+	0x48, 0x3C, 0x48, 0xAD,
+
+	0x2B, 0x72,
+	0xC2, 0xE1,
+	0x2C, 0xC0, 0x44, 0xC2,
+
+	0x05, 0x24, 0x34, 0xBF,
+	0x0D, 0x24, 0x2C, 0xBF,
+
+	0x2D, 0x46, 0x4E, 0xBF,
+	0x25, 0x46, 0x56, 0xBF,
+
+	0x20, 0x1D, 0x6F, 0x8F,
+	0x32, 0x3E, 0x5F, 0xE9,
+
+	0x3E, 0x50, 0x56, 0x9F,
+	0x00, 0xE0,
+	0x3B, 0x30,
+
+	0x1E, 0x8F, 0x51, 0x9F,
+	0x33, 0x1E, 0x5F, 0xE9,
+
+	0x05, 0x44, 0x54, 0xB2,
+	0x0D, 0x44, 0x4C, 0xB2,
+
+	0x19, 0xC0, 0xB0, 0xE8,
+	0x34, 0xC0, 0x44, 0xC4,
+
+	0x33, 0x73,
+	0x00, 0xE0,
+	0x3E, 0x62, 0x57, 0x9F,
+
+	0x1E, 0xAF, 0x59, 0x9F,
+	0x00, 0xE0,
+	0x0D, 0x20,
+
+	0x84, 0x3E, 0x58, 0xE9,
+	0x28, 0x1D, 0x6F, 0x8F,
+
+	0x05, 0x20,
+	0x00, 0xE0,
+	0x85, 0x1E, 0x58, 0xE9,
+
+	0x9B, 0x3B, 0x33, 0xDF,
+	0x20, 0x20, 0x42, 0xAF,
+
+	0x30, 0x42, 0x56, 0x9F,
+	0x80, 0x3E, 0x57, 0xE9,
+
+	0x3F, 0x8F, 0x51, 0x9F,
+	0x30, 0x80, 0x5F, 0xE9,
+
+	0x28, 0x28, 0x24, 0xAF,
+	0x81, 0x1E, 0x57, 0xE9,
+
+	0x05, 0x47, 0x57, 0xBF,
+	0x0D, 0x47, 0x4F, 0xBF,
+
+	0x88, 0x80, 0x58, 0xE9,
+	0x1B, 0x29, 0x1B, 0xDF,
+
+	0x30, 0x1D, 0x6F, 0x8F,
+	0x3A, 0x30, 0x4F, 0xE9,
+
+	0x1C, 0x30, 0x26, 0xDF,
+	0x09, 0xE3,
+	0x3B, 0x05,
+
+	0x3E, 0x50, 0x56, 0x9F,
+	0x3B, 0x3F, 0x4F, 0xE9,
+
+	0x1E, 0x8F, 0x51, 0x9F,
+	0x00, 0xE0,
+	0xAC, 0x20,
+
+	0x2D, 0x44, 0x4C, 0xB4,
+	0x2C, 0x1C, 0xC0, 0xAF,
+
+	0x25, 0x44, 0x54, 0xB4,
+	0x00, 0xE0,
+	0xC8, 0x30,
+
+	0x30, 0x46, 0x30, 0xAF,
+	0x1B, 0x1B, 0x48, 0xAF,
+
+	0x00, 0xE0,
+	0x25, 0x20,
+	0x38, 0x2C, 0x4F, 0xE9,
+
+	0x86, 0x80, 0x57, 0xE9,
+	0x38, 0x1D, 0x6F, 0x8F,
+
+	0x28, 0x74,
+	0x00, 0xE0,
+	0x0D, 0x44, 0x4C, 0xB0,
+
+	0x05, 0x44, 0x54, 0xB0,
+	0x2D, 0x20,
+	0x9B, 0x10,
+
+	0x82, 0x3E, 0x57, 0xE9,
+	0x32, 0xF0, 0x1B, 0xCD,
+
+	0x1E, 0xBD, 0x59, 0x9F,
+	0x83, 0x1E, 0x57, 0xE9,
+
+	0x38, 0x47, 0x38, 0xAF,
+	0x34, 0x20,
+	0x2A, 0x30,
+
+	0x00, 0xE0,
+	0x0D, 0x20,
+	0x32, 0x20,
+	0x05, 0x20,
+
+	0x87, 0x80, 0x57, 0xE9,
+	0x1F, 0x54, 0x57, 0x9F,
+
+	0x17, 0x42, 0x56, 0x9F,
+	0x00, 0xE0,
+	0x3B, 0x6A,
+
+	0x3F, 0x8F, 0x51, 0x9F,
+	0x37, 0x1E, 0x4F, 0xE9,
+
+	0x37, 0x32, 0x2A, 0xAF,
+	0x00, 0xE0,
+	0x32, 0x00,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x27, 0xC0, 0x44, 0xC0,
+
+	0x36, 0x1F, 0x4F, 0xE9,
+	0x1F, 0x1F, 0x26, 0xDF,
+
+	0x37, 0x1B, 0x37, 0xBF,
+	0x17, 0x26, 0x17, 0xDF,
+
+	0x3E, 0x17, 0x4F, 0xE9,
+	0x3F, 0x3F, 0x4F, 0xE9,
+
+	0x34, 0x1F, 0x34, 0xAF,
+	0x2B, 0x05,
+	0xA7, 0x20,
+
+	0x33, 0x2B, 0x37, 0xDF,
+	0x27, 0x17, 0xC0, 0xAF,
+
+	0x34, 0x80, 0x4F, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x0D, 0x21, 0x1A, 0xB6,
+	0x05, 0x21, 0x31, 0xB6,
+
+	0x2D, 0x44, 0x4C, 0xB6,
+	0x25, 0x44, 0x54, 0xB6,
+
+	0x03, 0x80, 0x2A, 0xEA,
+	0x17, 0xC1, 0x2B, 0xBD,
+
+	0x0D, 0x20,
+	0x05, 0x20,
+	0x2F, 0xC0, 0x21, 0xC6,
+
+	0xB3, 0x68,
+	0x97, 0x25,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x33, 0xC0, 0x33, 0xAF,
+	0x3C, 0x27, 0x4F, 0xE9,
+
+	0x00, 0xE0,
+	0x25, 0x20,
+	0x07, 0xC0, 0x44, 0xC6,
+
+	0x17, 0x50, 0x56, 0x9F,
+	0x00, 0xE0,
+	0x2D, 0x20,
+
+	0x37, 0x0F, 0x5C, 0x9F,
+	0x00, 0xE0,
+	0x2F, 0x20,
+
+	0x1F, 0x62, 0x57, 0x9F,
+	0x00, 0xE0,
+	0x07, 0x20,
+
+	0x3F, 0x3D, 0x5D, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x28, 0x19, 0x60, 0xEC,
+
+	0xB3, 0x05,
+	0x00, 0xE0,
+	0x17, 0x26, 0x17, 0xDF,
+
+	0x23, 0x3B, 0x33, 0xAD,
+	0x35, 0x17, 0x4F, 0xE9,
+
+	0x1F, 0x26, 0x1F, 0xDF,
+	0x9D, 0x1F, 0x4F, 0xE9,
+
+	0x9E, 0x3F, 0x4F, 0xE9,
+	0x39, 0x37, 0x4F, 0xE9,
+
+	0x2F, 0x2F, 0x17, 0xAF,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x07, 0x07, 0x1F, 0xAF,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x31, 0x80, 0x4F, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x9C, 0x80, 0x4F, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x57, 0x39, 0x20, 0xE9,
+
+	0x16, 0x28, 0x20, 0xE9,
+	0x1D, 0x3B, 0x20, 0xE9,
+
+	0x1E, 0x2B, 0x20, 0xE9,
+	0x2B, 0x32, 0x20, 0xE9,
+
+	0x1C, 0x23, 0x20, 0xE9,
+	0x57, 0x36, 0x20, 0xE9,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x40, 0x40, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x90, 0xE2,
+	0x00, 0xE0,
+
+	0x74, 0xFF, 0x20, 0xEA,
+	0x19, 0xC8, 0xC1, 0xCD,
+
+	0x1F, 0xD7, 0x18, 0xBD,
+	0x3F, 0xD7, 0x22, 0xBD,
+
+	0x9F, 0x41, 0x49, 0xBD,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x25, 0x41, 0x49, 0xBD,
+	0x2D, 0x41, 0x51, 0xBD,
+
+	0x0D, 0x80, 0x07, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x35, 0x40, 0x48, 0xBD,
+	0x3D, 0x40, 0x50, 0xBD,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x25, 0x30,
+	0x2D, 0x30,
+
+	0x35, 0x30,
+	0xB5, 0x30,
+	0xBD, 0x30,
+	0x3D, 0x30,
+
+	0x9C, 0xA7, 0x5B, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x73, 0xFF, 0x0A, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xC9, 0x41, 0xC8, 0xEC,
+	0x42, 0xE1,
+	0x00, 0xE0,
+
+	0x71, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xC8, 0x40, 0xC0, 0xEC,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x6E, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+};
+
+static unsigned char warp_g200_tgzf[] = {
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x98, 0xA0, 0xE9,
+	0x40, 0x40, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x1F, 0xD7, 0x18, 0xBD,
+	0x3F, 0xD7, 0x22, 0xBD,
+
+	0x81, 0x04,
+	0x89, 0x04,
+	0x01, 0x04,
+	0x09, 0x04,
+
+	0xC9, 0x41, 0xC0, 0xEC,
+	0x11, 0x04,
+	0x00, 0xE0,
+
+	0x41, 0xCC, 0x41, 0xCD,
+	0x49, 0xCC, 0x49, 0xCD,
+
+	0xD1, 0x41, 0xC0, 0xEC,
+	0x51, 0xCC, 0x51, 0xCD,
+
+	0x80, 0x04,
+	0x10, 0x04,
+	0x08, 0x04,
+	0x00, 0xE0,
+
+	0x00, 0xCC, 0xC0, 0xCD,
+	0xD1, 0x49, 0xC0, 0xEC,
+
+	0x8A, 0x1F, 0x20, 0xE9,
+	0x8B, 0x3F, 0x20, 0xE9,
+
+	0x41, 0x3C, 0x41, 0xAD,
+	0x49, 0x3C, 0x49, 0xAD,
+
+	0x10, 0xCC, 0x10, 0xCD,
+	0x08, 0xCC, 0x08, 0xCD,
+
+	0xB9, 0x41, 0x49, 0xBB,
+	0x1F, 0xF0, 0x41, 0xCD,
+
+	0x51, 0x3C, 0x51, 0xAD,
+	0x00, 0x98, 0x80, 0xE9,
+
+	0x7F, 0x80, 0x07, 0xEA,
+	0x24, 0x1F, 0x20, 0xE9,
+
+	0x21, 0x45, 0x80, 0xE8,
+	0x1A, 0x4D, 0x80, 0xE8,
+
+	0x31, 0x55, 0x80, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x15, 0x41, 0x49, 0xBD,
+	0x1D, 0x41, 0x51, 0xBD,
+
+	0x2E, 0x41, 0x2A, 0xB8,
+	0x34, 0x53, 0xA0, 0xE8,
+
+	0x15, 0x30,
+	0x1D, 0x30,
+	0x58, 0xE3,
+	0x00, 0xE0,
+
+	0xB5, 0x40, 0x48, 0xBD,
+	0x3D, 0x40, 0x50, 0xBD,
+
+	0x24, 0x43, 0xA0, 0xE8,
+	0x2C, 0x4B, 0xA0, 0xE8,
+
+	0x15, 0x72,
+	0x09, 0xE3,
+	0x00, 0xE0,
+	0x1D, 0x72,
+
+	0x35, 0x30,
+	0xB5, 0x30,
+	0xBD, 0x30,
+	0x3D, 0x30,
+
+	0x9C, 0x97, 0x57, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x6C, 0x64, 0xC8, 0xEC,
+	0x98, 0xE1,
+	0xB5, 0x05,
+
+	0xBD, 0x05,
+	0x2E, 0x30,
+	0x32, 0xC0, 0xA0, 0xE8,
+
+	0x33, 0xC0, 0xA0, 0xE8,
+	0x74, 0x64, 0xC8, 0xEC,
+
+	0x40, 0x3C, 0x40, 0xAD,
+	0x32, 0x6A,
+	0x2A, 0x30,
+
+	0x20, 0x73,
+	0x33, 0x6A,
+	0x00, 0xE0,
+	0x28, 0x73,
+
+	0x1C, 0x72,
+	0x83, 0xE2,
+	0x6B, 0x80, 0x15, 0xEA,
+
+	0xB8, 0x3D, 0x28, 0xDF,
+	0x30, 0x35, 0x20, 0xDF,
+
+	0x40, 0x30,
+	0x00, 0xE0,
+	0xCC, 0xE2,
+	0x64, 0x72,
+
+	0x25, 0x42, 0x52, 0xBF,
+	0x2D, 0x42, 0x4A, 0xBF,
+
+	0x30, 0x2E, 0x30, 0xDF,
+	0x38, 0x2E, 0x38, 0xDF,
+
+	0x18, 0x1D, 0x45, 0xE9,
+	0x1E, 0x15, 0x45, 0xE9,
+
+	0x2B, 0x49, 0x51, 0xBD,
+	0x00, 0xE0,
+	0x1F, 0x73,
+
+	0x38, 0x38, 0x40, 0xAF,
+	0x30, 0x30, 0x40, 0xAF,
+
+	0x24, 0x1F, 0x24, 0xDF,
+	0x1D, 0x32, 0x20, 0xE9,
+
+	0x2C, 0x1F, 0x2C, 0xDF,
+	0x1A, 0x33, 0x20, 0xE9,
+
+	0xB0, 0x10,
+	0x08, 0xE3,
+	0x40, 0x10,
+	0xB8, 0x10,
+
+	0x26, 0xF0, 0x30, 0xCD,
+	0x2F, 0xF0, 0x38, 0xCD,
+
+	0x2B, 0x80, 0x20, 0xE9,
+	0x2A, 0x80, 0x20, 0xE9,
+
+	0xA6, 0x20,
+	0x88, 0xE2,
+	0x00, 0xE0,
+	0xAF, 0x20,
+
+	0x28, 0x2A, 0x26, 0xAF,
+	0x20, 0x2A, 0xC0, 0xAF,
+
+	0x34, 0x1F, 0x34, 0xDF,
+	0x46, 0x24, 0x46, 0xDF,
+
+	0x28, 0x30, 0x80, 0xBF,
+	0x20, 0x38, 0x80, 0xBF,
+
+	0x47, 0x24, 0x47, 0xDF,
+	0x4E, 0x2C, 0x4E, 0xDF,
+
+	0x4F, 0x2C, 0x4F, 0xDF,
+	0x56, 0x34, 0x56, 0xDF,
+
+	0x28, 0x15, 0x28, 0xDF,
+	0x20, 0x1D, 0x20, 0xDF,
+
+	0x57, 0x34, 0x57, 0xDF,
+	0x00, 0xE0,
+	0x1D, 0x05,
+
+	0x04, 0x80, 0x10, 0xEA,
+	0x89, 0xE2,
+	0x2B, 0x30,
+
+	0x3F, 0xC1, 0x1D, 0xBD,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xA0, 0x68,
+	0xBF, 0x25,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x20, 0xC0, 0x20, 0xAF,
+	0x28, 0x05,
+	0x97, 0x74,
+
+	0x00, 0xE0,
+	0x2A, 0x10,
+	0x16, 0xC0, 0x20, 0xE9,
+
+	0x04, 0x80, 0x10, 0xEA,
+	0x8C, 0xE2,
+	0x95, 0x05,
+
+	0x28, 0xC1, 0x28, 0xAD,
+	0x1F, 0xC1, 0x15, 0xBD,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xA8, 0x67,
+	0x9F, 0x6B,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x28, 0xC0, 0x28, 0xAD,
+	0x1D, 0x25,
+	0x20, 0x05,
+
+	0x28, 0x32, 0x80, 0xAD,
+	0x40, 0x2A, 0x40, 0xBD,
+
+	0x1C, 0x80, 0x20, 0xE9,
+	0x20, 0x33, 0x20, 0xAD,
+
+	0x20, 0x73,
+	0x00, 0xE0,
+	0xB6, 0x49, 0x51, 0xBB,
+
+	0x26, 0x2F, 0xB0, 0xE8,
+	0x19, 0x20, 0x20, 0xE9,
+
+	0x35, 0x20, 0x35, 0xDF,
+	0x3D, 0x20, 0x3D, 0xDF,
+
+	0x15, 0x20, 0x15, 0xDF,
+	0x1D, 0x20, 0x1D, 0xDF,
+
+	0x26, 0xD0, 0x26, 0xCD,
+	0x29, 0x49, 0x2A, 0xB8,
+
+	0x26, 0x40, 0x80, 0xBD,
+	0x3B, 0x48, 0x50, 0xBD,
+
+	0x3E, 0x54, 0x57, 0x9F,
+	0x00, 0xE0,
+	0x82, 0xE1,
+
+	0x1E, 0xAF, 0x59, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x26, 0x30,
+	0x29, 0x30,
+	0x48, 0x3C, 0x48, 0xAD,
+
+	0x2B, 0x72,
+	0xC2, 0xE1,
+	0x2C, 0xC0, 0x44, 0xC2,
+
+	0x05, 0x24, 0x34, 0xBF,
+	0x0D, 0x24, 0x2C, 0xBF,
+
+	0x2D, 0x46, 0x4E, 0xBF,
+	0x25, 0x46, 0x56, 0xBF,
+
+	0x20, 0x1D, 0x6F, 0x8F,
+	0x32, 0x3E, 0x5F, 0xE9,
+
+	0x3E, 0x50, 0x56, 0x9F,
+	0x00, 0xE0,
+	0x3B, 0x30,
+
+	0x1E, 0x8F, 0x51, 0x9F,
+	0x33, 0x1E, 0x5F, 0xE9,
+
+	0x05, 0x44, 0x54, 0xB2,
+	0x0D, 0x44, 0x4C, 0xB2,
+
+	0x19, 0xC0, 0xB0, 0xE8,
+	0x34, 0xC0, 0x44, 0xC4,
+
+	0x33, 0x73,
+	0x00, 0xE0,
+	0x3E, 0x62, 0x57, 0x9F,
+
+	0x1E, 0xAF, 0x59, 0x9F,
+	0x00, 0xE0,
+	0x0D, 0x20,
+
+	0x84, 0x3E, 0x58, 0xE9,
+	0x28, 0x1D, 0x6F, 0x8F,
+
+	0x05, 0x20,
+	0x00, 0xE0,
+	0x85, 0x1E, 0x58, 0xE9,
+
+	0x9B, 0x3B, 0x33, 0xDF,
+	0x20, 0x20, 0x42, 0xAF,
+
+	0x30, 0x42, 0x56, 0x9F,
+	0x80, 0x3E, 0x57, 0xE9,
+
+	0x3F, 0x8F, 0x51, 0x9F,
+	0x30, 0x80, 0x5F, 0xE9,
+
+	0x28, 0x28, 0x24, 0xAF,
+	0x81, 0x1E, 0x57, 0xE9,
+
+	0x05, 0x47, 0x57, 0xBF,
+	0x0D, 0x47, 0x4F, 0xBF,
+
+	0x88, 0x80, 0x58, 0xE9,
+	0x1B, 0x29, 0x1B, 0xDF,
+
+	0x30, 0x1D, 0x6F, 0x8F,
+	0x3A, 0x30, 0x4F, 0xE9,
+
+	0x1C, 0x30, 0x26, 0xDF,
+	0x09, 0xE3,
+	0x3B, 0x05,
+
+	0x3E, 0x50, 0x56, 0x9F,
+	0x3B, 0x3F, 0x4F, 0xE9,
+
+	0x1E, 0x8F, 0x51, 0x9F,
+	0x00, 0xE0,
+	0xAC, 0x20,
+
+	0x2D, 0x44, 0x4C, 0xB4,
+	0x2C, 0x1C, 0xC0, 0xAF,
+
+	0x25, 0x44, 0x54, 0xB4,
+	0x00, 0xE0,
+	0xC8, 0x30,
+
+	0x30, 0x46, 0x30, 0xAF,
+	0x1B, 0x1B, 0x48, 0xAF,
+
+	0x00, 0xE0,
+	0x25, 0x20,
+	0x38, 0x2C, 0x4F, 0xE9,
+
+	0x86, 0x80, 0x57, 0xE9,
+	0x38, 0x1D, 0x6F, 0x8F,
+
+	0x28, 0x74,
+	0x00, 0xE0,
+	0x0D, 0x44, 0x4C, 0xB0,
+
+	0x05, 0x44, 0x54, 0xB0,
+	0x2D, 0x20,
+	0x9B, 0x10,
+
+	0x82, 0x3E, 0x57, 0xE9,
+	0x32, 0xF0, 0x1B, 0xCD,
+
+	0x1E, 0xBD, 0x59, 0x9F,
+	0x83, 0x1E, 0x57, 0xE9,
+
+	0x38, 0x47, 0x38, 0xAF,
+	0x34, 0x20,
+	0x2A, 0x30,
+
+	0x00, 0xE0,
+	0x0D, 0x20,
+	0x32, 0x20,
+	0x05, 0x20,
+
+	0x87, 0x80, 0x57, 0xE9,
+	0x1F, 0x54, 0x57, 0x9F,
+
+	0x17, 0x42, 0x56, 0x9F,
+	0x00, 0xE0,
+	0x3B, 0x6A,
+
+	0x3F, 0x8F, 0x51, 0x9F,
+	0x37, 0x1E, 0x4F, 0xE9,
+
+	0x37, 0x32, 0x2A, 0xAF,
+	0x00, 0xE0,
+	0x32, 0x00,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x27, 0xC0, 0x44, 0xC0,
+
+	0x36, 0x1F, 0x4F, 0xE9,
+	0x1F, 0x1F, 0x26, 0xDF,
+
+	0x37, 0x1B, 0x37, 0xBF,
+	0x17, 0x26, 0x17, 0xDF,
+
+	0x3E, 0x17, 0x4F, 0xE9,
+	0x3F, 0x3F, 0x4F, 0xE9,
+
+	0x34, 0x1F, 0x34, 0xAF,
+	0x2B, 0x05,
+	0xA7, 0x20,
+
+	0x33, 0x2B, 0x37, 0xDF,
+	0x27, 0x17, 0xC0, 0xAF,
+
+	0x34, 0x80, 0x4F, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x0D, 0x21, 0x1A, 0xB6,
+	0x05, 0x21, 0x31, 0xB6,
+
+	0x03, 0x80, 0x2A, 0xEA,
+	0x17, 0xC1, 0x2B, 0xBD,
+
+	0x0D, 0x20,
+	0x05, 0x20,
+	0x2F, 0xC0, 0x21, 0xC6,
+
+	0xB3, 0x68,
+	0x97, 0x25,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x33, 0xC0, 0x33, 0xAF,
+	0x3C, 0x27, 0x4F, 0xE9,
+
+	0x17, 0x50, 0x56, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x37, 0x0F, 0x5C, 0x9F,
+	0x00, 0xE0,
+	0x2F, 0x20,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x28, 0x19, 0x60, 0xEC,
+
+	0xB3, 0x05,
+	0x00, 0xE0,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x23, 0x3B, 0x33, 0xAD,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x17, 0x26, 0x17, 0xDF,
+	0x35, 0x17, 0x4F, 0xE9,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x39, 0x37, 0x4F, 0xE9,
+
+	0x2F, 0x2F, 0x17, 0xAF,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x31, 0x80, 0x4F, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x57, 0x39, 0x20, 0xE9,
+
+	0x16, 0x28, 0x20, 0xE9,
+	0x1D, 0x3B, 0x20, 0xE9,
+
+	0x1E, 0x2B, 0x20, 0xE9,
+	0x2B, 0x32, 0x20, 0xE9,
+
+	0x1C, 0x23, 0x20, 0xE9,
+	0x57, 0x36, 0x20, 0xE9,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x40, 0x40, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x90, 0xE2,
+	0x00, 0xE0,
+
+	0x78, 0xFF, 0x20, 0xEA,
+	0x19, 0xC8, 0xC1, 0xCD,
+
+	0x1F, 0xD7, 0x18, 0xBD,
+	0x3F, 0xD7, 0x22, 0xBD,
+
+	0x9F, 0x41, 0x49, 0xBD,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x25, 0x41, 0x49, 0xBD,
+	0x2D, 0x41, 0x51, 0xBD,
+
+	0x0D, 0x80, 0x07, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x35, 0x40, 0x48, 0xBD,
+	0x3D, 0x40, 0x50, 0xBD,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x25, 0x30,
+	0x2D, 0x30,
+
+	0x35, 0x30,
+	0xB5, 0x30,
+	0xBD, 0x30,
+	0x3D, 0x30,
+
+	0x9C, 0xA7, 0x5B, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x77, 0xFF, 0x0A, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xC9, 0x41, 0xC8, 0xEC,
+	0x42, 0xE1,
+	0x00, 0xE0,
+
+	0x75, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xC8, 0x40, 0xC0, 0xEC,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x72, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+};
+
+static unsigned char warp_g200_tgzs[] = {
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x98, 0xA0, 0xE9,
+	0x40, 0x40, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x1F, 0xD7, 0x18, 0xBD,
+	0x3F, 0xD7, 0x22, 0xBD,
+
+	0x81, 0x04,
+	0x89, 0x04,
+	0x01, 0x04,
+	0x09, 0x04,
+
+	0xC9, 0x41, 0xC0, 0xEC,
+	0x11, 0x04,
+	0x00, 0xE0,
+
+	0x41, 0xCC, 0x41, 0xCD,
+	0x49, 0xCC, 0x49, 0xCD,
+
+	0xD1, 0x41, 0xC0, 0xEC,
+	0x51, 0xCC, 0x51, 0xCD,
+
+	0x80, 0x04,
+	0x10, 0x04,
+	0x08, 0x04,
+	0x00, 0xE0,
+
+	0x00, 0xCC, 0xC0, 0xCD,
+	0xD1, 0x49, 0xC0, 0xEC,
+
+	0x8A, 0x1F, 0x20, 0xE9,
+	0x8B, 0x3F, 0x20, 0xE9,
+
+	0x41, 0x3C, 0x41, 0xAD,
+	0x49, 0x3C, 0x49, 0xAD,
+
+	0x10, 0xCC, 0x10, 0xCD,
+	0x08, 0xCC, 0x08, 0xCD,
+
+	0xB9, 0x41, 0x49, 0xBB,
+	0x1F, 0xF0, 0x41, 0xCD,
+
+	0x51, 0x3C, 0x51, 0xAD,
+	0x00, 0x98, 0x80, 0xE9,
+
+	0x8B, 0x80, 0x07, 0xEA,
+	0x24, 0x1F, 0x20, 0xE9,
+
+	0x21, 0x45, 0x80, 0xE8,
+	0x1A, 0x4D, 0x80, 0xE8,
+
+	0x31, 0x55, 0x80, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x15, 0x41, 0x49, 0xBD,
+	0x1D, 0x41, 0x51, 0xBD,
+
+	0x2E, 0x41, 0x2A, 0xB8,
+	0x34, 0x53, 0xA0, 0xE8,
+
+	0x15, 0x30,
+	0x1D, 0x30,
+	0x58, 0xE3,
+	0x00, 0xE0,
+
+	0xB5, 0x40, 0x48, 0xBD,
+	0x3D, 0x40, 0x50, 0xBD,
+
+	0x24, 0x43, 0xA0, 0xE8,
+	0x2C, 0x4B, 0xA0, 0xE8,
+
+	0x15, 0x72,
+	0x09, 0xE3,
+	0x00, 0xE0,
+	0x1D, 0x72,
+
+	0x35, 0x30,
+	0xB5, 0x30,
+	0xBD, 0x30,
+	0x3D, 0x30,
+
+	0x9C, 0x97, 0x57, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x6C, 0x64, 0xC8, 0xEC,
+	0x98, 0xE1,
+	0xB5, 0x05,
+
+	0xBD, 0x05,
+	0x2E, 0x30,
+	0x32, 0xC0, 0xA0, 0xE8,
+
+	0x33, 0xC0, 0xA0, 0xE8,
+	0x74, 0x64, 0xC8, 0xEC,
+
+	0x40, 0x3C, 0x40, 0xAD,
+	0x32, 0x6A,
+	0x2A, 0x30,
+
+	0x20, 0x73,
+	0x33, 0x6A,
+	0x00, 0xE0,
+	0x28, 0x73,
+
+	0x1C, 0x72,
+	0x83, 0xE2,
+	0x77, 0x80, 0x15, 0xEA,
+
+	0xB8, 0x3D, 0x28, 0xDF,
+	0x30, 0x35, 0x20, 0xDF,
+
+	0x40, 0x30,
+	0x00, 0xE0,
+	0xCC, 0xE2,
+	0x64, 0x72,
+
+	0x25, 0x42, 0x52, 0xBF,
+	0x2D, 0x42, 0x4A, 0xBF,
+
+	0x30, 0x2E, 0x30, 0xDF,
+	0x38, 0x2E, 0x38, 0xDF,
+
+	0x18, 0x1D, 0x45, 0xE9,
+	0x1E, 0x15, 0x45, 0xE9,
+
+	0x2B, 0x49, 0x51, 0xBD,
+	0x00, 0xE0,
+	0x1F, 0x73,
+
+	0x38, 0x38, 0x40, 0xAF,
+	0x30, 0x30, 0x40, 0xAF,
+
+	0x24, 0x1F, 0x24, 0xDF,
+	0x1D, 0x32, 0x20, 0xE9,
+
+	0x2C, 0x1F, 0x2C, 0xDF,
+	0x1A, 0x33, 0x20, 0xE9,
+
+	0xB0, 0x10,
+	0x08, 0xE3,
+	0x40, 0x10,
+	0xB8, 0x10,
+
+	0x26, 0xF0, 0x30, 0xCD,
+	0x2F, 0xF0, 0x38, 0xCD,
+
+	0x2B, 0x80, 0x20, 0xE9,
+	0x2A, 0x80, 0x20, 0xE9,
+
+	0xA6, 0x20,
+	0x88, 0xE2,
+	0x00, 0xE0,
+	0xAF, 0x20,
+
+	0x28, 0x2A, 0x26, 0xAF,
+	0x20, 0x2A, 0xC0, 0xAF,
+
+	0x34, 0x1F, 0x34, 0xDF,
+	0x46, 0x24, 0x46, 0xDF,
+
+	0x28, 0x30, 0x80, 0xBF,
+	0x20, 0x38, 0x80, 0xBF,
+
+	0x47, 0x24, 0x47, 0xDF,
+	0x4E, 0x2C, 0x4E, 0xDF,
+
+	0x4F, 0x2C, 0x4F, 0xDF,
+	0x56, 0x34, 0x56, 0xDF,
+
+	0x28, 0x15, 0x28, 0xDF,
+	0x20, 0x1D, 0x20, 0xDF,
+
+	0x57, 0x34, 0x57, 0xDF,
+	0x00, 0xE0,
+	0x1D, 0x05,
+
+	0x04, 0x80, 0x10, 0xEA,
+	0x89, 0xE2,
+	0x2B, 0x30,
+
+	0x3F, 0xC1, 0x1D, 0xBD,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xA0, 0x68,
+	0xBF, 0x25,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x20, 0xC0, 0x20, 0xAF,
+	0x28, 0x05,
+	0x97, 0x74,
+
+	0x00, 0xE0,
+	0x2A, 0x10,
+	0x16, 0xC0, 0x20, 0xE9,
+
+	0x04, 0x80, 0x10, 0xEA,
+	0x8C, 0xE2,
+	0x95, 0x05,
+
+	0x28, 0xC1, 0x28, 0xAD,
+	0x1F, 0xC1, 0x15, 0xBD,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xA8, 0x67,
+	0x9F, 0x6B,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x28, 0xC0, 0x28, 0xAD,
+	0x1D, 0x25,
+	0x20, 0x05,
+
+	0x28, 0x32, 0x80, 0xAD,
+	0x40, 0x2A, 0x40, 0xBD,
+
+	0x1C, 0x80, 0x20, 0xE9,
+	0x20, 0x33, 0x20, 0xAD,
+
+	0x20, 0x73,
+	0x00, 0xE0,
+	0xB6, 0x49, 0x51, 0xBB,
+
+	0x26, 0x2F, 0xB0, 0xE8,
+	0x19, 0x20, 0x20, 0xE9,
+
+	0x35, 0x20, 0x35, 0xDF,
+	0x3D, 0x20, 0x3D, 0xDF,
+
+	0x15, 0x20, 0x15, 0xDF,
+	0x1D, 0x20, 0x1D, 0xDF,
+
+	0x26, 0xD0, 0x26, 0xCD,
+	0x29, 0x49, 0x2A, 0xB8,
+
+	0x26, 0x40, 0x80, 0xBD,
+	0x3B, 0x48, 0x50, 0xBD,
+
+	0x3E, 0x54, 0x57, 0x9F,
+	0x00, 0xE0,
+	0x82, 0xE1,
+
+	0x1E, 0xAF, 0x59, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x26, 0x30,
+	0x29, 0x30,
+	0x48, 0x3C, 0x48, 0xAD,
+
+	0x2B, 0x72,
+	0xC2, 0xE1,
+	0x2C, 0xC0, 0x44, 0xC2,
+
+	0x05, 0x24, 0x34, 0xBF,
+	0x0D, 0x24, 0x2C, 0xBF,
+
+	0x2D, 0x46, 0x4E, 0xBF,
+	0x25, 0x46, 0x56, 0xBF,
+
+	0x20, 0x1D, 0x6F, 0x8F,
+	0x32, 0x3E, 0x5F, 0xE9,
+
+	0x3E, 0x50, 0x56, 0x9F,
+	0x00, 0xE0,
+	0x3B, 0x30,
+
+	0x1E, 0x8F, 0x51, 0x9F,
+	0x33, 0x1E, 0x5F, 0xE9,
+
+	0x05, 0x44, 0x54, 0xB2,
+	0x0D, 0x44, 0x4C, 0xB2,
+
+	0x19, 0xC0, 0xB0, 0xE8,
+	0x34, 0xC0, 0x44, 0xC4,
+
+	0x33, 0x73,
+	0x00, 0xE0,
+	0x3E, 0x62, 0x57, 0x9F,
+
+	0x1E, 0xAF, 0x59, 0x9F,
+	0x00, 0xE0,
+	0x0D, 0x20,
+
+	0x84, 0x3E, 0x58, 0xE9,
+	0x28, 0x1D, 0x6F, 0x8F,
+
+	0x05, 0x20,
+	0x00, 0xE0,
+	0x85, 0x1E, 0x58, 0xE9,
+
+	0x9B, 0x3B, 0x33, 0xDF,
+	0x20, 0x20, 0x42, 0xAF,
+
+	0x30, 0x42, 0x56, 0x9F,
+	0x80, 0x3E, 0x57, 0xE9,
+
+	0x3F, 0x8F, 0x51, 0x9F,
+	0x30, 0x80, 0x5F, 0xE9,
+
+	0x28, 0x28, 0x24, 0xAF,
+	0x81, 0x1E, 0x57, 0xE9,
+
+	0x05, 0x47, 0x57, 0xBF,
+	0x0D, 0x47, 0x4F, 0xBF,
+
+	0x88, 0x80, 0x58, 0xE9,
+	0x1B, 0x29, 0x1B, 0xDF,
+
+	0x30, 0x1D, 0x6F, 0x8F,
+	0x3A, 0x30, 0x4F, 0xE9,
+
+	0x1C, 0x30, 0x26, 0xDF,
+	0x09, 0xE3,
+	0x3B, 0x05,
+
+	0x3E, 0x50, 0x56, 0x9F,
+	0x3B, 0x3F, 0x4F, 0xE9,
+
+	0x1E, 0x8F, 0x51, 0x9F,
+	0x00, 0xE0,
+	0xAC, 0x20,
+
+	0x2D, 0x44, 0x4C, 0xB4,
+	0x2C, 0x1C, 0xC0, 0xAF,
+
+	0x25, 0x44, 0x54, 0xB4,
+	0x00, 0xE0,
+	0xC8, 0x30,
+
+	0x30, 0x46, 0x30, 0xAF,
+	0x1B, 0x1B, 0x48, 0xAF,
+
+	0x00, 0xE0,
+	0x25, 0x20,
+	0x38, 0x2C, 0x4F, 0xE9,
+
+	0x86, 0x80, 0x57, 0xE9,
+	0x38, 0x1D, 0x6F, 0x8F,
+
+	0x28, 0x74,
+	0x00, 0xE0,
+	0x0D, 0x44, 0x4C, 0xB0,
+
+	0x05, 0x44, 0x54, 0xB0,
+	0x2D, 0x20,
+	0x9B, 0x10,
+
+	0x82, 0x3E, 0x57, 0xE9,
+	0x32, 0xF0, 0x1B, 0xCD,
+
+	0x1E, 0xBD, 0x59, 0x9F,
+	0x83, 0x1E, 0x57, 0xE9,
+
+	0x38, 0x47, 0x38, 0xAF,
+	0x34, 0x20,
+	0x2A, 0x30,
+
+	0x00, 0xE0,
+	0x0D, 0x20,
+	0x32, 0x20,
+	0x05, 0x20,
+
+	0x87, 0x80, 0x57, 0xE9,
+	0x1F, 0x54, 0x57, 0x9F,
+
+	0x17, 0x42, 0x56, 0x9F,
+	0x00, 0xE0,
+	0x3B, 0x6A,
+
+	0x3F, 0x8F, 0x51, 0x9F,
+	0x37, 0x1E, 0x4F, 0xE9,
+
+	0x37, 0x32, 0x2A, 0xAF,
+	0x00, 0xE0,
+	0x32, 0x00,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x27, 0xC0, 0x44, 0xC0,
+
+	0x36, 0x1F, 0x4F, 0xE9,
+	0x1F, 0x1F, 0x26, 0xDF,
+
+	0x37, 0x1B, 0x37, 0xBF,
+	0x17, 0x26, 0x17, 0xDF,
+
+	0x3E, 0x17, 0x4F, 0xE9,
+	0x3F, 0x3F, 0x4F, 0xE9,
+
+	0x34, 0x1F, 0x34, 0xAF,
+	0x2B, 0x05,
+	0xA7, 0x20,
+
+	0x33, 0x2B, 0x37, 0xDF,
+	0x27, 0x17, 0xC0, 0xAF,
+
+	0x34, 0x80, 0x4F, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x2D, 0x21, 0x1A, 0xB0,
+	0x25, 0x21, 0x31, 0xB0,
+
+	0x0D, 0x21, 0x1A, 0xB2,
+	0x05, 0x21, 0x31, 0xB2,
+
+	0x03, 0x80, 0x2A, 0xEA,
+	0x17, 0xC1, 0x2B, 0xBD,
+
+	0x2D, 0x20,
+	0x25, 0x20,
+	0x05, 0x20,
+	0x0D, 0x20,
+
+	0xB3, 0x68,
+	0x97, 0x25,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x33, 0xC0, 0x33, 0xAF,
+	0x2F, 0xC0, 0x21, 0xC0,
+
+	0x16, 0x42, 0x56, 0x9F,
+	0x3C, 0x27, 0x4F, 0xE9,
+
+	0x1E, 0x62, 0x57, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x25, 0x21, 0x31, 0xB4,
+	0x2D, 0x21, 0x1A, 0xB4,
+
+	0x3F, 0x2F, 0x5D, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x33, 0x05,
+	0x00, 0xE0,
+	0x28, 0x19, 0x60, 0xEC,
+
+	0x37, 0x0F, 0x5C, 0x9F,
+	0x00, 0xE0,
+	0x2F, 0x20,
+
+	0x23, 0x3B, 0x33, 0xAD,
+	0x1E, 0x26, 0x1E, 0xDF,
+
+	0xA7, 0x1E, 0x4F, 0xE9,
+	0x17, 0x26, 0x16, 0xDF,
+
+	0x2D, 0x20,
+	0x00, 0xE0,
+	0xA8, 0x3F, 0x4F, 0xE9,
+
+	0x2F, 0x2F, 0x1E, 0xAF,
+	0x25, 0x20,
+	0x00, 0xE0,
+
+	0xA4, 0x16, 0x4F, 0xE9,
+	0x0F, 0xC0, 0x21, 0xC2,
+
+	0xA6, 0x80, 0x4F, 0xE9,
+	0x1F, 0x62, 0x57, 0x9F,
+
+	0x3F, 0x2F, 0x5D, 0x9F,
+	0x00, 0xE0,
+	0x8F, 0x20,
+
+	0xA5, 0x37, 0x4F, 0xE9,
+	0x0F, 0x17, 0x0F, 0xAF,
+
+	0x06, 0xC0, 0x21, 0xC4,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0xA3, 0x80, 0x4F, 0xE9,
+
+	0x06, 0x20,
+	0x00, 0xE0,
+	0x1F, 0x26, 0x1F, 0xDF,
+
+	0xA1, 0x1F, 0x4F, 0xE9,
+	0xA2, 0x3F, 0x4F, 0xE9,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x06, 0x06, 0x1F, 0xAF,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xA0, 0x80, 0x4F, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x57, 0x39, 0x20, 0xE9,
+
+	0x16, 0x28, 0x20, 0xE9,
+	0x1D, 0x3B, 0x20, 0xE9,
+
+	0x1E, 0x2B, 0x20, 0xE9,
+	0x2B, 0x32, 0x20, 0xE9,
+
+	0x1C, 0x23, 0x20, 0xE9,
+	0x57, 0x36, 0x20, 0xE9,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x40, 0x40, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x90, 0xE2,
+	0x00, 0xE0,
+
+	0x6C, 0xFF, 0x20, 0xEA,
+	0x19, 0xC8, 0xC1, 0xCD,
+
+	0x1F, 0xD7, 0x18, 0xBD,
+	0x3F, 0xD7, 0x22, 0xBD,
+
+	0x9F, 0x41, 0x49, 0xBD,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x25, 0x41, 0x49, 0xBD,
+	0x2D, 0x41, 0x51, 0xBD,
+
+	0x0D, 0x80, 0x07, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x35, 0x40, 0x48, 0xBD,
+	0x3D, 0x40, 0x50, 0xBD,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x25, 0x30,
+	0x2D, 0x30,
+
+	0x35, 0x30,
+	0xB5, 0x30,
+	0xBD, 0x30,
+	0x3D, 0x30,
+
+	0x9C, 0xA7, 0x5B, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x6B, 0xFF, 0x0A, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xC9, 0x41, 0xC8, 0xEC,
+	0x42, 0xE1,
+	0x00, 0xE0,
+
+	0x69, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xC8, 0x40, 0xC0, 0xEC,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x66, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+};
+
+static unsigned char warp_g200_tgzsa[] = {
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x98, 0xA0, 0xE9,
+	0x40, 0x40, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x1F, 0xD7, 0x18, 0xBD,
+	0x3F, 0xD7, 0x22, 0xBD,
+
+	0x81, 0x04,
+	0x89, 0x04,
+	0x01, 0x04,
+	0x09, 0x04,
+
+	0xC9, 0x41, 0xC0, 0xEC,
+	0x11, 0x04,
+	0x00, 0xE0,
+
+	0x41, 0xCC, 0x41, 0xCD,
+	0x49, 0xCC, 0x49, 0xCD,
+
+	0xD1, 0x41, 0xC0, 0xEC,
+	0x51, 0xCC, 0x51, 0xCD,
+
+	0x80, 0x04,
+	0x10, 0x04,
+	0x08, 0x04,
+	0x00, 0xE0,
+
+	0x00, 0xCC, 0xC0, 0xCD,
+	0xD1, 0x49, 0xC0, 0xEC,
+
+	0x8A, 0x1F, 0x20, 0xE9,
+	0x8B, 0x3F, 0x20, 0xE9,
+
+	0x41, 0x3C, 0x41, 0xAD,
+	0x49, 0x3C, 0x49, 0xAD,
+
+	0x10, 0xCC, 0x10, 0xCD,
+	0x08, 0xCC, 0x08, 0xCD,
+
+	0xB9, 0x41, 0x49, 0xBB,
+	0x1F, 0xF0, 0x41, 0xCD,
+
+	0x51, 0x3C, 0x51, 0xAD,
+	0x00, 0x98, 0x80, 0xE9,
+
+	0x8F, 0x80, 0x07, 0xEA,
+	0x24, 0x1F, 0x20, 0xE9,
+
+	0x21, 0x45, 0x80, 0xE8,
+	0x1A, 0x4D, 0x80, 0xE8,
+
+	0x31, 0x55, 0x80, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x15, 0x41, 0x49, 0xBD,
+	0x1D, 0x41, 0x51, 0xBD,
+
+	0x2E, 0x41, 0x2A, 0xB8,
+	0x34, 0x53, 0xA0, 0xE8,
+
+	0x15, 0x30,
+	0x1D, 0x30,
+	0x58, 0xE3,
+	0x00, 0xE0,
+
+	0xB5, 0x40, 0x48, 0xBD,
+	0x3D, 0x40, 0x50, 0xBD,
+
+	0x24, 0x43, 0xA0, 0xE8,
+	0x2C, 0x4B, 0xA0, 0xE8,
+
+	0x15, 0x72,
+	0x09, 0xE3,
+	0x00, 0xE0,
+	0x1D, 0x72,
+
+	0x35, 0x30,
+	0xB5, 0x30,
+	0xBD, 0x30,
+	0x3D, 0x30,
+
+	0x9C, 0x97, 0x57, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x6C, 0x64, 0xC8, 0xEC,
+	0x98, 0xE1,
+	0xB5, 0x05,
+
+	0xBD, 0x05,
+	0x2E, 0x30,
+	0x32, 0xC0, 0xA0, 0xE8,
+
+	0x33, 0xC0, 0xA0, 0xE8,
+	0x74, 0x64, 0xC8, 0xEC,
+
+	0x40, 0x3C, 0x40, 0xAD,
+	0x32, 0x6A,
+	0x2A, 0x30,
+
+	0x20, 0x73,
+	0x33, 0x6A,
+	0x00, 0xE0,
+	0x28, 0x73,
+
+	0x1C, 0x72,
+	0x83, 0xE2,
+	0x7B, 0x80, 0x15, 0xEA,
+
+	0xB8, 0x3D, 0x28, 0xDF,
+	0x30, 0x35, 0x20, 0xDF,
+
+	0x40, 0x30,
+	0x00, 0xE0,
+	0xCC, 0xE2,
+	0x64, 0x72,
+
+	0x25, 0x42, 0x52, 0xBF,
+	0x2D, 0x42, 0x4A, 0xBF,
+
+	0x30, 0x2E, 0x30, 0xDF,
+	0x38, 0x2E, 0x38, 0xDF,
+
+	0x18, 0x1D, 0x45, 0xE9,
+	0x1E, 0x15, 0x45, 0xE9,
+
+	0x2B, 0x49, 0x51, 0xBD,
+	0x00, 0xE0,
+	0x1F, 0x73,
+
+	0x38, 0x38, 0x40, 0xAF,
+	0x30, 0x30, 0x40, 0xAF,
+
+	0x24, 0x1F, 0x24, 0xDF,
+	0x1D, 0x32, 0x20, 0xE9,
+
+	0x2C, 0x1F, 0x2C, 0xDF,
+	0x1A, 0x33, 0x20, 0xE9,
+
+	0xB0, 0x10,
+	0x08, 0xE3,
+	0x40, 0x10,
+	0xB8, 0x10,
+
+	0x26, 0xF0, 0x30, 0xCD,
+	0x2F, 0xF0, 0x38, 0xCD,
+
+	0x2B, 0x80, 0x20, 0xE9,
+	0x2A, 0x80, 0x20, 0xE9,
+
+	0xA6, 0x20,
+	0x88, 0xE2,
+	0x00, 0xE0,
+	0xAF, 0x20,
+
+	0x28, 0x2A, 0x26, 0xAF,
+	0x20, 0x2A, 0xC0, 0xAF,
+
+	0x34, 0x1F, 0x34, 0xDF,
+	0x46, 0x24, 0x46, 0xDF,
+
+	0x28, 0x30, 0x80, 0xBF,
+	0x20, 0x38, 0x80, 0xBF,
+
+	0x47, 0x24, 0x47, 0xDF,
+	0x4E, 0x2C, 0x4E, 0xDF,
+
+	0x4F, 0x2C, 0x4F, 0xDF,
+	0x56, 0x34, 0x56, 0xDF,
+
+	0x28, 0x15, 0x28, 0xDF,
+	0x20, 0x1D, 0x20, 0xDF,
+
+	0x57, 0x34, 0x57, 0xDF,
+	0x00, 0xE0,
+	0x1D, 0x05,
+
+	0x04, 0x80, 0x10, 0xEA,
+	0x89, 0xE2,
+	0x2B, 0x30,
+
+	0x3F, 0xC1, 0x1D, 0xBD,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xA0, 0x68,
+	0xBF, 0x25,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x20, 0xC0, 0x20, 0xAF,
+	0x28, 0x05,
+	0x97, 0x74,
+
+	0x00, 0xE0,
+	0x2A, 0x10,
+	0x16, 0xC0, 0x20, 0xE9,
+
+	0x04, 0x80, 0x10, 0xEA,
+	0x8C, 0xE2,
+	0x95, 0x05,
+
+	0x28, 0xC1, 0x28, 0xAD,
+	0x1F, 0xC1, 0x15, 0xBD,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xA8, 0x67,
+	0x9F, 0x6B,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x28, 0xC0, 0x28, 0xAD,
+	0x1D, 0x25,
+	0x20, 0x05,
+
+	0x28, 0x32, 0x80, 0xAD,
+	0x40, 0x2A, 0x40, 0xBD,
+
+	0x1C, 0x80, 0x20, 0xE9,
+	0x20, 0x33, 0x20, 0xAD,
+
+	0x20, 0x73,
+	0x00, 0xE0,
+	0xB6, 0x49, 0x51, 0xBB,
+
+	0x26, 0x2F, 0xB0, 0xE8,
+	0x19, 0x20, 0x20, 0xE9,
+
+	0x35, 0x20, 0x35, 0xDF,
+	0x3D, 0x20, 0x3D, 0xDF,
+
+	0x15, 0x20, 0x15, 0xDF,
+	0x1D, 0x20, 0x1D, 0xDF,
+
+	0x26, 0xD0, 0x26, 0xCD,
+	0x29, 0x49, 0x2A, 0xB8,
+
+	0x26, 0x40, 0x80, 0xBD,
+	0x3B, 0x48, 0x50, 0xBD,
+
+	0x3E, 0x54, 0x57, 0x9F,
+	0x00, 0xE0,
+	0x82, 0xE1,
+
+	0x1E, 0xAF, 0x59, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x26, 0x30,
+	0x29, 0x30,
+	0x48, 0x3C, 0x48, 0xAD,
+
+	0x2B, 0x72,
+	0xC2, 0xE1,
+	0x2C, 0xC0, 0x44, 0xC2,
+
+	0x05, 0x24, 0x34, 0xBF,
+	0x0D, 0x24, 0x2C, 0xBF,
+
+	0x2D, 0x46, 0x4E, 0xBF,
+	0x25, 0x46, 0x56, 0xBF,
+
+	0x20, 0x1D, 0x6F, 0x8F,
+	0x32, 0x3E, 0x5F, 0xE9,
+
+	0x3E, 0x50, 0x56, 0x9F,
+	0x00, 0xE0,
+	0x3B, 0x30,
+
+	0x1E, 0x8F, 0x51, 0x9F,
+	0x33, 0x1E, 0x5F, 0xE9,
+
+	0x05, 0x44, 0x54, 0xB2,
+	0x0D, 0x44, 0x4C, 0xB2,
+
+	0x19, 0xC0, 0xB0, 0xE8,
+	0x34, 0xC0, 0x44, 0xC4,
+
+	0x33, 0x73,
+	0x00, 0xE0,
+	0x3E, 0x62, 0x57, 0x9F,
+
+	0x1E, 0xAF, 0x59, 0x9F,
+	0x00, 0xE0,
+	0x0D, 0x20,
+
+	0x84, 0x3E, 0x58, 0xE9,
+	0x28, 0x1D, 0x6F, 0x8F,
+
+	0x05, 0x20,
+	0x00, 0xE0,
+	0x85, 0x1E, 0x58, 0xE9,
+
+	0x9B, 0x3B, 0x33, 0xDF,
+	0x20, 0x20, 0x42, 0xAF,
+
+	0x30, 0x42, 0x56, 0x9F,
+	0x80, 0x3E, 0x57, 0xE9,
+
+	0x3F, 0x8F, 0x51, 0x9F,
+	0x30, 0x80, 0x5F, 0xE9,
+
+	0x28, 0x28, 0x24, 0xAF,
+	0x81, 0x1E, 0x57, 0xE9,
+
+	0x05, 0x47, 0x57, 0xBF,
+	0x0D, 0x47, 0x4F, 0xBF,
+
+	0x88, 0x80, 0x58, 0xE9,
+	0x1B, 0x29, 0x1B, 0xDF,
+
+	0x30, 0x1D, 0x6F, 0x8F,
+	0x3A, 0x30, 0x4F, 0xE9,
+
+	0x1C, 0x30, 0x26, 0xDF,
+	0x09, 0xE3,
+	0x3B, 0x05,
+
+	0x3E, 0x50, 0x56, 0x9F,
+	0x3B, 0x3F, 0x4F, 0xE9,
+
+	0x1E, 0x8F, 0x51, 0x9F,
+	0x00, 0xE0,
+	0xAC, 0x20,
+
+	0x2D, 0x44, 0x4C, 0xB4,
+	0x2C, 0x1C, 0xC0, 0xAF,
+
+	0x25, 0x44, 0x54, 0xB4,
+	0x00, 0xE0,
+	0xC8, 0x30,
+
+	0x30, 0x46, 0x30, 0xAF,
+	0x1B, 0x1B, 0x48, 0xAF,
+
+	0x00, 0xE0,
+	0x25, 0x20,
+	0x38, 0x2C, 0x4F, 0xE9,
+
+	0x86, 0x80, 0x57, 0xE9,
+	0x38, 0x1D, 0x6F, 0x8F,
+
+	0x28, 0x74,
+	0x00, 0xE0,
+	0x0D, 0x44, 0x4C, 0xB0,
+
+	0x05, 0x44, 0x54, 0xB0,
+	0x2D, 0x20,
+	0x9B, 0x10,
+
+	0x82, 0x3E, 0x57, 0xE9,
+	0x32, 0xF0, 0x1B, 0xCD,
+
+	0x1E, 0xBD, 0x59, 0x9F,
+	0x83, 0x1E, 0x57, 0xE9,
+
+	0x38, 0x47, 0x38, 0xAF,
+	0x34, 0x20,
+	0x2A, 0x30,
+
+	0x00, 0xE0,
+	0x0D, 0x20,
+	0x32, 0x20,
+	0x05, 0x20,
+
+	0x87, 0x80, 0x57, 0xE9,
+	0x1F, 0x54, 0x57, 0x9F,
+
+	0x17, 0x42, 0x56, 0x9F,
+	0x00, 0xE0,
+	0x3B, 0x6A,
+
+	0x3F, 0x8F, 0x51, 0x9F,
+	0x37, 0x1E, 0x4F, 0xE9,
+
+	0x37, 0x32, 0x2A, 0xAF,
+	0x00, 0xE0,
+	0x32, 0x00,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x27, 0xC0, 0x44, 0xC0,
+
+	0x36, 0x1F, 0x4F, 0xE9,
+	0x1F, 0x1F, 0x26, 0xDF,
+
+	0x37, 0x1B, 0x37, 0xBF,
+	0x17, 0x26, 0x17, 0xDF,
+
+	0x3E, 0x17, 0x4F, 0xE9,
+	0x3F, 0x3F, 0x4F, 0xE9,
+
+	0x34, 0x1F, 0x34, 0xAF,
+	0x2B, 0x05,
+	0xA7, 0x20,
+
+	0x33, 0x2B, 0x37, 0xDF,
+	0x27, 0x17, 0xC0, 0xAF,
+
+	0x34, 0x80, 0x4F, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x2D, 0x21, 0x1A, 0xB0,
+	0x25, 0x21, 0x31, 0xB0,
+
+	0x0D, 0x21, 0x1A, 0xB2,
+	0x05, 0x21, 0x31, 0xB2,
+
+	0x03, 0x80, 0x2A, 0xEA,
+	0x17, 0xC1, 0x2B, 0xBD,
+
+	0x2D, 0x20,
+	0x25, 0x20,
+	0x05, 0x20,
+	0x0D, 0x20,
+
+	0xB3, 0x68,
+	0x97, 0x25,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x33, 0xC0, 0x33, 0xAF,
+	0x2F, 0xC0, 0x21, 0xC0,
+
+	0x16, 0x42, 0x56, 0x9F,
+	0x3C, 0x27, 0x4F, 0xE9,
+
+	0x1E, 0x62, 0x57, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x25, 0x21, 0x31, 0xB4,
+	0x2D, 0x21, 0x1A, 0xB4,
+
+	0x3F, 0x2F, 0x5D, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x33, 0x05,
+	0x00, 0xE0,
+	0x28, 0x19, 0x60, 0xEC,
+
+	0x0D, 0x44, 0x4C, 0xB6,
+	0x05, 0x44, 0x54, 0xB6,
+
+	0x37, 0x0F, 0x5C, 0x9F,
+	0x00, 0xE0,
+	0x2F, 0x20,
+
+	0x23, 0x3B, 0x33, 0xAD,
+	0x1E, 0x26, 0x1E, 0xDF,
+
+	0xA7, 0x1E, 0x4F, 0xE9,
+	0x17, 0x26, 0x16, 0xDF,
+
+	0x2D, 0x20,
+	0x00, 0xE0,
+	0xA8, 0x3F, 0x4F, 0xE9,
+
+	0x2F, 0x2F, 0x1E, 0xAF,
+	0x25, 0x20,
+	0x00, 0xE0,
+
+	0xA4, 0x16, 0x4F, 0xE9,
+	0x0F, 0xC0, 0x21, 0xC2,
+
+	0xA6, 0x80, 0x4F, 0xE9,
+	0x1F, 0x62, 0x57, 0x9F,
+
+	0x0D, 0x20,
+	0x05, 0x20,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x3F, 0x2F, 0x5D, 0x9F,
+	0x00, 0xE0,
+	0x0F, 0x20,
+
+	0x17, 0x50, 0x56, 0x9F,
+	0xA5, 0x37, 0x4F, 0xE9,
+
+	0x06, 0xC0, 0x21, 0xC4,
+	0x0F, 0x17, 0x0F, 0xAF,
+
+	0x37, 0x0F, 0x5C, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x2F, 0xC0, 0x44, 0xC6,
+	0xA3, 0x80, 0x4F, 0xE9,
+
+	0x06, 0x20,
+	0x00, 0xE0,
+	0x1F, 0x26, 0x1F, 0xDF,
+
+	0x17, 0x26, 0x17, 0xDF,
+	0x9D, 0x17, 0x4F, 0xE9,
+
+	0xA1, 0x1F, 0x4F, 0xE9,
+	0xA2, 0x3F, 0x4F, 0xE9,
+
+	0x06, 0x06, 0x1F, 0xAF,
+	0x00, 0xE0,
+	0xAF, 0x20,
+
+	0x9E, 0x37, 0x4F, 0xE9,
+	0x2F, 0x17, 0x2F, 0xAF,
+
+	0xA0, 0x80, 0x4F, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x9C, 0x80, 0x4F, 0xE9,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x57, 0x39, 0x20, 0xE9,
+
+	0x16, 0x28, 0x20, 0xE9,
+	0x1D, 0x3B, 0x20, 0xE9,
+
+	0x1E, 0x2B, 0x20, 0xE9,
+	0x2B, 0x32, 0x20, 0xE9,
+
+	0x1C, 0x23, 0x20, 0xE9,
+	0x57, 0x36, 0x20, 0xE9,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x40, 0x40, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x90, 0xE2,
+	0x00, 0xE0,
+
+	0x68, 0xFF, 0x20, 0xEA,
+	0x19, 0xC8, 0xC1, 0xCD,
+
+	0x1F, 0xD7, 0x18, 0xBD,
+	0x3F, 0xD7, 0x22, 0xBD,
+
+	0x9F, 0x41, 0x49, 0xBD,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x25, 0x41, 0x49, 0xBD,
+	0x2D, 0x41, 0x51, 0xBD,
+
+	0x0D, 0x80, 0x07, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x35, 0x40, 0x48, 0xBD,
+	0x3D, 0x40, 0x50, 0xBD,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x25, 0x30,
+	0x2D, 0x30,
+
+	0x35, 0x30,
+	0xB5, 0x30,
+	0xBD, 0x30,
+	0x3D, 0x30,
+
+	0x9C, 0xA7, 0x5B, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x67, 0xFF, 0x0A, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xC9, 0x41, 0xC8, 0xEC,
+	0x42, 0xE1,
+	0x00, 0xE0,
+
+	0x65, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xC8, 0x40, 0xC0, 0xEC,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x62, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+};
+
+static unsigned char warp_g200_tgzsaf[] = {
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x98, 0xA0, 0xE9,
+	0x40, 0x40, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x1F, 0xD7, 0x18, 0xBD,
+	0x3F, 0xD7, 0x22, 0xBD,
+
+	0x81, 0x04,
+	0x89, 0x04,
+	0x01, 0x04,
+	0x09, 0x04,
+
+	0xC9, 0x41, 0xC0, 0xEC,
+	0x11, 0x04,
+	0x00, 0xE0,
+
+	0x41, 0xCC, 0x41, 0xCD,
+	0x49, 0xCC, 0x49, 0xCD,
+
+	0xD1, 0x41, 0xC0, 0xEC,
+	0x51, 0xCC, 0x51, 0xCD,
+
+	0x80, 0x04,
+	0x10, 0x04,
+	0x08, 0x04,
+	0x00, 0xE0,
+
+	0x00, 0xCC, 0xC0, 0xCD,
+	0xD1, 0x49, 0xC0, 0xEC,
+
+	0x8A, 0x1F, 0x20, 0xE9,
+	0x8B, 0x3F, 0x20, 0xE9,
+
+	0x41, 0x3C, 0x41, 0xAD,
+	0x49, 0x3C, 0x49, 0xAD,
+
+	0x10, 0xCC, 0x10, 0xCD,
+	0x08, 0xCC, 0x08, 0xCD,
+
+	0xB9, 0x41, 0x49, 0xBB,
+	0x1F, 0xF0, 0x41, 0xCD,
+
+	0x51, 0x3C, 0x51, 0xAD,
+	0x00, 0x98, 0x80, 0xE9,
+
+	0x94, 0x80, 0x07, 0xEA,
+	0x24, 0x1F, 0x20, 0xE9,
+
+	0x21, 0x45, 0x80, 0xE8,
+	0x1A, 0x4D, 0x80, 0xE8,
+
+	0x31, 0x55, 0x80, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x15, 0x41, 0x49, 0xBD,
+	0x1D, 0x41, 0x51, 0xBD,
+
+	0x2E, 0x41, 0x2A, 0xB8,
+	0x34, 0x53, 0xA0, 0xE8,
+
+	0x15, 0x30,
+	0x1D, 0x30,
+	0x58, 0xE3,
+	0x00, 0xE0,
+
+	0xB5, 0x40, 0x48, 0xBD,
+	0x3D, 0x40, 0x50, 0xBD,
+
+	0x24, 0x43, 0xA0, 0xE8,
+	0x2C, 0x4B, 0xA0, 0xE8,
+
+	0x15, 0x72,
+	0x09, 0xE3,
+	0x00, 0xE0,
+	0x1D, 0x72,
+
+	0x35, 0x30,
+	0xB5, 0x30,
+	0xBD, 0x30,
+	0x3D, 0x30,
+
+	0x9C, 0x97, 0x57, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x6C, 0x64, 0xC8, 0xEC,
+	0x98, 0xE1,
+	0xB5, 0x05,
+
+	0xBD, 0x05,
+	0x2E, 0x30,
+	0x32, 0xC0, 0xA0, 0xE8,
+
+	0x33, 0xC0, 0xA0, 0xE8,
+	0x74, 0x64, 0xC8, 0xEC,
+
+	0x40, 0x3C, 0x40, 0xAD,
+	0x32, 0x6A,
+	0x2A, 0x30,
+
+	0x20, 0x73,
+	0x33, 0x6A,
+	0x00, 0xE0,
+	0x28, 0x73,
+
+	0x1C, 0x72,
+	0x83, 0xE2,
+	0x80, 0x80, 0x15, 0xEA,
+
+	0xB8, 0x3D, 0x28, 0xDF,
+	0x30, 0x35, 0x20, 0xDF,
+
+	0x40, 0x30,
+	0x00, 0xE0,
+	0xCC, 0xE2,
+	0x64, 0x72,
+
+	0x25, 0x42, 0x52, 0xBF,
+	0x2D, 0x42, 0x4A, 0xBF,
+
+	0x30, 0x2E, 0x30, 0xDF,
+	0x38, 0x2E, 0x38, 0xDF,
+
+	0x18, 0x1D, 0x45, 0xE9,
+	0x1E, 0x15, 0x45, 0xE9,
+
+	0x2B, 0x49, 0x51, 0xBD,
+	0x00, 0xE0,
+	0x1F, 0x73,
+
+	0x38, 0x38, 0x40, 0xAF,
+	0x30, 0x30, 0x40, 0xAF,
+
+	0x24, 0x1F, 0x24, 0xDF,
+	0x1D, 0x32, 0x20, 0xE9,
+
+	0x2C, 0x1F, 0x2C, 0xDF,
+	0x1A, 0x33, 0x20, 0xE9,
+
+	0xB0, 0x10,
+	0x08, 0xE3,
+	0x40, 0x10,
+	0xB8, 0x10,
+
+	0x26, 0xF0, 0x30, 0xCD,
+	0x2F, 0xF0, 0x38, 0xCD,
+
+	0x2B, 0x80, 0x20, 0xE9,
+	0x2A, 0x80, 0x20, 0xE9,
+
+	0xA6, 0x20,
+	0x88, 0xE2,
+	0x00, 0xE0,
+	0xAF, 0x20,
+
+	0x28, 0x2A, 0x26, 0xAF,
+	0x20, 0x2A, 0xC0, 0xAF,
+
+	0x34, 0x1F, 0x34, 0xDF,
+	0x46, 0x24, 0x46, 0xDF,
+
+	0x28, 0x30, 0x80, 0xBF,
+	0x20, 0x38, 0x80, 0xBF,
+
+	0x47, 0x24, 0x47, 0xDF,
+	0x4E, 0x2C, 0x4E, 0xDF,
+
+	0x4F, 0x2C, 0x4F, 0xDF,
+	0x56, 0x34, 0x56, 0xDF,
+
+	0x28, 0x15, 0x28, 0xDF,
+	0x20, 0x1D, 0x20, 0xDF,
+
+	0x57, 0x34, 0x57, 0xDF,
+	0x00, 0xE0,
+	0x1D, 0x05,
+
+	0x04, 0x80, 0x10, 0xEA,
+	0x89, 0xE2,
+	0x2B, 0x30,
+
+	0x3F, 0xC1, 0x1D, 0xBD,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xA0, 0x68,
+	0xBF, 0x25,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x20, 0xC0, 0x20, 0xAF,
+	0x28, 0x05,
+	0x97, 0x74,
+
+	0x00, 0xE0,
+	0x2A, 0x10,
+	0x16, 0xC0, 0x20, 0xE9,
+
+	0x04, 0x80, 0x10, 0xEA,
+	0x8C, 0xE2,
+	0x95, 0x05,
+
+	0x28, 0xC1, 0x28, 0xAD,
+	0x1F, 0xC1, 0x15, 0xBD,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xA8, 0x67,
+	0x9F, 0x6B,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x28, 0xC0, 0x28, 0xAD,
+	0x1D, 0x25,
+	0x20, 0x05,
+
+	0x28, 0x32, 0x80, 0xAD,
+	0x40, 0x2A, 0x40, 0xBD,
+
+	0x1C, 0x80, 0x20, 0xE9,
+	0x20, 0x33, 0x20, 0xAD,
+
+	0x20, 0x73,
+	0x00, 0xE0,
+	0xB6, 0x49, 0x51, 0xBB,
+
+	0x26, 0x2F, 0xB0, 0xE8,
+	0x19, 0x20, 0x20, 0xE9,
+
+	0x35, 0x20, 0x35, 0xDF,
+	0x3D, 0x20, 0x3D, 0xDF,
+
+	0x15, 0x20, 0x15, 0xDF,
+	0x1D, 0x20, 0x1D, 0xDF,
+
+	0x26, 0xD0, 0x26, 0xCD,
+	0x29, 0x49, 0x2A, 0xB8,
+
+	0x26, 0x40, 0x80, 0xBD,
+	0x3B, 0x48, 0x50, 0xBD,
+
+	0x3E, 0x54, 0x57, 0x9F,
+	0x00, 0xE0,
+	0x82, 0xE1,
+
+	0x1E, 0xAF, 0x59, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x26, 0x30,
+	0x29, 0x30,
+	0x48, 0x3C, 0x48, 0xAD,
+
+	0x2B, 0x72,
+	0xC2, 0xE1,
+	0x2C, 0xC0, 0x44, 0xC2,
+
+	0x05, 0x24, 0x34, 0xBF,
+	0x0D, 0x24, 0x2C, 0xBF,
+
+	0x2D, 0x46, 0x4E, 0xBF,
+	0x25, 0x46, 0x56, 0xBF,
+
+	0x20, 0x1D, 0x6F, 0x8F,
+	0x32, 0x3E, 0x5F, 0xE9,
+
+	0x3E, 0x50, 0x56, 0x9F,
+	0x00, 0xE0,
+	0x3B, 0x30,
+
+	0x1E, 0x8F, 0x51, 0x9F,
+	0x33, 0x1E, 0x5F, 0xE9,
+
+	0x05, 0x44, 0x54, 0xB2,
+	0x0D, 0x44, 0x4C, 0xB2,
+
+	0x19, 0xC0, 0xB0, 0xE8,
+	0x34, 0xC0, 0x44, 0xC4,
+
+	0x33, 0x73,
+	0x00, 0xE0,
+	0x3E, 0x62, 0x57, 0x9F,
+
+	0x1E, 0xAF, 0x59, 0x9F,
+	0x00, 0xE0,
+	0x0D, 0x20,
+
+	0x84, 0x3E, 0x58, 0xE9,
+	0x28, 0x1D, 0x6F, 0x8F,
+
+	0x05, 0x20,
+	0x00, 0xE0,
+	0x85, 0x1E, 0x58, 0xE9,
+
+	0x9B, 0x3B, 0x33, 0xDF,
+	0x20, 0x20, 0x42, 0xAF,
+
+	0x30, 0x42, 0x56, 0x9F,
+	0x80, 0x3E, 0x57, 0xE9,
+
+	0x3F, 0x8F, 0x51, 0x9F,
+	0x30, 0x80, 0x5F, 0xE9,
+
+	0x28, 0x28, 0x24, 0xAF,
+	0x81, 0x1E, 0x57, 0xE9,
+
+	0x05, 0x47, 0x57, 0xBF,
+	0x0D, 0x47, 0x4F, 0xBF,
+
+	0x88, 0x80, 0x58, 0xE9,
+	0x1B, 0x29, 0x1B, 0xDF,
+
+	0x30, 0x1D, 0x6F, 0x8F,
+	0x3A, 0x30, 0x4F, 0xE9,
+
+	0x1C, 0x30, 0x26, 0xDF,
+	0x09, 0xE3,
+	0x3B, 0x05,
+
+	0x3E, 0x50, 0x56, 0x9F,
+	0x3B, 0x3F, 0x4F, 0xE9,
+
+	0x1E, 0x8F, 0x51, 0x9F,
+	0x00, 0xE0,
+	0xAC, 0x20,
+
+	0x2D, 0x44, 0x4C, 0xB4,
+	0x2C, 0x1C, 0xC0, 0xAF,
+
+	0x25, 0x44, 0x54, 0xB4,
+	0x00, 0xE0,
+	0xC8, 0x30,
+
+	0x30, 0x46, 0x30, 0xAF,
+	0x1B, 0x1B, 0x48, 0xAF,
+
+	0x00, 0xE0,
+	0x25, 0x20,
+	0x38, 0x2C, 0x4F, 0xE9,
+
+	0x86, 0x80, 0x57, 0xE9,
+	0x38, 0x1D, 0x6F, 0x8F,
+
+	0x28, 0x74,
+	0x00, 0xE0,
+	0x0D, 0x44, 0x4C, 0xB0,
+
+	0x05, 0x44, 0x54, 0xB0,
+	0x2D, 0x20,
+	0x9B, 0x10,
+
+	0x82, 0x3E, 0x57, 0xE9,
+	0x32, 0xF0, 0x1B, 0xCD,
+
+	0x1E, 0xBD, 0x59, 0x9F,
+	0x83, 0x1E, 0x57, 0xE9,
+
+	0x38, 0x47, 0x38, 0xAF,
+	0x34, 0x20,
+	0x2A, 0x30,
+
+	0x00, 0xE0,
+	0x0D, 0x20,
+	0x32, 0x20,
+	0x05, 0x20,
+
+	0x87, 0x80, 0x57, 0xE9,
+	0x1F, 0x54, 0x57, 0x9F,
+
+	0x17, 0x42, 0x56, 0x9F,
+	0x00, 0xE0,
+	0x3B, 0x6A,
+
+	0x3F, 0x8F, 0x51, 0x9F,
+	0x37, 0x1E, 0x4F, 0xE9,
+
+	0x37, 0x32, 0x2A, 0xAF,
+	0x00, 0xE0,
+	0x32, 0x00,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x27, 0xC0, 0x44, 0xC0,
+
+	0x36, 0x1F, 0x4F, 0xE9,
+	0x1F, 0x1F, 0x26, 0xDF,
+
+	0x37, 0x1B, 0x37, 0xBF,
+	0x17, 0x26, 0x17, 0xDF,
+
+	0x3E, 0x17, 0x4F, 0xE9,
+	0x3F, 0x3F, 0x4F, 0xE9,
+
+	0x34, 0x1F, 0x34, 0xAF,
+	0x2B, 0x05,
+	0xA7, 0x20,
+
+	0x33, 0x2B, 0x37, 0xDF,
+	0x27, 0x17, 0xC0, 0xAF,
+
+	0x34, 0x80, 0x4F, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x2D, 0x21, 0x1A, 0xB0,
+	0x25, 0x21, 0x31, 0xB0,
+
+	0x0D, 0x21, 0x1A, 0xB2,
+	0x05, 0x21, 0x31, 0xB2,
+
+	0x03, 0x80, 0x2A, 0xEA,
+	0x17, 0xC1, 0x2B, 0xBD,
+
+	0x2D, 0x20,
+	0x25, 0x20,
+	0x05, 0x20,
+	0x0D, 0x20,
+
+	0xB3, 0x68,
+	0x97, 0x25,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x33, 0xC0, 0x33, 0xAF,
+	0x2F, 0xC0, 0x21, 0xC0,
+
+	0x16, 0x42, 0x56, 0x9F,
+	0x3C, 0x27, 0x4F, 0xE9,
+
+	0x1E, 0x62, 0x57, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x25, 0x21, 0x31, 0xB4,
+	0x2D, 0x21, 0x1A, 0xB4,
+
+	0x3F, 0x2F, 0x5D, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x33, 0x05,
+	0x00, 0xE0,
+	0x28, 0x19, 0x60, 0xEC,
+
+	0x0D, 0x21, 0x1A, 0xB6,
+	0x05, 0x21, 0x31, 0xB6,
+
+	0x37, 0x0F, 0x5C, 0x9F,
+	0x00, 0xE0,
+	0x2F, 0x20,
+
+	0x23, 0x3B, 0x33, 0xAD,
+	0x1E, 0x26, 0x1E, 0xDF,
+
+	0xA7, 0x1E, 0x4F, 0xE9,
+	0x17, 0x26, 0x16, 0xDF,
+
+	0x2D, 0x20,
+	0x00, 0xE0,
+	0xA8, 0x3F, 0x4F, 0xE9,
+
+	0x2F, 0x2F, 0x1E, 0xAF,
+	0x25, 0x20,
+	0x00, 0xE0,
+
+	0xA4, 0x16, 0x4F, 0xE9,
+	0x0F, 0xC0, 0x21, 0xC2,
+
+	0xA6, 0x80, 0x4F, 0xE9,
+	0x1F, 0x62, 0x57, 0x9F,
+
+	0x0D, 0x20,
+	0x05, 0x20,
+	0x2F, 0xC0, 0x21, 0xC6,
+
+	0x2D, 0x44, 0x4C, 0xB6,
+	0x25, 0x44, 0x54, 0xB6,
+
+	0x3F, 0x2F, 0x5D, 0x9F,
+	0x00, 0xE0,
+	0x0F, 0x20,
+
+	0x2D, 0x20,
+	0x25, 0x20,
+	0x07, 0xC0, 0x44, 0xC6,
+
+	0x17, 0x50, 0x56, 0x9F,
+	0xA5, 0x37, 0x4F, 0xE9,
+
+	0x06, 0xC0, 0x21, 0xC4,
+	0x0F, 0x17, 0x0F, 0xAF,
+
+	0x37, 0x0F, 0x5C, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x1E, 0x62, 0x57, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x3E, 0x3D, 0x5D, 0x9F,
+	0x00, 0xE0,
+	0x07, 0x20,
+
+	0x2F, 0x20,
+	0x00, 0xE0,
+	0xA3, 0x0F, 0x4F, 0xE9,
+
+	0x06, 0x20,
+	0x00, 0xE0,
+	0x1F, 0x26, 0x1F, 0xDF,
+
+	0x17, 0x26, 0x17, 0xDF,
+	0xA1, 0x1F, 0x4F, 0xE9,
+
+	0x1E, 0x26, 0x1E, 0xDF,
+	0x9D, 0x1E, 0x4F, 0xE9,
+
+	0x35, 0x17, 0x4F, 0xE9,
+	0xA2, 0x3F, 0x4F, 0xE9,
+
+	0x06, 0x06, 0x1F, 0xAF,
+	0x39, 0x37, 0x4F, 0xE9,
+
+	0x2F, 0x2F, 0x17, 0xAF,
+	0x07, 0x07, 0x1E, 0xAF,
+
+	0xA0, 0x80, 0x4F, 0xE9,
+	0x9E, 0x3E, 0x4F, 0xE9,
+
+	0x31, 0x80, 0x4F, 0xE9,
+	0x9C, 0x80, 0x4F, 0xE9,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x57, 0x39, 0x20, 0xE9,
+
+	0x16, 0x28, 0x20, 0xE9,
+	0x1D, 0x3B, 0x20, 0xE9,
+
+	0x1E, 0x2B, 0x20, 0xE9,
+	0x2B, 0x32, 0x20, 0xE9,
+
+	0x1C, 0x23, 0x20, 0xE9,
+	0x57, 0x36, 0x20, 0xE9,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x40, 0x40, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x90, 0xE2,
+	0x00, 0xE0,
+
+	0x63, 0xFF, 0x20, 0xEA,
+	0x19, 0xC8, 0xC1, 0xCD,
+
+	0x1F, 0xD7, 0x18, 0xBD,
+	0x3F, 0xD7, 0x22, 0xBD,
+
+	0x9F, 0x41, 0x49, 0xBD,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x25, 0x41, 0x49, 0xBD,
+	0x2D, 0x41, 0x51, 0xBD,
+
+	0x0D, 0x80, 0x07, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x35, 0x40, 0x48, 0xBD,
+	0x3D, 0x40, 0x50, 0xBD,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x25, 0x30,
+	0x2D, 0x30,
+
+	0x35, 0x30,
+	0xB5, 0x30,
+	0xBD, 0x30,
+	0x3D, 0x30,
+
+	0x9C, 0xA7, 0x5B, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x62, 0xFF, 0x0A, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xC9, 0x41, 0xC8, 0xEC,
+	0x42, 0xE1,
+	0x00, 0xE0,
+
+	0x60, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xC8, 0x40, 0xC0, 0xEC,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x5D, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+};
+
+static unsigned char warp_g200_tgzsf[] = {
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x98, 0xA0, 0xE9,
+	0x40, 0x40, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x1F, 0xD7, 0x18, 0xBD,
+	0x3F, 0xD7, 0x22, 0xBD,
+
+	0x81, 0x04,
+	0x89, 0x04,
+	0x01, 0x04,
+	0x09, 0x04,
+
+	0xC9, 0x41, 0xC0, 0xEC,
+	0x11, 0x04,
+	0x00, 0xE0,
+
+	0x41, 0xCC, 0x41, 0xCD,
+	0x49, 0xCC, 0x49, 0xCD,
+
+	0xD1, 0x41, 0xC0, 0xEC,
+	0x51, 0xCC, 0x51, 0xCD,
+
+	0x80, 0x04,
+	0x10, 0x04,
+	0x08, 0x04,
+	0x00, 0xE0,
+
+	0x00, 0xCC, 0xC0, 0xCD,
+	0xD1, 0x49, 0xC0, 0xEC,
+
+	0x8A, 0x1F, 0x20, 0xE9,
+	0x8B, 0x3F, 0x20, 0xE9,
+
+	0x41, 0x3C, 0x41, 0xAD,
+	0x49, 0x3C, 0x49, 0xAD,
+
+	0x10, 0xCC, 0x10, 0xCD,
+	0x08, 0xCC, 0x08, 0xCD,
+
+	0xB9, 0x41, 0x49, 0xBB,
+	0x1F, 0xF0, 0x41, 0xCD,
+
+	0x51, 0x3C, 0x51, 0xAD,
+	0x00, 0x98, 0x80, 0xE9,
+
+	0x8F, 0x80, 0x07, 0xEA,
+	0x24, 0x1F, 0x20, 0xE9,
+
+	0x21, 0x45, 0x80, 0xE8,
+	0x1A, 0x4D, 0x80, 0xE8,
+
+	0x31, 0x55, 0x80, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x15, 0x41, 0x49, 0xBD,
+	0x1D, 0x41, 0x51, 0xBD,
+
+	0x2E, 0x41, 0x2A, 0xB8,
+	0x34, 0x53, 0xA0, 0xE8,
+
+	0x15, 0x30,
+	0x1D, 0x30,
+	0x58, 0xE3,
+	0x00, 0xE0,
+
+	0xB5, 0x40, 0x48, 0xBD,
+	0x3D, 0x40, 0x50, 0xBD,
+
+	0x24, 0x43, 0xA0, 0xE8,
+	0x2C, 0x4B, 0xA0, 0xE8,
+
+	0x15, 0x72,
+	0x09, 0xE3,
+	0x00, 0xE0,
+	0x1D, 0x72,
+
+	0x35, 0x30,
+	0xB5, 0x30,
+	0xBD, 0x30,
+	0x3D, 0x30,
+
+	0x9C, 0x97, 0x57, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x6C, 0x64, 0xC8, 0xEC,
+	0x98, 0xE1,
+	0xB5, 0x05,
+
+	0xBD, 0x05,
+	0x2E, 0x30,
+	0x32, 0xC0, 0xA0, 0xE8,
+
+	0x33, 0xC0, 0xA0, 0xE8,
+	0x74, 0x64, 0xC8, 0xEC,
+
+	0x40, 0x3C, 0x40, 0xAD,
+	0x32, 0x6A,
+	0x2A, 0x30,
+
+	0x20, 0x73,
+	0x33, 0x6A,
+	0x00, 0xE0,
+	0x28, 0x73,
+
+	0x1C, 0x72,
+	0x83, 0xE2,
+	0x7B, 0x80, 0x15, 0xEA,
+
+	0xB8, 0x3D, 0x28, 0xDF,
+	0x30, 0x35, 0x20, 0xDF,
+
+	0x40, 0x30,
+	0x00, 0xE0,
+	0xCC, 0xE2,
+	0x64, 0x72,
+
+	0x25, 0x42, 0x52, 0xBF,
+	0x2D, 0x42, 0x4A, 0xBF,
+
+	0x30, 0x2E, 0x30, 0xDF,
+	0x38, 0x2E, 0x38, 0xDF,
+
+	0x18, 0x1D, 0x45, 0xE9,
+	0x1E, 0x15, 0x45, 0xE9,
+
+	0x2B, 0x49, 0x51, 0xBD,
+	0x00, 0xE0,
+	0x1F, 0x73,
+
+	0x38, 0x38, 0x40, 0xAF,
+	0x30, 0x30, 0x40, 0xAF,
+
+	0x24, 0x1F, 0x24, 0xDF,
+	0x1D, 0x32, 0x20, 0xE9,
+
+	0x2C, 0x1F, 0x2C, 0xDF,
+	0x1A, 0x33, 0x20, 0xE9,
+
+	0xB0, 0x10,
+	0x08, 0xE3,
+	0x40, 0x10,
+	0xB8, 0x10,
+
+	0x26, 0xF0, 0x30, 0xCD,
+	0x2F, 0xF0, 0x38, 0xCD,
+
+	0x2B, 0x80, 0x20, 0xE9,
+	0x2A, 0x80, 0x20, 0xE9,
+
+	0xA6, 0x20,
+	0x88, 0xE2,
+	0x00, 0xE0,
+	0xAF, 0x20,
+
+	0x28, 0x2A, 0x26, 0xAF,
+	0x20, 0x2A, 0xC0, 0xAF,
+
+	0x34, 0x1F, 0x34, 0xDF,
+	0x46, 0x24, 0x46, 0xDF,
+
+	0x28, 0x30, 0x80, 0xBF,
+	0x20, 0x38, 0x80, 0xBF,
+
+	0x47, 0x24, 0x47, 0xDF,
+	0x4E, 0x2C, 0x4E, 0xDF,
+
+	0x4F, 0x2C, 0x4F, 0xDF,
+	0x56, 0x34, 0x56, 0xDF,
+
+	0x28, 0x15, 0x28, 0xDF,
+	0x20, 0x1D, 0x20, 0xDF,
+
+	0x57, 0x34, 0x57, 0xDF,
+	0x00, 0xE0,
+	0x1D, 0x05,
+
+	0x04, 0x80, 0x10, 0xEA,
+	0x89, 0xE2,
+	0x2B, 0x30,
+
+	0x3F, 0xC1, 0x1D, 0xBD,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xA0, 0x68,
+	0xBF, 0x25,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x20, 0xC0, 0x20, 0xAF,
+	0x28, 0x05,
+	0x97, 0x74,
+
+	0x00, 0xE0,
+	0x2A, 0x10,
+	0x16, 0xC0, 0x20, 0xE9,
+
+	0x04, 0x80, 0x10, 0xEA,
+	0x8C, 0xE2,
+	0x95, 0x05,
+
+	0x28, 0xC1, 0x28, 0xAD,
+	0x1F, 0xC1, 0x15, 0xBD,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xA8, 0x67,
+	0x9F, 0x6B,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x28, 0xC0, 0x28, 0xAD,
+	0x1D, 0x25,
+	0x20, 0x05,
+
+	0x28, 0x32, 0x80, 0xAD,
+	0x40, 0x2A, 0x40, 0xBD,
+
+	0x1C, 0x80, 0x20, 0xE9,
+	0x20, 0x33, 0x20, 0xAD,
+
+	0x20, 0x73,
+	0x00, 0xE0,
+	0xB6, 0x49, 0x51, 0xBB,
+
+	0x26, 0x2F, 0xB0, 0xE8,
+	0x19, 0x20, 0x20, 0xE9,
+
+	0x35, 0x20, 0x35, 0xDF,
+	0x3D, 0x20, 0x3D, 0xDF,
+
+	0x15, 0x20, 0x15, 0xDF,
+	0x1D, 0x20, 0x1D, 0xDF,
+
+	0x26, 0xD0, 0x26, 0xCD,
+	0x29, 0x49, 0x2A, 0xB8,
+
+	0x26, 0x40, 0x80, 0xBD,
+	0x3B, 0x48, 0x50, 0xBD,
+
+	0x3E, 0x54, 0x57, 0x9F,
+	0x00, 0xE0,
+	0x82, 0xE1,
+
+	0x1E, 0xAF, 0x59, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x26, 0x30,
+	0x29, 0x30,
+	0x48, 0x3C, 0x48, 0xAD,
+
+	0x2B, 0x72,
+	0xC2, 0xE1,
+	0x2C, 0xC0, 0x44, 0xC2,
+
+	0x05, 0x24, 0x34, 0xBF,
+	0x0D, 0x24, 0x2C, 0xBF,
+
+	0x2D, 0x46, 0x4E, 0xBF,
+	0x25, 0x46, 0x56, 0xBF,
+
+	0x20, 0x1D, 0x6F, 0x8F,
+	0x32, 0x3E, 0x5F, 0xE9,
+
+	0x3E, 0x50, 0x56, 0x9F,
+	0x00, 0xE0,
+	0x3B, 0x30,
+
+	0x1E, 0x8F, 0x51, 0x9F,
+	0x33, 0x1E, 0x5F, 0xE9,
+
+	0x05, 0x44, 0x54, 0xB2,
+	0x0D, 0x44, 0x4C, 0xB2,
+
+	0x19, 0xC0, 0xB0, 0xE8,
+	0x34, 0xC0, 0x44, 0xC4,
+
+	0x33, 0x73,
+	0x00, 0xE0,
+	0x3E, 0x62, 0x57, 0x9F,
+
+	0x1E, 0xAF, 0x59, 0x9F,
+	0x00, 0xE0,
+	0x0D, 0x20,
+
+	0x84, 0x3E, 0x58, 0xE9,
+	0x28, 0x1D, 0x6F, 0x8F,
+
+	0x05, 0x20,
+	0x00, 0xE0,
+	0x85, 0x1E, 0x58, 0xE9,
+
+	0x9B, 0x3B, 0x33, 0xDF,
+	0x20, 0x20, 0x42, 0xAF,
+
+	0x30, 0x42, 0x56, 0x9F,
+	0x80, 0x3E, 0x57, 0xE9,
+
+	0x3F, 0x8F, 0x51, 0x9F,
+	0x30, 0x80, 0x5F, 0xE9,
+
+	0x28, 0x28, 0x24, 0xAF,
+	0x81, 0x1E, 0x57, 0xE9,
+
+	0x05, 0x47, 0x57, 0xBF,
+	0x0D, 0x47, 0x4F, 0xBF,
+
+	0x88, 0x80, 0x58, 0xE9,
+	0x1B, 0x29, 0x1B, 0xDF,
+
+	0x30, 0x1D, 0x6F, 0x8F,
+	0x3A, 0x30, 0x4F, 0xE9,
+
+	0x1C, 0x30, 0x26, 0xDF,
+	0x09, 0xE3,
+	0x3B, 0x05,
+
+	0x3E, 0x50, 0x56, 0x9F,
+	0x3B, 0x3F, 0x4F, 0xE9,
+
+	0x1E, 0x8F, 0x51, 0x9F,
+	0x00, 0xE0,
+	0xAC, 0x20,
+
+	0x2D, 0x44, 0x4C, 0xB4,
+	0x2C, 0x1C, 0xC0, 0xAF,
+
+	0x25, 0x44, 0x54, 0xB4,
+	0x00, 0xE0,
+	0xC8, 0x30,
+
+	0x30, 0x46, 0x30, 0xAF,
+	0x1B, 0x1B, 0x48, 0xAF,
+
+	0x00, 0xE0,
+	0x25, 0x20,
+	0x38, 0x2C, 0x4F, 0xE9,
+
+	0x86, 0x80, 0x57, 0xE9,
+	0x38, 0x1D, 0x6F, 0x8F,
+
+	0x28, 0x74,
+	0x00, 0xE0,
+	0x0D, 0x44, 0x4C, 0xB0,
+
+	0x05, 0x44, 0x54, 0xB0,
+	0x2D, 0x20,
+	0x9B, 0x10,
+
+	0x82, 0x3E, 0x57, 0xE9,
+	0x32, 0xF0, 0x1B, 0xCD,
+
+	0x1E, 0xBD, 0x59, 0x9F,
+	0x83, 0x1E, 0x57, 0xE9,
+
+	0x38, 0x47, 0x38, 0xAF,
+	0x34, 0x20,
+	0x2A, 0x30,
+
+	0x00, 0xE0,
+	0x0D, 0x20,
+	0x32, 0x20,
+	0x05, 0x20,
+
+	0x87, 0x80, 0x57, 0xE9,
+	0x1F, 0x54, 0x57, 0x9F,
+
+	0x17, 0x42, 0x56, 0x9F,
+	0x00, 0xE0,
+	0x3B, 0x6A,
+
+	0x3F, 0x8F, 0x51, 0x9F,
+	0x37, 0x1E, 0x4F, 0xE9,
+
+	0x37, 0x32, 0x2A, 0xAF,
+	0x00, 0xE0,
+	0x32, 0x00,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x27, 0xC0, 0x44, 0xC0,
+
+	0x36, 0x1F, 0x4F, 0xE9,
+	0x1F, 0x1F, 0x26, 0xDF,
+
+	0x37, 0x1B, 0x37, 0xBF,
+	0x17, 0x26, 0x17, 0xDF,
+
+	0x3E, 0x17, 0x4F, 0xE9,
+	0x3F, 0x3F, 0x4F, 0xE9,
+
+	0x34, 0x1F, 0x34, 0xAF,
+	0x2B, 0x05,
+	0xA7, 0x20,
+
+	0x33, 0x2B, 0x37, 0xDF,
+	0x27, 0x17, 0xC0, 0xAF,
+
+	0x34, 0x80, 0x4F, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x2D, 0x21, 0x1A, 0xB0,
+	0x25, 0x21, 0x31, 0xB0,
+
+	0x0D, 0x21, 0x1A, 0xB2,
+	0x05, 0x21, 0x31, 0xB2,
+
+	0x03, 0x80, 0x2A, 0xEA,
+	0x17, 0xC1, 0x2B, 0xBD,
+
+	0x2D, 0x20,
+	0x25, 0x20,
+	0x05, 0x20,
+	0x0D, 0x20,
+
+	0xB3, 0x68,
+	0x97, 0x25,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x33, 0xC0, 0x33, 0xAF,
+	0x2F, 0xC0, 0x21, 0xC0,
+
+	0x16, 0x42, 0x56, 0x9F,
+	0x3C, 0x27, 0x4F, 0xE9,
+
+	0x1E, 0x62, 0x57, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x25, 0x21, 0x31, 0xB4,
+	0x2D, 0x21, 0x1A, 0xB4,
+
+	0x3F, 0x2F, 0x5D, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x33, 0x05,
+	0x00, 0xE0,
+	0x28, 0x19, 0x60, 0xEC,
+
+	0x0D, 0x21, 0x1A, 0xB6,
+	0x05, 0x21, 0x31, 0xB6,
+
+	0x37, 0x0F, 0x5C, 0x9F,
+	0x00, 0xE0,
+	0x2F, 0x20,
+
+	0x23, 0x3B, 0x33, 0xAD,
+	0x1E, 0x26, 0x1E, 0xDF,
+
+	0xA7, 0x1E, 0x4F, 0xE9,
+	0x17, 0x26, 0x16, 0xDF,
+
+	0x2D, 0x20,
+	0x00, 0xE0,
+	0xA8, 0x3F, 0x4F, 0xE9,
+
+	0x2F, 0x2F, 0x1E, 0xAF,
+	0x25, 0x20,
+	0x00, 0xE0,
+
+	0xA4, 0x16, 0x4F, 0xE9,
+	0x0F, 0xC0, 0x21, 0xC2,
+
+	0xA6, 0x80, 0x4F, 0xE9,
+	0x1F, 0x62, 0x57, 0x9F,
+
+	0x0D, 0x20,
+	0x05, 0x20,
+	0x2F, 0xC0, 0x21, 0xC6,
+
+	0x3F, 0x2F, 0x5D, 0x9F,
+	0x00, 0xE0,
+	0x0F, 0x20,
+
+	0x17, 0x50, 0x56, 0x9F,
+	0xA5, 0x37, 0x4F, 0xE9,
+
+	0x06, 0xC0, 0x21, 0xC4,
+	0x0F, 0x17, 0x0F, 0xAF,
+
+	0x37, 0x0F, 0x5C, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x2F, 0x20,
+	0x00, 0xE0,
+	0xA3, 0x80, 0x4F, 0xE9,
+
+	0x06, 0x20,
+	0x00, 0xE0,
+	0x1F, 0x26, 0x1F, 0xDF,
+
+	0x17, 0x26, 0x17, 0xDF,
+	0x35, 0x17, 0x4F, 0xE9,
+
+	0xA1, 0x1F, 0x4F, 0xE9,
+	0xA2, 0x3F, 0x4F, 0xE9,
+
+	0x06, 0x06, 0x1F, 0xAF,
+	0x39, 0x37, 0x4F, 0xE9,
+
+	0x2F, 0x2F, 0x17, 0xAF,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xA0, 0x80, 0x4F, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x31, 0x80, 0x4F, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x57, 0x39, 0x20, 0xE9,
+
+	0x16, 0x28, 0x20, 0xE9,
+	0x1D, 0x3B, 0x20, 0xE9,
+
+	0x1E, 0x2B, 0x20, 0xE9,
+	0x2B, 0x32, 0x20, 0xE9,
+
+	0x1C, 0x23, 0x20, 0xE9,
+	0x57, 0x36, 0x20, 0xE9,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x40, 0x40, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x90, 0xE2,
+	0x00, 0xE0,
+
+	0x68, 0xFF, 0x20, 0xEA,
+	0x19, 0xC8, 0xC1, 0xCD,
+
+	0x1F, 0xD7, 0x18, 0xBD,
+	0x3F, 0xD7, 0x22, 0xBD,
+
+	0x9F, 0x41, 0x49, 0xBD,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x25, 0x41, 0x49, 0xBD,
+	0x2D, 0x41, 0x51, 0xBD,
+
+	0x0D, 0x80, 0x07, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x35, 0x40, 0x48, 0xBD,
+	0x3D, 0x40, 0x50, 0xBD,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x25, 0x30,
+	0x2D, 0x30,
+
+	0x35, 0x30,
+	0xB5, 0x30,
+	0xBD, 0x30,
+	0x3D, 0x30,
+
+	0x9C, 0xA7, 0x5B, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x67, 0xFF, 0x0A, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xC9, 0x41, 0xC8, 0xEC,
+	0x42, 0xE1,
+	0x00, 0xE0,
+
+	0x65, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xC8, 0x40, 0xC0, 0xEC,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x62, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+};
+
+static unsigned char warp_g400_t2gz[] = {
+
+	0x00, 0x8A, 0x98, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x0A, 0x40, 0x50, 0xBF,
+	0x2A, 0x40, 0x60, 0xBF,
+
+	0x32, 0x41, 0x51, 0xBF,
+	0x3A, 0x41, 0x61, 0xBF,
+
+	0xC3, 0x6B,
+	0xD3, 0x6B,
+	0x00, 0x8A, 0x98, 0xE9,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x96, 0xE2,
+	0x41, 0x04,
+
+	0x7B, 0x43, 0xA0, 0xE8,
+	0x73, 0x53, 0xA0, 0xE8,
+
+	0xAD, 0xEE, 0x23, 0x9F,
+	0x00, 0xE0,
+	0x51, 0x04,
+
+	0x90, 0xE2,
+	0x61, 0x04,
+	0x31, 0x46, 0xB1, 0xE8,
+
+	0x51, 0x41, 0xE0, 0xEC,
+	0x39, 0x67, 0xB1, 0xE8,
+
+	0x00, 0x04,
+	0x46, 0xE2,
+	0x73, 0x63, 0xA0, 0xE8,
+
+	0x61, 0x41, 0xE0, 0xEC,
+	0x31, 0x00,
+	0x39, 0x00,
+
+	0x78, 0x80, 0x15, 0xEA,
+	0x10, 0x04,
+	0x20, 0x04,
+
+	0x61, 0x51, 0xE0, 0xEC,
+	0x2F, 0x41, 0x60, 0xEA,
+
+	0x31, 0x20,
+	0x39, 0x20,
+	0x1F, 0x42, 0xA0, 0xE8,
+
+	0x2A, 0x42, 0x52, 0xBF,
+	0x0F, 0x52, 0xA0, 0xE8,
+
+	0x1A, 0x42, 0x62, 0xBF,
+	0x1E, 0x51, 0x60, 0xEA,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x0E, 0x61, 0x60, 0xEA,
+
+	0x32, 0x40, 0x50, 0xBD,
+	0x22, 0x40, 0x60, 0xBD,
+
+	0x12, 0x41, 0x51, 0xBD,
+	0x3A, 0x41, 0x61, 0xBD,
+
+	0xBF, 0x2F, 0x0E, 0xBD,
+	0x97, 0xE2,
+	0x7B, 0x72,
+
+	0x32, 0x20,
+	0x22, 0x20,
+	0x12, 0x20,
+	0x3A, 0x20,
+
+	0x35, 0x48, 0xB1, 0xE8,
+	0x3D, 0x59, 0xB1, 0xE8,
+
+	0x46, 0x31, 0x46, 0xBF,
+	0x56, 0x31, 0x56, 0xBF,
+
+	0xB3, 0xE2, 0x2D, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x66, 0x31, 0x66, 0xBF,
+	0x47, 0x39, 0x47, 0xBF,
+
+	0x57, 0x39, 0x57, 0xBF,
+	0x67, 0x39, 0x67, 0xBF,
+
+	0x69, 0x80, 0x07, 0xEA,
+	0x24, 0x41, 0x20, 0xE9,
+
+	0x35, 0x00,
+	0x3D, 0x00,
+	0x00, 0xE0,
+	0x2D, 0x73,
+
+	0x33, 0x72,
+	0x0C, 0xE3,
+	0x8D, 0x2F, 0x1E, 0xBD,
+
+	0x43, 0x75, 0xF8, 0xEC,
+	0x35, 0x20,
+	0x3D, 0x20,
+
+	0x43, 0x43, 0x2D, 0xDF,
+	0x53, 0x53, 0x2D, 0xDF,
+
+	0xAE, 0x1E, 0x0E, 0xBD,
+	0x58, 0xE3,
+	0x33, 0x66,
+
+	0x48, 0x35, 0x48, 0xBF,
+	0x58, 0x35, 0x58, 0xBF,
+
+	0x68, 0x35, 0x68, 0xBF,
+	0x49, 0x3D, 0x49, 0xBF,
+
+	0x59, 0x3D, 0x59, 0xBF,
+	0x69, 0x3D, 0x69, 0xBF,
+
+	0x63, 0x63, 0x2D, 0xDF,
+	0x4D, 0x7D, 0xF8, 0xEC,
+
+	0x59, 0xE3,
+	0x00, 0xE0,
+	0xB8, 0x38, 0x33, 0xBF,
+
+	0x2D, 0x73,
+	0x30, 0x76,
+	0x18, 0x3A, 0x41, 0xE9,
+
+	0x3F, 0x53, 0xA0, 0xE8,
+	0x05, 0x80, 0x3D, 0xEA,
+
+	0x37, 0x43, 0xA0, 0xE8,
+	0x3D, 0x63, 0xA0, 0xE8,
+
+	0x50, 0x70, 0xF8, 0xEC,
+	0x2B, 0x50, 0x3C, 0xE9,
+
+	0x1F, 0x0F, 0xBC, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x59, 0x78, 0xF8, 0xEC,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x1E, 0x12, 0x41, 0xE9,
+	0x1A, 0x22, 0x41, 0xE9,
+
+	0x46, 0x37, 0x46, 0xDF,
+	0x56, 0x3F, 0x56, 0xDF,
+
+	0x2B, 0x40, 0x3D, 0xE9,
+	0x66, 0x3D, 0x66, 0xDF,
+
+	0x1D, 0x32, 0x41, 0xE9,
+	0x67, 0x3D, 0x67, 0xDF,
+
+	0x47, 0x37, 0x47, 0xDF,
+	0x57, 0x3F, 0x57, 0xDF,
+
+	0x2A, 0x40, 0x20, 0xE9,
+	0x59, 0x3F, 0x59, 0xDF,
+
+	0x16, 0x30, 0x20, 0xE9,
+	0x69, 0x3D, 0x69, 0xDF,
+
+	0x48, 0x37, 0x48, 0xDF,
+	0x58, 0x3F, 0x58, 0xDF,
+
+	0x12, 0x12, 0x2D, 0xDF,
+	0x22, 0x22, 0x2D, 0xDF,
+
+	0x32, 0x32, 0x2D, 0xDF,
+	0x3A, 0x3A, 0x2D, 0xDF,
+
+	0x68, 0x3D, 0x68, 0xDF,
+	0x49, 0x37, 0x49, 0xDF,
+
+	0x3D, 0xCF, 0x74, 0xC0,
+	0x37, 0xCF, 0x74, 0xC4,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x34, 0x80, 0x20, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3C, 0x3D, 0x20, 0xE9,
+
+	0x0A, 0x44, 0x54, 0xB0,
+	0x02, 0x44, 0x64, 0xB0,
+
+	0x2A, 0x44, 0x54, 0xB2,
+	0x1A, 0x44, 0x64, 0xB2,
+
+	0x25, 0x80, 0x3A, 0xEA,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x3D, 0xCF, 0x74, 0xC2,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x32, 0x31, 0x5F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x33, 0x39, 0x5F, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x2A, 0x44, 0x54, 0xB4,
+	0x1A, 0x44, 0x64, 0xB4,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x38, 0x3D, 0x20, 0xE9,
+
+	0x88, 0x73, 0x5E, 0xE9,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x2A, 0x46, 0x56, 0xBF,
+	0x1A, 0x46, 0x66, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x3E, 0x30, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3F, 0x38, 0x4F, 0xE9,
+
+	0x0A, 0x47, 0x57, 0xBF,
+	0x02, 0x47, 0x67, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x3A, 0x31, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3B, 0x39, 0x4F, 0xE9,
+
+	0x2A, 0x43, 0x53, 0xBF,
+	0x1A, 0x43, 0x63, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x36, 0x31, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x37, 0x39, 0x4F, 0xE9,
+
+	0x0A, 0x48, 0x58, 0xBF,
+	0x02, 0x48, 0x68, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x80, 0x31, 0x57, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x81, 0x39, 0x57, 0xE9,
+
+	0x2A, 0x49, 0x59, 0xBF,
+	0x1A, 0x49, 0x69, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x82, 0x30, 0x57, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x83, 0x38, 0x57, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x84, 0x31, 0x5E, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x85, 0x39, 0x5E, 0xE9,
+
+	0x86, 0x76, 0x57, 0xE9,
+	0x8A, 0x36, 0x20, 0xE9,
+
+	0x87, 0x77, 0x57, 0xE9,
+	0x8B, 0x3E, 0xBF, 0xEA,
+
+	0x80, 0x30, 0x57, 0xE9,
+	0x81, 0x38, 0x57, 0xE9,
+
+	0x82, 0x31, 0x57, 0xE9,
+	0x86, 0x78, 0x57, 0xE9,
+
+	0x83, 0x39, 0x57, 0xE9,
+	0x87, 0x79, 0x57, 0xE9,
+
+	0x30, 0x1F, 0x5F, 0xE9,
+	0x8A, 0x34, 0x20, 0xE9,
+
+	0x8B, 0x3C, 0x20, 0xE9,
+	0x37, 0x50, 0x60, 0xBD,
+
+	0x57, 0x0D, 0x20, 0xE9,
+	0x35, 0x51, 0x61, 0xBD,
+
+	0x2B, 0x50, 0x20, 0xE9,
+	0x1D, 0x37, 0xE1, 0xEA,
+
+	0x1E, 0x35, 0xE1, 0xEA,
+	0x00, 0xE0,
+	0x0E, 0x77,
+
+	0x24, 0x51, 0x20, 0xE9,
+	0x9F, 0xFF, 0x20, 0xEA,
+
+	0x16, 0x0E, 0x20, 0xE9,
+	0x57, 0x2E, 0xBF, 0xEA,
+
+	0x0B, 0x46, 0xA0, 0xE8,
+	0x1B, 0x56, 0xA0, 0xE8,
+
+	0x2B, 0x66, 0xA0, 0xE8,
+	0x0C, 0x47, 0xA0, 0xE8,
+
+	0x1C, 0x57, 0xA0, 0xE8,
+	0x2C, 0x67, 0xA0, 0xE8,
+
+	0x0B, 0x00,
+	0x1B, 0x00,
+	0x2B, 0x00,
+	0x00, 0xE0,
+
+	0x0C, 0x00,
+	0x1C, 0x00,
+	0x2C, 0x00,
+	0x00, 0xE0,
+
+	0x0B, 0x65,
+	0x1B, 0x65,
+	0x2B, 0x65,
+	0x00, 0xE0,
+
+	0x0C, 0x65,
+	0x1C, 0x65,
+	0x2C, 0x65,
+	0x00, 0xE0,
+
+	0x0B, 0x1B, 0x60, 0xEC,
+	0x36, 0xD7, 0x36, 0xAD,
+
+	0x2B, 0x80, 0x60, 0xEC,
+	0x0C, 0x1C, 0x60, 0xEC,
+
+	0x3E, 0xD7, 0x3E, 0xAD,
+	0x2C, 0x80, 0x60, 0xEC,
+
+	0x0B, 0x2B, 0xDE, 0xE8,
+	0x1B, 0x80, 0xDE, 0xE8,
+
+	0x36, 0x80, 0x36, 0xBD,
+	0x3E, 0x80, 0x3E, 0xBD,
+
+	0x33, 0xD7, 0x0B, 0xBD,
+	0x3B, 0xD7, 0x1B, 0xBD,
+
+	0x46, 0x80, 0x46, 0xCF,
+	0x57, 0x80, 0x57, 0xCF,
+
+	0x66, 0x33, 0x66, 0xCF,
+	0x47, 0x3B, 0x47, 0xCF,
+
+	0x56, 0x33, 0x56, 0xCF,
+	0x67, 0x3B, 0x67, 0xCF,
+
+	0x0B, 0x48, 0xA0, 0xE8,
+	0x1B, 0x58, 0xA0, 0xE8,
+
+	0x2B, 0x68, 0xA0, 0xE8,
+	0x0C, 0x49, 0xA0, 0xE8,
+
+	0x1C, 0x59, 0xA0, 0xE8,
+	0x2C, 0x69, 0xA0, 0xE8,
+
+	0x0B, 0x00,
+	0x1B, 0x00,
+	0x2B, 0x00,
+	0x00, 0xE0,
+
+	0x0C, 0x00,
+	0x1C, 0x00,
+	0x2C, 0x00,
+	0x00, 0xE0,
+
+	0x0B, 0x65,
+	0x1B, 0x65,
+	0x2B, 0x65,
+	0x00, 0xE0,
+
+	0x0C, 0x65,
+	0x1C, 0x65,
+	0x2C, 0x65,
+	0x00, 0xE0,
+
+	0x0B, 0x1B, 0x60, 0xEC,
+	0x34, 0xD7, 0x34, 0xAD,
+
+	0x2B, 0x80, 0x60, 0xEC,
+	0x0C, 0x1C, 0x60, 0xEC,
+
+	0x3C, 0xD7, 0x3C, 0xAD,
+	0x2C, 0x80, 0x60, 0xEC,
+
+	0x0B, 0x2B, 0xDE, 0xE8,
+	0x1B, 0x80, 0xDE, 0xE8,
+
+	0x34, 0x80, 0x34, 0xBD,
+	0x3C, 0x80, 0x3C, 0xBD,
+
+	0x33, 0xD7, 0x0B, 0xBD,
+	0x3B, 0xD7, 0x1B, 0xBD,
+
+	0x48, 0x80, 0x48, 0xCF,
+	0x59, 0x80, 0x59, 0xCF,
+
+	0x68, 0x33, 0x68, 0xCF,
+	0x49, 0x3B, 0x49, 0xCF,
+
+	0xBE, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x58, 0x33, 0x58, 0xCF,
+	0x69, 0x3B, 0x69, 0xCF,
+
+	0x7D, 0xFF, 0x20, 0xEA,
+	0x57, 0xC0, 0xBF, 0xEA,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+};
+
+static unsigned char warp_g400_t2gza[] = {
+
+	0x00, 0x8A, 0x98, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x0A, 0x40, 0x50, 0xBF,
+	0x2A, 0x40, 0x60, 0xBF,
+
+	0x32, 0x41, 0x51, 0xBF,
+	0x3A, 0x41, 0x61, 0xBF,
+
+	0xC3, 0x6B,
+	0xD3, 0x6B,
+	0x00, 0x8A, 0x98, 0xE9,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x96, 0xE2,
+	0x41, 0x04,
+
+	0x7B, 0x43, 0xA0, 0xE8,
+	0x73, 0x53, 0xA0, 0xE8,
+
+	0xAD, 0xEE, 0x23, 0x9F,
+	0x00, 0xE0,
+	0x51, 0x04,
+
+	0x90, 0xE2,
+	0x61, 0x04,
+	0x31, 0x46, 0xB1, 0xE8,
+
+	0x51, 0x41, 0xE0, 0xEC,
+	0x39, 0x67, 0xB1, 0xE8,
+
+	0x00, 0x04,
+	0x46, 0xE2,
+	0x73, 0x63, 0xA0, 0xE8,
+
+	0x61, 0x41, 0xE0, 0xEC,
+	0x31, 0x00,
+	0x39, 0x00,
+
+	0x7C, 0x80, 0x15, 0xEA,
+	0x10, 0x04,
+	0x20, 0x04,
+
+	0x61, 0x51, 0xE0, 0xEC,
+	0x2F, 0x41, 0x60, 0xEA,
+
+	0x31, 0x20,
+	0x39, 0x20,
+	0x1F, 0x42, 0xA0, 0xE8,
+
+	0x2A, 0x42, 0x52, 0xBF,
+	0x0F, 0x52, 0xA0, 0xE8,
+
+	0x1A, 0x42, 0x62, 0xBF,
+	0x1E, 0x51, 0x60, 0xEA,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x0E, 0x61, 0x60, 0xEA,
+
+	0x32, 0x40, 0x50, 0xBD,
+	0x22, 0x40, 0x60, 0xBD,
+
+	0x12, 0x41, 0x51, 0xBD,
+	0x3A, 0x41, 0x61, 0xBD,
+
+	0xBF, 0x2F, 0x0E, 0xBD,
+	0x97, 0xE2,
+	0x7B, 0x72,
+
+	0x32, 0x20,
+	0x22, 0x20,
+	0x12, 0x20,
+	0x3A, 0x20,
+
+	0x35, 0x48, 0xB1, 0xE8,
+	0x3D, 0x59, 0xB1, 0xE8,
+
+	0x46, 0x31, 0x46, 0xBF,
+	0x56, 0x31, 0x56, 0xBF,
+
+	0xB3, 0xE2, 0x2D, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x66, 0x31, 0x66, 0xBF,
+	0x47, 0x39, 0x47, 0xBF,
+
+	0x57, 0x39, 0x57, 0xBF,
+	0x67, 0x39, 0x67, 0xBF,
+
+	0x6D, 0x80, 0x07, 0xEA,
+	0x24, 0x41, 0x20, 0xE9,
+
+	0x35, 0x00,
+	0x3D, 0x00,
+	0x00, 0xE0,
+	0x2D, 0x73,
+
+	0x33, 0x72,
+	0x0C, 0xE3,
+	0x8D, 0x2F, 0x1E, 0xBD,
+
+	0x43, 0x75, 0xF8, 0xEC,
+	0x35, 0x20,
+	0x3D, 0x20,
+
+	0x43, 0x43, 0x2D, 0xDF,
+	0x53, 0x53, 0x2D, 0xDF,
+
+	0xAE, 0x1E, 0x0E, 0xBD,
+	0x58, 0xE3,
+	0x33, 0x66,
+
+	0x48, 0x35, 0x48, 0xBF,
+	0x58, 0x35, 0x58, 0xBF,
+
+	0x68, 0x35, 0x68, 0xBF,
+	0x49, 0x3D, 0x49, 0xBF,
+
+	0x59, 0x3D, 0x59, 0xBF,
+	0x69, 0x3D, 0x69, 0xBF,
+
+	0x63, 0x63, 0x2D, 0xDF,
+	0x4D, 0x7D, 0xF8, 0xEC,
+
+	0x59, 0xE3,
+	0x00, 0xE0,
+	0xB8, 0x38, 0x33, 0xBF,
+
+	0x2D, 0x73,
+	0x30, 0x76,
+	0x18, 0x3A, 0x41, 0xE9,
+
+	0x3F, 0x53, 0xA0, 0xE8,
+	0x05, 0x80, 0x3D, 0xEA,
+
+	0x37, 0x43, 0xA0, 0xE8,
+	0x3D, 0x63, 0xA0, 0xE8,
+
+	0x50, 0x70, 0xF8, 0xEC,
+	0x2B, 0x50, 0x3C, 0xE9,
+
+	0x1F, 0x0F, 0xBC, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x59, 0x78, 0xF8, 0xEC,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x1E, 0x12, 0x41, 0xE9,
+	0x1A, 0x22, 0x41, 0xE9,
+
+	0x46, 0x37, 0x46, 0xDF,
+	0x56, 0x3F, 0x56, 0xDF,
+
+	0x2B, 0x40, 0x3D, 0xE9,
+	0x66, 0x3D, 0x66, 0xDF,
+
+	0x1D, 0x32, 0x41, 0xE9,
+	0x67, 0x3D, 0x67, 0xDF,
+
+	0x47, 0x37, 0x47, 0xDF,
+	0x57, 0x3F, 0x57, 0xDF,
+
+	0x2A, 0x40, 0x20, 0xE9,
+	0x59, 0x3F, 0x59, 0xDF,
+
+	0x16, 0x30, 0x20, 0xE9,
+	0x69, 0x3D, 0x69, 0xDF,
+
+	0x48, 0x37, 0x48, 0xDF,
+	0x58, 0x3F, 0x58, 0xDF,
+
+	0x12, 0x12, 0x2D, 0xDF,
+	0x22, 0x22, 0x2D, 0xDF,
+
+	0x32, 0x32, 0x2D, 0xDF,
+	0x3A, 0x3A, 0x2D, 0xDF,
+
+	0x68, 0x3D, 0x68, 0xDF,
+	0x49, 0x37, 0x49, 0xDF,
+
+	0x3D, 0xCF, 0x74, 0xC0,
+	0x37, 0xCF, 0x74, 0xC4,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x34, 0x80, 0x20, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3C, 0x3D, 0x20, 0xE9,
+
+	0x0A, 0x44, 0x54, 0xB0,
+	0x02, 0x44, 0x64, 0xB0,
+
+	0x2A, 0x44, 0x54, 0xB2,
+	0x1A, 0x44, 0x64, 0xB2,
+
+	0x29, 0x80, 0x3A, 0xEA,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x0F, 0xCF, 0x74, 0xC6,
+	0x3D, 0xCF, 0x74, 0xC2,
+
+	0x88, 0x73, 0x5E, 0xE9,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x32, 0x31, 0x5F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x33, 0x39, 0x5F, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x9C, 0x0F, 0x20, 0xE9,
+
+	0x0A, 0x44, 0x54, 0xB4,
+	0x02, 0x44, 0x64, 0xB4,
+
+	0x2A, 0x44, 0x54, 0xB6,
+	0x1A, 0x44, 0x64, 0xB6,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x38, 0x3D, 0x20, 0xE9,
+
+	0x0A, 0x20,
+	0x02, 0x20,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x0A, 0x47, 0x57, 0xBF,
+	0x02, 0x47, 0x67, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x3E, 0x30, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x3F, 0x38, 0x4F, 0xE9,
+
+	0x2A, 0x46, 0x56, 0xBF,
+	0x1A, 0x46, 0x66, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x3A, 0x31, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3B, 0x39, 0x4F, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x36, 0x30, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x37, 0x38, 0x4F, 0xE9,
+
+	0x2A, 0x43, 0x53, 0xBF,
+	0x1A, 0x43, 0x63, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x9D, 0x31, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x9E, 0x39, 0x4F, 0xE9,
+
+	0x0A, 0x48, 0x58, 0xBF,
+	0x02, 0x48, 0x68, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x80, 0x31, 0x57, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x81, 0x39, 0x57, 0xE9,
+
+	0x2A, 0x49, 0x59, 0xBF,
+	0x1A, 0x49, 0x69, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x82, 0x30, 0x57, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x83, 0x38, 0x57, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x84, 0x31, 0x5E, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x85, 0x39, 0x5E, 0xE9,
+
+	0x86, 0x76, 0x57, 0xE9,
+	0x8A, 0x36, 0x20, 0xE9,
+
+	0x87, 0x77, 0x57, 0xE9,
+	0x8B, 0x3E, 0xBF, 0xEA,
+
+	0x80, 0x30, 0x57, 0xE9,
+	0x81, 0x38, 0x57, 0xE9,
+
+	0x82, 0x31, 0x57, 0xE9,
+	0x86, 0x78, 0x57, 0xE9,
+
+	0x83, 0x39, 0x57, 0xE9,
+	0x87, 0x79, 0x57, 0xE9,
+
+	0x30, 0x1F, 0x5F, 0xE9,
+	0x8A, 0x34, 0x20, 0xE9,
+
+	0x8B, 0x3C, 0x20, 0xE9,
+	0x37, 0x50, 0x60, 0xBD,
+
+	0x57, 0x0D, 0x20, 0xE9,
+	0x35, 0x51, 0x61, 0xBD,
+
+	0x2B, 0x50, 0x20, 0xE9,
+	0x1D, 0x37, 0xE1, 0xEA,
+
+	0x1E, 0x35, 0xE1, 0xEA,
+	0x00, 0xE0,
+	0x0E, 0x77,
+
+	0x24, 0x51, 0x20, 0xE9,
+	0x9B, 0xFF, 0x20, 0xEA,
+
+	0x16, 0x0E, 0x20, 0xE9,
+	0x57, 0x2E, 0xBF, 0xEA,
+
+	0x0B, 0x46, 0xA0, 0xE8,
+	0x1B, 0x56, 0xA0, 0xE8,
+
+	0x2B, 0x66, 0xA0, 0xE8,
+	0x0C, 0x47, 0xA0, 0xE8,
+
+	0x1C, 0x57, 0xA0, 0xE8,
+	0x2C, 0x67, 0xA0, 0xE8,
+
+	0x0B, 0x00,
+	0x1B, 0x00,
+	0x2B, 0x00,
+	0x00, 0xE0,
+
+	0x0C, 0x00,
+	0x1C, 0x00,
+	0x2C, 0x00,
+	0x00, 0xE0,
+
+	0x0B, 0x65,
+	0x1B, 0x65,
+	0x2B, 0x65,
+	0x00, 0xE0,
+
+	0x0C, 0x65,
+	0x1C, 0x65,
+	0x2C, 0x65,
+	0x00, 0xE0,
+
+	0x0B, 0x1B, 0x60, 0xEC,
+	0x36, 0xD7, 0x36, 0xAD,
+
+	0x2B, 0x80, 0x60, 0xEC,
+	0x0C, 0x1C, 0x60, 0xEC,
+
+	0x3E, 0xD7, 0x3E, 0xAD,
+	0x2C, 0x80, 0x60, 0xEC,
+
+	0x0B, 0x2B, 0xDE, 0xE8,
+	0x1B, 0x80, 0xDE, 0xE8,
+
+	0x36, 0x80, 0x36, 0xBD,
+	0x3E, 0x80, 0x3E, 0xBD,
+
+	0x33, 0xD7, 0x0B, 0xBD,
+	0x3B, 0xD7, 0x1B, 0xBD,
+
+	0x46, 0x80, 0x46, 0xCF,
+	0x57, 0x80, 0x57, 0xCF,
+
+	0x66, 0x33, 0x66, 0xCF,
+	0x47, 0x3B, 0x47, 0xCF,
+
+	0x56, 0x33, 0x56, 0xCF,
+	0x67, 0x3B, 0x67, 0xCF,
+
+	0x0B, 0x48, 0xA0, 0xE8,
+	0x1B, 0x58, 0xA0, 0xE8,
+
+	0x2B, 0x68, 0xA0, 0xE8,
+	0x0C, 0x49, 0xA0, 0xE8,
+
+	0x1C, 0x59, 0xA0, 0xE8,
+	0x2C, 0x69, 0xA0, 0xE8,
+
+	0x0B, 0x00,
+	0x1B, 0x00,
+	0x2B, 0x00,
+	0x00, 0xE0,
+
+	0x0C, 0x00,
+	0x1C, 0x00,
+	0x2C, 0x00,
+	0x00, 0xE0,
+
+	0x0B, 0x65,
+	0x1B, 0x65,
+	0x2B, 0x65,
+	0x00, 0xE0,
+
+	0x0C, 0x65,
+	0x1C, 0x65,
+	0x2C, 0x65,
+	0x00, 0xE0,
+
+	0x0B, 0x1B, 0x60, 0xEC,
+	0x34, 0xD7, 0x34, 0xAD,
+
+	0x2B, 0x80, 0x60, 0xEC,
+	0x0C, 0x1C, 0x60, 0xEC,
+
+	0x3C, 0xD7, 0x3C, 0xAD,
+	0x2C, 0x80, 0x60, 0xEC,
+
+	0x0B, 0x2B, 0xDE, 0xE8,
+	0x1B, 0x80, 0xDE, 0xE8,
+
+	0x34, 0x80, 0x34, 0xBD,
+	0x3C, 0x80, 0x3C, 0xBD,
+
+	0x33, 0xD7, 0x0B, 0xBD,
+	0x3B, 0xD7, 0x1B, 0xBD,
+
+	0x48, 0x80, 0x48, 0xCF,
+	0x59, 0x80, 0x59, 0xCF,
+
+	0x68, 0x33, 0x68, 0xCF,
+	0x49, 0x3B, 0x49, 0xCF,
+
+	0xBA, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x58, 0x33, 0x58, 0xCF,
+	0x69, 0x3B, 0x69, 0xCF,
+
+	0x79, 0xFF, 0x20, 0xEA,
+	0x57, 0xC0, 0xBF, 0xEA,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+};
+
+static unsigned char warp_g400_t2gzaf[] = {
+
+	0x00, 0x8A, 0x98, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x0A, 0x40, 0x50, 0xBF,
+	0x2A, 0x40, 0x60, 0xBF,
+
+	0x32, 0x41, 0x51, 0xBF,
+	0x3A, 0x41, 0x61, 0xBF,
+
+	0xC3, 0x6B,
+	0xD3, 0x6B,
+	0x00, 0x8A, 0x98, 0xE9,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x96, 0xE2,
+	0x41, 0x04,
+
+	0x7B, 0x43, 0xA0, 0xE8,
+	0x73, 0x53, 0xA0, 0xE8,
+
+	0xAD, 0xEE, 0x23, 0x9F,
+	0x00, 0xE0,
+	0x51, 0x04,
+
+	0x90, 0xE2,
+	0x61, 0x04,
+	0x31, 0x46, 0xB1, 0xE8,
+
+	0x51, 0x41, 0xE0, 0xEC,
+	0x39, 0x67, 0xB1, 0xE8,
+
+	0x00, 0x04,
+	0x46, 0xE2,
+	0x73, 0x63, 0xA0, 0xE8,
+
+	0x61, 0x41, 0xE0, 0xEC,
+	0x31, 0x00,
+	0x39, 0x00,
+
+	0x81, 0x80, 0x15, 0xEA,
+	0x10, 0x04,
+	0x20, 0x04,
+
+	0x61, 0x51, 0xE0, 0xEC,
+	0x2F, 0x41, 0x60, 0xEA,
+
+	0x31, 0x20,
+	0x39, 0x20,
+	0x1F, 0x42, 0xA0, 0xE8,
+
+	0x2A, 0x42, 0x52, 0xBF,
+	0x0F, 0x52, 0xA0, 0xE8,
+
+	0x1A, 0x42, 0x62, 0xBF,
+	0x1E, 0x51, 0x60, 0xEA,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x0E, 0x61, 0x60, 0xEA,
+
+	0x32, 0x40, 0x50, 0xBD,
+	0x22, 0x40, 0x60, 0xBD,
+
+	0x12, 0x41, 0x51, 0xBD,
+	0x3A, 0x41, 0x61, 0xBD,
+
+	0xBF, 0x2F, 0x0E, 0xBD,
+	0x97, 0xE2,
+	0x7B, 0x72,
+
+	0x32, 0x20,
+	0x22, 0x20,
+	0x12, 0x20,
+	0x3A, 0x20,
+
+	0x35, 0x48, 0xB1, 0xE8,
+	0x3D, 0x59, 0xB1, 0xE8,
+
+	0x46, 0x31, 0x46, 0xBF,
+	0x56, 0x31, 0x56, 0xBF,
+
+	0xB3, 0xE2, 0x2D, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x66, 0x31, 0x66, 0xBF,
+	0x47, 0x39, 0x47, 0xBF,
+
+	0x57, 0x39, 0x57, 0xBF,
+	0x67, 0x39, 0x67, 0xBF,
+
+	0x72, 0x80, 0x07, 0xEA,
+	0x24, 0x41, 0x20, 0xE9,
+
+	0x35, 0x00,
+	0x3D, 0x00,
+	0x00, 0xE0,
+	0x2D, 0x73,
+
+	0x33, 0x72,
+	0x0C, 0xE3,
+	0x8D, 0x2F, 0x1E, 0xBD,
+
+	0x43, 0x75, 0xF8, 0xEC,
+	0x35, 0x20,
+	0x3D, 0x20,
+
+	0x43, 0x43, 0x2D, 0xDF,
+	0x53, 0x53, 0x2D, 0xDF,
+
+	0xAE, 0x1E, 0x0E, 0xBD,
+	0x58, 0xE3,
+	0x33, 0x66,
+
+	0x48, 0x35, 0x48, 0xBF,
+	0x58, 0x35, 0x58, 0xBF,
+
+	0x68, 0x35, 0x68, 0xBF,
+	0x49, 0x3D, 0x49, 0xBF,
+
+	0x59, 0x3D, 0x59, 0xBF,
+	0x69, 0x3D, 0x69, 0xBF,
+
+	0x63, 0x63, 0x2D, 0xDF,
+	0x4D, 0x7D, 0xF8, 0xEC,
+
+	0x59, 0xE3,
+	0x00, 0xE0,
+	0xB8, 0x38, 0x33, 0xBF,
+
+	0x2D, 0x73,
+	0x30, 0x76,
+	0x18, 0x3A, 0x41, 0xE9,
+
+	0x3F, 0x53, 0xA0, 0xE8,
+	0x05, 0x80, 0x3D, 0xEA,
+
+	0x37, 0x43, 0xA0, 0xE8,
+	0x3D, 0x63, 0xA0, 0xE8,
+
+	0x50, 0x70, 0xF8, 0xEC,
+	0x2B, 0x50, 0x3C, 0xE9,
+
+	0x1F, 0x0F, 0xBC, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x59, 0x78, 0xF8, 0xEC,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x1E, 0x12, 0x41, 0xE9,
+	0x1A, 0x22, 0x41, 0xE9,
+
+	0x46, 0x37, 0x46, 0xDF,
+	0x56, 0x3F, 0x56, 0xDF,
+
+	0x2B, 0x40, 0x3D, 0xE9,
+	0x66, 0x3D, 0x66, 0xDF,
+
+	0x1D, 0x32, 0x41, 0xE9,
+	0x67, 0x3D, 0x67, 0xDF,
+
+	0x47, 0x37, 0x47, 0xDF,
+	0x57, 0x3F, 0x57, 0xDF,
+
+	0x2A, 0x40, 0x20, 0xE9,
+	0x59, 0x3F, 0x59, 0xDF,
+
+	0x16, 0x30, 0x20, 0xE9,
+	0x69, 0x3D, 0x69, 0xDF,
+
+	0x48, 0x37, 0x48, 0xDF,
+	0x58, 0x3F, 0x58, 0xDF,
+
+	0x12, 0x12, 0x2D, 0xDF,
+	0x22, 0x22, 0x2D, 0xDF,
+
+	0x32, 0x32, 0x2D, 0xDF,
+	0x3A, 0x3A, 0x2D, 0xDF,
+
+	0x68, 0x3D, 0x68, 0xDF,
+	0x49, 0x37, 0x49, 0xDF,
+
+	0x3D, 0xCF, 0x74, 0xC0,
+	0x37, 0xCF, 0x74, 0xC4,
+
+	0x0A, 0x44, 0x54, 0xB0,
+	0x02, 0x44, 0x64, 0xB0,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x34, 0x37, 0x20, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3C, 0x3D, 0x20, 0xE9,
+
+	0x2A, 0x44, 0x54, 0xB2,
+	0x1A, 0x44, 0x64, 0xB2,
+
+	0x2E, 0x80, 0x3A, 0xEA,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x88, 0x73, 0x5E, 0xE9,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x3D, 0xCF, 0x74, 0xC2,
+	0x0F, 0xCF, 0x74, 0xC6,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x32, 0x31, 0x5F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x33, 0x39, 0x5F, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x9C, 0x0F, 0x20, 0xE9,
+
+	0x0A, 0x44, 0x54, 0xB4,
+	0x02, 0x44, 0x64, 0xB4,
+
+	0x2A, 0x44, 0x54, 0xB6,
+	0x1A, 0x44, 0x64, 0xB6,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x38, 0x3D, 0x20, 0xE9,
+
+	0x0A, 0x20,
+	0x02, 0x20,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x3D, 0xCF, 0x75, 0xC6,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x3E, 0x30, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x3F, 0x38, 0x4F, 0xE9,
+
+	0x0A, 0x45, 0x55, 0xB6,
+	0x02, 0x45, 0x65, 0xB6,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x3A, 0x31, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3B, 0x39, 0x4F, 0xE9,
+
+	0x31, 0x3D, 0x20, 0xE9,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x2A, 0x46, 0x56, 0xBF,
+	0x1A, 0x46, 0x66, 0xBF,
+
+	0x0A, 0x47, 0x57, 0xBF,
+	0x02, 0x47, 0x67, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x36, 0x30, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x37, 0x38, 0x4F, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x9D, 0x31, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x9E, 0x39, 0x4F, 0xE9,
+
+	0x2A, 0x43, 0x53, 0xBF,
+	0x1A, 0x43, 0x63, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x35, 0x30, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x39, 0x38, 0x4F, 0xE9,
+
+	0x0A, 0x48, 0x58, 0xBF,
+	0x02, 0x48, 0x68, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x80, 0x31, 0x57, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x81, 0x39, 0x57, 0xE9,
+
+	0x2A, 0x49, 0x59, 0xBF,
+	0x1A, 0x49, 0x69, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x82, 0x30, 0x57, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x83, 0x38, 0x57, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x84, 0x31, 0x5E, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x85, 0x39, 0x5E, 0xE9,
+
+	0x86, 0x76, 0x57, 0xE9,
+	0x8A, 0x36, 0x20, 0xE9,
+
+	0x87, 0x77, 0x57, 0xE9,
+	0x8B, 0x3E, 0xBF, 0xEA,
+
+	0x80, 0x30, 0x57, 0xE9,
+	0x81, 0x38, 0x57, 0xE9,
+
+	0x82, 0x31, 0x57, 0xE9,
+	0x86, 0x78, 0x57, 0xE9,
+
+	0x83, 0x39, 0x57, 0xE9,
+	0x87, 0x79, 0x57, 0xE9,
+
+	0x30, 0x1F, 0x5F, 0xE9,
+	0x8A, 0x34, 0x20, 0xE9,
+
+	0x8B, 0x3C, 0x20, 0xE9,
+	0x37, 0x50, 0x60, 0xBD,
+
+	0x57, 0x0D, 0x20, 0xE9,
+	0x35, 0x51, 0x61, 0xBD,
+
+	0x2B, 0x50, 0x20, 0xE9,
+	0x1D, 0x37, 0xE1, 0xEA,
+
+	0x1E, 0x35, 0xE1, 0xEA,
+	0x00, 0xE0,
+	0x0E, 0x77,
+
+	0x24, 0x51, 0x20, 0xE9,
+	0x96, 0xFF, 0x20, 0xEA,
+
+	0x16, 0x0E, 0x20, 0xE9,
+	0x57, 0x2E, 0xBF, 0xEA,
+
+	0x0B, 0x46, 0xA0, 0xE8,
+	0x1B, 0x56, 0xA0, 0xE8,
+
+	0x2B, 0x66, 0xA0, 0xE8,
+	0x0C, 0x47, 0xA0, 0xE8,
+
+	0x1C, 0x57, 0xA0, 0xE8,
+	0x2C, 0x67, 0xA0, 0xE8,
+
+	0x0B, 0x00,
+	0x1B, 0x00,
+	0x2B, 0x00,
+	0x00, 0xE0,
+
+	0x0C, 0x00,
+	0x1C, 0x00,
+	0x2C, 0x00,
+	0x00, 0xE0,
+
+	0x0B, 0x65,
+	0x1B, 0x65,
+	0x2B, 0x65,
+	0x00, 0xE0,
+
+	0x0C, 0x65,
+	0x1C, 0x65,
+	0x2C, 0x65,
+	0x00, 0xE0,
+
+	0x0B, 0x1B, 0x60, 0xEC,
+	0x36, 0xD7, 0x36, 0xAD,
+
+	0x2B, 0x80, 0x60, 0xEC,
+	0x0C, 0x1C, 0x60, 0xEC,
+
+	0x3E, 0xD7, 0x3E, 0xAD,
+	0x2C, 0x80, 0x60, 0xEC,
+
+	0x0B, 0x2B, 0xDE, 0xE8,
+	0x1B, 0x80, 0xDE, 0xE8,
+
+	0x36, 0x80, 0x36, 0xBD,
+	0x3E, 0x80, 0x3E, 0xBD,
+
+	0x33, 0xD7, 0x0B, 0xBD,
+	0x3B, 0xD7, 0x1B, 0xBD,
+
+	0x46, 0x80, 0x46, 0xCF,
+	0x57, 0x80, 0x57, 0xCF,
+
+	0x66, 0x33, 0x66, 0xCF,
+	0x47, 0x3B, 0x47, 0xCF,
+
+	0x56, 0x33, 0x56, 0xCF,
+	0x67, 0x3B, 0x67, 0xCF,
+
+	0x0B, 0x48, 0xA0, 0xE8,
+	0x1B, 0x58, 0xA0, 0xE8,
+
+	0x2B, 0x68, 0xA0, 0xE8,
+	0x0C, 0x49, 0xA0, 0xE8,
+
+	0x1C, 0x59, 0xA0, 0xE8,
+	0x2C, 0x69, 0xA0, 0xE8,
+
+	0x0B, 0x00,
+	0x1B, 0x00,
+	0x2B, 0x00,
+	0x00, 0xE0,
+
+	0x0C, 0x00,
+	0x1C, 0x00,
+	0x2C, 0x00,
+	0x00, 0xE0,
+
+	0x0B, 0x65,
+	0x1B, 0x65,
+	0x2B, 0x65,
+	0x00, 0xE0,
+
+	0x0C, 0x65,
+	0x1C, 0x65,
+	0x2C, 0x65,
+	0x00, 0xE0,
+
+	0x0B, 0x1B, 0x60, 0xEC,
+	0x34, 0xD7, 0x34, 0xAD,
+
+	0x2B, 0x80, 0x60, 0xEC,
+	0x0C, 0x1C, 0x60, 0xEC,
+
+	0x3C, 0xD7, 0x3C, 0xAD,
+	0x2C, 0x80, 0x60, 0xEC,
+
+	0x0B, 0x2B, 0xDE, 0xE8,
+	0x1B, 0x80, 0xDE, 0xE8,
+
+	0x34, 0x80, 0x34, 0xBD,
+	0x3C, 0x80, 0x3C, 0xBD,
+
+	0x33, 0xD7, 0x0B, 0xBD,
+	0x3B, 0xD7, 0x1B, 0xBD,
+
+	0x48, 0x80, 0x48, 0xCF,
+	0x59, 0x80, 0x59, 0xCF,
+
+	0x68, 0x33, 0x68, 0xCF,
+	0x49, 0x3B, 0x49, 0xCF,
+
+	0xB5, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x58, 0x33, 0x58, 0xCF,
+	0x69, 0x3B, 0x69, 0xCF,
+
+	0x74, 0xFF, 0x20, 0xEA,
+	0x57, 0xC0, 0xBF, 0xEA,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+};
+
+static unsigned char warp_g400_t2gzf[] = {
+
+	0x00, 0x8A, 0x98, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x0A, 0x40, 0x50, 0xBF,
+	0x2A, 0x40, 0x60, 0xBF,
+
+	0x32, 0x41, 0x51, 0xBF,
+	0x3A, 0x41, 0x61, 0xBF,
+
+	0xC3, 0x6B,
+	0xD3, 0x6B,
+	0x00, 0x8A, 0x98, 0xE9,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x96, 0xE2,
+	0x41, 0x04,
+
+	0x7B, 0x43, 0xA0, 0xE8,
+	0x73, 0x53, 0xA0, 0xE8,
+
+	0xAD, 0xEE, 0x23, 0x9F,
+	0x00, 0xE0,
+	0x51, 0x04,
+
+	0x90, 0xE2,
+	0x61, 0x04,
+	0x31, 0x46, 0xB1, 0xE8,
+
+	0x51, 0x41, 0xE0, 0xEC,
+	0x39, 0x67, 0xB1, 0xE8,
+
+	0x00, 0x04,
+	0x46, 0xE2,
+	0x73, 0x63, 0xA0, 0xE8,
+
+	0x61, 0x41, 0xE0, 0xEC,
+	0x31, 0x00,
+	0x39, 0x00,
+
+	0x7D, 0x80, 0x15, 0xEA,
+	0x10, 0x04,
+	0x20, 0x04,
+
+	0x61, 0x51, 0xE0, 0xEC,
+	0x2F, 0x41, 0x60, 0xEA,
+
+	0x31, 0x20,
+	0x39, 0x20,
+	0x1F, 0x42, 0xA0, 0xE8,
+
+	0x2A, 0x42, 0x52, 0xBF,
+	0x0F, 0x52, 0xA0, 0xE8,
+
+	0x1A, 0x42, 0x62, 0xBF,
+	0x1E, 0x51, 0x60, 0xEA,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x0E, 0x61, 0x60, 0xEA,
+
+	0x32, 0x40, 0x50, 0xBD,
+	0x22, 0x40, 0x60, 0xBD,
+
+	0x12, 0x41, 0x51, 0xBD,
+	0x3A, 0x41, 0x61, 0xBD,
+
+	0xBF, 0x2F, 0x0E, 0xBD,
+	0x97, 0xE2,
+	0x7B, 0x72,
+
+	0x32, 0x20,
+	0x22, 0x20,
+	0x12, 0x20,
+	0x3A, 0x20,
+
+	0x35, 0x48, 0xB1, 0xE8,
+	0x3D, 0x59, 0xB1, 0xE8,
+
+	0x46, 0x31, 0x46, 0xBF,
+	0x56, 0x31, 0x56, 0xBF,
+
+	0xB3, 0xE2, 0x2D, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x66, 0x31, 0x66, 0xBF,
+	0x47, 0x39, 0x47, 0xBF,
+
+	0x57, 0x39, 0x57, 0xBF,
+	0x67, 0x39, 0x67, 0xBF,
+
+	0x6E, 0x80, 0x07, 0xEA,
+	0x24, 0x41, 0x20, 0xE9,
+
+	0x35, 0x00,
+	0x3D, 0x00,
+	0x00, 0xE0,
+	0x2D, 0x73,
+
+	0x33, 0x72,
+	0x0C, 0xE3,
+	0x8D, 0x2F, 0x1E, 0xBD,
+
+	0x43, 0x75, 0xF8, 0xEC,
+	0x35, 0x20,
+	0x3D, 0x20,
+
+	0x43, 0x43, 0x2D, 0xDF,
+	0x53, 0x53, 0x2D, 0xDF,
+
+	0xAE, 0x1E, 0x0E, 0xBD,
+	0x58, 0xE3,
+	0x33, 0x66,
+
+	0x48, 0x35, 0x48, 0xBF,
+	0x58, 0x35, 0x58, 0xBF,
+
+	0x68, 0x35, 0x68, 0xBF,
+	0x49, 0x3D, 0x49, 0xBF,
+
+	0x59, 0x3D, 0x59, 0xBF,
+	0x69, 0x3D, 0x69, 0xBF,
+
+	0x63, 0x63, 0x2D, 0xDF,
+	0x4D, 0x7D, 0xF8, 0xEC,
+
+	0x59, 0xE3,
+	0x00, 0xE0,
+	0xB8, 0x38, 0x33, 0xBF,
+
+	0x2D, 0x73,
+	0x30, 0x76,
+	0x18, 0x3A, 0x41, 0xE9,
+
+	0x3F, 0x53, 0xA0, 0xE8,
+	0x05, 0x80, 0x3D, 0xEA,
+
+	0x37, 0x43, 0xA0, 0xE8,
+	0x3D, 0x63, 0xA0, 0xE8,
+
+	0x50, 0x70, 0xF8, 0xEC,
+	0x2B, 0x50, 0x3C, 0xE9,
+
+	0x1F, 0x0F, 0xBC, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x59, 0x78, 0xF8, 0xEC,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x1E, 0x12, 0x41, 0xE9,
+	0x1A, 0x22, 0x41, 0xE9,
+
+	0x46, 0x37, 0x46, 0xDF,
+	0x56, 0x3F, 0x56, 0xDF,
+
+	0x2B, 0x40, 0x3D, 0xE9,
+	0x66, 0x3D, 0x66, 0xDF,
+
+	0x1D, 0x32, 0x41, 0xE9,
+	0x67, 0x3D, 0x67, 0xDF,
+
+	0x47, 0x37, 0x47, 0xDF,
+	0x57, 0x3F, 0x57, 0xDF,
+
+	0x2A, 0x40, 0x20, 0xE9,
+	0x59, 0x3F, 0x59, 0xDF,
+
+	0x16, 0x30, 0x20, 0xE9,
+	0x69, 0x3D, 0x69, 0xDF,
+
+	0x48, 0x37, 0x48, 0xDF,
+	0x58, 0x3F, 0x58, 0xDF,
+
+	0x12, 0x12, 0x2D, 0xDF,
+	0x22, 0x22, 0x2D, 0xDF,
+
+	0x32, 0x32, 0x2D, 0xDF,
+	0x3A, 0x3A, 0x2D, 0xDF,
+
+	0x68, 0x3D, 0x68, 0xDF,
+	0x49, 0x37, 0x49, 0xDF,
+
+	0x3D, 0xCF, 0x74, 0xC0,
+	0x37, 0xCF, 0x74, 0xC4,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x34, 0x80, 0x20, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x88, 0x73, 0x5E, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x0F, 0xCF, 0x75, 0xC6,
+	0x3C, 0x3D, 0x20, 0xE9,
+
+	0x0A, 0x44, 0x54, 0xB0,
+	0x02, 0x44, 0x64, 0xB0,
+
+	0x2A, 0x44, 0x54, 0xB2,
+	0x1A, 0x44, 0x64, 0xB2,
+
+	0x28, 0x80, 0x3A, 0xEA,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x3D, 0xCF, 0x74, 0xC2,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x32, 0x31, 0x5F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x33, 0x39, 0x5F, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x31, 0x0F, 0x20, 0xE9,
+
+	0x0A, 0x44, 0x54, 0xB4,
+	0x02, 0x44, 0x64, 0xB4,
+
+	0x2A, 0x45, 0x55, 0xB6,
+	0x1A, 0x45, 0x65, 0xB6,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x38, 0x3D, 0x20, 0xE9,
+
+	0x0A, 0x20,
+	0x02, 0x20,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x0A, 0x47, 0x57, 0xBF,
+	0x02, 0x47, 0x67, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x3E, 0x30, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x3F, 0x38, 0x4F, 0xE9,
+
+	0x2A, 0x46, 0x56, 0xBF,
+	0x1A, 0x46, 0x66, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x3A, 0x31, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3B, 0x39, 0x4F, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x36, 0x30, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x37, 0x38, 0x4F, 0xE9,
+
+	0x2A, 0x43, 0x53, 0xBF,
+	0x1A, 0x43, 0x63, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x35, 0x31, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x39, 0x39, 0x4F, 0xE9,
+
+	0x0A, 0x48, 0x58, 0xBF,
+	0x02, 0x48, 0x68, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x80, 0x31, 0x57, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x81, 0x39, 0x57, 0xE9,
+
+	0x2A, 0x49, 0x59, 0xBF,
+	0x1A, 0x49, 0x69, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x82, 0x30, 0x57, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x83, 0x38, 0x57, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x84, 0x31, 0x5E, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x85, 0x39, 0x5E, 0xE9,
+
+	0x86, 0x76, 0x57, 0xE9,
+	0x8A, 0x36, 0x20, 0xE9,
+
+	0x87, 0x77, 0x57, 0xE9,
+	0x8B, 0x3E, 0xBF, 0xEA,
+
+	0x80, 0x30, 0x57, 0xE9,
+	0x81, 0x38, 0x57, 0xE9,
+
+	0x82, 0x31, 0x57, 0xE9,
+	0x86, 0x78, 0x57, 0xE9,
+
+	0x83, 0x39, 0x57, 0xE9,
+	0x87, 0x79, 0x57, 0xE9,
+
+	0x30, 0x1F, 0x5F, 0xE9,
+	0x8A, 0x34, 0x20, 0xE9,
+
+	0x8B, 0x3C, 0x20, 0xE9,
+	0x37, 0x50, 0x60, 0xBD,
+
+	0x57, 0x0D, 0x20, 0xE9,
+	0x35, 0x51, 0x61, 0xBD,
+
+	0x2B, 0x50, 0x20, 0xE9,
+	0x1D, 0x37, 0xE1, 0xEA,
+
+	0x1E, 0x35, 0xE1, 0xEA,
+	0x00, 0xE0,
+	0x0E, 0x77,
+
+	0x24, 0x51, 0x20, 0xE9,
+	0x9A, 0xFF, 0x20, 0xEA,
+
+	0x16, 0x0E, 0x20, 0xE9,
+	0x57, 0x2E, 0xBF, 0xEA,
+
+	0x0B, 0x46, 0xA0, 0xE8,
+	0x1B, 0x56, 0xA0, 0xE8,
+
+	0x2B, 0x66, 0xA0, 0xE8,
+	0x0C, 0x47, 0xA0, 0xE8,
+
+	0x1C, 0x57, 0xA0, 0xE8,
+	0x2C, 0x67, 0xA0, 0xE8,
+
+	0x0B, 0x00,
+	0x1B, 0x00,
+	0x2B, 0x00,
+	0x00, 0xE0,
+
+	0x0C, 0x00,
+	0x1C, 0x00,
+	0x2C, 0x00,
+	0x00, 0xE0,
+
+	0x0B, 0x65,
+	0x1B, 0x65,
+	0x2B, 0x65,
+	0x00, 0xE0,
+
+	0x0C, 0x65,
+	0x1C, 0x65,
+	0x2C, 0x65,
+	0x00, 0xE0,
+
+	0x0B, 0x1B, 0x60, 0xEC,
+	0x36, 0xD7, 0x36, 0xAD,
+
+	0x2B, 0x80, 0x60, 0xEC,
+	0x0C, 0x1C, 0x60, 0xEC,
+
+	0x3E, 0xD7, 0x3E, 0xAD,
+	0x2C, 0x80, 0x60, 0xEC,
+
+	0x0B, 0x2B, 0xDE, 0xE8,
+	0x1B, 0x80, 0xDE, 0xE8,
+
+	0x36, 0x80, 0x36, 0xBD,
+	0x3E, 0x80, 0x3E, 0xBD,
+
+	0x33, 0xD7, 0x0B, 0xBD,
+	0x3B, 0xD7, 0x1B, 0xBD,
+
+	0x46, 0x80, 0x46, 0xCF,
+	0x57, 0x80, 0x57, 0xCF,
+
+	0x66, 0x33, 0x66, 0xCF,
+	0x47, 0x3B, 0x47, 0xCF,
+
+	0x56, 0x33, 0x56, 0xCF,
+	0x67, 0x3B, 0x67, 0xCF,
+
+	0x0B, 0x48, 0xA0, 0xE8,
+	0x1B, 0x58, 0xA0, 0xE8,
+
+	0x2B, 0x68, 0xA0, 0xE8,
+	0x0C, 0x49, 0xA0, 0xE8,
+
+	0x1C, 0x59, 0xA0, 0xE8,
+	0x2C, 0x69, 0xA0, 0xE8,
+
+	0x0B, 0x00,
+	0x1B, 0x00,
+	0x2B, 0x00,
+	0x00, 0xE0,
+
+	0x0C, 0x00,
+	0x1C, 0x00,
+	0x2C, 0x00,
+	0x00, 0xE0,
+
+	0x0B, 0x65,
+	0x1B, 0x65,
+	0x2B, 0x65,
+	0x00, 0xE0,
+
+	0x0C, 0x65,
+	0x1C, 0x65,
+	0x2C, 0x65,
+	0x00, 0xE0,
+
+	0x0B, 0x1B, 0x60, 0xEC,
+	0x34, 0xD7, 0x34, 0xAD,
+
+	0x2B, 0x80, 0x60, 0xEC,
+	0x0C, 0x1C, 0x60, 0xEC,
+
+	0x3C, 0xD7, 0x3C, 0xAD,
+	0x2C, 0x80, 0x60, 0xEC,
+
+	0x0B, 0x2B, 0xDE, 0xE8,
+	0x1B, 0x80, 0xDE, 0xE8,
+
+	0x34, 0x80, 0x34, 0xBD,
+	0x3C, 0x80, 0x3C, 0xBD,
+
+	0x33, 0xD7, 0x0B, 0xBD,
+	0x3B, 0xD7, 0x1B, 0xBD,
+
+	0x48, 0x80, 0x48, 0xCF,
+	0x59, 0x80, 0x59, 0xCF,
+
+	0x68, 0x33, 0x68, 0xCF,
+	0x49, 0x3B, 0x49, 0xCF,
+
+	0xBB, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x58, 0x33, 0x58, 0xCF,
+	0x69, 0x3B, 0x69, 0xCF,
+
+	0x78, 0xFF, 0x20, 0xEA,
+	0x57, 0xC0, 0xBF, 0xEA,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+};
+
+static unsigned char warp_g400_t2gzs[] = {
+
+	0x00, 0x8A, 0x98, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x0A, 0x40, 0x50, 0xBF,
+	0x2A, 0x40, 0x60, 0xBF,
+
+	0x32, 0x41, 0x51, 0xBF,
+	0x3A, 0x41, 0x61, 0xBF,
+
+	0xC3, 0x6B,
+	0xD3, 0x6B,
+	0x00, 0x8A, 0x98, 0xE9,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x96, 0xE2,
+	0x41, 0x04,
+
+	0x7B, 0x43, 0xA0, 0xE8,
+	0x73, 0x53, 0xA0, 0xE8,
+
+	0xAD, 0xEE, 0x23, 0x9F,
+	0x00, 0xE0,
+	0x51, 0x04,
+
+	0x90, 0xE2,
+	0x61, 0x04,
+	0x31, 0x46, 0xB1, 0xE8,
+
+	0x51, 0x41, 0xE0, 0xEC,
+	0x39, 0x67, 0xB1, 0xE8,
+
+	0x00, 0x04,
+	0x46, 0xE2,
+	0x73, 0x63, 0xA0, 0xE8,
+
+	0x61, 0x41, 0xE0, 0xEC,
+	0x31, 0x00,
+	0x39, 0x00,
+
+	0x85, 0x80, 0x15, 0xEA,
+	0x10, 0x04,
+	0x20, 0x04,
+
+	0x61, 0x51, 0xE0, 0xEC,
+	0x2F, 0x41, 0x60, 0xEA,
+
+	0x31, 0x20,
+	0x39, 0x20,
+	0x1F, 0x42, 0xA0, 0xE8,
+
+	0x2A, 0x42, 0x52, 0xBF,
+	0x0F, 0x52, 0xA0, 0xE8,
+
+	0x1A, 0x42, 0x62, 0xBF,
+	0x1E, 0x51, 0x60, 0xEA,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x0E, 0x61, 0x60, 0xEA,
+
+	0x32, 0x40, 0x50, 0xBD,
+	0x22, 0x40, 0x60, 0xBD,
+
+	0x12, 0x41, 0x51, 0xBD,
+	0x3A, 0x41, 0x61, 0xBD,
+
+	0xBF, 0x2F, 0x0E, 0xBD,
+	0x97, 0xE2,
+	0x7B, 0x72,
+
+	0x32, 0x20,
+	0x22, 0x20,
+	0x12, 0x20,
+	0x3A, 0x20,
+
+	0x35, 0x48, 0xB1, 0xE8,
+	0x3D, 0x59, 0xB1, 0xE8,
+
+	0x46, 0x31, 0x46, 0xBF,
+	0x56, 0x31, 0x56, 0xBF,
+
+	0xB3, 0xE2, 0x2D, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x66, 0x31, 0x66, 0xBF,
+	0x47, 0x39, 0x47, 0xBF,
+
+	0x57, 0x39, 0x57, 0xBF,
+	0x67, 0x39, 0x67, 0xBF,
+
+	0x76, 0x80, 0x07, 0xEA,
+	0x24, 0x41, 0x20, 0xE9,
+
+	0x35, 0x00,
+	0x3D, 0x00,
+	0x00, 0xE0,
+	0x2D, 0x73,
+
+	0x33, 0x72,
+	0x0C, 0xE3,
+	0x8D, 0x2F, 0x1E, 0xBD,
+
+	0x43, 0x75, 0xF8, 0xEC,
+	0x35, 0x20,
+	0x3D, 0x20,
+
+	0x43, 0x43, 0x2D, 0xDF,
+	0x53, 0x53, 0x2D, 0xDF,
+
+	0xAE, 0x1E, 0x0E, 0xBD,
+	0x58, 0xE3,
+	0x33, 0x66,
+
+	0x48, 0x35, 0x48, 0xBF,
+	0x58, 0x35, 0x58, 0xBF,
+
+	0x68, 0x35, 0x68, 0xBF,
+	0x49, 0x3D, 0x49, 0xBF,
+
+	0x59, 0x3D, 0x59, 0xBF,
+	0x69, 0x3D, 0x69, 0xBF,
+
+	0x63, 0x63, 0x2D, 0xDF,
+	0x4D, 0x7D, 0xF8, 0xEC,
+
+	0x59, 0xE3,
+	0x00, 0xE0,
+	0xB8, 0x38, 0x33, 0xBF,
+
+	0x2D, 0x73,
+	0x30, 0x76,
+	0x18, 0x3A, 0x41, 0xE9,
+
+	0x3F, 0x53, 0xA0, 0xE8,
+	0x05, 0x80, 0x3D, 0xEA,
+
+	0x37, 0x43, 0xA0, 0xE8,
+	0x3D, 0x63, 0xA0, 0xE8,
+
+	0x50, 0x70, 0xF8, 0xEC,
+	0x2B, 0x50, 0x3C, 0xE9,
+
+	0x1F, 0x0F, 0xBC, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x59, 0x78, 0xF8, 0xEC,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x1E, 0x12, 0x41, 0xE9,
+	0x1A, 0x22, 0x41, 0xE9,
+
+	0x46, 0x37, 0x46, 0xDF,
+	0x56, 0x3F, 0x56, 0xDF,
+
+	0x2B, 0x40, 0x3D, 0xE9,
+	0x66, 0x3D, 0x66, 0xDF,
+
+	0x1D, 0x32, 0x41, 0xE9,
+	0x67, 0x3D, 0x67, 0xDF,
+
+	0x47, 0x37, 0x47, 0xDF,
+	0x57, 0x3F, 0x57, 0xDF,
+
+	0x2A, 0x40, 0x20, 0xE9,
+	0x59, 0x3F, 0x59, 0xDF,
+
+	0x16, 0x30, 0x20, 0xE9,
+	0x69, 0x3D, 0x69, 0xDF,
+
+	0x48, 0x37, 0x48, 0xDF,
+	0x58, 0x3F, 0x58, 0xDF,
+
+	0x68, 0x3D, 0x68, 0xDF,
+	0x49, 0x37, 0x49, 0xDF,
+
+	0x32, 0x32, 0x2D, 0xDF,
+	0x22, 0x22, 0x2D, 0xDF,
+
+	0x12, 0x12, 0x2D, 0xDF,
+	0x3A, 0x3A, 0x2D, 0xDF,
+
+	0x0F, 0xCF, 0x74, 0xC2,
+	0x37, 0xCF, 0x74, 0xC4,
+
+	0x0A, 0x44, 0x54, 0xB0,
+	0x02, 0x44, 0x64, 0xB0,
+
+	0x3D, 0xCF, 0x74, 0xC0,
+	0x34, 0x37, 0x20, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x38, 0x0F, 0x20, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3C, 0x3D, 0x20, 0xE9,
+
+	0x2A, 0x44, 0x54, 0xB2,
+	0x1A, 0x44, 0x64, 0xB2,
+
+	0x31, 0x80, 0x3A, 0xEA,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x0F, 0xCF, 0x75, 0xC0,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x32, 0x31, 0x5F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x33, 0x39, 0x5F, 0xE9,
+
+	0x3D, 0xCF, 0x75, 0xC2,
+	0x37, 0xCF, 0x75, 0xC4,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0xA6, 0x0F, 0x20, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0xA3, 0x3D, 0x20, 0xE9,
+
+	0x2A, 0x44, 0x54, 0xB4,
+	0x1A, 0x44, 0x64, 0xB4,
+
+	0x0A, 0x45, 0x55, 0xB0,
+	0x02, 0x45, 0x65, 0xB0,
+
+	0x88, 0x73, 0x5E, 0xE9,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0xA0, 0x37, 0x20, 0xE9,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x3E, 0x30, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3F, 0x38, 0x4F, 0xE9,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x3A, 0x31, 0x4F, 0xE9,
+
+	0x2A, 0x45, 0x55, 0xB2,
+	0x1A, 0x45, 0x65, 0xB2,
+
+	0x0A, 0x45, 0x55, 0xB4,
+	0x02, 0x45, 0x65, 0xB4,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x3B, 0x39, 0x4F, 0xE9,
+
+	0x2A, 0x20,
+	0x1A, 0x20,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x2A, 0x46, 0x56, 0xBF,
+	0x1A, 0x46, 0x66, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x36, 0x31, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x37, 0x39, 0x4F, 0xE9,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0xA7, 0x30, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0xA8, 0x38, 0x4F, 0xE9,
+
+	0x0A, 0x47, 0x57, 0xBF,
+	0x02, 0x47, 0x67, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0xA4, 0x31, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0xA5, 0x39, 0x4F, 0xE9,
+
+	0x2A, 0x43, 0x53, 0xBF,
+	0x1A, 0x43, 0x63, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0xA1, 0x30, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0xA2, 0x38, 0x4F, 0xE9,
+
+	0x0A, 0x48, 0x58, 0xBF,
+	0x02, 0x48, 0x68, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x80, 0x31, 0x57, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x81, 0x39, 0x57, 0xE9,
+
+	0x2A, 0x49, 0x59, 0xBF,
+	0x1A, 0x49, 0x69, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x82, 0x30, 0x57, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x83, 0x38, 0x57, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x84, 0x31, 0x5E, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x85, 0x39, 0x5E, 0xE9,
+
+	0x86, 0x76, 0x57, 0xE9,
+	0x8A, 0x36, 0x20, 0xE9,
+
+	0x87, 0x77, 0x57, 0xE9,
+	0x8B, 0x3E, 0xBF, 0xEA,
+
+	0x80, 0x30, 0x57, 0xE9,
+	0x81, 0x38, 0x57, 0xE9,
+
+	0x82, 0x31, 0x57, 0xE9,
+	0x86, 0x78, 0x57, 0xE9,
+
+	0x83, 0x39, 0x57, 0xE9,
+	0x87, 0x79, 0x57, 0xE9,
+
+	0x30, 0x1F, 0x5F, 0xE9,
+	0x8A, 0x34, 0x20, 0xE9,
+
+	0x8B, 0x3C, 0x20, 0xE9,
+	0x37, 0x50, 0x60, 0xBD,
+
+	0x57, 0x0D, 0x20, 0xE9,
+	0x35, 0x51, 0x61, 0xBD,
+
+	0x2B, 0x50, 0x20, 0xE9,
+	0x1D, 0x37, 0xE1, 0xEA,
+
+	0x1E, 0x35, 0xE1, 0xEA,
+	0x00, 0xE0,
+	0x0E, 0x77,
+
+	0x24, 0x51, 0x20, 0xE9,
+	0x92, 0xFF, 0x20, 0xEA,
+
+	0x16, 0x0E, 0x20, 0xE9,
+	0x57, 0x2E, 0xBF, 0xEA,
+
+	0x0B, 0x46, 0xA0, 0xE8,
+	0x1B, 0x56, 0xA0, 0xE8,
+
+	0x2B, 0x66, 0xA0, 0xE8,
+	0x0C, 0x47, 0xA0, 0xE8,
+
+	0x1C, 0x57, 0xA0, 0xE8,
+	0x2C, 0x67, 0xA0, 0xE8,
+
+	0x0B, 0x00,
+	0x1B, 0x00,
+	0x2B, 0x00,
+	0x00, 0xE0,
+
+	0x0C, 0x00,
+	0x1C, 0x00,
+	0x2C, 0x00,
+	0x00, 0xE0,
+
+	0x0B, 0x65,
+	0x1B, 0x65,
+	0x2B, 0x65,
+	0x00, 0xE0,
+
+	0x0C, 0x65,
+	0x1C, 0x65,
+	0x2C, 0x65,
+	0x00, 0xE0,
+
+	0x0B, 0x1B, 0x60, 0xEC,
+	0x36, 0xD7, 0x36, 0xAD,
+
+	0x2B, 0x80, 0x60, 0xEC,
+	0x0C, 0x1C, 0x60, 0xEC,
+
+	0x3E, 0xD7, 0x3E, 0xAD,
+	0x2C, 0x80, 0x60, 0xEC,
+
+	0x0B, 0x2B, 0xDE, 0xE8,
+	0x1B, 0x80, 0xDE, 0xE8,
+
+	0x36, 0x80, 0x36, 0xBD,
+	0x3E, 0x80, 0x3E, 0xBD,
+
+	0x33, 0xD7, 0x0B, 0xBD,
+	0x3B, 0xD7, 0x1B, 0xBD,
+
+	0x46, 0x80, 0x46, 0xCF,
+	0x57, 0x80, 0x57, 0xCF,
+
+	0x66, 0x33, 0x66, 0xCF,
+	0x47, 0x3B, 0x47, 0xCF,
+
+	0x56, 0x33, 0x56, 0xCF,
+	0x67, 0x3B, 0x67, 0xCF,
+
+	0x0B, 0x48, 0xA0, 0xE8,
+	0x1B, 0x58, 0xA0, 0xE8,
+
+	0x2B, 0x68, 0xA0, 0xE8,
+	0x0C, 0x49, 0xA0, 0xE8,
+
+	0x1C, 0x59, 0xA0, 0xE8,
+	0x2C, 0x69, 0xA0, 0xE8,
+
+	0x0B, 0x00,
+	0x1B, 0x00,
+	0x2B, 0x00,
+	0x00, 0xE0,
+
+	0x0C, 0x00,
+	0x1C, 0x00,
+	0x2C, 0x00,
+	0x00, 0xE0,
+
+	0x0B, 0x65,
+	0x1B, 0x65,
+	0x2B, 0x65,
+	0x00, 0xE0,
+
+	0x0C, 0x65,
+	0x1C, 0x65,
+	0x2C, 0x65,
+	0x00, 0xE0,
+
+	0x0B, 0x1B, 0x60, 0xEC,
+	0x34, 0xD7, 0x34, 0xAD,
+
+	0x2B, 0x80, 0x60, 0xEC,
+	0x0C, 0x1C, 0x60, 0xEC,
+
+	0x3C, 0xD7, 0x3C, 0xAD,
+	0x2C, 0x80, 0x60, 0xEC,
+
+	0x0B, 0x2B, 0xDE, 0xE8,
+	0x1B, 0x80, 0xDE, 0xE8,
+
+	0x34, 0x80, 0x34, 0xBD,
+	0x3C, 0x80, 0x3C, 0xBD,
+
+	0x33, 0xD7, 0x0B, 0xBD,
+	0x3B, 0xD7, 0x1B, 0xBD,
+
+	0x48, 0x80, 0x48, 0xCF,
+	0x59, 0x80, 0x59, 0xCF,
+
+	0x68, 0x33, 0x68, 0xCF,
+	0x49, 0x3B, 0x49, 0xCF,
+
+	0xB2, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x58, 0x33, 0x58, 0xCF,
+	0x69, 0x3B, 0x69, 0xCF,
+
+	0x70, 0xFF, 0x20, 0xEA,
+	0x57, 0xC0, 0xBF, 0xEA,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+};
+
+static unsigned char warp_g400_t2gzsa[] = {
+
+	0x00, 0x8A, 0x98, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x0A, 0x40, 0x50, 0xBF,
+	0x2A, 0x40, 0x60, 0xBF,
+
+	0x32, 0x41, 0x51, 0xBF,
+	0x3A, 0x41, 0x61, 0xBF,
+
+	0xC3, 0x6B,
+	0xD3, 0x6B,
+	0x00, 0x8A, 0x98, 0xE9,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x96, 0xE2,
+	0x41, 0x04,
+
+	0x7B, 0x43, 0xA0, 0xE8,
+	0x73, 0x53, 0xA0, 0xE8,
+
+	0xAD, 0xEE, 0x23, 0x9F,
+	0x00, 0xE0,
+	0x51, 0x04,
+
+	0x90, 0xE2,
+	0x61, 0x04,
+	0x31, 0x46, 0xB1, 0xE8,
+
+	0x51, 0x41, 0xE0, 0xEC,
+	0x39, 0x67, 0xB1, 0xE8,
+
+	0x00, 0x04,
+	0x46, 0xE2,
+	0x73, 0x63, 0xA0, 0xE8,
+
+	0x61, 0x41, 0xE0, 0xEC,
+	0x31, 0x00,
+	0x39, 0x00,
+
+	0x8A, 0x80, 0x15, 0xEA,
+	0x10, 0x04,
+	0x20, 0x04,
+
+	0x61, 0x51, 0xE0, 0xEC,
+	0x2F, 0x41, 0x60, 0xEA,
+
+	0x31, 0x20,
+	0x39, 0x20,
+	0x1F, 0x42, 0xA0, 0xE8,
+
+	0x2A, 0x42, 0x52, 0xBF,
+	0x0F, 0x52, 0xA0, 0xE8,
+
+	0x1A, 0x42, 0x62, 0xBF,
+	0x1E, 0x51, 0x60, 0xEA,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x0E, 0x61, 0x60, 0xEA,
+
+	0x32, 0x40, 0x50, 0xBD,
+	0x22, 0x40, 0x60, 0xBD,
+
+	0x12, 0x41, 0x51, 0xBD,
+	0x3A, 0x41, 0x61, 0xBD,
+
+	0xBF, 0x2F, 0x0E, 0xBD,
+	0x97, 0xE2,
+	0x7B, 0x72,
+
+	0x32, 0x20,
+	0x22, 0x20,
+	0x12, 0x20,
+	0x3A, 0x20,
+
+	0x35, 0x48, 0xB1, 0xE8,
+	0x3D, 0x59, 0xB1, 0xE8,
+
+	0x46, 0x31, 0x46, 0xBF,
+	0x56, 0x31, 0x56, 0xBF,
+
+	0xB3, 0xE2, 0x2D, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x66, 0x31, 0x66, 0xBF,
+	0x47, 0x39, 0x47, 0xBF,
+
+	0x57, 0x39, 0x57, 0xBF,
+	0x67, 0x39, 0x67, 0xBF,
+
+	0x7B, 0x80, 0x07, 0xEA,
+	0x24, 0x41, 0x20, 0xE9,
+
+	0x35, 0x00,
+	0x3D, 0x00,
+	0x00, 0xE0,
+	0x2D, 0x73,
+
+	0x33, 0x72,
+	0x0C, 0xE3,
+	0x8D, 0x2F, 0x1E, 0xBD,
+
+	0x43, 0x75, 0xF8, 0xEC,
+	0x35, 0x20,
+	0x3D, 0x20,
+
+	0x43, 0x43, 0x2D, 0xDF,
+	0x53, 0x53, 0x2D, 0xDF,
+
+	0xAE, 0x1E, 0x0E, 0xBD,
+	0x58, 0xE3,
+	0x33, 0x66,
+
+	0x48, 0x35, 0x48, 0xBF,
+	0x58, 0x35, 0x58, 0xBF,
+
+	0x68, 0x35, 0x68, 0xBF,
+	0x49, 0x3D, 0x49, 0xBF,
+
+	0x59, 0x3D, 0x59, 0xBF,
+	0x69, 0x3D, 0x69, 0xBF,
+
+	0x63, 0x63, 0x2D, 0xDF,
+	0x4D, 0x7D, 0xF8, 0xEC,
+
+	0x59, 0xE3,
+	0x00, 0xE0,
+	0xB8, 0x38, 0x33, 0xBF,
+
+	0x2D, 0x73,
+	0x30, 0x76,
+	0x18, 0x3A, 0x41, 0xE9,
+
+	0x3F, 0x53, 0xA0, 0xE8,
+	0x05, 0x80, 0x3D, 0xEA,
+
+	0x37, 0x43, 0xA0, 0xE8,
+	0x3D, 0x63, 0xA0, 0xE8,
+
+	0x50, 0x70, 0xF8, 0xEC,
+	0x2B, 0x50, 0x3C, 0xE9,
+
+	0x1F, 0x0F, 0xBC, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x59, 0x78, 0xF8, 0xEC,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x1E, 0x12, 0x41, 0xE9,
+	0x1A, 0x22, 0x41, 0xE9,
+
+	0x46, 0x37, 0x46, 0xDF,
+	0x56, 0x3F, 0x56, 0xDF,
+
+	0x2B, 0x40, 0x3D, 0xE9,
+	0x66, 0x3D, 0x66, 0xDF,
+
+	0x1D, 0x32, 0x41, 0xE9,
+	0x67, 0x3D, 0x67, 0xDF,
+
+	0x47, 0x37, 0x47, 0xDF,
+	0x57, 0x3F, 0x57, 0xDF,
+
+	0x2A, 0x40, 0x20, 0xE9,
+	0x59, 0x3F, 0x59, 0xDF,
+
+	0x16, 0x30, 0x20, 0xE9,
+	0x69, 0x3D, 0x69, 0xDF,
+
+	0x48, 0x37, 0x48, 0xDF,
+	0x58, 0x3F, 0x58, 0xDF,
+
+	0x68, 0x3D, 0x68, 0xDF,
+	0x49, 0x37, 0x49, 0xDF,
+
+	0x32, 0x32, 0x2D, 0xDF,
+	0x22, 0x22, 0x2D, 0xDF,
+
+	0x12, 0x12, 0x2D, 0xDF,
+	0x3A, 0x3A, 0x2D, 0xDF,
+
+	0x0F, 0xCF, 0x74, 0xC2,
+	0x37, 0xCF, 0x74, 0xC4,
+
+	0x0A, 0x44, 0x54, 0xB0,
+	0x02, 0x44, 0x64, 0xB0,
+
+	0x3D, 0xCF, 0x74, 0xC0,
+	0x34, 0x37, 0x20, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x38, 0x0F, 0x20, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3C, 0x3D, 0x20, 0xE9,
+
+	0x2A, 0x44, 0x54, 0xB2,
+	0x1A, 0x44, 0x64, 0xB2,
+
+	0x36, 0x80, 0x3A, 0xEA,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x0F, 0xCF, 0x75, 0xC0,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x32, 0x31, 0x5F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x33, 0x39, 0x5F, 0xE9,
+
+	0x3D, 0xCF, 0x75, 0xC2,
+	0x37, 0xCF, 0x75, 0xC4,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0xA6, 0x0F, 0x20, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0xA3, 0x3D, 0x20, 0xE9,
+
+	0x2A, 0x44, 0x54, 0xB4,
+	0x1A, 0x44, 0x64, 0xB4,
+
+	0x0A, 0x45, 0x55, 0xB0,
+	0x02, 0x45, 0x65, 0xB0,
+
+	0x88, 0x73, 0x5E, 0xE9,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0xA0, 0x37, 0x20, 0xE9,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x3E, 0x30, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3F, 0x38, 0x4F, 0xE9,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x3A, 0x31, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x3B, 0x39, 0x4F, 0xE9,
+
+	0x2A, 0x45, 0x55, 0xB2,
+	0x1A, 0x45, 0x65, 0xB2,
+
+	0x0A, 0x45, 0x55, 0xB4,
+	0x02, 0x45, 0x65, 0xB4,
+
+	0x0F, 0xCF, 0x74, 0xC6,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0xA7, 0x30, 0x4F, 0xE9,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x9C, 0x0F, 0x20, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0xA8, 0x38, 0x4F, 0xE9,
+
+	0x2A, 0x44, 0x54, 0xB6,
+	0x1A, 0x44, 0x64, 0xB6,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x36, 0x31, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x37, 0x39, 0x4F, 0xE9,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x2A, 0x46, 0x56, 0xBF,
+	0x1A, 0x46, 0x66, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0xA4, 0x31, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0xA5, 0x39, 0x4F, 0xE9,
+
+	0x0A, 0x47, 0x57, 0xBF,
+	0x02, 0x47, 0x67, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0xA1, 0x30, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0xA2, 0x38, 0x4F, 0xE9,
+
+	0x2A, 0x43, 0x53, 0xBF,
+	0x1A, 0x43, 0x63, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x9D, 0x31, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x9E, 0x39, 0x4F, 0xE9,
+
+	0x0A, 0x48, 0x58, 0xBF,
+	0x02, 0x48, 0x68, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x80, 0x31, 0x57, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x81, 0x39, 0x57, 0xE9,
+
+	0x2A, 0x49, 0x59, 0xBF,
+	0x1A, 0x49, 0x69, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x82, 0x30, 0x57, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x83, 0x38, 0x57, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x84, 0x31, 0x5E, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x85, 0x39, 0x5E, 0xE9,
+
+	0x86, 0x76, 0x57, 0xE9,
+	0x8A, 0x36, 0x20, 0xE9,
+
+	0x87, 0x77, 0x57, 0xE9,
+	0x8B, 0x3E, 0xBF, 0xEA,
+
+	0x80, 0x30, 0x57, 0xE9,
+	0x81, 0x38, 0x57, 0xE9,
+
+	0x82, 0x31, 0x57, 0xE9,
+	0x86, 0x78, 0x57, 0xE9,
+
+	0x83, 0x39, 0x57, 0xE9,
+	0x87, 0x79, 0x57, 0xE9,
+
+	0x30, 0x1F, 0x5F, 0xE9,
+	0x8A, 0x34, 0x20, 0xE9,
+
+	0x8B, 0x3C, 0x20, 0xE9,
+	0x37, 0x50, 0x60, 0xBD,
+
+	0x57, 0x0D, 0x20, 0xE9,
+	0x35, 0x51, 0x61, 0xBD,
+
+	0x2B, 0x50, 0x20, 0xE9,
+	0x1D, 0x37, 0xE1, 0xEA,
+
+	0x1E, 0x35, 0xE1, 0xEA,
+	0x00, 0xE0,
+	0x0E, 0x77,
+
+	0x24, 0x51, 0x20, 0xE9,
+	0x8D, 0xFF, 0x20, 0xEA,
+
+	0x16, 0x0E, 0x20, 0xE9,
+	0x57, 0x2E, 0xBF, 0xEA,
+
+	0x0B, 0x46, 0xA0, 0xE8,
+	0x1B, 0x56, 0xA0, 0xE8,
+
+	0x2B, 0x66, 0xA0, 0xE8,
+	0x0C, 0x47, 0xA0, 0xE8,
+
+	0x1C, 0x57, 0xA0, 0xE8,
+	0x2C, 0x67, 0xA0, 0xE8,
+
+	0x0B, 0x00,
+	0x1B, 0x00,
+	0x2B, 0x00,
+	0x00, 0xE0,
+
+	0x0C, 0x00,
+	0x1C, 0x00,
+	0x2C, 0x00,
+	0x00, 0xE0,
+
+	0x0B, 0x65,
+	0x1B, 0x65,
+	0x2B, 0x65,
+	0x00, 0xE0,
+
+	0x0C, 0x65,
+	0x1C, 0x65,
+	0x2C, 0x65,
+	0x00, 0xE0,
+
+	0x0B, 0x1B, 0x60, 0xEC,
+	0x36, 0xD7, 0x36, 0xAD,
+
+	0x2B, 0x80, 0x60, 0xEC,
+	0x0C, 0x1C, 0x60, 0xEC,
+
+	0x3E, 0xD7, 0x3E, 0xAD,
+	0x2C, 0x80, 0x60, 0xEC,
+
+	0x0B, 0x2B, 0xDE, 0xE8,
+	0x1B, 0x80, 0xDE, 0xE8,
+
+	0x36, 0x80, 0x36, 0xBD,
+	0x3E, 0x80, 0x3E, 0xBD,
+
+	0x33, 0xD7, 0x0B, 0xBD,
+	0x3B, 0xD7, 0x1B, 0xBD,
+
+	0x46, 0x80, 0x46, 0xCF,
+	0x57, 0x80, 0x57, 0xCF,
+
+	0x66, 0x33, 0x66, 0xCF,
+	0x47, 0x3B, 0x47, 0xCF,
+
+	0x56, 0x33, 0x56, 0xCF,
+	0x67, 0x3B, 0x67, 0xCF,
+
+	0x0B, 0x48, 0xA0, 0xE8,
+	0x1B, 0x58, 0xA0, 0xE8,
+
+	0x2B, 0x68, 0xA0, 0xE8,
+	0x0C, 0x49, 0xA0, 0xE8,
+
+	0x1C, 0x59, 0xA0, 0xE8,
+	0x2C, 0x69, 0xA0, 0xE8,
+
+	0x0B, 0x00,
+	0x1B, 0x00,
+	0x2B, 0x00,
+	0x00, 0xE0,
+
+	0x0C, 0x00,
+	0x1C, 0x00,
+	0x2C, 0x00,
+	0x00, 0xE0,
+
+	0x0B, 0x65,
+	0x1B, 0x65,
+	0x2B, 0x65,
+	0x00, 0xE0,
+
+	0x0C, 0x65,
+	0x1C, 0x65,
+	0x2C, 0x65,
+	0x00, 0xE0,
+
+	0x0B, 0x1B, 0x60, 0xEC,
+	0x34, 0xD7, 0x34, 0xAD,
+
+	0x2B, 0x80, 0x60, 0xEC,
+	0x0C, 0x1C, 0x60, 0xEC,
+
+	0x3C, 0xD7, 0x3C, 0xAD,
+	0x2C, 0x80, 0x60, 0xEC,
+
+	0x0B, 0x2B, 0xDE, 0xE8,
+	0x1B, 0x80, 0xDE, 0xE8,
+
+	0x34, 0x80, 0x34, 0xBD,
+	0x3C, 0x80, 0x3C, 0xBD,
+
+	0x33, 0xD7, 0x0B, 0xBD,
+	0x3B, 0xD7, 0x1B, 0xBD,
+
+	0x48, 0x80, 0x48, 0xCF,
+	0x59, 0x80, 0x59, 0xCF,
+
+	0x68, 0x33, 0x68, 0xCF,
+	0x49, 0x3B, 0x49, 0xCF,
+
+	0xAD, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x58, 0x33, 0x58, 0xCF,
+	0x69, 0x3B, 0x69, 0xCF,
+
+	0x6B, 0xFF, 0x20, 0xEA,
+	0x57, 0xC0, 0xBF, 0xEA,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+};
+
+static unsigned char warp_g400_t2gzsaf[] = {
+
+	0x00, 0x8A, 0x98, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x0A, 0x40, 0x50, 0xBF,
+	0x2A, 0x40, 0x60, 0xBF,
+
+	0x32, 0x41, 0x51, 0xBF,
+	0x3A, 0x41, 0x61, 0xBF,
+
+	0xC3, 0x6B,
+	0xD3, 0x6B,
+	0x00, 0x8A, 0x98, 0xE9,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x96, 0xE2,
+	0x41, 0x04,
+
+	0x7B, 0x43, 0xA0, 0xE8,
+	0x73, 0x53, 0xA0, 0xE8,
+
+	0xAD, 0xEE, 0x23, 0x9F,
+	0x00, 0xE0,
+	0x51, 0x04,
+
+	0x90, 0xE2,
+	0x61, 0x04,
+	0x31, 0x46, 0xB1, 0xE8,
+
+	0x51, 0x41, 0xE0, 0xEC,
+	0x39, 0x67, 0xB1, 0xE8,
+
+	0x00, 0x04,
+	0x46, 0xE2,
+	0x73, 0x63, 0xA0, 0xE8,
+
+	0x61, 0x41, 0xE0, 0xEC,
+	0x31, 0x00,
+	0x39, 0x00,
+
+	0x8E, 0x80, 0x15, 0xEA,
+	0x10, 0x04,
+	0x20, 0x04,
+
+	0x61, 0x51, 0xE0, 0xEC,
+	0x2F, 0x41, 0x60, 0xEA,
+
+	0x31, 0x20,
+	0x39, 0x20,
+	0x1F, 0x42, 0xA0, 0xE8,
+
+	0x2A, 0x42, 0x52, 0xBF,
+	0x0F, 0x52, 0xA0, 0xE8,
+
+	0x1A, 0x42, 0x62, 0xBF,
+	0x1E, 0x51, 0x60, 0xEA,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x0E, 0x61, 0x60, 0xEA,
+
+	0x32, 0x40, 0x50, 0xBD,
+	0x22, 0x40, 0x60, 0xBD,
+
+	0x12, 0x41, 0x51, 0xBD,
+	0x3A, 0x41, 0x61, 0xBD,
+
+	0xBF, 0x2F, 0x0E, 0xBD,
+	0x97, 0xE2,
+	0x7B, 0x72,
+
+	0x32, 0x20,
+	0x22, 0x20,
+	0x12, 0x20,
+	0x3A, 0x20,
+
+	0x35, 0x48, 0xB1, 0xE8,
+	0x3D, 0x59, 0xB1, 0xE8,
+
+	0x46, 0x31, 0x46, 0xBF,
+	0x56, 0x31, 0x56, 0xBF,
+
+	0xB3, 0xE2, 0x2D, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x66, 0x31, 0x66, 0xBF,
+	0x47, 0x39, 0x47, 0xBF,
+
+	0x57, 0x39, 0x57, 0xBF,
+	0x67, 0x39, 0x67, 0xBF,
+
+	0x7F, 0x80, 0x07, 0xEA,
+	0x24, 0x41, 0x20, 0xE9,
+
+	0x35, 0x00,
+	0x3D, 0x00,
+	0x00, 0xE0,
+	0x2D, 0x73,
+
+	0x33, 0x72,
+	0x0C, 0xE3,
+	0x8D, 0x2F, 0x1E, 0xBD,
+
+	0x43, 0x75, 0xF8, 0xEC,
+	0x35, 0x20,
+	0x3D, 0x20,
+
+	0x43, 0x43, 0x2D, 0xDF,
+	0x53, 0x53, 0x2D, 0xDF,
+
+	0xAE, 0x1E, 0x0E, 0xBD,
+	0x58, 0xE3,
+	0x33, 0x66,
+
+	0x48, 0x35, 0x48, 0xBF,
+	0x58, 0x35, 0x58, 0xBF,
+
+	0x68, 0x35, 0x68, 0xBF,
+	0x49, 0x3D, 0x49, 0xBF,
+
+	0x59, 0x3D, 0x59, 0xBF,
+	0x69, 0x3D, 0x69, 0xBF,
+
+	0x63, 0x63, 0x2D, 0xDF,
+	0x4D, 0x7D, 0xF8, 0xEC,
+
+	0x59, 0xE3,
+	0x00, 0xE0,
+	0xB8, 0x38, 0x33, 0xBF,
+
+	0x2D, 0x73,
+	0x30, 0x76,
+	0x18, 0x3A, 0x41, 0xE9,
+
+	0x3F, 0x53, 0xA0, 0xE8,
+	0x05, 0x80, 0x3D, 0xEA,
+
+	0x37, 0x43, 0xA0, 0xE8,
+	0x3D, 0x63, 0xA0, 0xE8,
+
+	0x50, 0x70, 0xF8, 0xEC,
+	0x2B, 0x50, 0x3C, 0xE9,
+
+	0x1F, 0x0F, 0xBC, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x59, 0x78, 0xF8, 0xEC,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x1E, 0x12, 0x41, 0xE9,
+	0x1A, 0x22, 0x41, 0xE9,
+
+	0x46, 0x37, 0x46, 0xDF,
+	0x56, 0x3F, 0x56, 0xDF,
+
+	0x2B, 0x40, 0x3D, 0xE9,
+	0x66, 0x3D, 0x66, 0xDF,
+
+	0x1D, 0x32, 0x41, 0xE9,
+	0x67, 0x3D, 0x67, 0xDF,
+
+	0x47, 0x37, 0x47, 0xDF,
+	0x57, 0x3F, 0x57, 0xDF,
+
+	0x2A, 0x40, 0x20, 0xE9,
+	0x59, 0x3F, 0x59, 0xDF,
+
+	0x16, 0x30, 0x20, 0xE9,
+	0x69, 0x3D, 0x69, 0xDF,
+
+	0x48, 0x37, 0x48, 0xDF,
+	0x58, 0x3F, 0x58, 0xDF,
+
+	0x68, 0x3D, 0x68, 0xDF,
+	0x49, 0x37, 0x49, 0xDF,
+
+	0x32, 0x32, 0x2D, 0xDF,
+	0x22, 0x22, 0x2D, 0xDF,
+
+	0x12, 0x12, 0x2D, 0xDF,
+	0x3A, 0x3A, 0x2D, 0xDF,
+
+	0x0F, 0xCF, 0x74, 0xC2,
+	0x37, 0xCF, 0x74, 0xC4,
+
+	0x0A, 0x44, 0x54, 0xB0,
+	0x02, 0x44, 0x64, 0xB0,
+
+	0x3D, 0xCF, 0x74, 0xC0,
+	0x34, 0x37, 0x20, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x38, 0x0F, 0x20, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3C, 0x3D, 0x20, 0xE9,
+
+	0x2A, 0x44, 0x54, 0xB2,
+	0x1A, 0x44, 0x64, 0xB2,
+
+	0x3A, 0x80, 0x3A, 0xEA,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x0F, 0xCF, 0x75, 0xC0,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x32, 0x31, 0x5F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x33, 0x39, 0x5F, 0xE9,
+
+	0x3D, 0xCF, 0x75, 0xC2,
+	0x37, 0xCF, 0x75, 0xC4,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0xA6, 0x0F, 0x20, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0xA3, 0x3D, 0x20, 0xE9,
+
+	0x2A, 0x44, 0x54, 0xB4,
+	0x1A, 0x44, 0x64, 0xB4,
+
+	0x0A, 0x45, 0x55, 0xB0,
+	0x02, 0x45, 0x65, 0xB0,
+
+	0x88, 0x73, 0x5E, 0xE9,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0xA0, 0x37, 0x20, 0xE9,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x3E, 0x30, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3F, 0x38, 0x4F, 0xE9,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x3A, 0x31, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x3B, 0x39, 0x4F, 0xE9,
+
+	0x2A, 0x45, 0x55, 0xB2,
+	0x1A, 0x45, 0x65, 0xB2,
+
+	0x0A, 0x45, 0x55, 0xB4,
+	0x02, 0x45, 0x65, 0xB4,
+
+	0x0F, 0xCF, 0x74, 0xC6,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0xA7, 0x30, 0x4F, 0xE9,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x9C, 0x0F, 0x20, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0xA8, 0x38, 0x4F, 0xE9,
+
+	0x2A, 0x44, 0x54, 0xB6,
+	0x1A, 0x44, 0x64, 0xB6,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x36, 0x31, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x37, 0x39, 0x4F, 0xE9,
+
+	0x0A, 0x45, 0x55, 0xB6,
+	0x02, 0x45, 0x65, 0xB6,
+
+	0x3D, 0xCF, 0x75, 0xC6,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x2A, 0x46, 0x56, 0xBF,
+	0x1A, 0x46, 0x66, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0xA4, 0x31, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0xA5, 0x39, 0x4F, 0xE9,
+
+	0x31, 0x3D, 0x20, 0xE9,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x0A, 0x47, 0x57, 0xBF,
+	0x02, 0x47, 0x67, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0xA1, 0x30, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0xA2, 0x38, 0x4F, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x9D, 0x31, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x9E, 0x39, 0x4F, 0xE9,
+
+	0x2A, 0x43, 0x53, 0xBF,
+	0x1A, 0x43, 0x63, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x35, 0x30, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x39, 0x38, 0x4F, 0xE9,
+
+	0x0A, 0x48, 0x58, 0xBF,
+	0x02, 0x48, 0x68, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x80, 0x31, 0x57, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x81, 0x39, 0x57, 0xE9,
+
+	0x2A, 0x49, 0x59, 0xBF,
+	0x1A, 0x49, 0x69, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x82, 0x30, 0x57, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x83, 0x38, 0x57, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x84, 0x31, 0x5E, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x85, 0x39, 0x5E, 0xE9,
+
+	0x86, 0x76, 0x57, 0xE9,
+	0x8A, 0x36, 0x20, 0xE9,
+
+	0x87, 0x77, 0x57, 0xE9,
+	0x8B, 0x3E, 0xBF, 0xEA,
+
+	0x80, 0x30, 0x57, 0xE9,
+	0x81, 0x38, 0x57, 0xE9,
+
+	0x82, 0x31, 0x57, 0xE9,
+	0x86, 0x78, 0x57, 0xE9,
+
+	0x83, 0x39, 0x57, 0xE9,
+	0x87, 0x79, 0x57, 0xE9,
+
+	0x30, 0x1F, 0x5F, 0xE9,
+	0x8A, 0x34, 0x20, 0xE9,
+
+	0x8B, 0x3C, 0x20, 0xE9,
+	0x37, 0x50, 0x60, 0xBD,
+
+	0x57, 0x0D, 0x20, 0xE9,
+	0x35, 0x51, 0x61, 0xBD,
+
+	0x2B, 0x50, 0x20, 0xE9,
+	0x1D, 0x37, 0xE1, 0xEA,
+
+	0x1E, 0x35, 0xE1, 0xEA,
+	0x00, 0xE0,
+	0x0E, 0x77,
+
+	0x24, 0x51, 0x20, 0xE9,
+	0x89, 0xFF, 0x20, 0xEA,
+
+	0x16, 0x0E, 0x20, 0xE9,
+	0x57, 0x2E, 0xBF, 0xEA,
+
+	0x0B, 0x46, 0xA0, 0xE8,
+	0x1B, 0x56, 0xA0, 0xE8,
+
+	0x2B, 0x66, 0xA0, 0xE8,
+	0x0C, 0x47, 0xA0, 0xE8,
+
+	0x1C, 0x57, 0xA0, 0xE8,
+	0x2C, 0x67, 0xA0, 0xE8,
+
+	0x0B, 0x00,
+	0x1B, 0x00,
+	0x2B, 0x00,
+	0x00, 0xE0,
+
+	0x0C, 0x00,
+	0x1C, 0x00,
+	0x2C, 0x00,
+	0x00, 0xE0,
+
+	0x0B, 0x65,
+	0x1B, 0x65,
+	0x2B, 0x65,
+	0x00, 0xE0,
+
+	0x0C, 0x65,
+	0x1C, 0x65,
+	0x2C, 0x65,
+	0x00, 0xE0,
+
+	0x0B, 0x1B, 0x60, 0xEC,
+	0x36, 0xD7, 0x36, 0xAD,
+
+	0x2B, 0x80, 0x60, 0xEC,
+	0x0C, 0x1C, 0x60, 0xEC,
+
+	0x3E, 0xD7, 0x3E, 0xAD,
+	0x2C, 0x80, 0x60, 0xEC,
+
+	0x0B, 0x2B, 0xDE, 0xE8,
+	0x1B, 0x80, 0xDE, 0xE8,
+
+	0x36, 0x80, 0x36, 0xBD,
+	0x3E, 0x80, 0x3E, 0xBD,
+
+	0x33, 0xD7, 0x0B, 0xBD,
+	0x3B, 0xD7, 0x1B, 0xBD,
+
+	0x46, 0x80, 0x46, 0xCF,
+	0x57, 0x80, 0x57, 0xCF,
+
+	0x66, 0x33, 0x66, 0xCF,
+	0x47, 0x3B, 0x47, 0xCF,
+
+	0x56, 0x33, 0x56, 0xCF,
+	0x67, 0x3B, 0x67, 0xCF,
+
+	0x0B, 0x48, 0xA0, 0xE8,
+	0x1B, 0x58, 0xA0, 0xE8,
+
+	0x2B, 0x68, 0xA0, 0xE8,
+	0x0C, 0x49, 0xA0, 0xE8,
+
+	0x1C, 0x59, 0xA0, 0xE8,
+	0x2C, 0x69, 0xA0, 0xE8,
+
+	0x0B, 0x00,
+	0x1B, 0x00,
+	0x2B, 0x00,
+	0x00, 0xE0,
+
+	0x0C, 0x00,
+	0x1C, 0x00,
+	0x2C, 0x00,
+	0x00, 0xE0,
+
+	0x0B, 0x65,
+	0x1B, 0x65,
+	0x2B, 0x65,
+	0x00, 0xE0,
+
+	0x0C, 0x65,
+	0x1C, 0x65,
+	0x2C, 0x65,
+	0x00, 0xE0,
+
+	0x0B, 0x1B, 0x60, 0xEC,
+	0x34, 0xD7, 0x34, 0xAD,
+
+	0x2B, 0x80, 0x60, 0xEC,
+	0x0C, 0x1C, 0x60, 0xEC,
+
+	0x3C, 0xD7, 0x3C, 0xAD,
+	0x2C, 0x80, 0x60, 0xEC,
+
+	0x0B, 0x2B, 0xDE, 0xE8,
+	0x1B, 0x80, 0xDE, 0xE8,
+
+	0x34, 0x80, 0x34, 0xBD,
+	0x3C, 0x80, 0x3C, 0xBD,
+
+	0x33, 0xD7, 0x0B, 0xBD,
+	0x3B, 0xD7, 0x1B, 0xBD,
+
+	0x48, 0x80, 0x48, 0xCF,
+	0x59, 0x80, 0x59, 0xCF,
+
+	0x68, 0x33, 0x68, 0xCF,
+	0x49, 0x3B, 0x49, 0xCF,
+
+	0xA9, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x58, 0x33, 0x58, 0xCF,
+	0x69, 0x3B, 0x69, 0xCF,
+
+	0x67, 0xFF, 0x20, 0xEA,
+	0x57, 0xC0, 0xBF, 0xEA,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+};
+
+static unsigned char warp_g400_t2gzsf[] = {
+
+	0x00, 0x8A, 0x98, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x0A, 0x40, 0x50, 0xBF,
+	0x2A, 0x40, 0x60, 0xBF,
+
+	0x32, 0x41, 0x51, 0xBF,
+	0x3A, 0x41, 0x61, 0xBF,
+
+	0xC3, 0x6B,
+	0xD3, 0x6B,
+	0x00, 0x8A, 0x98, 0xE9,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x96, 0xE2,
+	0x41, 0x04,
+
+	0x7B, 0x43, 0xA0, 0xE8,
+	0x73, 0x53, 0xA0, 0xE8,
+
+	0xAD, 0xEE, 0x23, 0x9F,
+	0x00, 0xE0,
+	0x51, 0x04,
+
+	0x90, 0xE2,
+	0x61, 0x04,
+	0x31, 0x46, 0xB1, 0xE8,
+
+	0x51, 0x41, 0xE0, 0xEC,
+	0x39, 0x67, 0xB1, 0xE8,
+
+	0x00, 0x04,
+	0x46, 0xE2,
+	0x73, 0x63, 0xA0, 0xE8,
+
+	0x61, 0x41, 0xE0, 0xEC,
+	0x31, 0x00,
+	0x39, 0x00,
+
+	0x8A, 0x80, 0x15, 0xEA,
+	0x10, 0x04,
+	0x20, 0x04,
+
+	0x61, 0x51, 0xE0, 0xEC,
+	0x2F, 0x41, 0x60, 0xEA,
+
+	0x31, 0x20,
+	0x39, 0x20,
+	0x1F, 0x42, 0xA0, 0xE8,
+
+	0x2A, 0x42, 0x52, 0xBF,
+	0x0F, 0x52, 0xA0, 0xE8,
+
+	0x1A, 0x42, 0x62, 0xBF,
+	0x1E, 0x51, 0x60, 0xEA,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x0E, 0x61, 0x60, 0xEA,
+
+	0x32, 0x40, 0x50, 0xBD,
+	0x22, 0x40, 0x60, 0xBD,
+
+	0x12, 0x41, 0x51, 0xBD,
+	0x3A, 0x41, 0x61, 0xBD,
+
+	0xBF, 0x2F, 0x0E, 0xBD,
+	0x97, 0xE2,
+	0x7B, 0x72,
+
+	0x32, 0x20,
+	0x22, 0x20,
+	0x12, 0x20,
+	0x3A, 0x20,
+
+	0x35, 0x48, 0xB1, 0xE8,
+	0x3D, 0x59, 0xB1, 0xE8,
+
+	0x46, 0x31, 0x46, 0xBF,
+	0x56, 0x31, 0x56, 0xBF,
+
+	0xB3, 0xE2, 0x2D, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x66, 0x31, 0x66, 0xBF,
+	0x47, 0x39, 0x47, 0xBF,
+
+	0x57, 0x39, 0x57, 0xBF,
+	0x67, 0x39, 0x67, 0xBF,
+
+	0x7B, 0x80, 0x07, 0xEA,
+	0x24, 0x41, 0x20, 0xE9,
+
+	0x35, 0x00,
+	0x3D, 0x00,
+	0x00, 0xE0,
+	0x2D, 0x73,
+
+	0x33, 0x72,
+	0x0C, 0xE3,
+	0x8D, 0x2F, 0x1E, 0xBD,
+
+	0x43, 0x75, 0xF8, 0xEC,
+	0x35, 0x20,
+	0x3D, 0x20,
+
+	0x43, 0x43, 0x2D, 0xDF,
+	0x53, 0x53, 0x2D, 0xDF,
+
+	0xAE, 0x1E, 0x0E, 0xBD,
+	0x58, 0xE3,
+	0x33, 0x66,
+
+	0x48, 0x35, 0x48, 0xBF,
+	0x58, 0x35, 0x58, 0xBF,
+
+	0x68, 0x35, 0x68, 0xBF,
+	0x49, 0x3D, 0x49, 0xBF,
+
+	0x59, 0x3D, 0x59, 0xBF,
+	0x69, 0x3D, 0x69, 0xBF,
+
+	0x63, 0x63, 0x2D, 0xDF,
+	0x4D, 0x7D, 0xF8, 0xEC,
+
+	0x59, 0xE3,
+	0x00, 0xE0,
+	0xB8, 0x38, 0x33, 0xBF,
+
+	0x2D, 0x73,
+	0x30, 0x76,
+	0x18, 0x3A, 0x41, 0xE9,
+
+	0x3F, 0x53, 0xA0, 0xE8,
+	0x05, 0x80, 0x3D, 0xEA,
+
+	0x37, 0x43, 0xA0, 0xE8,
+	0x3D, 0x63, 0xA0, 0xE8,
+
+	0x50, 0x70, 0xF8, 0xEC,
+	0x2B, 0x50, 0x3C, 0xE9,
+
+	0x1F, 0x0F, 0xBC, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x59, 0x78, 0xF8, 0xEC,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x1E, 0x12, 0x41, 0xE9,
+	0x1A, 0x22, 0x41, 0xE9,
+
+	0x46, 0x37, 0x46, 0xDF,
+	0x56, 0x3F, 0x56, 0xDF,
+
+	0x2B, 0x40, 0x3D, 0xE9,
+	0x66, 0x3D, 0x66, 0xDF,
+
+	0x1D, 0x32, 0x41, 0xE9,
+	0x67, 0x3D, 0x67, 0xDF,
+
+	0x47, 0x37, 0x47, 0xDF,
+	0x57, 0x3F, 0x57, 0xDF,
+
+	0x2A, 0x40, 0x20, 0xE9,
+	0x59, 0x3F, 0x59, 0xDF,
+
+	0x16, 0x30, 0x20, 0xE9,
+	0x69, 0x3D, 0x69, 0xDF,
+
+	0x48, 0x37, 0x48, 0xDF,
+	0x58, 0x3F, 0x58, 0xDF,
+
+	0x68, 0x3D, 0x68, 0xDF,
+	0x49, 0x37, 0x49, 0xDF,
+
+	0x32, 0x32, 0x2D, 0xDF,
+	0x22, 0x22, 0x2D, 0xDF,
+
+	0x12, 0x12, 0x2D, 0xDF,
+	0x3A, 0x3A, 0x2D, 0xDF,
+
+	0x0F, 0xCF, 0x74, 0xC2,
+	0x37, 0xCF, 0x74, 0xC4,
+
+	0x0A, 0x44, 0x54, 0xB0,
+	0x02, 0x44, 0x64, 0xB0,
+
+	0x3D, 0xCF, 0x74, 0xC0,
+	0x34, 0x37, 0x20, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x38, 0x0F, 0x20, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3C, 0x3D, 0x20, 0xE9,
+
+	0x2A, 0x44, 0x54, 0xB2,
+	0x1A, 0x44, 0x64, 0xB2,
+
+	0x36, 0x80, 0x3A, 0xEA,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x0F, 0xCF, 0x75, 0xC0,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x32, 0x31, 0x5F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x33, 0x39, 0x5F, 0xE9,
+
+	0x3D, 0xCF, 0x75, 0xC2,
+	0x37, 0xCF, 0x75, 0xC4,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0xA6, 0x0F, 0x20, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0xA3, 0x3D, 0x20, 0xE9,
+
+	0x2A, 0x44, 0x54, 0xB4,
+	0x1A, 0x44, 0x64, 0xB4,
+
+	0x0A, 0x45, 0x55, 0xB0,
+	0x02, 0x45, 0x65, 0xB0,
+
+	0x88, 0x73, 0x5E, 0xE9,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0xA0, 0x37, 0x20, 0xE9,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x3E, 0x30, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3F, 0x38, 0x4F, 0xE9,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x3A, 0x31, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x3B, 0x39, 0x4F, 0xE9,
+
+	0x2A, 0x45, 0x55, 0xB2,
+	0x1A, 0x45, 0x65, 0xB2,
+
+	0x0A, 0x45, 0x55, 0xB4,
+	0x02, 0x45, 0x65, 0xB4,
+
+	0x0F, 0xCF, 0x75, 0xC6,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0xA7, 0x30, 0x4F, 0xE9,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x31, 0x0F, 0x20, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0xA8, 0x38, 0x4F, 0xE9,
+
+	0x2A, 0x45, 0x55, 0xB6,
+	0x1A, 0x45, 0x65, 0xB6,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x36, 0x31, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x37, 0x39, 0x4F, 0xE9,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x2A, 0x46, 0x56, 0xBF,
+	0x1A, 0x46, 0x66, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0xA4, 0x31, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0xA5, 0x39, 0x4F, 0xE9,
+
+	0x0A, 0x47, 0x57, 0xBF,
+	0x02, 0x47, 0x67, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0xA1, 0x30, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0xA2, 0x38, 0x4F, 0xE9,
+
+	0x2A, 0x43, 0x53, 0xBF,
+	0x1A, 0x43, 0x63, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x35, 0x31, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x39, 0x39, 0x4F, 0xE9,
+
+	0x0A, 0x48, 0x58, 0xBF,
+	0x02, 0x48, 0x68, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x80, 0x31, 0x57, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x81, 0x39, 0x57, 0xE9,
+
+	0x2A, 0x49, 0x59, 0xBF,
+	0x1A, 0x49, 0x69, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x82, 0x30, 0x57, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x83, 0x38, 0x57, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x84, 0x31, 0x5E, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x85, 0x39, 0x5E, 0xE9,
+
+	0x86, 0x76, 0x57, 0xE9,
+	0x8A, 0x36, 0x20, 0xE9,
+
+	0x87, 0x77, 0x57, 0xE9,
+	0x8B, 0x3E, 0xBF, 0xEA,
+
+	0x80, 0x30, 0x57, 0xE9,
+	0x81, 0x38, 0x57, 0xE9,
+
+	0x82, 0x31, 0x57, 0xE9,
+	0x86, 0x78, 0x57, 0xE9,
+
+	0x83, 0x39, 0x57, 0xE9,
+	0x87, 0x79, 0x57, 0xE9,
+
+	0x30, 0x1F, 0x5F, 0xE9,
+	0x8A, 0x34, 0x20, 0xE9,
+
+	0x8B, 0x3C, 0x20, 0xE9,
+	0x37, 0x50, 0x60, 0xBD,
+
+	0x57, 0x0D, 0x20, 0xE9,
+	0x35, 0x51, 0x61, 0xBD,
+
+	0x2B, 0x50, 0x20, 0xE9,
+	0x1D, 0x37, 0xE1, 0xEA,
+
+	0x1E, 0x35, 0xE1, 0xEA,
+	0x00, 0xE0,
+	0x0E, 0x77,
+
+	0x24, 0x51, 0x20, 0xE9,
+	0x8D, 0xFF, 0x20, 0xEA,
+
+	0x16, 0x0E, 0x20, 0xE9,
+	0x57, 0x2E, 0xBF, 0xEA,
+
+	0x0B, 0x46, 0xA0, 0xE8,
+	0x1B, 0x56, 0xA0, 0xE8,
+
+	0x2B, 0x66, 0xA0, 0xE8,
+	0x0C, 0x47, 0xA0, 0xE8,
+
+	0x1C, 0x57, 0xA0, 0xE8,
+	0x2C, 0x67, 0xA0, 0xE8,
+
+	0x0B, 0x00,
+	0x1B, 0x00,
+	0x2B, 0x00,
+	0x00, 0xE0,
+
+	0x0C, 0x00,
+	0x1C, 0x00,
+	0x2C, 0x00,
+	0x00, 0xE0,
+
+	0x0B, 0x65,
+	0x1B, 0x65,
+	0x2B, 0x65,
+	0x00, 0xE0,
+
+	0x0C, 0x65,
+	0x1C, 0x65,
+	0x2C, 0x65,
+	0x00, 0xE0,
+
+	0x0B, 0x1B, 0x60, 0xEC,
+	0x36, 0xD7, 0x36, 0xAD,
+
+	0x2B, 0x80, 0x60, 0xEC,
+	0x0C, 0x1C, 0x60, 0xEC,
+
+	0x3E, 0xD7, 0x3E, 0xAD,
+	0x2C, 0x80, 0x60, 0xEC,
+
+	0x0B, 0x2B, 0xDE, 0xE8,
+	0x1B, 0x80, 0xDE, 0xE8,
+
+	0x36, 0x80, 0x36, 0xBD,
+	0x3E, 0x80, 0x3E, 0xBD,
+
+	0x33, 0xD7, 0x0B, 0xBD,
+	0x3B, 0xD7, 0x1B, 0xBD,
+
+	0x46, 0x80, 0x46, 0xCF,
+	0x57, 0x80, 0x57, 0xCF,
+
+	0x66, 0x33, 0x66, 0xCF,
+	0x47, 0x3B, 0x47, 0xCF,
+
+	0x56, 0x33, 0x56, 0xCF,
+	0x67, 0x3B, 0x67, 0xCF,
+
+	0x0B, 0x48, 0xA0, 0xE8,
+	0x1B, 0x58, 0xA0, 0xE8,
+
+	0x2B, 0x68, 0xA0, 0xE8,
+	0x0C, 0x49, 0xA0, 0xE8,
+
+	0x1C, 0x59, 0xA0, 0xE8,
+	0x2C, 0x69, 0xA0, 0xE8,
+
+	0x0B, 0x00,
+	0x1B, 0x00,
+	0x2B, 0x00,
+	0x00, 0xE0,
+
+	0x0C, 0x00,
+	0x1C, 0x00,
+	0x2C, 0x00,
+	0x00, 0xE0,
+
+	0x0B, 0x65,
+	0x1B, 0x65,
+	0x2B, 0x65,
+	0x00, 0xE0,
+
+	0x0C, 0x65,
+	0x1C, 0x65,
+	0x2C, 0x65,
+	0x00, 0xE0,
+
+	0x0B, 0x1B, 0x60, 0xEC,
+	0x34, 0xD7, 0x34, 0xAD,
+
+	0x2B, 0x80, 0x60, 0xEC,
+	0x0C, 0x1C, 0x60, 0xEC,
+
+	0x3C, 0xD7, 0x3C, 0xAD,
+	0x2C, 0x80, 0x60, 0xEC,
+
+	0x0B, 0x2B, 0xDE, 0xE8,
+	0x1B, 0x80, 0xDE, 0xE8,
+
+	0x34, 0x80, 0x34, 0xBD,
+	0x3C, 0x80, 0x3C, 0xBD,
+
+	0x33, 0xD7, 0x0B, 0xBD,
+	0x3B, 0xD7, 0x1B, 0xBD,
+
+	0x48, 0x80, 0x48, 0xCF,
+	0x59, 0x80, 0x59, 0xCF,
+
+	0x68, 0x33, 0x68, 0xCF,
+	0x49, 0x3B, 0x49, 0xCF,
+
+	0xAD, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x58, 0x33, 0x58, 0xCF,
+	0x69, 0x3B, 0x69, 0xCF,
+
+	0x6B, 0xFF, 0x20, 0xEA,
+	0x57, 0xC0, 0xBF, 0xEA,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+};
+
+static unsigned char warp_g400_tgz[] = {
+
+	0x00, 0x88, 0x98, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x22, 0x40, 0x48, 0xBF,
+	0x2A, 0x40, 0x50, 0xBF,
+
+	0x32, 0x41, 0x49, 0xBF,
+	0x3A, 0x41, 0x51, 0xBF,
+
+	0xC3, 0x6B,
+	0xCB, 0x6B,
+	0x00, 0x88, 0x98, 0xE9,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x96, 0xE2,
+	0x41, 0x04,
+
+	0x7B, 0x43, 0xA0, 0xE8,
+	0x73, 0x4B, 0xA0, 0xE8,
+
+	0xAD, 0xEE, 0x29, 0x9F,
+	0x00, 0xE0,
+	0x49, 0x04,
+
+	0x90, 0xE2,
+	0x51, 0x04,
+	0x31, 0x46, 0xB1, 0xE8,
+
+	0x49, 0x41, 0xC0, 0xEC,
+	0x39, 0x57, 0xB1, 0xE8,
+
+	0x00, 0x04,
+	0x46, 0xE2,
+	0x73, 0x53, 0xA0, 0xE8,
+
+	0x51, 0x41, 0xC0, 0xEC,
+	0x31, 0x00,
+	0x39, 0x00,
+
+	0x58, 0x80, 0x15, 0xEA,
+	0x08, 0x04,
+	0x10, 0x04,
+
+	0x51, 0x49, 0xC0, 0xEC,
+	0x2F, 0x41, 0x60, 0xEA,
+
+	0x31, 0x20,
+	0x39, 0x20,
+	0x1F, 0x42, 0xA0, 0xE8,
+
+	0x2A, 0x42, 0x4A, 0xBF,
+	0x27, 0x4A, 0xA0, 0xE8,
+
+	0x1A, 0x42, 0x52, 0xBF,
+	0x1E, 0x49, 0x60, 0xEA,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x26, 0x51, 0x60, 0xEA,
+
+	0x32, 0x40, 0x48, 0xBD,
+	0x22, 0x40, 0x50, 0xBD,
+
+	0x12, 0x41, 0x49, 0xBD,
+	0x3A, 0x41, 0x51, 0xBD,
+
+	0xBF, 0x2F, 0x26, 0xBD,
+	0x00, 0xE0,
+	0x7B, 0x72,
+
+	0x32, 0x20,
+	0x22, 0x20,
+	0x12, 0x20,
+	0x3A, 0x20,
+
+	0x46, 0x31, 0x46, 0xBF,
+	0x4E, 0x31, 0x4E, 0xBF,
+
+	0xB3, 0xE2, 0x2D, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x56, 0x31, 0x56, 0xBF,
+	0x47, 0x39, 0x47, 0xBF,
+
+	0x4F, 0x39, 0x4F, 0xBF,
+	0x57, 0x39, 0x57, 0xBF,
+
+	0x4A, 0x80, 0x07, 0xEA,
+	0x24, 0x41, 0x20, 0xE9,
+
+	0x42, 0x73, 0xF8, 0xEC,
+	0x00, 0xE0,
+	0x2D, 0x73,
+
+	0x33, 0x72,
+	0x0C, 0xE3,
+	0xA5, 0x2F, 0x1E, 0xBD,
+
+	0x43, 0x43, 0x2D, 0xDF,
+	0x4B, 0x4B, 0x2D, 0xDF,
+
+	0xAE, 0x1E, 0x26, 0xBD,
+	0x58, 0xE3,
+	0x33, 0x66,
+
+	0x53, 0x53, 0x2D, 0xDF,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xB8, 0x38, 0x33, 0xBF,
+	0x00, 0xE0,
+	0x59, 0xE3,
+
+	0x1E, 0x12, 0x41, 0xE9,
+	0x1A, 0x22, 0x41, 0xE9,
+
+	0x2B, 0x40, 0x3D, 0xE9,
+	0x3F, 0x4B, 0xA0, 0xE8,
+
+	0x2D, 0x73,
+	0x30, 0x76,
+	0x05, 0x80, 0x3D, 0xEA,
+
+	0x37, 0x43, 0xA0, 0xE8,
+	0x3D, 0x53, 0xA0, 0xE8,
+
+	0x48, 0x70, 0xF8, 0xEC,
+	0x2B, 0x48, 0x3C, 0xE9,
+
+	0x1F, 0x27, 0xBC, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x18, 0x3A, 0x41, 0xE9,
+	0x1D, 0x32, 0x41, 0xE9,
+
+	0x2A, 0x40, 0x20, 0xE9,
+	0x56, 0x3D, 0x56, 0xDF,
+
+	0x46, 0x37, 0x46, 0xDF,
+	0x4E, 0x3F, 0x4E, 0xDF,
+
+	0x16, 0x30, 0x20, 0xE9,
+	0x4F, 0x3F, 0x4F, 0xDF,
+
+	0x32, 0x32, 0x2D, 0xDF,
+	0x22, 0x22, 0x2D, 0xDF,
+
+	0x12, 0x12, 0x2D, 0xDF,
+	0x3A, 0x3A, 0x2D, 0xDF,
+
+	0x47, 0x37, 0x47, 0xDF,
+	0x57, 0x3D, 0x57, 0xDF,
+
+	0x3D, 0xCF, 0x74, 0xC0,
+	0x37, 0xCF, 0x74, 0xC4,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x34, 0x80, 0x20, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3C, 0x3D, 0x20, 0xE9,
+
+	0x0A, 0x44, 0x4C, 0xB0,
+	0x02, 0x44, 0x54, 0xB0,
+
+	0x2A, 0x44, 0x4C, 0xB2,
+	0x1A, 0x44, 0x54, 0xB2,
+
+	0x1D, 0x80, 0x3A, 0xEA,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x3D, 0xCF, 0x74, 0xC2,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x32, 0x31, 0x5F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x33, 0x39, 0x5F, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x2A, 0x44, 0x4C, 0xB4,
+	0x1A, 0x44, 0x54, 0xB4,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x38, 0x3D, 0x20, 0xE9,
+
+	0x88, 0x73, 0x5E, 0xE9,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x2A, 0x46, 0x4E, 0xBF,
+	0x1A, 0x46, 0x56, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x3E, 0x30, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3F, 0x38, 0x4F, 0xE9,
+
+	0x0A, 0x47, 0x4F, 0xBF,
+	0x02, 0x47, 0x57, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x3A, 0x31, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3B, 0x39, 0x4F, 0xE9,
+
+	0x2A, 0x43, 0x4B, 0xBF,
+	0x1A, 0x43, 0x53, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x36, 0x31, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x37, 0x39, 0x4F, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x80, 0x31, 0x57, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x81, 0x39, 0x57, 0xE9,
+
+	0x37, 0x48, 0x50, 0xBD,
+	0x8A, 0x36, 0x20, 0xE9,
+
+	0x86, 0x76, 0x57, 0xE9,
+	0x8B, 0x3E, 0x20, 0xE9,
+
+	0x82, 0x30, 0x57, 0xE9,
+	0x87, 0x77, 0x57, 0xE9,
+
+	0x83, 0x38, 0x57, 0xE9,
+	0x35, 0x49, 0x51, 0xBD,
+
+	0x84, 0x31, 0x5E, 0xE9,
+	0x30, 0x1F, 0x5F, 0xE9,
+
+	0x85, 0x39, 0x5E, 0xE9,
+	0x57, 0x25, 0x20, 0xE9,
+
+	0x2B, 0x48, 0x20, 0xE9,
+	0x1D, 0x37, 0xE1, 0xEA,
+
+	0x1E, 0x35, 0xE1, 0xEA,
+	0x00, 0xE0,
+	0x26, 0x77,
+
+	0x24, 0x49, 0x20, 0xE9,
+	0xAF, 0xFF, 0x20, 0xEA,
+
+	0x16, 0x26, 0x20, 0xE9,
+	0x57, 0x2E, 0xBF, 0xEA,
+
+	0x1C, 0x46, 0xA0, 0xE8,
+	0x23, 0x4E, 0xA0, 0xE8,
+
+	0x2B, 0x56, 0xA0, 0xE8,
+	0x1D, 0x47, 0xA0, 0xE8,
+
+	0x24, 0x4F, 0xA0, 0xE8,
+	0x2C, 0x57, 0xA0, 0xE8,
+
+	0x1C, 0x00,
+	0x23, 0x00,
+	0x2B, 0x00,
+	0x00, 0xE0,
+
+	0x1D, 0x00,
+	0x24, 0x00,
+	0x2C, 0x00,
+	0x00, 0xE0,
+
+	0x1C, 0x65,
+	0x23, 0x65,
+	0x2B, 0x65,
+	0x00, 0xE0,
+
+	0x1D, 0x65,
+	0x24, 0x65,
+	0x2C, 0x65,
+	0x00, 0xE0,
+
+	0x1C, 0x23, 0x60, 0xEC,
+	0x36, 0xD7, 0x36, 0xAD,
+
+	0x2B, 0x80, 0x60, 0xEC,
+	0x1D, 0x24, 0x60, 0xEC,
+
+	0x3E, 0xD7, 0x3E, 0xAD,
+	0x2C, 0x80, 0x60, 0xEC,
+
+	0x1C, 0x2B, 0xDE, 0xE8,
+	0x23, 0x80, 0xDE, 0xE8,
+
+	0x36, 0x80, 0x36, 0xBD,
+	0x3E, 0x80, 0x3E, 0xBD,
+
+	0x33, 0xD7, 0x1C, 0xBD,
+	0x3B, 0xD7, 0x23, 0xBD,
+
+	0x46, 0x80, 0x46, 0xCF,
+	0x4F, 0x80, 0x4F, 0xCF,
+
+	0x56, 0x33, 0x56, 0xCF,
+	0x47, 0x3B, 0x47, 0xCF,
+
+	0xD6, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x4E, 0x33, 0x4E, 0xCF,
+	0x57, 0x3B, 0x57, 0xCF,
+
+	0x9D, 0xFF, 0x20, 0xEA,
+	0x57, 0xC0, 0xBF, 0xEA,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+};
+
+static unsigned char warp_g400_tgza[] = {
+
+	0x00, 0x88, 0x98, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x22, 0x40, 0x48, 0xBF,
+	0x2A, 0x40, 0x50, 0xBF,
+
+	0x32, 0x41, 0x49, 0xBF,
+	0x3A, 0x41, 0x51, 0xBF,
+
+	0xC3, 0x6B,
+	0xCB, 0x6B,
+	0x00, 0x88, 0x98, 0xE9,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x96, 0xE2,
+	0x41, 0x04,
+
+	0x7B, 0x43, 0xA0, 0xE8,
+	0x73, 0x4B, 0xA0, 0xE8,
+
+	0xAD, 0xEE, 0x29, 0x9F,
+	0x00, 0xE0,
+	0x49, 0x04,
+
+	0x90, 0xE2,
+	0x51, 0x04,
+	0x31, 0x46, 0xB1, 0xE8,
+
+	0x49, 0x41, 0xC0, 0xEC,
+	0x39, 0x57, 0xB1, 0xE8,
+
+	0x00, 0x04,
+	0x46, 0xE2,
+	0x73, 0x53, 0xA0, 0xE8,
+
+	0x51, 0x41, 0xC0, 0xEC,
+	0x31, 0x00,
+	0x39, 0x00,
+
+	0x5C, 0x80, 0x15, 0xEA,
+	0x08, 0x04,
+	0x10, 0x04,
+
+	0x51, 0x49, 0xC0, 0xEC,
+	0x2F, 0x41, 0x60, 0xEA,
+
+	0x31, 0x20,
+	0x39, 0x20,
+	0x1F, 0x42, 0xA0, 0xE8,
+
+	0x2A, 0x42, 0x4A, 0xBF,
+	0x27, 0x4A, 0xA0, 0xE8,
+
+	0x1A, 0x42, 0x52, 0xBF,
+	0x1E, 0x49, 0x60, 0xEA,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x26, 0x51, 0x60, 0xEA,
+
+	0x32, 0x40, 0x48, 0xBD,
+	0x22, 0x40, 0x50, 0xBD,
+
+	0x12, 0x41, 0x49, 0xBD,
+	0x3A, 0x41, 0x51, 0xBD,
+
+	0xBF, 0x2F, 0x26, 0xBD,
+	0x00, 0xE0,
+	0x7B, 0x72,
+
+	0x32, 0x20,
+	0x22, 0x20,
+	0x12, 0x20,
+	0x3A, 0x20,
+
+	0x46, 0x31, 0x46, 0xBF,
+	0x4E, 0x31, 0x4E, 0xBF,
+
+	0xB3, 0xE2, 0x2D, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x56, 0x31, 0x56, 0xBF,
+	0x47, 0x39, 0x47, 0xBF,
+
+	0x4F, 0x39, 0x4F, 0xBF,
+	0x57, 0x39, 0x57, 0xBF,
+
+	0x4E, 0x80, 0x07, 0xEA,
+	0x24, 0x41, 0x20, 0xE9,
+
+	0x42, 0x73, 0xF8, 0xEC,
+	0x00, 0xE0,
+	0x2D, 0x73,
+
+	0x33, 0x72,
+	0x0C, 0xE3,
+	0xA5, 0x2F, 0x1E, 0xBD,
+
+	0x43, 0x43, 0x2D, 0xDF,
+	0x4B, 0x4B, 0x2D, 0xDF,
+
+	0xAE, 0x1E, 0x26, 0xBD,
+	0x58, 0xE3,
+	0x33, 0x66,
+
+	0x53, 0x53, 0x2D, 0xDF,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xB8, 0x38, 0x33, 0xBF,
+	0x00, 0xE0,
+	0x59, 0xE3,
+
+	0x1E, 0x12, 0x41, 0xE9,
+	0x1A, 0x22, 0x41, 0xE9,
+
+	0x2B, 0x40, 0x3D, 0xE9,
+	0x3F, 0x4B, 0xA0, 0xE8,
+
+	0x2D, 0x73,
+	0x30, 0x76,
+	0x05, 0x80, 0x3D, 0xEA,
+
+	0x37, 0x43, 0xA0, 0xE8,
+	0x3D, 0x53, 0xA0, 0xE8,
+
+	0x48, 0x70, 0xF8, 0xEC,
+	0x2B, 0x48, 0x3C, 0xE9,
+
+	0x1F, 0x27, 0xBC, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x18, 0x3A, 0x41, 0xE9,
+	0x1D, 0x32, 0x41, 0xE9,
+
+	0x2A, 0x40, 0x20, 0xE9,
+	0x56, 0x3D, 0x56, 0xDF,
+
+	0x46, 0x37, 0x46, 0xDF,
+	0x4E, 0x3F, 0x4E, 0xDF,
+
+	0x16, 0x30, 0x20, 0xE9,
+	0x4F, 0x3F, 0x4F, 0xDF,
+
+	0x32, 0x32, 0x2D, 0xDF,
+	0x22, 0x22, 0x2D, 0xDF,
+
+	0x12, 0x12, 0x2D, 0xDF,
+	0x3A, 0x3A, 0x2D, 0xDF,
+
+	0x47, 0x37, 0x47, 0xDF,
+	0x57, 0x3D, 0x57, 0xDF,
+
+	0x3D, 0xCF, 0x74, 0xC0,
+	0x37, 0xCF, 0x74, 0xC4,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x34, 0x80, 0x20, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3C, 0x3D, 0x20, 0xE9,
+
+	0x27, 0xCF, 0x74, 0xC6,
+	0x3D, 0xCF, 0x74, 0xC2,
+
+	0x0A, 0x44, 0x4C, 0xB0,
+	0x02, 0x44, 0x54, 0xB0,
+
+	0x2A, 0x44, 0x4C, 0xB2,
+	0x1A, 0x44, 0x54, 0xB2,
+
+	0x20, 0x80, 0x3A, 0xEA,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x88, 0x73, 0x5E, 0xE9,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x32, 0x31, 0x5F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x33, 0x39, 0x5F, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x9C, 0x27, 0x20, 0xE9,
+
+	0x0A, 0x44, 0x4C, 0xB4,
+	0x02, 0x44, 0x54, 0xB4,
+
+	0x2A, 0x44, 0x4C, 0xB6,
+	0x1A, 0x44, 0x54, 0xB6,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x38, 0x3D, 0x20, 0xE9,
+
+	0x0A, 0x20,
+	0x02, 0x20,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x0A, 0x47, 0x4F, 0xBF,
+	0x02, 0x47, 0x57, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x3E, 0x30, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x3F, 0x38, 0x4F, 0xE9,
+
+	0x2A, 0x46, 0x4E, 0xBF,
+	0x1A, 0x46, 0x56, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x3A, 0x31, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3B, 0x39, 0x4F, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x36, 0x30, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x37, 0x38, 0x4F, 0xE9,
+
+	0x2A, 0x43, 0x4B, 0xBF,
+	0x1A, 0x43, 0x53, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x9D, 0x31, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x9E, 0x39, 0x4F, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x80, 0x31, 0x57, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x81, 0x39, 0x57, 0xE9,
+
+	0x37, 0x48, 0x50, 0xBD,
+	0x8A, 0x36, 0x20, 0xE9,
+
+	0x86, 0x76, 0x57, 0xE9,
+	0x8B, 0x3E, 0x20, 0xE9,
+
+	0x82, 0x30, 0x57, 0xE9,
+	0x87, 0x77, 0x57, 0xE9,
+
+	0x83, 0x38, 0x57, 0xE9,
+	0x35, 0x49, 0x51, 0xBD,
+
+	0x84, 0x31, 0x5E, 0xE9,
+	0x30, 0x1F, 0x5F, 0xE9,
+
+	0x85, 0x39, 0x5E, 0xE9,
+	0x57, 0x25, 0x20, 0xE9,
+
+	0x2B, 0x48, 0x20, 0xE9,
+	0x1D, 0x37, 0xE1, 0xEA,
+
+	0x1E, 0x35, 0xE1, 0xEA,
+	0x00, 0xE0,
+	0x26, 0x77,
+
+	0x24, 0x49, 0x20, 0xE9,
+	0xAB, 0xFF, 0x20, 0xEA,
+
+	0x16, 0x26, 0x20, 0xE9,
+	0x57, 0x2E, 0xBF, 0xEA,
+
+	0x1C, 0x46, 0xA0, 0xE8,
+	0x23, 0x4E, 0xA0, 0xE8,
+
+	0x2B, 0x56, 0xA0, 0xE8,
+	0x1D, 0x47, 0xA0, 0xE8,
+
+	0x24, 0x4F, 0xA0, 0xE8,
+	0x2C, 0x57, 0xA0, 0xE8,
+
+	0x1C, 0x00,
+	0x23, 0x00,
+	0x2B, 0x00,
+	0x00, 0xE0,
+
+	0x1D, 0x00,
+	0x24, 0x00,
+	0x2C, 0x00,
+	0x00, 0xE0,
+
+	0x1C, 0x65,
+	0x23, 0x65,
+	0x2B, 0x65,
+	0x00, 0xE0,
+
+	0x1D, 0x65,
+	0x24, 0x65,
+	0x2C, 0x65,
+	0x00, 0xE0,
+
+	0x1C, 0x23, 0x60, 0xEC,
+	0x36, 0xD7, 0x36, 0xAD,
+
+	0x2B, 0x80, 0x60, 0xEC,
+	0x1D, 0x24, 0x60, 0xEC,
+
+	0x3E, 0xD7, 0x3E, 0xAD,
+	0x2C, 0x80, 0x60, 0xEC,
+
+	0x1C, 0x2B, 0xDE, 0xE8,
+	0x23, 0x80, 0xDE, 0xE8,
+
+	0x36, 0x80, 0x36, 0xBD,
+	0x3E, 0x80, 0x3E, 0xBD,
+
+	0x33, 0xD7, 0x1C, 0xBD,
+	0x3B, 0xD7, 0x23, 0xBD,
+
+	0x46, 0x80, 0x46, 0xCF,
+	0x4F, 0x80, 0x4F, 0xCF,
+
+	0x56, 0x33, 0x56, 0xCF,
+	0x47, 0x3B, 0x47, 0xCF,
+
+	0xD3, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x4E, 0x33, 0x4E, 0xCF,
+	0x57, 0x3B, 0x57, 0xCF,
+
+	0x99, 0xFF, 0x20, 0xEA,
+	0x57, 0xC0, 0xBF, 0xEA,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+};
+
+static unsigned char warp_g400_tgzaf[] = {
+
+	0x00, 0x88, 0x98, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x22, 0x40, 0x48, 0xBF,
+	0x2A, 0x40, 0x50, 0xBF,
+
+	0x32, 0x41, 0x49, 0xBF,
+	0x3A, 0x41, 0x51, 0xBF,
+
+	0xC3, 0x6B,
+	0xCB, 0x6B,
+	0x00, 0x88, 0x98, 0xE9,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x96, 0xE2,
+	0x41, 0x04,
+
+	0x7B, 0x43, 0xA0, 0xE8,
+	0x73, 0x4B, 0xA0, 0xE8,
+
+	0xAD, 0xEE, 0x29, 0x9F,
+	0x00, 0xE0,
+	0x49, 0x04,
+
+	0x90, 0xE2,
+	0x51, 0x04,
+	0x31, 0x46, 0xB1, 0xE8,
+
+	0x49, 0x41, 0xC0, 0xEC,
+	0x39, 0x57, 0xB1, 0xE8,
+
+	0x00, 0x04,
+	0x46, 0xE2,
+	0x73, 0x53, 0xA0, 0xE8,
+
+	0x51, 0x41, 0xC0, 0xEC,
+	0x31, 0x00,
+	0x39, 0x00,
+
+	0x61, 0x80, 0x15, 0xEA,
+	0x08, 0x04,
+	0x10, 0x04,
+
+	0x51, 0x49, 0xC0, 0xEC,
+	0x2F, 0x41, 0x60, 0xEA,
+
+	0x31, 0x20,
+	0x39, 0x20,
+	0x1F, 0x42, 0xA0, 0xE8,
+
+	0x2A, 0x42, 0x4A, 0xBF,
+	0x27, 0x4A, 0xA0, 0xE8,
+
+	0x1A, 0x42, 0x52, 0xBF,
+	0x1E, 0x49, 0x60, 0xEA,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x26, 0x51, 0x60, 0xEA,
+
+	0x32, 0x40, 0x48, 0xBD,
+	0x22, 0x40, 0x50, 0xBD,
+
+	0x12, 0x41, 0x49, 0xBD,
+	0x3A, 0x41, 0x51, 0xBD,
+
+	0xBF, 0x2F, 0x26, 0xBD,
+	0x00, 0xE0,
+	0x7B, 0x72,
+
+	0x32, 0x20,
+	0x22, 0x20,
+	0x12, 0x20,
+	0x3A, 0x20,
+
+	0x46, 0x31, 0x46, 0xBF,
+	0x4E, 0x31, 0x4E, 0xBF,
+
+	0xB3, 0xE2, 0x2D, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x56, 0x31, 0x56, 0xBF,
+	0x47, 0x39, 0x47, 0xBF,
+
+	0x4F, 0x39, 0x4F, 0xBF,
+	0x57, 0x39, 0x57, 0xBF,
+
+	0x53, 0x80, 0x07, 0xEA,
+	0x24, 0x41, 0x20, 0xE9,
+
+	0x42, 0x73, 0xF8, 0xEC,
+	0x00, 0xE0,
+	0x2D, 0x73,
+
+	0x33, 0x72,
+	0x0C, 0xE3,
+	0xA5, 0x2F, 0x1E, 0xBD,
+
+	0x43, 0x43, 0x2D, 0xDF,
+	0x4B, 0x4B, 0x2D, 0xDF,
+
+	0xAE, 0x1E, 0x26, 0xBD,
+	0x58, 0xE3,
+	0x33, 0x66,
+
+	0x53, 0x53, 0x2D, 0xDF,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xB8, 0x38, 0x33, 0xBF,
+	0x00, 0xE0,
+	0x59, 0xE3,
+
+	0x1E, 0x12, 0x41, 0xE9,
+	0x1A, 0x22, 0x41, 0xE9,
+
+	0x2B, 0x40, 0x3D, 0xE9,
+	0x3F, 0x4B, 0xA0, 0xE8,
+
+	0x2D, 0x73,
+	0x30, 0x76,
+	0x05, 0x80, 0x3D, 0xEA,
+
+	0x37, 0x43, 0xA0, 0xE8,
+	0x3D, 0x53, 0xA0, 0xE8,
+
+	0x48, 0x70, 0xF8, 0xEC,
+	0x2B, 0x48, 0x3C, 0xE9,
+
+	0x1F, 0x27, 0xBC, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x18, 0x3A, 0x41, 0xE9,
+	0x1D, 0x32, 0x41, 0xE9,
+
+	0x2A, 0x40, 0x20, 0xE9,
+	0x56, 0x3D, 0x56, 0xDF,
+
+	0x46, 0x37, 0x46, 0xDF,
+	0x4E, 0x3F, 0x4E, 0xDF,
+
+	0x16, 0x30, 0x20, 0xE9,
+	0x4F, 0x3F, 0x4F, 0xDF,
+
+	0x32, 0x32, 0x2D, 0xDF,
+	0x22, 0x22, 0x2D, 0xDF,
+
+	0x12, 0x12, 0x2D, 0xDF,
+	0x3A, 0x3A, 0x2D, 0xDF,
+
+	0x47, 0x37, 0x47, 0xDF,
+	0x57, 0x3D, 0x57, 0xDF,
+
+	0x3D, 0xCF, 0x74, 0xC0,
+	0x37, 0xCF, 0x74, 0xC4,
+
+	0x0A, 0x44, 0x4C, 0xB0,
+	0x02, 0x44, 0x54, 0xB0,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x34, 0x37, 0x20, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3C, 0x3D, 0x20, 0xE9,
+
+	0x2A, 0x44, 0x4C, 0xB2,
+	0x1A, 0x44, 0x54, 0xB2,
+
+	0x26, 0x80, 0x3A, 0xEA,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x88, 0x73, 0x5E, 0xE9,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x3D, 0xCF, 0x74, 0xC2,
+	0x27, 0xCF, 0x74, 0xC6,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x32, 0x31, 0x5F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x33, 0x39, 0x5F, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x9C, 0x27, 0x20, 0xE9,
+
+	0x0A, 0x44, 0x4C, 0xB4,
+	0x02, 0x44, 0x54, 0xB4,
+
+	0x2A, 0x44, 0x4C, 0xB6,
+	0x1A, 0x44, 0x54, 0xB6,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x38, 0x3D, 0x20, 0xE9,
+
+	0x0A, 0x20,
+	0x02, 0x20,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x3D, 0xCF, 0x75, 0xC6,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x3E, 0x30, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x3F, 0x38, 0x4F, 0xE9,
+
+	0x0A, 0x45, 0x4D, 0xB6,
+	0x02, 0x45, 0x55, 0xB6,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x3A, 0x31, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3B, 0x39, 0x4F, 0xE9,
+
+	0x31, 0x3D, 0x20, 0xE9,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x2A, 0x46, 0x4E, 0xBF,
+	0x1A, 0x46, 0x56, 0xBF,
+
+	0x0A, 0x47, 0x4F, 0xBF,
+	0x02, 0x47, 0x57, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x36, 0x30, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x37, 0x38, 0x4F, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x9D, 0x31, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x9E, 0x39, 0x4F, 0xE9,
+
+	0x2A, 0x43, 0x4B, 0xBF,
+	0x1A, 0x43, 0x53, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x35, 0x30, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x39, 0x38, 0x4F, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x80, 0x31, 0x57, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x81, 0x39, 0x57, 0xE9,
+
+	0x37, 0x48, 0x50, 0xBD,
+	0x8A, 0x36, 0x20, 0xE9,
+
+	0x86, 0x76, 0x57, 0xE9,
+	0x8B, 0x3E, 0x20, 0xE9,
+
+	0x82, 0x30, 0x57, 0xE9,
+	0x87, 0x77, 0x57, 0xE9,
+
+	0x83, 0x38, 0x57, 0xE9,
+	0x35, 0x49, 0x51, 0xBD,
+
+	0x84, 0x31, 0x5E, 0xE9,
+	0x30, 0x1F, 0x5F, 0xE9,
+
+	0x85, 0x39, 0x5E, 0xE9,
+	0x57, 0x25, 0x20, 0xE9,
+
+	0x2B, 0x48, 0x20, 0xE9,
+	0x1D, 0x37, 0xE1, 0xEA,
+
+	0x1E, 0x35, 0xE1, 0xEA,
+	0x00, 0xE0,
+	0x26, 0x77,
+
+	0x24, 0x49, 0x20, 0xE9,
+	0xA6, 0xFF, 0x20, 0xEA,
+
+	0x16, 0x26, 0x20, 0xE9,
+	0x57, 0x2E, 0xBF, 0xEA,
+
+	0x1C, 0x46, 0xA0, 0xE8,
+	0x23, 0x4E, 0xA0, 0xE8,
+
+	0x2B, 0x56, 0xA0, 0xE8,
+	0x1D, 0x47, 0xA0, 0xE8,
+
+	0x24, 0x4F, 0xA0, 0xE8,
+	0x2C, 0x57, 0xA0, 0xE8,
+
+	0x1C, 0x00,
+	0x23, 0x00,
+	0x2B, 0x00,
+	0x00, 0xE0,
+
+	0x1D, 0x00,
+	0x24, 0x00,
+	0x2C, 0x00,
+	0x00, 0xE0,
+
+	0x1C, 0x65,
+	0x23, 0x65,
+	0x2B, 0x65,
+	0x00, 0xE0,
+
+	0x1D, 0x65,
+	0x24, 0x65,
+	0x2C, 0x65,
+	0x00, 0xE0,
+
+	0x1C, 0x23, 0x60, 0xEC,
+	0x36, 0xD7, 0x36, 0xAD,
+
+	0x2B, 0x80, 0x60, 0xEC,
+	0x1D, 0x24, 0x60, 0xEC,
+
+	0x3E, 0xD7, 0x3E, 0xAD,
+	0x2C, 0x80, 0x60, 0xEC,
+
+	0x1C, 0x2B, 0xDE, 0xE8,
+	0x23, 0x80, 0xDE, 0xE8,
+
+	0x36, 0x80, 0x36, 0xBD,
+	0x3E, 0x80, 0x3E, 0xBD,
+
+	0x33, 0xD7, 0x1C, 0xBD,
+	0x3B, 0xD7, 0x23, 0xBD,
+
+	0x46, 0x80, 0x46, 0xCF,
+	0x4F, 0x80, 0x4F, 0xCF,
+
+	0x56, 0x33, 0x56, 0xCF,
+	0x47, 0x3B, 0x47, 0xCF,
+
+	0xCD, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x4E, 0x33, 0x4E, 0xCF,
+	0x57, 0x3B, 0x57, 0xCF,
+
+	0x94, 0xFF, 0x20, 0xEA,
+	0x57, 0xC0, 0xBF, 0xEA,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+};
+
+static unsigned char warp_g400_tgzf[] = {
+
+	0x00, 0x88, 0x98, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x22, 0x40, 0x48, 0xBF,
+	0x2A, 0x40, 0x50, 0xBF,
+
+	0x32, 0x41, 0x49, 0xBF,
+	0x3A, 0x41, 0x51, 0xBF,
+
+	0xC3, 0x6B,
+	0xCB, 0x6B,
+	0x00, 0x88, 0x98, 0xE9,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x96, 0xE2,
+	0x41, 0x04,
+
+	0x7B, 0x43, 0xA0, 0xE8,
+	0x73, 0x4B, 0xA0, 0xE8,
+
+	0xAD, 0xEE, 0x29, 0x9F,
+	0x00, 0xE0,
+	0x49, 0x04,
+
+	0x90, 0xE2,
+	0x51, 0x04,
+	0x31, 0x46, 0xB1, 0xE8,
+
+	0x49, 0x41, 0xC0, 0xEC,
+	0x39, 0x57, 0xB1, 0xE8,
+
+	0x00, 0x04,
+	0x46, 0xE2,
+	0x73, 0x53, 0xA0, 0xE8,
+
+	0x51, 0x41, 0xC0, 0xEC,
+	0x31, 0x00,
+	0x39, 0x00,
+
+	0x5D, 0x80, 0x15, 0xEA,
+	0x08, 0x04,
+	0x10, 0x04,
+
+	0x51, 0x49, 0xC0, 0xEC,
+	0x2F, 0x41, 0x60, 0xEA,
+
+	0x31, 0x20,
+	0x39, 0x20,
+	0x1F, 0x42, 0xA0, 0xE8,
+
+	0x2A, 0x42, 0x4A, 0xBF,
+	0x27, 0x4A, 0xA0, 0xE8,
+
+	0x1A, 0x42, 0x52, 0xBF,
+	0x1E, 0x49, 0x60, 0xEA,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x26, 0x51, 0x60, 0xEA,
+
+	0x32, 0x40, 0x48, 0xBD,
+	0x22, 0x40, 0x50, 0xBD,
+
+	0x12, 0x41, 0x49, 0xBD,
+	0x3A, 0x41, 0x51, 0xBD,
+
+	0xBF, 0x2F, 0x26, 0xBD,
+	0x00, 0xE0,
+	0x7B, 0x72,
+
+	0x32, 0x20,
+	0x22, 0x20,
+	0x12, 0x20,
+	0x3A, 0x20,
+
+	0x46, 0x31, 0x46, 0xBF,
+	0x4E, 0x31, 0x4E, 0xBF,
+
+	0xB3, 0xE2, 0x2D, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x56, 0x31, 0x56, 0xBF,
+	0x47, 0x39, 0x47, 0xBF,
+
+	0x4F, 0x39, 0x4F, 0xBF,
+	0x57, 0x39, 0x57, 0xBF,
+
+	0x4F, 0x80, 0x07, 0xEA,
+	0x24, 0x41, 0x20, 0xE9,
+
+	0x42, 0x73, 0xF8, 0xEC,
+	0x00, 0xE0,
+	0x2D, 0x73,
+
+	0x33, 0x72,
+	0x0C, 0xE3,
+	0xA5, 0x2F, 0x1E, 0xBD,
+
+	0x43, 0x43, 0x2D, 0xDF,
+	0x4B, 0x4B, 0x2D, 0xDF,
+
+	0xAE, 0x1E, 0x26, 0xBD,
+	0x58, 0xE3,
+	0x33, 0x66,
+
+	0x53, 0x53, 0x2D, 0xDF,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xB8, 0x38, 0x33, 0xBF,
+	0x00, 0xE0,
+	0x59, 0xE3,
+
+	0x1E, 0x12, 0x41, 0xE9,
+	0x1A, 0x22, 0x41, 0xE9,
+
+	0x2B, 0x40, 0x3D, 0xE9,
+	0x3F, 0x4B, 0xA0, 0xE8,
+
+	0x2D, 0x73,
+	0x30, 0x76,
+	0x05, 0x80, 0x3D, 0xEA,
+
+	0x37, 0x43, 0xA0, 0xE8,
+	0x3D, 0x53, 0xA0, 0xE8,
+
+	0x48, 0x70, 0xF8, 0xEC,
+	0x2B, 0x48, 0x3C, 0xE9,
+
+	0x1F, 0x27, 0xBC, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x18, 0x3A, 0x41, 0xE9,
+	0x1D, 0x32, 0x41, 0xE9,
+
+	0x2A, 0x40, 0x20, 0xE9,
+	0x56, 0x3D, 0x56, 0xDF,
+
+	0x46, 0x37, 0x46, 0xDF,
+	0x4E, 0x3F, 0x4E, 0xDF,
+
+	0x16, 0x30, 0x20, 0xE9,
+	0x4F, 0x3F, 0x4F, 0xDF,
+
+	0x32, 0x32, 0x2D, 0xDF,
+	0x22, 0x22, 0x2D, 0xDF,
+
+	0x12, 0x12, 0x2D, 0xDF,
+	0x3A, 0x3A, 0x2D, 0xDF,
+
+	0x47, 0x37, 0x47, 0xDF,
+	0x57, 0x3D, 0x57, 0xDF,
+
+	0x3D, 0xCF, 0x74, 0xC0,
+	0x37, 0xCF, 0x74, 0xC4,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x34, 0x80, 0x20, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x88, 0x73, 0x5E, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x27, 0xCF, 0x75, 0xC6,
+	0x3C, 0x3D, 0x20, 0xE9,
+
+	0x0A, 0x44, 0x4C, 0xB0,
+	0x02, 0x44, 0x54, 0xB0,
+
+	0x2A, 0x44, 0x4C, 0xB2,
+	0x1A, 0x44, 0x54, 0xB2,
+
+	0x20, 0x80, 0x3A, 0xEA,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x3D, 0xCF, 0x74, 0xC2,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x32, 0x31, 0x5F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x33, 0x39, 0x5F, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x31, 0x27, 0x20, 0xE9,
+
+	0x0A, 0x44, 0x4C, 0xB4,
+	0x02, 0x44, 0x54, 0xB4,
+
+	0x2A, 0x45, 0x4D, 0xB6,
+	0x1A, 0x45, 0x55, 0xB6,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x38, 0x3D, 0x20, 0xE9,
+
+	0x0A, 0x20,
+	0x02, 0x20,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x0A, 0x47, 0x4F, 0xBF,
+	0x02, 0x47, 0x57, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x3E, 0x30, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x3F, 0x38, 0x4F, 0xE9,
+
+	0x2A, 0x46, 0x4E, 0xBF,
+	0x1A, 0x46, 0x56, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x3A, 0x31, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3B, 0x39, 0x4F, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x36, 0x30, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x37, 0x38, 0x4F, 0xE9,
+
+	0x2A, 0x43, 0x4B, 0xBF,
+	0x1A, 0x43, 0x53, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x35, 0x31, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x39, 0x39, 0x4F, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x80, 0x31, 0x57, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x81, 0x39, 0x57, 0xE9,
+
+	0x37, 0x48, 0x50, 0xBD,
+	0x8A, 0x36, 0x20, 0xE9,
+
+	0x86, 0x76, 0x57, 0xE9,
+	0x8B, 0x3E, 0x20, 0xE9,
+
+	0x82, 0x30, 0x57, 0xE9,
+	0x87, 0x77, 0x57, 0xE9,
+
+	0x83, 0x38, 0x57, 0xE9,
+	0x35, 0x49, 0x51, 0xBD,
+
+	0x84, 0x31, 0x5E, 0xE9,
+	0x30, 0x1F, 0x5F, 0xE9,
+
+	0x85, 0x39, 0x5E, 0xE9,
+	0x57, 0x25, 0x20, 0xE9,
+
+	0x2B, 0x48, 0x20, 0xE9,
+	0x1D, 0x37, 0xE1, 0xEA,
+
+	0x1E, 0x35, 0xE1, 0xEA,
+	0x00, 0xE0,
+	0x26, 0x77,
+
+	0x24, 0x49, 0x20, 0xE9,
+	0xAA, 0xFF, 0x20, 0xEA,
+
+	0x16, 0x26, 0x20, 0xE9,
+	0x57, 0x2E, 0xBF, 0xEA,
+
+	0x1C, 0x46, 0xA0, 0xE8,
+	0x23, 0x4E, 0xA0, 0xE8,
+
+	0x2B, 0x56, 0xA0, 0xE8,
+	0x1D, 0x47, 0xA0, 0xE8,
+
+	0x24, 0x4F, 0xA0, 0xE8,
+	0x2C, 0x57, 0xA0, 0xE8,
+
+	0x1C, 0x00,
+	0x23, 0x00,
+	0x2B, 0x00,
+	0x00, 0xE0,
+
+	0x1D, 0x00,
+	0x24, 0x00,
+	0x2C, 0x00,
+	0x00, 0xE0,
+
+	0x1C, 0x65,
+	0x23, 0x65,
+	0x2B, 0x65,
+	0x00, 0xE0,
+
+	0x1D, 0x65,
+	0x24, 0x65,
+	0x2C, 0x65,
+	0x00, 0xE0,
+
+	0x1C, 0x23, 0x60, 0xEC,
+	0x36, 0xD7, 0x36, 0xAD,
+
+	0x2B, 0x80, 0x60, 0xEC,
+	0x1D, 0x24, 0x60, 0xEC,
+
+	0x3E, 0xD7, 0x3E, 0xAD,
+	0x2C, 0x80, 0x60, 0xEC,
+
+	0x1C, 0x2B, 0xDE, 0xE8,
+	0x23, 0x80, 0xDE, 0xE8,
+
+	0x36, 0x80, 0x36, 0xBD,
+	0x3E, 0x80, 0x3E, 0xBD,
+
+	0x33, 0xD7, 0x1C, 0xBD,
+	0x3B, 0xD7, 0x23, 0xBD,
+
+	0x46, 0x80, 0x46, 0xCF,
+	0x4F, 0x80, 0x4F, 0xCF,
+
+	0x56, 0x33, 0x56, 0xCF,
+	0x47, 0x3B, 0x47, 0xCF,
+
+	0xD3, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x4E, 0x33, 0x4E, 0xCF,
+	0x57, 0x3B, 0x57, 0xCF,
+
+	0x98, 0xFF, 0x20, 0xEA,
+	0x57, 0xC0, 0xBF, 0xEA,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+};
+
+static unsigned char warp_g400_tgzs[] = {
+
+	0x00, 0x88, 0x98, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x22, 0x40, 0x48, 0xBF,
+	0x2A, 0x40, 0x50, 0xBF,
+
+	0x32, 0x41, 0x49, 0xBF,
+	0x3A, 0x41, 0x51, 0xBF,
+
+	0xC3, 0x6B,
+	0xCB, 0x6B,
+	0x00, 0x88, 0x98, 0xE9,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x96, 0xE2,
+	0x41, 0x04,
+
+	0x7B, 0x43, 0xA0, 0xE8,
+	0x73, 0x4B, 0xA0, 0xE8,
+
+	0xAD, 0xEE, 0x29, 0x9F,
+	0x00, 0xE0,
+	0x49, 0x04,
+
+	0x90, 0xE2,
+	0x51, 0x04,
+	0x31, 0x46, 0xB1, 0xE8,
+
+	0x49, 0x41, 0xC0, 0xEC,
+	0x39, 0x57, 0xB1, 0xE8,
+
+	0x00, 0x04,
+	0x46, 0xE2,
+	0x73, 0x53, 0xA0, 0xE8,
+
+	0x51, 0x41, 0xC0, 0xEC,
+	0x31, 0x00,
+	0x39, 0x00,
+
+	0x65, 0x80, 0x15, 0xEA,
+	0x08, 0x04,
+	0x10, 0x04,
+
+	0x51, 0x49, 0xC0, 0xEC,
+	0x2F, 0x41, 0x60, 0xEA,
+
+	0x31, 0x20,
+	0x39, 0x20,
+	0x1F, 0x42, 0xA0, 0xE8,
+
+	0x2A, 0x42, 0x4A, 0xBF,
+	0x27, 0x4A, 0xA0, 0xE8,
+
+	0x1A, 0x42, 0x52, 0xBF,
+	0x1E, 0x49, 0x60, 0xEA,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x26, 0x51, 0x60, 0xEA,
+
+	0x32, 0x40, 0x48, 0xBD,
+	0x22, 0x40, 0x50, 0xBD,
+
+	0x12, 0x41, 0x49, 0xBD,
+	0x3A, 0x41, 0x51, 0xBD,
+
+	0xBF, 0x2F, 0x26, 0xBD,
+	0x00, 0xE0,
+	0x7B, 0x72,
+
+	0x32, 0x20,
+	0x22, 0x20,
+	0x12, 0x20,
+	0x3A, 0x20,
+
+	0x46, 0x31, 0x46, 0xBF,
+	0x4E, 0x31, 0x4E, 0xBF,
+
+	0xB3, 0xE2, 0x2D, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x56, 0x31, 0x56, 0xBF,
+	0x47, 0x39, 0x47, 0xBF,
+
+	0x4F, 0x39, 0x4F, 0xBF,
+	0x57, 0x39, 0x57, 0xBF,
+
+	0x57, 0x80, 0x07, 0xEA,
+	0x24, 0x41, 0x20, 0xE9,
+
+	0x42, 0x73, 0xF8, 0xEC,
+	0x00, 0xE0,
+	0x2D, 0x73,
+
+	0x33, 0x72,
+	0x0C, 0xE3,
+	0xA5, 0x2F, 0x1E, 0xBD,
+
+	0x43, 0x43, 0x2D, 0xDF,
+	0x4B, 0x4B, 0x2D, 0xDF,
+
+	0xAE, 0x1E, 0x26, 0xBD,
+	0x58, 0xE3,
+	0x33, 0x66,
+
+	0x53, 0x53, 0x2D, 0xDF,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xB8, 0x38, 0x33, 0xBF,
+	0x00, 0xE0,
+	0x59, 0xE3,
+
+	0x1E, 0x12, 0x41, 0xE9,
+	0x1A, 0x22, 0x41, 0xE9,
+
+	0x2B, 0x40, 0x3D, 0xE9,
+	0x3F, 0x4B, 0xA0, 0xE8,
+
+	0x2D, 0x73,
+	0x30, 0x76,
+	0x05, 0x80, 0x3D, 0xEA,
+
+	0x37, 0x43, 0xA0, 0xE8,
+	0x3D, 0x53, 0xA0, 0xE8,
+
+	0x48, 0x70, 0xF8, 0xEC,
+	0x2B, 0x48, 0x3C, 0xE9,
+
+	0x1F, 0x27, 0xBC, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x18, 0x3A, 0x41, 0xE9,
+	0x1D, 0x32, 0x41, 0xE9,
+
+	0x2A, 0x40, 0x20, 0xE9,
+	0x56, 0x3D, 0x56, 0xDF,
+
+	0x46, 0x37, 0x46, 0xDF,
+	0x4E, 0x3F, 0x4E, 0xDF,
+
+	0x16, 0x30, 0x20, 0xE9,
+	0x4F, 0x3F, 0x4F, 0xDF,
+
+	0x47, 0x37, 0x47, 0xDF,
+	0x57, 0x3D, 0x57, 0xDF,
+
+	0x32, 0x32, 0x2D, 0xDF,
+	0x22, 0x22, 0x2D, 0xDF,
+
+	0x12, 0x12, 0x2D, 0xDF,
+	0x3A, 0x3A, 0x2D, 0xDF,
+
+	0x27, 0xCF, 0x74, 0xC2,
+	0x37, 0xCF, 0x74, 0xC4,
+
+	0x0A, 0x44, 0x4C, 0xB0,
+	0x02, 0x44, 0x54, 0xB0,
+
+	0x3D, 0xCF, 0x74, 0xC0,
+	0x34, 0x37, 0x20, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x38, 0x27, 0x20, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3C, 0x3D, 0x20, 0xE9,
+
+	0x2A, 0x44, 0x4C, 0xB2,
+	0x1A, 0x44, 0x54, 0xB2,
+
+	0x29, 0x80, 0x3A, 0xEA,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x27, 0xCF, 0x75, 0xC0,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x32, 0x31, 0x5F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x33, 0x39, 0x5F, 0xE9,
+
+	0x3D, 0xCF, 0x75, 0xC2,
+	0x37, 0xCF, 0x75, 0xC4,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0xA6, 0x27, 0x20, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0xA3, 0x3D, 0x20, 0xE9,
+
+	0x2A, 0x44, 0x4C, 0xB4,
+	0x1A, 0x44, 0x54, 0xB4,
+
+	0x0A, 0x45, 0x4D, 0xB0,
+	0x02, 0x45, 0x55, 0xB0,
+
+	0x88, 0x73, 0x5E, 0xE9,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0xA0, 0x37, 0x20, 0xE9,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x3E, 0x30, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3F, 0x38, 0x4F, 0xE9,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x3A, 0x31, 0x4F, 0xE9,
+
+	0x2A, 0x45, 0x4D, 0xB2,
+	0x1A, 0x45, 0x55, 0xB2,
+
+	0x0A, 0x45, 0x4D, 0xB4,
+	0x02, 0x45, 0x55, 0xB4,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x3B, 0x39, 0x4F, 0xE9,
+
+	0x0A, 0x20,
+	0x02, 0x20,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x2A, 0x46, 0x4E, 0xBF,
+	0x1A, 0x46, 0x56, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x36, 0x31, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x37, 0x39, 0x4F, 0xE9,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0xA7, 0x30, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0xA8, 0x38, 0x4F, 0xE9,
+
+	0x0A, 0x47, 0x4F, 0xBF,
+	0x02, 0x47, 0x57, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0xA4, 0x31, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0xA5, 0x39, 0x4F, 0xE9,
+
+	0x2A, 0x43, 0x4B, 0xBF,
+	0x1A, 0x43, 0x53, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0xA1, 0x30, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0xA2, 0x38, 0x4F, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x80, 0x31, 0x57, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x81, 0x39, 0x57, 0xE9,
+
+	0x37, 0x48, 0x50, 0xBD,
+	0x8A, 0x36, 0x20, 0xE9,
+
+	0x86, 0x76, 0x57, 0xE9,
+	0x8B, 0x3E, 0x20, 0xE9,
+
+	0x82, 0x30, 0x57, 0xE9,
+	0x87, 0x77, 0x57, 0xE9,
+
+	0x83, 0x38, 0x57, 0xE9,
+	0x35, 0x49, 0x51, 0xBD,
+
+	0x84, 0x31, 0x5E, 0xE9,
+	0x30, 0x1F, 0x5F, 0xE9,
+
+	0x85, 0x39, 0x5E, 0xE9,
+	0x57, 0x25, 0x20, 0xE9,
+
+	0x2B, 0x48, 0x20, 0xE9,
+	0x1D, 0x37, 0xE1, 0xEA,
+
+	0x1E, 0x35, 0xE1, 0xEA,
+	0x00, 0xE0,
+	0x26, 0x77,
+
+	0x24, 0x49, 0x20, 0xE9,
+	0xA2, 0xFF, 0x20, 0xEA,
+
+	0x16, 0x26, 0x20, 0xE9,
+	0x57, 0x2E, 0xBF, 0xEA,
+
+	0x1C, 0x46, 0xA0, 0xE8,
+	0x23, 0x4E, 0xA0, 0xE8,
+
+	0x2B, 0x56, 0xA0, 0xE8,
+	0x1D, 0x47, 0xA0, 0xE8,
+
+	0x24, 0x4F, 0xA0, 0xE8,
+	0x2C, 0x57, 0xA0, 0xE8,
+
+	0x1C, 0x00,
+	0x23, 0x00,
+	0x2B, 0x00,
+	0x00, 0xE0,
+
+	0x1D, 0x00,
+	0x24, 0x00,
+	0x2C, 0x00,
+	0x00, 0xE0,
+
+	0x1C, 0x65,
+	0x23, 0x65,
+	0x2B, 0x65,
+	0x00, 0xE0,
+
+	0x1D, 0x65,
+	0x24, 0x65,
+	0x2C, 0x65,
+	0x00, 0xE0,
+
+	0x1C, 0x23, 0x60, 0xEC,
+	0x36, 0xD7, 0x36, 0xAD,
+
+	0x2B, 0x80, 0x60, 0xEC,
+	0x1D, 0x24, 0x60, 0xEC,
+
+	0x3E, 0xD7, 0x3E, 0xAD,
+	0x2C, 0x80, 0x60, 0xEC,
+
+	0x1C, 0x2B, 0xDE, 0xE8,
+	0x23, 0x80, 0xDE, 0xE8,
+
+	0x36, 0x80, 0x36, 0xBD,
+	0x3E, 0x80, 0x3E, 0xBD,
+
+	0x33, 0xD7, 0x1C, 0xBD,
+	0x3B, 0xD7, 0x23, 0xBD,
+
+	0x46, 0x80, 0x46, 0xCF,
+	0x4F, 0x80, 0x4F, 0xCF,
+
+	0x56, 0x33, 0x56, 0xCF,
+	0x47, 0x3B, 0x47, 0xCF,
+
+	0xCA, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x4E, 0x33, 0x4E, 0xCF,
+	0x57, 0x3B, 0x57, 0xCF,
+
+	0x90, 0xFF, 0x20, 0xEA,
+	0x57, 0xC0, 0xBF, 0xEA,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+};
+
+static unsigned char warp_g400_tgzsa[] = {
+
+	0x00, 0x88, 0x98, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x22, 0x40, 0x48, 0xBF,
+	0x2A, 0x40, 0x50, 0xBF,
+
+	0x32, 0x41, 0x49, 0xBF,
+	0x3A, 0x41, 0x51, 0xBF,
+
+	0xC3, 0x6B,
+	0xCB, 0x6B,
+	0x00, 0x88, 0x98, 0xE9,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x96, 0xE2,
+	0x41, 0x04,
+
+	0x7B, 0x43, 0xA0, 0xE8,
+	0x73, 0x4B, 0xA0, 0xE8,
+
+	0xAD, 0xEE, 0x29, 0x9F,
+	0x00, 0xE0,
+	0x49, 0x04,
+
+	0x90, 0xE2,
+	0x51, 0x04,
+	0x31, 0x46, 0xB1, 0xE8,
+
+	0x49, 0x41, 0xC0, 0xEC,
+	0x39, 0x57, 0xB1, 0xE8,
+
+	0x00, 0x04,
+	0x46, 0xE2,
+	0x73, 0x53, 0xA0, 0xE8,
+
+	0x51, 0x41, 0xC0, 0xEC,
+	0x31, 0x00,
+	0x39, 0x00,
+
+	0x6A, 0x80, 0x15, 0xEA,
+	0x08, 0x04,
+	0x10, 0x04,
+
+	0x51, 0x49, 0xC0, 0xEC,
+	0x2F, 0x41, 0x60, 0xEA,
+
+	0x31, 0x20,
+	0x39, 0x20,
+	0x1F, 0x42, 0xA0, 0xE8,
+
+	0x2A, 0x42, 0x4A, 0xBF,
+	0x27, 0x4A, 0xA0, 0xE8,
+
+	0x1A, 0x42, 0x52, 0xBF,
+	0x1E, 0x49, 0x60, 0xEA,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x26, 0x51, 0x60, 0xEA,
+
+	0x32, 0x40, 0x48, 0xBD,
+	0x22, 0x40, 0x50, 0xBD,
+
+	0x12, 0x41, 0x49, 0xBD,
+	0x3A, 0x41, 0x51, 0xBD,
+
+	0xBF, 0x2F, 0x26, 0xBD,
+	0x00, 0xE0,
+	0x7B, 0x72,
+
+	0x32, 0x20,
+	0x22, 0x20,
+	0x12, 0x20,
+	0x3A, 0x20,
+
+	0x46, 0x31, 0x46, 0xBF,
+	0x4E, 0x31, 0x4E, 0xBF,
+
+	0xB3, 0xE2, 0x2D, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x56, 0x31, 0x56, 0xBF,
+	0x47, 0x39, 0x47, 0xBF,
+
+	0x4F, 0x39, 0x4F, 0xBF,
+	0x57, 0x39, 0x57, 0xBF,
+
+	0x5C, 0x80, 0x07, 0xEA,
+	0x24, 0x41, 0x20, 0xE9,
+
+	0x42, 0x73, 0xF8, 0xEC,
+	0x00, 0xE0,
+	0x2D, 0x73,
+
+	0x33, 0x72,
+	0x0C, 0xE3,
+	0xA5, 0x2F, 0x1E, 0xBD,
+
+	0x43, 0x43, 0x2D, 0xDF,
+	0x4B, 0x4B, 0x2D, 0xDF,
+
+	0xAE, 0x1E, 0x26, 0xBD,
+	0x58, 0xE3,
+	0x33, 0x66,
+
+	0x53, 0x53, 0x2D, 0xDF,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xB8, 0x38, 0x33, 0xBF,
+	0x00, 0xE0,
+	0x59, 0xE3,
+
+	0x1E, 0x12, 0x41, 0xE9,
+	0x1A, 0x22, 0x41, 0xE9,
+
+	0x2B, 0x40, 0x3D, 0xE9,
+	0x3F, 0x4B, 0xA0, 0xE8,
+
+	0x2D, 0x73,
+	0x30, 0x76,
+	0x05, 0x80, 0x3D, 0xEA,
+
+	0x37, 0x43, 0xA0, 0xE8,
+	0x3D, 0x53, 0xA0, 0xE8,
+
+	0x48, 0x70, 0xF8, 0xEC,
+	0x2B, 0x48, 0x3C, 0xE9,
+
+	0x1F, 0x27, 0xBC, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x18, 0x3A, 0x41, 0xE9,
+	0x1D, 0x32, 0x41, 0xE9,
+
+	0x2A, 0x40, 0x20, 0xE9,
+	0x56, 0x3D, 0x56, 0xDF,
+
+	0x46, 0x37, 0x46, 0xDF,
+	0x4E, 0x3F, 0x4E, 0xDF,
+
+	0x16, 0x30, 0x20, 0xE9,
+	0x4F, 0x3F, 0x4F, 0xDF,
+
+	0x47, 0x37, 0x47, 0xDF,
+	0x57, 0x3D, 0x57, 0xDF,
+
+	0x32, 0x32, 0x2D, 0xDF,
+	0x22, 0x22, 0x2D, 0xDF,
+
+	0x12, 0x12, 0x2D, 0xDF,
+	0x3A, 0x3A, 0x2D, 0xDF,
+
+	0x27, 0xCF, 0x74, 0xC2,
+	0x37, 0xCF, 0x74, 0xC4,
+
+	0x0A, 0x44, 0x4C, 0xB0,
+	0x02, 0x44, 0x54, 0xB0,
+
+	0x3D, 0xCF, 0x74, 0xC0,
+	0x34, 0x37, 0x20, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x38, 0x27, 0x20, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3C, 0x3D, 0x20, 0xE9,
+
+	0x2A, 0x44, 0x4C, 0xB2,
+	0x1A, 0x44, 0x54, 0xB2,
+
+	0x2E, 0x80, 0x3A, 0xEA,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x27, 0xCF, 0x75, 0xC0,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x32, 0x31, 0x5F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x33, 0x39, 0x5F, 0xE9,
+
+	0x3D, 0xCF, 0x75, 0xC2,
+	0x37, 0xCF, 0x75, 0xC4,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0xA6, 0x27, 0x20, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0xA3, 0x3D, 0x20, 0xE9,
+
+	0x2A, 0x44, 0x4C, 0xB4,
+	0x1A, 0x44, 0x54, 0xB4,
+
+	0x0A, 0x45, 0x4D, 0xB0,
+	0x02, 0x45, 0x55, 0xB0,
+
+	0x88, 0x73, 0x5E, 0xE9,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0xA0, 0x37, 0x20, 0xE9,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x3E, 0x30, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3F, 0x38, 0x4F, 0xE9,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x3A, 0x31, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x3B, 0x39, 0x4F, 0xE9,
+
+	0x2A, 0x45, 0x4D, 0xB2,
+	0x1A, 0x45, 0x55, 0xB2,
+
+	0x0A, 0x45, 0x4D, 0xB4,
+	0x02, 0x45, 0x55, 0xB4,
+
+	0x27, 0xCF, 0x74, 0xC6,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0xA7, 0x30, 0x4F, 0xE9,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x9C, 0x27, 0x20, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0xA8, 0x38, 0x4F, 0xE9,
+
+	0x2A, 0x44, 0x4C, 0xB6,
+	0x1A, 0x44, 0x54, 0xB6,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x36, 0x31, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x37, 0x39, 0x4F, 0xE9,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x2A, 0x46, 0x4E, 0xBF,
+	0x1A, 0x46, 0x56, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0xA4, 0x31, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0xA5, 0x39, 0x4F, 0xE9,
+
+	0x0A, 0x47, 0x4F, 0xBF,
+	0x02, 0x47, 0x57, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0xA1, 0x30, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0xA2, 0x38, 0x4F, 0xE9,
+
+	0x2A, 0x43, 0x4B, 0xBF,
+	0x1A, 0x43, 0x53, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x9D, 0x31, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x9E, 0x39, 0x4F, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x80, 0x31, 0x57, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x81, 0x39, 0x57, 0xE9,
+
+	0x37, 0x48, 0x50, 0xBD,
+	0x8A, 0x36, 0x20, 0xE9,
+
+	0x86, 0x76, 0x57, 0xE9,
+	0x8B, 0x3E, 0x20, 0xE9,
+
+	0x82, 0x30, 0x57, 0xE9,
+	0x87, 0x77, 0x57, 0xE9,
+
+	0x83, 0x38, 0x57, 0xE9,
+	0x35, 0x49, 0x51, 0xBD,
+
+	0x84, 0x31, 0x5E, 0xE9,
+	0x30, 0x1F, 0x5F, 0xE9,
+
+	0x85, 0x39, 0x5E, 0xE9,
+	0x57, 0x25, 0x20, 0xE9,
+
+	0x2B, 0x48, 0x20, 0xE9,
+	0x1D, 0x37, 0xE1, 0xEA,
+
+	0x1E, 0x35, 0xE1, 0xEA,
+	0x00, 0xE0,
+	0x26, 0x77,
+
+	0x24, 0x49, 0x20, 0xE9,
+	0x9D, 0xFF, 0x20, 0xEA,
+
+	0x16, 0x26, 0x20, 0xE9,
+	0x57, 0x2E, 0xBF, 0xEA,
+
+	0x1C, 0x46, 0xA0, 0xE8,
+	0x23, 0x4E, 0xA0, 0xE8,
+
+	0x2B, 0x56, 0xA0, 0xE8,
+	0x1D, 0x47, 0xA0, 0xE8,
+
+	0x24, 0x4F, 0xA0, 0xE8,
+	0x2C, 0x57, 0xA0, 0xE8,
+
+	0x1C, 0x00,
+	0x23, 0x00,
+	0x2B, 0x00,
+	0x00, 0xE0,
+
+	0x1D, 0x00,
+	0x24, 0x00,
+	0x2C, 0x00,
+	0x00, 0xE0,
+
+	0x1C, 0x65,
+	0x23, 0x65,
+	0x2B, 0x65,
+	0x00, 0xE0,
+
+	0x1D, 0x65,
+	0x24, 0x65,
+	0x2C, 0x65,
+	0x00, 0xE0,
+
+	0x1C, 0x23, 0x60, 0xEC,
+	0x36, 0xD7, 0x36, 0xAD,
+
+	0x2B, 0x80, 0x60, 0xEC,
+	0x1D, 0x24, 0x60, 0xEC,
+
+	0x3E, 0xD7, 0x3E, 0xAD,
+	0x2C, 0x80, 0x60, 0xEC,
+
+	0x1C, 0x2B, 0xDE, 0xE8,
+	0x23, 0x80, 0xDE, 0xE8,
+
+	0x36, 0x80, 0x36, 0xBD,
+	0x3E, 0x80, 0x3E, 0xBD,
+
+	0x33, 0xD7, 0x1C, 0xBD,
+	0x3B, 0xD7, 0x23, 0xBD,
+
+	0x46, 0x80, 0x46, 0xCF,
+	0x4F, 0x80, 0x4F, 0xCF,
+
+	0x56, 0x33, 0x56, 0xCF,
+	0x47, 0x3B, 0x47, 0xCF,
+
+	0xC5, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x4E, 0x33, 0x4E, 0xCF,
+	0x57, 0x3B, 0x57, 0xCF,
+
+	0x8B, 0xFF, 0x20, 0xEA,
+	0x57, 0xC0, 0xBF, 0xEA,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+};
+
+static unsigned char warp_g400_tgzsaf[] = {
+
+	0x00, 0x88, 0x98, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x22, 0x40, 0x48, 0xBF,
+	0x2A, 0x40, 0x50, 0xBF,
+
+	0x32, 0x41, 0x49, 0xBF,
+	0x3A, 0x41, 0x51, 0xBF,
+
+	0xC3, 0x6B,
+	0xCB, 0x6B,
+	0x00, 0x88, 0x98, 0xE9,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x96, 0xE2,
+	0x41, 0x04,
+
+	0x7B, 0x43, 0xA0, 0xE8,
+	0x73, 0x4B, 0xA0, 0xE8,
+
+	0xAD, 0xEE, 0x29, 0x9F,
+	0x00, 0xE0,
+	0x49, 0x04,
+
+	0x90, 0xE2,
+	0x51, 0x04,
+	0x31, 0x46, 0xB1, 0xE8,
+
+	0x49, 0x41, 0xC0, 0xEC,
+	0x39, 0x57, 0xB1, 0xE8,
+
+	0x00, 0x04,
+	0x46, 0xE2,
+	0x73, 0x53, 0xA0, 0xE8,
+
+	0x51, 0x41, 0xC0, 0xEC,
+	0x31, 0x00,
+	0x39, 0x00,
+
+	0x6E, 0x80, 0x15, 0xEA,
+	0x08, 0x04,
+	0x10, 0x04,
+
+	0x51, 0x49, 0xC0, 0xEC,
+	0x2F, 0x41, 0x60, 0xEA,
+
+	0x31, 0x20,
+	0x39, 0x20,
+	0x1F, 0x42, 0xA0, 0xE8,
+
+	0x2A, 0x42, 0x4A, 0xBF,
+	0x27, 0x4A, 0xA0, 0xE8,
+
+	0x1A, 0x42, 0x52, 0xBF,
+	0x1E, 0x49, 0x60, 0xEA,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x26, 0x51, 0x60, 0xEA,
+
+	0x32, 0x40, 0x48, 0xBD,
+	0x22, 0x40, 0x50, 0xBD,
+
+	0x12, 0x41, 0x49, 0xBD,
+	0x3A, 0x41, 0x51, 0xBD,
+
+	0xBF, 0x2F, 0x26, 0xBD,
+	0x00, 0xE0,
+	0x7B, 0x72,
+
+	0x32, 0x20,
+	0x22, 0x20,
+	0x12, 0x20,
+	0x3A, 0x20,
+
+	0x46, 0x31, 0x46, 0xBF,
+	0x4E, 0x31, 0x4E, 0xBF,
+
+	0xB3, 0xE2, 0x2D, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x56, 0x31, 0x56, 0xBF,
+	0x47, 0x39, 0x47, 0xBF,
+
+	0x4F, 0x39, 0x4F, 0xBF,
+	0x57, 0x39, 0x57, 0xBF,
+
+	0x60, 0x80, 0x07, 0xEA,
+	0x24, 0x41, 0x20, 0xE9,
+
+	0x42, 0x73, 0xF8, 0xEC,
+	0x00, 0xE0,
+	0x2D, 0x73,
+
+	0x33, 0x72,
+	0x0C, 0xE3,
+	0xA5, 0x2F, 0x1E, 0xBD,
+
+	0x43, 0x43, 0x2D, 0xDF,
+	0x4B, 0x4B, 0x2D, 0xDF,
+
+	0xAE, 0x1E, 0x26, 0xBD,
+	0x58, 0xE3,
+	0x33, 0x66,
+
+	0x53, 0x53, 0x2D, 0xDF,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xB8, 0x38, 0x33, 0xBF,
+	0x00, 0xE0,
+	0x59, 0xE3,
+
+	0x1E, 0x12, 0x41, 0xE9,
+	0x1A, 0x22, 0x41, 0xE9,
+
+	0x2B, 0x40, 0x3D, 0xE9,
+	0x3F, 0x4B, 0xA0, 0xE8,
+
+	0x2D, 0x73,
+	0x30, 0x76,
+	0x05, 0x80, 0x3D, 0xEA,
+
+	0x37, 0x43, 0xA0, 0xE8,
+	0x3D, 0x53, 0xA0, 0xE8,
+
+	0x48, 0x70, 0xF8, 0xEC,
+	0x2B, 0x48, 0x3C, 0xE9,
+
+	0x1F, 0x27, 0xBC, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x18, 0x3A, 0x41, 0xE9,
+	0x1D, 0x32, 0x41, 0xE9,
+
+	0x2A, 0x40, 0x20, 0xE9,
+	0x56, 0x3D, 0x56, 0xDF,
+
+	0x46, 0x37, 0x46, 0xDF,
+	0x4E, 0x3F, 0x4E, 0xDF,
+
+	0x16, 0x30, 0x20, 0xE9,
+	0x4F, 0x3F, 0x4F, 0xDF,
+
+	0x47, 0x37, 0x47, 0xDF,
+	0x57, 0x3D, 0x57, 0xDF,
+
+	0x32, 0x32, 0x2D, 0xDF,
+	0x22, 0x22, 0x2D, 0xDF,
+
+	0x12, 0x12, 0x2D, 0xDF,
+	0x3A, 0x3A, 0x2D, 0xDF,
+
+	0x27, 0xCF, 0x74, 0xC2,
+	0x37, 0xCF, 0x74, 0xC4,
+
+	0x0A, 0x44, 0x4C, 0xB0,
+	0x02, 0x44, 0x54, 0xB0,
+
+	0x3D, 0xCF, 0x74, 0xC0,
+	0x34, 0x37, 0x20, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x38, 0x27, 0x20, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3C, 0x3D, 0x20, 0xE9,
+
+	0x2A, 0x44, 0x4C, 0xB2,
+	0x1A, 0x44, 0x54, 0xB2,
+
+	0x32, 0x80, 0x3A, 0xEA,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x27, 0xCF, 0x75, 0xC0,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x32, 0x31, 0x5F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x33, 0x39, 0x5F, 0xE9,
+
+	0x3D, 0xCF, 0x75, 0xC2,
+	0x37, 0xCF, 0x75, 0xC4,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0xA6, 0x27, 0x20, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0xA3, 0x3D, 0x20, 0xE9,
+
+	0x2A, 0x44, 0x4C, 0xB4,
+	0x1A, 0x44, 0x54, 0xB4,
+
+	0x0A, 0x45, 0x4D, 0xB0,
+	0x02, 0x45, 0x55, 0xB0,
+
+	0x88, 0x73, 0x5E, 0xE9,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0xA0, 0x37, 0x20, 0xE9,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x3E, 0x30, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3F, 0x38, 0x4F, 0xE9,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x3A, 0x31, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x3B, 0x39, 0x4F, 0xE9,
+
+	0x2A, 0x45, 0x4D, 0xB2,
+	0x1A, 0x45, 0x55, 0xB2,
+
+	0x0A, 0x45, 0x4D, 0xB4,
+	0x02, 0x45, 0x55, 0xB4,
+
+	0x27, 0xCF, 0x74, 0xC6,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0xA7, 0x30, 0x4F, 0xE9,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x9C, 0x27, 0x20, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0xA8, 0x38, 0x4F, 0xE9,
+
+	0x2A, 0x44, 0x4C, 0xB6,
+	0x1A, 0x44, 0x54, 0xB6,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x36, 0x31, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x37, 0x39, 0x4F, 0xE9,
+
+	0x0A, 0x45, 0x4D, 0xB6,
+	0x02, 0x45, 0x55, 0xB6,
+
+	0x3D, 0xCF, 0x75, 0xC6,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x2A, 0x46, 0x4E, 0xBF,
+	0x1A, 0x46, 0x56, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0xA4, 0x31, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0xA5, 0x39, 0x4F, 0xE9,
+
+	0x31, 0x3D, 0x20, 0xE9,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x0A, 0x47, 0x4F, 0xBF,
+	0x02, 0x47, 0x57, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0xA1, 0x30, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0xA2, 0x38, 0x4F, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x9D, 0x31, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x9E, 0x39, 0x4F, 0xE9,
+
+	0x2A, 0x43, 0x4B, 0xBF,
+	0x1A, 0x43, 0x53, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x35, 0x30, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x39, 0x38, 0x4F, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x80, 0x31, 0x57, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x81, 0x39, 0x57, 0xE9,
+
+	0x37, 0x48, 0x50, 0xBD,
+	0x8A, 0x36, 0x20, 0xE9,
+
+	0x86, 0x76, 0x57, 0xE9,
+	0x8B, 0x3E, 0x20, 0xE9,
+
+	0x82, 0x30, 0x57, 0xE9,
+	0x87, 0x77, 0x57, 0xE9,
+
+	0x83, 0x38, 0x57, 0xE9,
+	0x35, 0x49, 0x51, 0xBD,
+
+	0x84, 0x31, 0x5E, 0xE9,
+	0x30, 0x1F, 0x5F, 0xE9,
+
+	0x85, 0x39, 0x5E, 0xE9,
+	0x57, 0x25, 0x20, 0xE9,
+
+	0x2B, 0x48, 0x20, 0xE9,
+	0x1D, 0x37, 0xE1, 0xEA,
+
+	0x1E, 0x35, 0xE1, 0xEA,
+	0x00, 0xE0,
+	0x26, 0x77,
+
+	0x24, 0x49, 0x20, 0xE9,
+	0x99, 0xFF, 0x20, 0xEA,
+
+	0x16, 0x26, 0x20, 0xE9,
+	0x57, 0x2E, 0xBF, 0xEA,
+
+	0x1C, 0x46, 0xA0, 0xE8,
+	0x23, 0x4E, 0xA0, 0xE8,
+
+	0x2B, 0x56, 0xA0, 0xE8,
+	0x1D, 0x47, 0xA0, 0xE8,
+
+	0x24, 0x4F, 0xA0, 0xE8,
+	0x2C, 0x57, 0xA0, 0xE8,
+
+	0x1C, 0x00,
+	0x23, 0x00,
+	0x2B, 0x00,
+	0x00, 0xE0,
+
+	0x1D, 0x00,
+	0x24, 0x00,
+	0x2C, 0x00,
+	0x00, 0xE0,
+
+	0x1C, 0x65,
+	0x23, 0x65,
+	0x2B, 0x65,
+	0x00, 0xE0,
+
+	0x1D, 0x65,
+	0x24, 0x65,
+	0x2C, 0x65,
+	0x00, 0xE0,
+
+	0x1C, 0x23, 0x60, 0xEC,
+	0x36, 0xD7, 0x36, 0xAD,
+
+	0x2B, 0x80, 0x60, 0xEC,
+	0x1D, 0x24, 0x60, 0xEC,
+
+	0x3E, 0xD7, 0x3E, 0xAD,
+	0x2C, 0x80, 0x60, 0xEC,
+
+	0x1C, 0x2B, 0xDE, 0xE8,
+	0x23, 0x80, 0xDE, 0xE8,
+
+	0x36, 0x80, 0x36, 0xBD,
+	0x3E, 0x80, 0x3E, 0xBD,
+
+	0x33, 0xD7, 0x1C, 0xBD,
+	0x3B, 0xD7, 0x23, 0xBD,
+
+	0x46, 0x80, 0x46, 0xCF,
+	0x4F, 0x80, 0x4F, 0xCF,
+
+	0x56, 0x33, 0x56, 0xCF,
+	0x47, 0x3B, 0x47, 0xCF,
+
+	0xC1, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x4E, 0x33, 0x4E, 0xCF,
+	0x57, 0x3B, 0x57, 0xCF,
+
+	0x87, 0xFF, 0x20, 0xEA,
+	0x57, 0xC0, 0xBF, 0xEA,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+};
+
+static unsigned char warp_g400_tgzsf[] = {
+
+	0x00, 0x88, 0x98, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+	0xFF, 0x80, 0xC0, 0xE9,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x22, 0x40, 0x48, 0xBF,
+	0x2A, 0x40, 0x50, 0xBF,
+
+	0x32, 0x41, 0x49, 0xBF,
+	0x3A, 0x41, 0x51, 0xBF,
+
+	0xC3, 0x6B,
+	0xCB, 0x6B,
+	0x00, 0x88, 0x98, 0xE9,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x96, 0xE2,
+	0x41, 0x04,
+
+	0x7B, 0x43, 0xA0, 0xE8,
+	0x73, 0x4B, 0xA0, 0xE8,
+
+	0xAD, 0xEE, 0x29, 0x9F,
+	0x00, 0xE0,
+	0x49, 0x04,
+
+	0x90, 0xE2,
+	0x51, 0x04,
+	0x31, 0x46, 0xB1, 0xE8,
+
+	0x49, 0x41, 0xC0, 0xEC,
+	0x39, 0x57, 0xB1, 0xE8,
+
+	0x00, 0x04,
+	0x46, 0xE2,
+	0x73, 0x53, 0xA0, 0xE8,
+
+	0x51, 0x41, 0xC0, 0xEC,
+	0x31, 0x00,
+	0x39, 0x00,
+
+	0x6A, 0x80, 0x15, 0xEA,
+	0x08, 0x04,
+	0x10, 0x04,
+
+	0x51, 0x49, 0xC0, 0xEC,
+	0x2F, 0x41, 0x60, 0xEA,
+
+	0x31, 0x20,
+	0x39, 0x20,
+	0x1F, 0x42, 0xA0, 0xE8,
+
+	0x2A, 0x42, 0x4A, 0xBF,
+	0x27, 0x4A, 0xA0, 0xE8,
+
+	0x1A, 0x42, 0x52, 0xBF,
+	0x1E, 0x49, 0x60, 0xEA,
+
+	0x73, 0x7B, 0xC8, 0xEC,
+	0x26, 0x51, 0x60, 0xEA,
+
+	0x32, 0x40, 0x48, 0xBD,
+	0x22, 0x40, 0x50, 0xBD,
+
+	0x12, 0x41, 0x49, 0xBD,
+	0x3A, 0x41, 0x51, 0xBD,
+
+	0xBF, 0x2F, 0x26, 0xBD,
+	0x00, 0xE0,
+	0x7B, 0x72,
+
+	0x32, 0x20,
+	0x22, 0x20,
+	0x12, 0x20,
+	0x3A, 0x20,
+
+	0x46, 0x31, 0x46, 0xBF,
+	0x4E, 0x31, 0x4E, 0xBF,
+
+	0xB3, 0xE2, 0x2D, 0x9F,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x56, 0x31, 0x56, 0xBF,
+	0x47, 0x39, 0x47, 0xBF,
+
+	0x4F, 0x39, 0x4F, 0xBF,
+	0x57, 0x39, 0x57, 0xBF,
+
+	0x5C, 0x80, 0x07, 0xEA,
+	0x24, 0x41, 0x20, 0xE9,
+
+	0x42, 0x73, 0xF8, 0xEC,
+	0x00, 0xE0,
+	0x2D, 0x73,
+
+	0x33, 0x72,
+	0x0C, 0xE3,
+	0xA5, 0x2F, 0x1E, 0xBD,
+
+	0x43, 0x43, 0x2D, 0xDF,
+	0x4B, 0x4B, 0x2D, 0xDF,
+
+	0xAE, 0x1E, 0x26, 0xBD,
+	0x58, 0xE3,
+	0x33, 0x66,
+
+	0x53, 0x53, 0x2D, 0xDF,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0xB8, 0x38, 0x33, 0xBF,
+	0x00, 0xE0,
+	0x59, 0xE3,
+
+	0x1E, 0x12, 0x41, 0xE9,
+	0x1A, 0x22, 0x41, 0xE9,
+
+	0x2B, 0x40, 0x3D, 0xE9,
+	0x3F, 0x4B, 0xA0, 0xE8,
+
+	0x2D, 0x73,
+	0x30, 0x76,
+	0x05, 0x80, 0x3D, 0xEA,
+
+	0x37, 0x43, 0xA0, 0xE8,
+	0x3D, 0x53, 0xA0, 0xE8,
+
+	0x48, 0x70, 0xF8, 0xEC,
+	0x2B, 0x48, 0x3C, 0xE9,
+
+	0x1F, 0x27, 0xBC, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x15, 0xC0, 0x20, 0xE9,
+	0x15, 0xC0, 0x20, 0xE9,
+
+	0x18, 0x3A, 0x41, 0xE9,
+	0x1D, 0x32, 0x41, 0xE9,
+
+	0x2A, 0x40, 0x20, 0xE9,
+	0x56, 0x3D, 0x56, 0xDF,
+
+	0x46, 0x37, 0x46, 0xDF,
+	0x4E, 0x3F, 0x4E, 0xDF,
+
+	0x16, 0x30, 0x20, 0xE9,
+	0x4F, 0x3F, 0x4F, 0xDF,
+
+	0x47, 0x37, 0x47, 0xDF,
+	0x57, 0x3D, 0x57, 0xDF,
+
+	0x32, 0x32, 0x2D, 0xDF,
+	0x22, 0x22, 0x2D, 0xDF,
+
+	0x12, 0x12, 0x2D, 0xDF,
+	0x3A, 0x3A, 0x2D, 0xDF,
+
+	0x27, 0xCF, 0x74, 0xC2,
+	0x37, 0xCF, 0x74, 0xC4,
+
+	0x0A, 0x44, 0x4C, 0xB0,
+	0x02, 0x44, 0x54, 0xB0,
+
+	0x3D, 0xCF, 0x74, 0xC0,
+	0x34, 0x37, 0x20, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x38, 0x27, 0x20, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3C, 0x3D, 0x20, 0xE9,
+
+	0x2A, 0x44, 0x4C, 0xB2,
+	0x1A, 0x44, 0x54, 0xB2,
+
+	0x2E, 0x80, 0x3A, 0xEA,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x27, 0xCF, 0x75, 0xC0,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x32, 0x31, 0x5F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x33, 0x39, 0x5F, 0xE9,
+
+	0x3D, 0xCF, 0x75, 0xC2,
+	0x37, 0xCF, 0x75, 0xC4,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0xA6, 0x27, 0x20, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0xA3, 0x3D, 0x20, 0xE9,
+
+	0x2A, 0x44, 0x4C, 0xB4,
+	0x1A, 0x44, 0x54, 0xB4,
+
+	0x0A, 0x45, 0x4D, 0xB0,
+	0x02, 0x45, 0x55, 0xB0,
+
+	0x88, 0x73, 0x5E, 0xE9,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0xA0, 0x37, 0x20, 0xE9,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x3E, 0x30, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x3F, 0x38, 0x4F, 0xE9,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x3A, 0x31, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x3B, 0x39, 0x4F, 0xE9,
+
+	0x2A, 0x45, 0x4D, 0xB2,
+	0x1A, 0x45, 0x55, 0xB2,
+
+	0x0A, 0x45, 0x4D, 0xB4,
+	0x02, 0x45, 0x55, 0xB4,
+
+	0x27, 0xCF, 0x75, 0xC6,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0xA7, 0x30, 0x4F, 0xE9,
+	0x0A, 0x20,
+	0x02, 0x20,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x31, 0x27, 0x20, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0xA8, 0x38, 0x4F, 0xE9,
+
+	0x2A, 0x45, 0x4D, 0xB6,
+	0x1A, 0x45, 0x55, 0xB6,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x36, 0x31, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x37, 0x39, 0x4F, 0xE9,
+
+	0x00, 0x80, 0x00, 0xE8,
+	0x2A, 0x20,
+	0x1A, 0x20,
+
+	0x2A, 0x46, 0x4E, 0xBF,
+	0x1A, 0x46, 0x56, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0xA4, 0x31, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0xA5, 0x39, 0x4F, 0xE9,
+
+	0x0A, 0x47, 0x4F, 0xBF,
+	0x02, 0x47, 0x57, 0xBF,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0xA1, 0x30, 0x4F, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0xA2, 0x38, 0x4F, 0xE9,
+
+	0x2A, 0x43, 0x4B, 0xBF,
+	0x1A, 0x43, 0x53, 0xBF,
+
+	0x30, 0x50, 0x2E, 0x9F,
+	0x35, 0x31, 0x4F, 0xE9,
+
+	0x38, 0x21, 0x2C, 0x9F,
+	0x39, 0x39, 0x4F, 0xE9,
+
+	0x31, 0x53, 0x2F, 0x9F,
+	0x80, 0x31, 0x57, 0xE9,
+
+	0x39, 0xE5, 0x2C, 0x9F,
+	0x81, 0x39, 0x57, 0xE9,
+
+	0x37, 0x48, 0x50, 0xBD,
+	0x8A, 0x36, 0x20, 0xE9,
+
+	0x86, 0x76, 0x57, 0xE9,
+	0x8B, 0x3E, 0x20, 0xE9,
+
+	0x82, 0x30, 0x57, 0xE9,
+	0x87, 0x77, 0x57, 0xE9,
+
+	0x83, 0x38, 0x57, 0xE9,
+	0x35, 0x49, 0x51, 0xBD,
+
+	0x84, 0x31, 0x5E, 0xE9,
+	0x30, 0x1F, 0x5F, 0xE9,
+
+	0x85, 0x39, 0x5E, 0xE9,
+	0x57, 0x25, 0x20, 0xE9,
+
+	0x2B, 0x48, 0x20, 0xE9,
+	0x1D, 0x37, 0xE1, 0xEA,
+
+	0x1E, 0x35, 0xE1, 0xEA,
+	0x00, 0xE0,
+	0x26, 0x77,
+
+	0x24, 0x49, 0x20, 0xE9,
+	0x9D, 0xFF, 0x20, 0xEA,
+
+	0x16, 0x26, 0x20, 0xE9,
+	0x57, 0x2E, 0xBF, 0xEA,
+
+	0x1C, 0x46, 0xA0, 0xE8,
+	0x23, 0x4E, 0xA0, 0xE8,
+
+	0x2B, 0x56, 0xA0, 0xE8,
+	0x1D, 0x47, 0xA0, 0xE8,
+
+	0x24, 0x4F, 0xA0, 0xE8,
+	0x2C, 0x57, 0xA0, 0xE8,
+
+	0x1C, 0x00,
+	0x23, 0x00,
+	0x2B, 0x00,
+	0x00, 0xE0,
+
+	0x1D, 0x00,
+	0x24, 0x00,
+	0x2C, 0x00,
+	0x00, 0xE0,
+
+	0x1C, 0x65,
+	0x23, 0x65,
+	0x2B, 0x65,
+	0x00, 0xE0,
+
+	0x1D, 0x65,
+	0x24, 0x65,
+	0x2C, 0x65,
+	0x00, 0xE0,
+
+	0x1C, 0x23, 0x60, 0xEC,
+	0x36, 0xD7, 0x36, 0xAD,
+
+	0x2B, 0x80, 0x60, 0xEC,
+	0x1D, 0x24, 0x60, 0xEC,
+
+	0x3E, 0xD7, 0x3E, 0xAD,
+	0x2C, 0x80, 0x60, 0xEC,
+
+	0x1C, 0x2B, 0xDE, 0xE8,
+	0x23, 0x80, 0xDE, 0xE8,
+
+	0x36, 0x80, 0x36, 0xBD,
+	0x3E, 0x80, 0x3E, 0xBD,
+
+	0x33, 0xD7, 0x1C, 0xBD,
+	0x3B, 0xD7, 0x23, 0xBD,
+
+	0x46, 0x80, 0x46, 0xCF,
+	0x4F, 0x80, 0x4F, 0xCF,
+
+	0x56, 0x33, 0x56, 0xCF,
+	0x47, 0x3B, 0x47, 0xCF,
+
+	0xC5, 0xFF, 0x20, 0xEA,
+	0x00, 0x80, 0x00, 0xE8,
+
+	0x4E, 0x33, 0x4E, 0xCF,
+	0x57, 0x3B, 0x57, 0xCF,
+
+	0x8B, 0xFF, 0x20, 0xEA,
+	0x57, 0xC0, 0xBF, 0xEA,
+
+	0x00, 0x80, 0xA0, 0xE9,
+	0x00, 0x00, 0xD8, 0xEC,
+
+};
diff --git a/drivers/gpu/drm/mga/mga_warp.c b/drivers/gpu/drm/mga/mga_warp.c
new file mode 100644
index 000000000000..651b93c8ab5d
--- /dev/null
+++ b/drivers/gpu/drm/mga/mga_warp.c
@@ -0,0 +1,193 @@
+/* mga_warp.c -- Matrox G200/G400 WARP engine management -*- linux-c -*-
+ * Created: Thu Jan 11 21:29:32 2001 by gareth@valinux.com
+ *
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Gareth Hughes <gareth@valinux.com>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "mga_drm.h"
+#include "mga_drv.h"
+#include "mga_ucode.h"
+
+#define MGA_WARP_CODE_ALIGN		256	/* in bytes */
+
+#define WARP_UCODE_SIZE( which )					\
+	((sizeof(which) / MGA_WARP_CODE_ALIGN + 1) * MGA_WARP_CODE_ALIGN)
+
+#define WARP_UCODE_INSTALL( which, where )				\
+do {									\
+	DRM_DEBUG( " pcbase = 0x%08lx  vcbase = %p\n", pcbase, vcbase );\
+	dev_priv->warp_pipe_phys[where] = pcbase;			\
+	memcpy( vcbase, which, sizeof(which) );				\
+	pcbase += WARP_UCODE_SIZE( which );				\
+	vcbase += WARP_UCODE_SIZE( which );				\
+} while (0)
+
+static const unsigned int mga_warp_g400_microcode_size =
+    (WARP_UCODE_SIZE(warp_g400_tgz) +
+     WARP_UCODE_SIZE(warp_g400_tgza) +
+     WARP_UCODE_SIZE(warp_g400_tgzaf) +
+     WARP_UCODE_SIZE(warp_g400_tgzf) +
+     WARP_UCODE_SIZE(warp_g400_tgzs) +
+     WARP_UCODE_SIZE(warp_g400_tgzsa) +
+     WARP_UCODE_SIZE(warp_g400_tgzsaf) +
+     WARP_UCODE_SIZE(warp_g400_tgzsf) +
+     WARP_UCODE_SIZE(warp_g400_t2gz) +
+     WARP_UCODE_SIZE(warp_g400_t2gza) +
+     WARP_UCODE_SIZE(warp_g400_t2gzaf) +
+     WARP_UCODE_SIZE(warp_g400_t2gzf) +
+     WARP_UCODE_SIZE(warp_g400_t2gzs) +
+     WARP_UCODE_SIZE(warp_g400_t2gzsa) +
+     WARP_UCODE_SIZE(warp_g400_t2gzsaf) + WARP_UCODE_SIZE(warp_g400_t2gzsf));
+
+static const unsigned int mga_warp_g200_microcode_size =
+    (WARP_UCODE_SIZE(warp_g200_tgz) +
+     WARP_UCODE_SIZE(warp_g200_tgza) +
+     WARP_UCODE_SIZE(warp_g200_tgzaf) +
+     WARP_UCODE_SIZE(warp_g200_tgzf) +
+     WARP_UCODE_SIZE(warp_g200_tgzs) +
+     WARP_UCODE_SIZE(warp_g200_tgzsa) +
+     WARP_UCODE_SIZE(warp_g200_tgzsaf) + WARP_UCODE_SIZE(warp_g200_tgzsf));
+
+unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv)
+{
+	switch (dev_priv->chipset) {
+	case MGA_CARD_TYPE_G400:
+	case MGA_CARD_TYPE_G550:
+		return PAGE_ALIGN(mga_warp_g400_microcode_size);
+	case MGA_CARD_TYPE_G200:
+		return PAGE_ALIGN(mga_warp_g200_microcode_size);
+	default:
+		return 0;
+	}
+}
+
+static int mga_warp_install_g400_microcode(drm_mga_private_t * dev_priv)
+{
+	unsigned char *vcbase = dev_priv->warp->handle;
+	unsigned long pcbase = dev_priv->warp->offset;
+
+	memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
+
+	WARP_UCODE_INSTALL(warp_g400_tgz, MGA_WARP_TGZ);
+	WARP_UCODE_INSTALL(warp_g400_tgzf, MGA_WARP_TGZF);
+	WARP_UCODE_INSTALL(warp_g400_tgza, MGA_WARP_TGZA);
+	WARP_UCODE_INSTALL(warp_g400_tgzaf, MGA_WARP_TGZAF);
+	WARP_UCODE_INSTALL(warp_g400_tgzs, MGA_WARP_TGZS);
+	WARP_UCODE_INSTALL(warp_g400_tgzsf, MGA_WARP_TGZSF);
+	WARP_UCODE_INSTALL(warp_g400_tgzsa, MGA_WARP_TGZSA);
+	WARP_UCODE_INSTALL(warp_g400_tgzsaf, MGA_WARP_TGZSAF);
+
+	WARP_UCODE_INSTALL(warp_g400_t2gz, MGA_WARP_T2GZ);
+	WARP_UCODE_INSTALL(warp_g400_t2gzf, MGA_WARP_T2GZF);
+	WARP_UCODE_INSTALL(warp_g400_t2gza, MGA_WARP_T2GZA);
+	WARP_UCODE_INSTALL(warp_g400_t2gzaf, MGA_WARP_T2GZAF);
+	WARP_UCODE_INSTALL(warp_g400_t2gzs, MGA_WARP_T2GZS);
+	WARP_UCODE_INSTALL(warp_g400_t2gzsf, MGA_WARP_T2GZSF);
+	WARP_UCODE_INSTALL(warp_g400_t2gzsa, MGA_WARP_T2GZSA);
+	WARP_UCODE_INSTALL(warp_g400_t2gzsaf, MGA_WARP_T2GZSAF);
+
+	return 0;
+}
+
+static int mga_warp_install_g200_microcode(drm_mga_private_t * dev_priv)
+{
+	unsigned char *vcbase = dev_priv->warp->handle;
+	unsigned long pcbase = dev_priv->warp->offset;
+
+	memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
+
+	WARP_UCODE_INSTALL(warp_g200_tgz, MGA_WARP_TGZ);
+	WARP_UCODE_INSTALL(warp_g200_tgzf, MGA_WARP_TGZF);
+	WARP_UCODE_INSTALL(warp_g200_tgza, MGA_WARP_TGZA);
+	WARP_UCODE_INSTALL(warp_g200_tgzaf, MGA_WARP_TGZAF);
+	WARP_UCODE_INSTALL(warp_g200_tgzs, MGA_WARP_TGZS);
+	WARP_UCODE_INSTALL(warp_g200_tgzsf, MGA_WARP_TGZSF);
+	WARP_UCODE_INSTALL(warp_g200_tgzsa, MGA_WARP_TGZSA);
+	WARP_UCODE_INSTALL(warp_g200_tgzsaf, MGA_WARP_TGZSAF);
+
+	return 0;
+}
+
+int mga_warp_install_microcode(drm_mga_private_t * dev_priv)
+{
+	const unsigned int size = mga_warp_microcode_size(dev_priv);
+
+	DRM_DEBUG("MGA ucode size = %u bytes\n", size);
+	if (size > dev_priv->warp->size) {
+		DRM_ERROR("microcode too large! (%u > %lu)\n",
+			  size, dev_priv->warp->size);
+		return -ENOMEM;
+	}
+
+	switch (dev_priv->chipset) {
+	case MGA_CARD_TYPE_G400:
+	case MGA_CARD_TYPE_G550:
+		return mga_warp_install_g400_microcode(dev_priv);
+	case MGA_CARD_TYPE_G200:
+		return mga_warp_install_g200_microcode(dev_priv);
+	default:
+		return -EINVAL;
+	}
+}
+
+#define WMISC_EXPECTED		(MGA_WUCODECACHE_ENABLE | MGA_WMASTER_ENABLE)
+
+int mga_warp_init(drm_mga_private_t * dev_priv)
+{
+	u32 wmisc;
+
+	/* FIXME: Get rid of these damned magic numbers...
+	 */
+	switch (dev_priv->chipset) {
+	case MGA_CARD_TYPE_G400:
+	case MGA_CARD_TYPE_G550:
+		MGA_WRITE(MGA_WIADDR2, MGA_WMODE_SUSPEND);
+		MGA_WRITE(MGA_WGETMSB, 0x00000E00);
+		MGA_WRITE(MGA_WVRTXSZ, 0x00001807);
+		MGA_WRITE(MGA_WACCEPTSEQ, 0x18000000);
+		break;
+	case MGA_CARD_TYPE_G200:
+		MGA_WRITE(MGA_WIADDR, MGA_WMODE_SUSPEND);
+		MGA_WRITE(MGA_WGETMSB, 0x1606);
+		MGA_WRITE(MGA_WVRTXSZ, 7);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	MGA_WRITE(MGA_WMISC, (MGA_WUCODECACHE_ENABLE |
+			      MGA_WMASTER_ENABLE | MGA_WCACHEFLUSH_ENABLE));
+	wmisc = MGA_READ(MGA_WMISC);
+	if (wmisc != WMISC_EXPECTED) {
+		DRM_ERROR("WARP engine config failed! 0x%x != 0x%x\n",
+			  wmisc, WMISC_EXPECTED);
+		return -EINVAL;
+	}
+
+	return 0;
+}
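
For reference, the WARP_UCODE_SIZE() macro in mga_warp.c above pads every microcode image out to the next MGA_WARP_CODE_ALIGN (256-byte) boundary; because the integer division is followed by "+ 1", an image whose size is already a multiple of 256 still receives one extra alignment unit of slack. The following is only a minimal stand-alone sketch of that arithmetic, not part of the commit: warp_ucode_slot_size() is a hypothetical helper that takes a length instead of an array, and the example sizes are made up.

#include <stdio.h>

#define MGA_WARP_CODE_ALIGN	256	/* in bytes, as in mga_warp.c */

/* Same rounding as WARP_UCODE_SIZE(), but taking a length rather than an
 * array so it can be exercised with arbitrary (hypothetical) sizes.
 */
static unsigned int warp_ucode_slot_size(unsigned int len)
{
	return (len / MGA_WARP_CODE_ALIGN + 1) * MGA_WARP_CODE_ALIGN;
}

int main(void)
{
	printf("%u -> %u\n", 1000u, warp_ucode_slot_size(1000));	/* 1024 */
	printf("%u -> %u\n", 1024u, warp_ucode_slot_size(1024));	/* 1280 */
	return 0;
}
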
diff --git a/drivers/gpu/drm/r128/Makefile b/drivers/gpu/drm/r128/Makefile
new file mode 100644
index 000000000000..1cc72ae3a880
--- /dev/null
+++ b/drivers/gpu/drm/r128/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+r128-y   := r128_drv.o r128_cce.o r128_state.o r128_irq.o
+
+r128-$(CONFIG_COMPAT)   += r128_ioc32.o
+
+obj-$(CONFIG_DRM_R128)	+= r128.o
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
new file mode 100644
index 000000000000..c31afbde62e7
--- /dev/null
+++ b/drivers/gpu/drm/r128/r128_cce.c
@@ -0,0 +1,935 @@
+/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*-
+ * Created: Wed Apr  5 19:24:19 2000 by kevin@precisioninsight.com
+ */
+/*
+ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Gareth Hughes <gareth@valinux.com>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "r128_drm.h"
+#include "r128_drv.h"
+
+#define R128_FIFO_DEBUG		0
+
+/* CCE microcode (from ATI) */
+static u32 r128_cce_microcode[] = {
+	0, 276838400, 0, 268449792, 2, 142, 2, 145, 0, 1076765731, 0,
+	1617039951, 0, 774592877, 0, 1987540286, 0, 2307490946U, 0,
+	599558925, 0, 589505315, 0, 596487092, 0, 589505315, 1,
+	11544576, 1, 206848, 1, 311296, 1, 198656, 2, 912273422, 11,
+	262144, 0, 0, 1, 33559837, 1, 7438, 1, 14809, 1, 6615, 12, 28,
+	1, 6614, 12, 28, 2, 23, 11, 18874368, 0, 16790922, 1, 409600, 9,
+	30, 1, 147854772, 16, 420483072, 3, 8192, 0, 10240, 1, 198656,
+	1, 15630, 1, 51200, 10, 34858, 9, 42, 1, 33559823, 2, 10276, 1,
+	15717, 1, 15718, 2, 43, 1, 15936948, 1, 570480831, 1, 14715071,
+	12, 322123831, 1, 33953125, 12, 55, 1, 33559908, 1, 15718, 2,
+	46, 4, 2099258, 1, 526336, 1, 442623, 4, 4194365, 1, 509952, 1,
+	459007, 3, 0, 12, 92, 2, 46, 12, 176, 1, 15734, 1, 206848, 1,
+	18432, 1, 133120, 1, 100670734, 1, 149504, 1, 165888, 1,
+	15975928, 1, 1048576, 6, 3145806, 1, 15715, 16, 2150645232U, 2,
+	268449859, 2, 10307, 12, 176, 1, 15734, 1, 15735, 1, 15630, 1,
+	15631, 1, 5253120, 6, 3145810, 16, 2150645232U, 1, 15864, 2, 82,
+	1, 343310, 1, 1064207, 2, 3145813, 1, 15728, 1, 7817, 1, 15729,
+	3, 15730, 12, 92, 2, 98, 1, 16168, 1, 16167, 1, 16002, 1, 16008,
+	1, 15974, 1, 15975, 1, 15990, 1, 15976, 1, 15977, 1, 15980, 0,
+	15981, 1, 10240, 1, 5253120, 1, 15720, 1, 198656, 6, 110, 1,
+	180224, 1, 103824738, 2, 112, 2, 3145839, 0, 536885440, 1,
+	114880, 14, 125, 12, 206975, 1, 33559995, 12, 198784, 0,
+	33570236, 1, 15803, 0, 15804, 3, 294912, 1, 294912, 3, 442370,
+	1, 11544576, 0, 811612160, 1, 12593152, 1, 11536384, 1,
+	14024704, 7, 310382726, 0, 10240, 1, 14796, 1, 14797, 1, 14793,
+	1, 14794, 0, 14795, 1, 268679168, 1, 9437184, 1, 268449792, 1,
+	198656, 1, 9452827, 1, 1075854602, 1, 1075854603, 1, 557056, 1,
+	114880, 14, 159, 12, 198784, 1, 1109409213, 12, 198783, 1,
+	1107312059, 12, 198784, 1, 1109409212, 2, 162, 1, 1075854781, 1,
+	1073757627, 1, 1075854780, 1, 540672, 1, 10485760, 6, 3145894,
+	16, 274741248, 9, 168, 3, 4194304, 3, 4209949, 0, 0, 0, 256, 14,
+	174, 1, 114857, 1, 33560007, 12, 176, 0, 10240, 1, 114858, 1,
+	33560018, 1, 114857, 3, 33560007, 1, 16008, 1, 114874, 1,
+	33560360, 1, 114875, 1, 33560154, 0, 15963, 0, 256, 0, 4096, 1,
+	409611, 9, 188, 0, 10240, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static int R128_READ_PLL(struct drm_device * dev, int addr)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+
+	R128_WRITE8(R128_CLOCK_CNTL_INDEX, addr & 0x1f);
+	return R128_READ(R128_CLOCK_CNTL_DATA);
+}
+
+#if R128_FIFO_DEBUG
+static void r128_status(drm_r128_private_t * dev_priv)
+{
+	printk("GUI_STAT           = 0x%08x\n",
+	       (unsigned int)R128_READ(R128_GUI_STAT));
+	printk("PM4_STAT           = 0x%08x\n",
+	       (unsigned int)R128_READ(R128_PM4_STAT));
+	printk("PM4_BUFFER_DL_WPTR = 0x%08x\n",
+	       (unsigned int)R128_READ(R128_PM4_BUFFER_DL_WPTR));
+	printk("PM4_BUFFER_DL_RPTR = 0x%08x\n",
+	       (unsigned int)R128_READ(R128_PM4_BUFFER_DL_RPTR));
+	printk("PM4_MICRO_CNTL     = 0x%08x\n",
+	       (unsigned int)R128_READ(R128_PM4_MICRO_CNTL));
+	printk("PM4_BUFFER_CNTL    = 0x%08x\n",
+	       (unsigned int)R128_READ(R128_PM4_BUFFER_CNTL));
+}
+#endif
+
+/* ================================================================
+ * Engine, FIFO control
+ */
+
+static int r128_do_pixcache_flush(drm_r128_private_t * dev_priv)
+{
+	u32 tmp;
+	int i;
+
+	tmp = R128_READ(R128_PC_NGUI_CTLSTAT) | R128_PC_FLUSH_ALL;
+	R128_WRITE(R128_PC_NGUI_CTLSTAT, tmp);
+
+	for (i = 0; i < dev_priv->usec_timeout; i++) {
+		if (!(R128_READ(R128_PC_NGUI_CTLSTAT) & R128_PC_BUSY)) {
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+
+#if R128_FIFO_DEBUG
+	DRM_ERROR("failed!\n");
+#endif
+	return -EBUSY;
+}
+
+static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries)
+{
+	int i;
+
+	for (i = 0; i < dev_priv->usec_timeout; i++) {
+		int slots = R128_READ(R128_GUI_STAT) & R128_GUI_FIFOCNT_MASK;
+		if (slots >= entries)
+			return 0;
+		DRM_UDELAY(1);
+	}
+
+#if R128_FIFO_DEBUG
+	DRM_ERROR("failed!\n");
+#endif
+	return -EBUSY;
+}
+
+static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv)
+{
+	int i, ret;
+
+	ret = r128_do_wait_for_fifo(dev_priv, 64);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < dev_priv->usec_timeout; i++) {
+		if (!(R128_READ(R128_GUI_STAT) & R128_GUI_ACTIVE)) {
+			r128_do_pixcache_flush(dev_priv);
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+
+#if R128_FIFO_DEBUG
+	DRM_ERROR("failed!\n");
+#endif
+	return -EBUSY;
+}
+
+/* ================================================================
+ * CCE control, initialization
+ */
+
+/* Load the microcode for the CCE */
+static void r128_cce_load_microcode(drm_r128_private_t * dev_priv)
+{
+	int i;
+
+	DRM_DEBUG("\n");
+
+	r128_do_wait_for_idle(dev_priv);
+
+	R128_WRITE(R128_PM4_MICROCODE_ADDR, 0);
+	for (i = 0; i < 256; i++) {
+		R128_WRITE(R128_PM4_MICROCODE_DATAH, r128_cce_microcode[i * 2]);
+		R128_WRITE(R128_PM4_MICROCODE_DATAL,
+			   r128_cce_microcode[i * 2 + 1]);
+	}
+}
+
+/* Flush any pending commands to the CCE.  This should only be used just
+ * prior to a wait for idle, as it informs the engine that the command
+ * stream is ending.
+ */
+static void r128_do_cce_flush(drm_r128_private_t * dev_priv)
+{
+	u32 tmp;
+
+	tmp = R128_READ(R128_PM4_BUFFER_DL_WPTR) | R128_PM4_BUFFER_DL_DONE;
+	R128_WRITE(R128_PM4_BUFFER_DL_WPTR, tmp);
+}
+
+/* Wait for the CCE to go idle.
+ */
+int r128_do_cce_idle(drm_r128_private_t * dev_priv)
+{
+	int i;
+
+	for (i = 0; i < dev_priv->usec_timeout; i++) {
+		if (GET_RING_HEAD(dev_priv) == dev_priv->ring.tail) {
+			int pm4stat = R128_READ(R128_PM4_STAT);
+			if (((pm4stat & R128_PM4_FIFOCNT_MASK) >=
+			     dev_priv->cce_fifo_size) &&
+			    !(pm4stat & (R128_PM4_BUSY |
+					 R128_PM4_GUI_ACTIVE))) {
+				return r128_do_pixcache_flush(dev_priv);
+			}
+		}
+		DRM_UDELAY(1);
+	}
+
+#if R128_FIFO_DEBUG
+	DRM_ERROR("failed!\n");
+	r128_status(dev_priv);
+#endif
+	return -EBUSY;
+}
+
+/* Start the Concurrent Command Engine.
+ */
+static void r128_do_cce_start(drm_r128_private_t * dev_priv)
+{
+	r128_do_wait_for_idle(dev_priv);
+
+	R128_WRITE(R128_PM4_BUFFER_CNTL,
+		   dev_priv->cce_mode | dev_priv->ring.size_l2qw
+		   | R128_PM4_BUFFER_CNTL_NOUPDATE);
+	R128_READ(R128_PM4_BUFFER_ADDR);	/* as per the sample code */
+	R128_WRITE(R128_PM4_MICRO_CNTL, R128_PM4_MICRO_FREERUN);
+
+	dev_priv->cce_running = 1;
+}
+
+/* Reset the Concurrent Command Engine.  This will not flush any pending
+ * commands, so you must wait for the CCE command stream to complete
+ * before calling this routine.
+ */
+static void r128_do_cce_reset(drm_r128_private_t * dev_priv)
+{
+	R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0);
+	R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0);
+	dev_priv->ring.tail = 0;
+}
+
+/* Stop the Concurrent Command Engine.  This will not flush any pending
+ * commands, so you must flush the command stream and wait for the CCE
+ * to go idle before calling this routine.
+ */
+static void r128_do_cce_stop(drm_r128_private_t * dev_priv)
+{
+	R128_WRITE(R128_PM4_MICRO_CNTL, 0);
+	R128_WRITE(R128_PM4_BUFFER_CNTL,
+		   R128_PM4_NONPM4 | R128_PM4_BUFFER_CNTL_NOUPDATE);
+
+	dev_priv->cce_running = 0;
+}
+
+/* Reset the engine.  This will stop the CCE if it is running.
+ */
+static int r128_do_engine_reset(struct drm_device * dev)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	u32 clock_cntl_index, mclk_cntl, gen_reset_cntl;
+
+	r128_do_pixcache_flush(dev_priv);
+
+	clock_cntl_index = R128_READ(R128_CLOCK_CNTL_INDEX);
+	mclk_cntl = R128_READ_PLL(dev, R128_MCLK_CNTL);
+
+	R128_WRITE_PLL(R128_MCLK_CNTL,
+		       mclk_cntl | R128_FORCE_GCP | R128_FORCE_PIPE3D_CP);
+
+	gen_reset_cntl = R128_READ(R128_GEN_RESET_CNTL);
+
+	/* Taken from the sample code - do not change */
+	R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl | R128_SOFT_RESET_GUI);
+	R128_READ(R128_GEN_RESET_CNTL);
+	R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl & ~R128_SOFT_RESET_GUI);
+	R128_READ(R128_GEN_RESET_CNTL);
+
+	R128_WRITE_PLL(R128_MCLK_CNTL, mclk_cntl);
+	R128_WRITE(R128_CLOCK_CNTL_INDEX, clock_cntl_index);
+	R128_WRITE(R128_GEN_RESET_CNTL, gen_reset_cntl);
+
+	/* Reset the CCE ring */
+	r128_do_cce_reset(dev_priv);
+
+	/* The CCE is no longer running after an engine reset */
+	dev_priv->cce_running = 0;
+
+	/* Reset any pending vertex, indirect buffers */
+	r128_freelist_reset(dev);
+
+	return 0;
+}
+
+static void r128_cce_init_ring_buffer(struct drm_device * dev,
+				      drm_r128_private_t * dev_priv)
+{
+	u32 ring_start;
+	u32 tmp;
+
+	DRM_DEBUG("\n");
+
+	/* The manual (p. 2) says this address is in "VM space".  This
+	 * means it's an offset from the start of AGP space.
+	 */
+#if __OS_HAS_AGP
+	if (!dev_priv->is_pci)
+		ring_start = dev_priv->cce_ring->offset - dev->agp->base;
+	else
+#endif
+		ring_start = dev_priv->cce_ring->offset -
+		    (unsigned long)dev->sg->virtual;
+
+	R128_WRITE(R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET);
+
+	R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0);
+	R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0);
+
+	/* Set watermark control */
+	R128_WRITE(R128_PM4_BUFFER_WM_CNTL,
+		   ((R128_WATERMARK_L / 4) << R128_WMA_SHIFT)
+		   | ((R128_WATERMARK_M / 4) << R128_WMB_SHIFT)
+		   | ((R128_WATERMARK_N / 4) << R128_WMC_SHIFT)
+		   | ((R128_WATERMARK_K / 64) << R128_WB_WM_SHIFT));
+
+	/* Force read.  Why?  Because it's in the examples... */
+	R128_READ(R128_PM4_BUFFER_ADDR);
+
+	/* Turn on bus mastering */
+	tmp = R128_READ(R128_BUS_CNTL) & ~R128_BUS_MASTER_DIS;
+	R128_WRITE(R128_BUS_CNTL, tmp);
+}
+
+static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
+{
+	drm_r128_private_t *dev_priv;
+
+	DRM_DEBUG("\n");
+
+	dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER);
+	if (dev_priv == NULL)
+		return -ENOMEM;
+
+	memset(dev_priv, 0, sizeof(drm_r128_private_t));
+
+	dev_priv->is_pci = init->is_pci;
+
+	if (dev_priv->is_pci && !dev->sg) {
+		DRM_ERROR("PCI GART memory not allocated!\n");
+		dev->dev_private = (void *)dev_priv;
+		r128_do_cleanup_cce(dev);
+		return -EINVAL;
+	}
+
+	dev_priv->usec_timeout = init->usec_timeout;
+	if (dev_priv->usec_timeout < 1 ||
+	    dev_priv->usec_timeout > R128_MAX_USEC_TIMEOUT) {
+		DRM_DEBUG("TIMEOUT problem!\n");
+		dev->dev_private = (void *)dev_priv;
+		r128_do_cleanup_cce(dev);
+		return -EINVAL;
+	}
+
+	dev_priv->cce_mode = init->cce_mode;
+
+	/* GH: Simple idle check.
+	 */
+	atomic_set(&dev_priv->idle_count, 0);
+
+	/* We don't support anything other than bus-mastering ring mode,
+	 * but the ring can be in either AGP or PCI space for the ring
+	 * read pointer.
+	 */
+	if ((init->cce_mode != R128_PM4_192BM) &&
+	    (init->cce_mode != R128_PM4_128BM_64INDBM) &&
+	    (init->cce_mode != R128_PM4_64BM_128INDBM) &&
+	    (init->cce_mode != R128_PM4_64BM_64VCBM_64INDBM)) {
+		DRM_DEBUG("Bad cce_mode!\n");
+		dev->dev_private = (void *)dev_priv;
+		r128_do_cleanup_cce(dev);
+		return -EINVAL;
+	}
+
+	switch (init->cce_mode) {
+	case R128_PM4_NONPM4:
+		dev_priv->cce_fifo_size = 0;
+		break;
+	case R128_PM4_192PIO:
+	case R128_PM4_192BM:
+		dev_priv->cce_fifo_size = 192;
+		break;
+	case R128_PM4_128PIO_64INDBM:
+	case R128_PM4_128BM_64INDBM:
+		dev_priv->cce_fifo_size = 128;
+		break;
+	case R128_PM4_64PIO_128INDBM:
+	case R128_PM4_64BM_128INDBM:
+	case R128_PM4_64PIO_64VCBM_64INDBM:
+	case R128_PM4_64BM_64VCBM_64INDBM:
+	case R128_PM4_64PIO_64VCPIO_64INDPIO:
+		dev_priv->cce_fifo_size = 64;
+		break;
+	}
+
+	switch (init->fb_bpp) {
+	case 16:
+		dev_priv->color_fmt = R128_DATATYPE_RGB565;
+		break;
+	case 32:
+	default:
+		dev_priv->color_fmt = R128_DATATYPE_ARGB8888;
+		break;
+	}
+	dev_priv->front_offset = init->front_offset;
+	dev_priv->front_pitch = init->front_pitch;
+	dev_priv->back_offset = init->back_offset;
+	dev_priv->back_pitch = init->back_pitch;
+
+	switch (init->depth_bpp) {
+	case 16:
+		dev_priv->depth_fmt = R128_DATATYPE_RGB565;
+		break;
+	case 24:
+	case 32:
+	default:
+		dev_priv->depth_fmt = R128_DATATYPE_ARGB8888;
+		break;
+	}
+	dev_priv->depth_offset = init->depth_offset;
+	dev_priv->depth_pitch = init->depth_pitch;
+	dev_priv->span_offset = init->span_offset;
+
+	dev_priv->front_pitch_offset_c = (((dev_priv->front_pitch / 8) << 21) |
+					  (dev_priv->front_offset >> 5));
+	dev_priv->back_pitch_offset_c = (((dev_priv->back_pitch / 8) << 21) |
+					 (dev_priv->back_offset >> 5));
+	dev_priv->depth_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) |
+					  (dev_priv->depth_offset >> 5) |
+					  R128_DST_TILE);
+	dev_priv->span_pitch_offset_c = (((dev_priv->depth_pitch / 8) << 21) |
+					 (dev_priv->span_offset >> 5));
+
+	dev_priv->sarea = drm_getsarea(dev);
+	if (!dev_priv->sarea) {
+		DRM_ERROR("could not find sarea!\n");
+		dev->dev_private = (void *)dev_priv;
+		r128_do_cleanup_cce(dev);
+		return -EINVAL;
+	}
+
+	dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
+	if (!dev_priv->mmio) {
+		DRM_ERROR("could not find mmio region!\n");
+		dev->dev_private = (void *)dev_priv;
+		r128_do_cleanup_cce(dev);
+		return -EINVAL;
+	}
+	dev_priv->cce_ring = drm_core_findmap(dev, init->ring_offset);
+	if (!dev_priv->cce_ring) {
+		DRM_ERROR("could not find cce ring region!\n");
+		dev->dev_private = (void *)dev_priv;
+		r128_do_cleanup_cce(dev);
+		return -EINVAL;
+	}
+	dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
+	if (!dev_priv->ring_rptr) {
+		DRM_ERROR("could not find ring read pointer!\n");
+		dev->dev_private = (void *)dev_priv;
+		r128_do_cleanup_cce(dev);
+		return -EINVAL;
+	}
+	dev->agp_buffer_token = init->buffers_offset;
+	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
+	if (!dev->agp_buffer_map) {
+		DRM_ERROR("could not find dma buffer region!\n");
+		dev->dev_private = (void *)dev_priv;
+		r128_do_cleanup_cce(dev);
+		return -EINVAL;
+	}
+
+	if (!dev_priv->is_pci) {
+		dev_priv->agp_textures =
+		    drm_core_findmap(dev, init->agp_textures_offset);
+		if (!dev_priv->agp_textures) {
+			DRM_ERROR("could not find agp texture region!\n");
+			dev->dev_private = (void *)dev_priv;
+			r128_do_cleanup_cce(dev);
+			return -EINVAL;
+		}
+	}
+
+	dev_priv->sarea_priv =
+	    (drm_r128_sarea_t *) ((u8 *) dev_priv->sarea->handle +
+				  init->sarea_priv_offset);
+
+#if __OS_HAS_AGP
+	if (!dev_priv->is_pci) {
+		drm_core_ioremap(dev_priv->cce_ring, dev);
+		drm_core_ioremap(dev_priv->ring_rptr, dev);
+		drm_core_ioremap(dev->agp_buffer_map, dev);
+		if (!dev_priv->cce_ring->handle ||
+		    !dev_priv->ring_rptr->handle ||
+		    !dev->agp_buffer_map->handle) {
+			DRM_ERROR("Could not ioremap agp regions!\n");
+			dev->dev_private = (void *)dev_priv;
+			r128_do_cleanup_cce(dev);
+			return -ENOMEM;
+		}
+	} else
+#endif
+	{
+		dev_priv->cce_ring->handle = (void *)dev_priv->cce_ring->offset;
+		dev_priv->ring_rptr->handle =
+		    (void *)dev_priv->ring_rptr->offset;
+		dev->agp_buffer_map->handle =
+		    (void *)dev->agp_buffer_map->offset;
+	}
+
+#if __OS_HAS_AGP
+	if (!dev_priv->is_pci)
+		dev_priv->cce_buffers_offset = dev->agp->base;
+	else
+#endif
+		dev_priv->cce_buffers_offset = (unsigned long)dev->sg->virtual;
+
+	dev_priv->ring.start = (u32 *) dev_priv->cce_ring->handle;
+	dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle
+			      + init->ring_size / sizeof(u32));
+	dev_priv->ring.size = init->ring_size;
+	dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+
+	dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
+
+	dev_priv->ring.high_mark = 128;
+
+	dev_priv->sarea_priv->last_frame = 0;
+	R128_WRITE(R128_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);
+
+	dev_priv->sarea_priv->last_dispatch = 0;
+	R128_WRITE(R128_LAST_DISPATCH_REG, dev_priv->sarea_priv->last_dispatch);
+
+#if __OS_HAS_AGP
+	if (dev_priv->is_pci) {
+#endif
+		dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
+		dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
+		dev_priv->gart_info.table_size = R128_PCIGART_TABLE_SIZE;
+		dev_priv->gart_info.addr = NULL;
+		dev_priv->gart_info.bus_addr = 0;
+		dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
+		if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
+			DRM_ERROR("failed to init PCI GART!\n");
+			dev->dev_private = (void *)dev_priv;
+			r128_do_cleanup_cce(dev);
+			return -ENOMEM;
+		}
+		R128_WRITE(R128_PCI_GART_PAGE, dev_priv->gart_info.bus_addr);
+#if __OS_HAS_AGP
+	}
+#endif
+
+	r128_cce_init_ring_buffer(dev, dev_priv);
+	r128_cce_load_microcode(dev_priv);
+
+	dev->dev_private = (void *)dev_priv;
+
+	r128_do_engine_reset(dev);
+
+	return 0;
+}
+
+int r128_do_cleanup_cce(struct drm_device * dev)
+{
+
+	/* Make sure interrupts are disabled here because the uninstall ioctl
+	 * may not have been called from userspace and after dev_private
+	 * is freed, it's too late.
+	 */
+	if (dev->irq_enabled)
+		drm_irq_uninstall(dev);
+
+	if (dev->dev_private) {
+		drm_r128_private_t *dev_priv = dev->dev_private;
+
+#if __OS_HAS_AGP
+		if (!dev_priv->is_pci) {
+			if (dev_priv->cce_ring != NULL)
+				drm_core_ioremapfree(dev_priv->cce_ring, dev);
+			if (dev_priv->ring_rptr != NULL)
+				drm_core_ioremapfree(dev_priv->ring_rptr, dev);
+			if (dev->agp_buffer_map != NULL) {
+				drm_core_ioremapfree(dev->agp_buffer_map, dev);
+				dev->agp_buffer_map = NULL;
+			}
+		} else
+#endif
+		{
+			if (dev_priv->gart_info.bus_addr)
+				if (!drm_ati_pcigart_cleanup(dev,
+							&dev_priv->gart_info))
+					DRM_ERROR
+					    ("failed to cleanup PCI GART!\n");
+		}
+
+		drm_free(dev->dev_private, sizeof(drm_r128_private_t),
+			 DRM_MEM_DRIVER);
+		dev->dev_private = NULL;
+	}
+
+	return 0;
+}
+
+int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_r128_init_t *init = data;
+
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	switch (init->func) {
+	case R128_INIT_CCE:
+		return r128_do_init_cce(dev, init);
+	case R128_CLEANUP_CCE:
+		return r128_do_cleanup_cce(dev);
+	}
+
+	return -EINVAL;
+}
+
+int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4) {
+		DRM_DEBUG("while CCE running\n");
+		return 0;
+	}
+
+	r128_do_cce_start(dev_priv);
+
+	return 0;
+}
+
+/* Stop the CCE.  The engine must have been idled before calling this
+ * routine.
+ */
+int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_cce_stop_t *stop = data;
+	int ret;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	/* Flush any pending CCE commands.  This ensures any outstanding
+	 * commands are executed by the engine before we turn it off.
+	 */
+	if (stop->flush) {
+		r128_do_cce_flush(dev_priv);
+	}
+
+	/* If we fail to make the engine go idle, we return an error
+	 * code so that the DRM ioctl wrapper can try again.
+	 */
+	if (stop->idle) {
+		ret = r128_do_cce_idle(dev_priv);
+		if (ret)
+			return ret;
+	}
+
+	/* Finally, we can turn off the CCE.  If the engine isn't idle,
+	 * we will get some dropped triangles as they won't be fully
+	 * rendered before the CCE is shut down.
+	 */
+	r128_do_cce_stop(dev_priv);
+
+	/* Reset the engine */
+	r128_do_engine_reset(dev);
+
+	return 0;
+}
+
+/* Just reset the CCE ring.  Called as part of an X Server engine reset.
+ */
+int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (!dev_priv) {
+		DRM_DEBUG("called before init done\n");
+		return -EINVAL;
+	}
+
+	r128_do_cce_reset(dev_priv);
+
+	/* The CCE is no longer running after an engine reset */
+	dev_priv->cce_running = 0;
+
+	return 0;
+}
+
+int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (dev_priv->cce_running) {
+		r128_do_cce_flush(dev_priv);
+	}
+
+	return r128_do_cce_idle(dev_priv);
+}
+
+int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	return r128_do_engine_reset(dev);
+}
+
+int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	return -EINVAL;
+}
+
+/* ================================================================
+ * Freelist management
+ */
+#define R128_BUFFER_USED	0xffffffff
+#define R128_BUFFER_FREE	0
+
+#if 0
+static int r128_freelist_init(struct drm_device * dev)
+{
+	struct drm_device_dma *dma = dev->dma;
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	struct drm_buf *buf;
+	drm_r128_buf_priv_t *buf_priv;
+	drm_r128_freelist_t *entry;
+	int i;
+
+	dev_priv->head = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER);
+	if (dev_priv->head == NULL)
+		return -ENOMEM;
+
+	memset(dev_priv->head, 0, sizeof(drm_r128_freelist_t));
+	dev_priv->head->age = R128_BUFFER_USED;
+
+	for (i = 0; i < dma->buf_count; i++) {
+		buf = dma->buflist[i];
+		buf_priv = buf->dev_private;
+
+		entry = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER);
+		if (!entry)
+			return -ENOMEM;
+
+		entry->age = R128_BUFFER_FREE;
+		entry->buf = buf;
+		entry->prev = dev_priv->head;
+		entry->next = dev_priv->head->next;
+		if (!entry->next)
+			dev_priv->tail = entry;
+
+		buf_priv->discard = 0;
+		buf_priv->dispatched = 0;
+		buf_priv->list_entry = entry;
+
+		dev_priv->head->next = entry;
+
+		if (dev_priv->head->next)
+			dev_priv->head->next->prev = entry;
+	}
+
+	return 0;
+
+}
+#endif
+
+static struct drm_buf *r128_freelist_get(struct drm_device * dev)
+{
+	struct drm_device_dma *dma = dev->dma;
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_buf_priv_t *buf_priv;
+	struct drm_buf *buf;
+	int i, t;
+
+	/* FIXME: Optimize -- use freelist code */
+
+	for (i = 0; i < dma->buf_count; i++) {
+		buf = dma->buflist[i];
+		buf_priv = buf->dev_private;
+		if (!buf->file_priv)
+			return buf;
+	}
+
+	for (t = 0; t < dev_priv->usec_timeout; t++) {
+		u32 done_age = R128_READ(R128_LAST_DISPATCH_REG);
+
+		for (i = 0; i < dma->buf_count; i++) {
+			buf = dma->buflist[i];
+			buf_priv = buf->dev_private;
+			if (buf->pending && buf_priv->age <= done_age) {
+				/* The buffer has been processed, so it
+				 * can now be used.
+				 */
+				buf->pending = 0;
+				return buf;
+			}
+		}
+		DRM_UDELAY(1);
+	}
+
+	DRM_DEBUG("returning NULL!\n");
+	return NULL;
+}
+
+void r128_freelist_reset(struct drm_device * dev)
+{
+	struct drm_device_dma *dma = dev->dma;
+	int i;
+
+	for (i = 0; i < dma->buf_count; i++) {
+		struct drm_buf *buf = dma->buflist[i];
+		drm_r128_buf_priv_t *buf_priv = buf->dev_private;
+		buf_priv->age = 0;
+	}
+}
+
+/* ================================================================
+ * CCE command submission
+ */
+
+int r128_wait_ring(drm_r128_private_t * dev_priv, int n)
+{
+	drm_r128_ring_buffer_t *ring = &dev_priv->ring;
+	int i;
+
+	for (i = 0; i < dev_priv->usec_timeout; i++) {
+		r128_update_ring_snapshot(dev_priv);
+		if (ring->space >= n)
+			return 0;
+		DRM_UDELAY(1);
+	}
+
+	/* FIXME: This is being ignored... */
+	DRM_ERROR("failed!\n");
+	return -EBUSY;
+}
+
+static int r128_cce_get_buffers(struct drm_device * dev,
+				struct drm_file *file_priv,
+				struct drm_dma * d)
+{
+	int i;
+	struct drm_buf *buf;
+
+	for (i = d->granted_count; i < d->request_count; i++) {
+		buf = r128_freelist_get(dev);
+		if (!buf)
+			return -EAGAIN;
+
+		buf->file_priv = file_priv;
+
+		if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
+				     sizeof(buf->idx)))
+			return -EFAULT;
+		if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
+				     sizeof(buf->total)))
+			return -EFAULT;
+
+		d->granted_count++;
+	}
+	return 0;
+}
+
+int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	int ret = 0;
+	struct drm_dma *d = data;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	/* Please don't send us buffers.
+	 */
+	if (d->send_count != 0) {
+		DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
+			  DRM_CURRENTPID, d->send_count);
+		return -EINVAL;
+	}
+
+	/* We'll send you buffers.
+	 */
+	if (d->request_count < 0 || d->request_count > dma->buf_count) {
+		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
+			  DRM_CURRENTPID, d->request_count, dma->buf_count);
+		return -EINVAL;
+	}
+
+	d->granted_count = 0;
+
+	if (d->request_count) {
+		ret = r128_cce_get_buffers(dev, file_priv, d);
+	}
+
+	return ret;
+}
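
The *_pitch_offset_c values computed in r128_do_init_cce() above pack a pitch and a byte offset into a single register word: the pitch divided by 8 lands in the bits from bit 21 up, and the offset is stored in 32-byte units in the low bits. The sketch below only illustrates that packing and is not part of the commit; r128_pack_pitch_offset() is a hypothetical helper name and the pitch/offset values are made up.

#include <stdio.h>

/* Same packing as the front/back/depth *_pitch_offset_c fields set up in
 * r128_do_init_cce(): pitch/8 in the high bits, offset in 32-byte units in
 * the low bits.
 */
static unsigned int r128_pack_pitch_offset(unsigned int pitch,
					   unsigned int offset)
{
	return ((pitch / 8) << 21) | (offset >> 5);
}

int main(void)
{
	/* hypothetical 1280-unit pitch, buffer at byte offset 0x00200000 */
	printf("0x%08x\n", r128_pack_pitch_offset(1280, 0x00200000));
	return 0;
}
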
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
new file mode 100644
index 000000000000..6108e7587e12
--- /dev/null
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -0,0 +1,103 @@
+/* r128_drv.c -- ATI Rage 128 driver -*- linux-c -*-
+ * Created: Mon Dec 13 09:47:27 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Rickard E. (Rik) Faith <faith@valinux.com>
+ *    Gareth Hughes <gareth@valinux.com>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "r128_drm.h"
+#include "r128_drv.h"
+
+#include "drm_pciids.h"
+
+static struct pci_device_id pciidlist[] = {
+	r128_PCI_IDS
+};
+
+static struct drm_driver driver = {
+	.driver_features =
+	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
+	    DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
+	    DRIVER_IRQ_VBL,
+	.dev_priv_size = sizeof(drm_r128_buf_priv_t),
+	.preclose = r128_driver_preclose,
+	.lastclose = r128_driver_lastclose,
+	.vblank_wait = r128_driver_vblank_wait,
+	.irq_preinstall = r128_driver_irq_preinstall,
+	.irq_postinstall = r128_driver_irq_postinstall,
+	.irq_uninstall = r128_driver_irq_uninstall,
+	.irq_handler = r128_driver_irq_handler,
+	.reclaim_buffers = drm_core_reclaim_buffers,
+	.get_map_ofs = drm_core_get_map_ofs,
+	.get_reg_ofs = drm_core_get_reg_ofs,
+	.ioctls = r128_ioctls,
+	.dma_ioctl = r128_cce_buffers,
+	.fops = {
+		 .owner = THIS_MODULE,
+		 .open = drm_open,
+		 .release = drm_release,
+		 .ioctl = drm_ioctl,
+		 .mmap = drm_mmap,
+		 .poll = drm_poll,
+		 .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+		 .compat_ioctl = r128_compat_ioctl,
+#endif
+	},
+
+	.pci_driver = {
+		 .name = DRIVER_NAME,
+		 .id_table = pciidlist,
+	},
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static int __init r128_init(void)
+{
+	driver.num_ioctls = r128_max_ioctl;
+	return drm_init(&driver);
+}
+
+static void __exit r128_exit(void)
+{
+	drm_exit(&driver);
+}
+
+module_init(r128_init);
+module_exit(r128_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
new file mode 100644
index 000000000000..011105e51ac6
--- /dev/null
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -0,0 +1,522 @@
+/* r128_drv.h -- Private header for r128 driver -*- linux-c -*-
+ * Created: Mon Dec 13 09:51:11 1999 by faith@precisioninsight.com
+ */
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Rickard E. (Rik) Faith <faith@valinux.com>
+ *    Kevin E. Martin <martin@valinux.com>
+ *    Gareth Hughes <gareth@valinux.com>
+ *    Michel Dänzer <daenzerm@student.ethz.ch>
+ */
+
+#ifndef __R128_DRV_H__
+#define __R128_DRV_H__
+
+/* General customization:
+ */
+#define DRIVER_AUTHOR		"Gareth Hughes, VA Linux Systems Inc."
+
+#define DRIVER_NAME		"r128"
+#define DRIVER_DESC		"ATI Rage 128"
+#define DRIVER_DATE		"20030725"
+
+/* Interface history:
+ *
+ * ??  - ??
+ * 2.4 - Add support for ycbcr textures (no new ioctls)
+ * 2.5 - Add FLIP ioctl, disable FULLSCREEN.
+ */
+#define DRIVER_MAJOR		2
+#define DRIVER_MINOR		5
+#define DRIVER_PATCHLEVEL	0
+
+#define GET_RING_HEAD(dev_priv)		R128_READ( R128_PM4_BUFFER_DL_RPTR )
+
+typedef struct drm_r128_freelist {
+	unsigned int age;
+	struct drm_buf *buf;
+	struct drm_r128_freelist *next;
+	struct drm_r128_freelist *prev;
+} drm_r128_freelist_t;
+
+typedef struct drm_r128_ring_buffer {
+	u32 *start;
+	u32 *end;
+	int size;
+	int size_l2qw;
+
+	u32 tail;
+	u32 tail_mask;
+	int space;
+
+	int high_mark;
+} drm_r128_ring_buffer_t;
+
+typedef struct drm_r128_private {
+	drm_r128_ring_buffer_t ring;
+	drm_r128_sarea_t *sarea_priv;
+
+	int cce_mode;
+	int cce_fifo_size;
+	int cce_running;
+
+	drm_r128_freelist_t *head;
+	drm_r128_freelist_t *tail;
+
+	int usec_timeout;
+	int is_pci;
+	unsigned long cce_buffers_offset;
+
+	atomic_t idle_count;
+
+	int page_flipping;
+	int current_page;
+	u32 crtc_offset;
+	u32 crtc_offset_cntl;
+
+	u32 color_fmt;
+	unsigned int front_offset;
+	unsigned int front_pitch;
+	unsigned int back_offset;
+	unsigned int back_pitch;
+
+	u32 depth_fmt;
+	unsigned int depth_offset;
+	unsigned int depth_pitch;
+	unsigned int span_offset;
+
+	u32 front_pitch_offset_c;
+	u32 back_pitch_offset_c;
+	u32 depth_pitch_offset_c;
+	u32 span_pitch_offset_c;
+
+	drm_local_map_t *sarea;
+	drm_local_map_t *mmio;
+	drm_local_map_t *cce_ring;
+	drm_local_map_t *ring_rptr;
+	drm_local_map_t *agp_textures;
+	struct drm_ati_pcigart_info gart_info;
+} drm_r128_private_t;
+
+typedef struct drm_r128_buf_priv {
+	u32 age;
+	int prim;
+	int discard;
+	int dispatched;
+	drm_r128_freelist_t *list_entry;
+} drm_r128_buf_priv_t;
+
+extern struct drm_ioctl_desc r128_ioctls[];
+extern int r128_max_ioctl;
+
+				/* r128_cce.c */
+extern int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
+
+extern void r128_freelist_reset(struct drm_device * dev);
+
+extern int r128_wait_ring(drm_r128_private_t * dev_priv, int n);
+
+extern int r128_do_cce_idle(drm_r128_private_t * dev_priv);
+extern int r128_do_cleanup_cce(struct drm_device * dev);
+
+extern int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
+
+extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
+extern void r128_driver_irq_preinstall(struct drm_device * dev);
+extern void r128_driver_irq_postinstall(struct drm_device * dev);
+extern void r128_driver_irq_uninstall(struct drm_device * dev);
+extern void r128_driver_lastclose(struct drm_device * dev);
+extern void r128_driver_preclose(struct drm_device * dev,
+				 struct drm_file *file_priv);
+
+extern long r128_compat_ioctl(struct file *filp, unsigned int cmd,
+			      unsigned long arg);
+
+/* Register definitions, register access macros and drmAddMap constants
+ * for Rage 128 kernel driver.
+ */
+
+#define R128_AUX_SC_CNTL		0x1660
+#	define R128_AUX1_SC_EN			(1 << 0)
+#	define R128_AUX1_SC_MODE_OR		(0 << 1)
+#	define R128_AUX1_SC_MODE_NAND		(1 << 1)
+#	define R128_AUX2_SC_EN			(1 << 2)
+#	define R128_AUX2_SC_MODE_OR		(0 << 3)
+#	define R128_AUX2_SC_MODE_NAND		(1 << 3)
+#	define R128_AUX3_SC_EN			(1 << 4)
+#	define R128_AUX3_SC_MODE_OR		(0 << 5)
+#	define R128_AUX3_SC_MODE_NAND		(1 << 5)
+#define R128_AUX1_SC_LEFT		0x1664
+#define R128_AUX1_SC_RIGHT		0x1668
+#define R128_AUX1_SC_TOP		0x166c
+#define R128_AUX1_SC_BOTTOM		0x1670
+#define R128_AUX2_SC_LEFT		0x1674
+#define R128_AUX2_SC_RIGHT		0x1678
+#define R128_AUX2_SC_TOP		0x167c
+#define R128_AUX2_SC_BOTTOM		0x1680
+#define R128_AUX3_SC_LEFT		0x1684
+#define R128_AUX3_SC_RIGHT		0x1688
+#define R128_AUX3_SC_TOP		0x168c
+#define R128_AUX3_SC_BOTTOM		0x1690
+
+#define R128_BRUSH_DATA0		0x1480
+#define R128_BUS_CNTL			0x0030
+#	define R128_BUS_MASTER_DIS		(1 << 6)
+
+#define R128_CLOCK_CNTL_INDEX		0x0008
+#define R128_CLOCK_CNTL_DATA		0x000c
+#	define R128_PLL_WR_EN			(1 << 7)
+#define R128_CONSTANT_COLOR_C		0x1d34
+#define R128_CRTC_OFFSET		0x0224
+#define R128_CRTC_OFFSET_CNTL		0x0228
+#	define R128_CRTC_OFFSET_FLIP_CNTL	(1 << 16)
+
+#define R128_DP_GUI_MASTER_CNTL		0x146c
+#       define R128_GMC_SRC_PITCH_OFFSET_CNTL	(1    <<  0)
+#       define R128_GMC_DST_PITCH_OFFSET_CNTL	(1    <<  1)
+#	define R128_GMC_BRUSH_SOLID_COLOR	(13   <<  4)
+#	define R128_GMC_BRUSH_NONE		(15   <<  4)
+#	define R128_GMC_DST_16BPP		(4    <<  8)
+#	define R128_GMC_DST_24BPP		(5    <<  8)
+#	define R128_GMC_DST_32BPP		(6    <<  8)
+#       define R128_GMC_DST_DATATYPE_SHIFT	8
+#	define R128_GMC_SRC_DATATYPE_COLOR	(3    << 12)
+#	define R128_DP_SRC_SOURCE_MEMORY	(2    << 24)
+#	define R128_DP_SRC_SOURCE_HOST_DATA	(3    << 24)
+#	define R128_GMC_CLR_CMP_CNTL_DIS	(1    << 28)
+#	define R128_GMC_AUX_CLIP_DIS		(1    << 29)
+#	define R128_GMC_WR_MSK_DIS		(1    << 30)
+#	define R128_ROP3_S			0x00cc0000
+#	define R128_ROP3_P			0x00f00000
+#define R128_DP_WRITE_MASK		0x16cc
+#define R128_DST_PITCH_OFFSET_C		0x1c80
+#	define R128_DST_TILE			(1 << 31)
+
+#define R128_GEN_INT_CNTL		0x0040
+#	define R128_CRTC_VBLANK_INT_EN		(1 <<  0)
+#define R128_GEN_INT_STATUS		0x0044
+#	define R128_CRTC_VBLANK_INT		(1 <<  0)
+#	define R128_CRTC_VBLANK_INT_AK		(1 <<  0)
+#define R128_GEN_RESET_CNTL		0x00f0
+#	define R128_SOFT_RESET_GUI		(1 <<  0)
+
+#define R128_GUI_SCRATCH_REG0		0x15e0
+#define R128_GUI_SCRATCH_REG1		0x15e4
+#define R128_GUI_SCRATCH_REG2		0x15e8
+#define R128_GUI_SCRATCH_REG3		0x15ec
+#define R128_GUI_SCRATCH_REG4		0x15f0
+#define R128_GUI_SCRATCH_REG5		0x15f4
+
+#define R128_GUI_STAT			0x1740
+#	define R128_GUI_FIFOCNT_MASK		0x0fff
+#	define R128_GUI_ACTIVE			(1 << 31)
+
+#define R128_MCLK_CNTL			0x000f
+#	define R128_FORCE_GCP			(1 << 16)
+#	define R128_FORCE_PIPE3D_CP		(1 << 17)
+#	define R128_FORCE_RCP			(1 << 18)
+
+#define R128_PC_GUI_CTLSTAT		0x1748
+#define R128_PC_NGUI_CTLSTAT		0x0184
+#	define R128_PC_FLUSH_GUI		(3 << 0)
+#	define R128_PC_RI_GUI			(1 << 2)
+#	define R128_PC_FLUSH_ALL		0x00ff
+#	define R128_PC_BUSY			(1 << 31)
+
+#define R128_PCI_GART_PAGE		0x017c
+#define R128_PRIM_TEX_CNTL_C		0x1cb0
+
+#define R128_SCALE_3D_CNTL		0x1a00
+#define R128_SEC_TEX_CNTL_C		0x1d00
+#define R128_SEC_TEXTURE_BORDER_COLOR_C	0x1d3c
+#define R128_SETUP_CNTL			0x1bc4
+#define R128_STEN_REF_MASK_C		0x1d40
+
+#define R128_TEX_CNTL_C			0x1c9c
+#	define R128_TEX_CACHE_FLUSH		(1 << 23)
+
+#define R128_WAIT_UNTIL			0x1720
+#	define R128_EVENT_CRTC_OFFSET		(1 << 0)
+#define R128_WINDOW_XY_OFFSET		0x1bcc
+
+/* CCE registers
+ */
+#define R128_PM4_BUFFER_OFFSET		0x0700
+#define R128_PM4_BUFFER_CNTL		0x0704
+#	define R128_PM4_MASK			(15 << 28)
+#	define R128_PM4_NONPM4			(0  << 28)
+#	define R128_PM4_192PIO			(1  << 28)
+#	define R128_PM4_192BM			(2  << 28)
+#	define R128_PM4_128PIO_64INDBM		(3  << 28)
+#	define R128_PM4_128BM_64INDBM		(4  << 28)
+#	define R128_PM4_64PIO_128INDBM		(5  << 28)
+#	define R128_PM4_64BM_128INDBM		(6  << 28)
+#	define R128_PM4_64PIO_64VCBM_64INDBM	(7  << 28)
+#	define R128_PM4_64BM_64VCBM_64INDBM	(8  << 28)
+#	define R128_PM4_64PIO_64VCPIO_64INDPIO	(15 << 28)
+#	define R128_PM4_BUFFER_CNTL_NOUPDATE	(1  << 27)
+
+#define R128_PM4_BUFFER_WM_CNTL		0x0708
+#	define R128_WMA_SHIFT			0
+#	define R128_WMB_SHIFT			8
+#	define R128_WMC_SHIFT			16
+#	define R128_WB_WM_SHIFT			24
+
+#define R128_PM4_BUFFER_DL_RPTR_ADDR	0x070c
+#define R128_PM4_BUFFER_DL_RPTR		0x0710
+#define R128_PM4_BUFFER_DL_WPTR		0x0714
+#	define R128_PM4_BUFFER_DL_DONE		(1 << 31)
+
+#define R128_PM4_VC_FPU_SETUP		0x071c
+
+#define R128_PM4_IW_INDOFF		0x0738
+#define R128_PM4_IW_INDSIZE		0x073c
+
+#define R128_PM4_STAT			0x07b8
+#	define R128_PM4_FIFOCNT_MASK		0x0fff
+#	define R128_PM4_BUSY			(1 << 16)
+#	define R128_PM4_GUI_ACTIVE		(1 << 31)
+
+#define R128_PM4_MICROCODE_ADDR		0x07d4
+#define R128_PM4_MICROCODE_RADDR	0x07d8
+#define R128_PM4_MICROCODE_DATAH	0x07dc
+#define R128_PM4_MICROCODE_DATAL	0x07e0
+
+#define R128_PM4_BUFFER_ADDR		0x07f0
+#define R128_PM4_MICRO_CNTL		0x07fc
+#	define R128_PM4_MICRO_FREERUN		(1 << 30)
+
+#define R128_PM4_FIFO_DATA_EVEN		0x1000
+#define R128_PM4_FIFO_DATA_ODD		0x1004
+
+/* CCE command packets
+ */
+#define R128_CCE_PACKET0		0x00000000
+#define R128_CCE_PACKET1		0x40000000
+#define R128_CCE_PACKET2		0x80000000
+#define R128_CCE_PACKET3		0xC0000000
+#	define R128_CNTL_HOSTDATA_BLT		0x00009400
+#	define R128_CNTL_PAINT_MULTI		0x00009A00
+#	define R128_CNTL_BITBLT_MULTI		0x00009B00
+#	define R128_3D_RNDR_GEN_INDX_PRIM	0x00002300
+
+#define R128_CCE_PACKET_MASK		0xC0000000
+#define R128_CCE_PACKET_COUNT_MASK	0x3fff0000
+#define R128_CCE_PACKET0_REG_MASK	0x000007ff
+#define R128_CCE_PACKET1_REG0_MASK	0x000007ff
+#define R128_CCE_PACKET1_REG1_MASK	0x003ff800
+
+#define R128_CCE_VC_CNTL_PRIM_TYPE_NONE		0x00000000
+#define R128_CCE_VC_CNTL_PRIM_TYPE_POINT	0x00000001
+#define R128_CCE_VC_CNTL_PRIM_TYPE_LINE		0x00000002
+#define R128_CCE_VC_CNTL_PRIM_TYPE_POLY_LINE	0x00000003
+#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_LIST	0x00000004
+#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_FAN	0x00000005
+#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_STRIP	0x00000006
+#define R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2	0x00000007
+#define R128_CCE_VC_CNTL_PRIM_WALK_IND		0x00000010
+#define R128_CCE_VC_CNTL_PRIM_WALK_LIST		0x00000020
+#define R128_CCE_VC_CNTL_PRIM_WALK_RING		0x00000030
+#define R128_CCE_VC_CNTL_NUM_SHIFT		16
+
+#define R128_DATATYPE_VQ		0
+#define R128_DATATYPE_CI4		1
+#define R128_DATATYPE_CI8		2
+#define R128_DATATYPE_ARGB1555		3
+#define R128_DATATYPE_RGB565		4
+#define R128_DATATYPE_RGB888		5
+#define R128_DATATYPE_ARGB8888		6
+#define R128_DATATYPE_RGB332		7
+#define R128_DATATYPE_Y8		8
+#define R128_DATATYPE_RGB8		9
+#define R128_DATATYPE_CI16		10
+#define R128_DATATYPE_YVYU422		11
+#define R128_DATATYPE_VYUY422		12
+#define R128_DATATYPE_AYUV444		14
+#define R128_DATATYPE_ARGB4444		15
+
+/* Constants */
+#define R128_AGP_OFFSET			0x02000000
+
+#define R128_WATERMARK_L		16
+#define R128_WATERMARK_M		8
+#define R128_WATERMARK_N		8
+#define R128_WATERMARK_K		128
+
+#define R128_MAX_USEC_TIMEOUT		100000	/* 100 ms */
+
+#define R128_LAST_FRAME_REG		R128_GUI_SCRATCH_REG0
+#define R128_LAST_DISPATCH_REG		R128_GUI_SCRATCH_REG1
+#define R128_MAX_VB_AGE			0x7fffffff
+#define R128_MAX_VB_VERTS		(0xffff)
+
+#define R128_RING_HIGH_MARK		128
+
+#define R128_PERFORMANCE_BOXES		0
+
+#define R128_PCIGART_TABLE_SIZE         32768
+
+#define R128_READ(reg)		DRM_READ32(  dev_priv->mmio, (reg) )
+#define R128_WRITE(reg,val)	DRM_WRITE32( dev_priv->mmio, (reg), (val) )
+#define R128_READ8(reg)		DRM_READ8(   dev_priv->mmio, (reg) )
+#define R128_WRITE8(reg,val)	DRM_WRITE8(  dev_priv->mmio, (reg), (val) )
+
+#define R128_WRITE_PLL(addr,val)					\
+do {									\
+	R128_WRITE8(R128_CLOCK_CNTL_INDEX,				\
+		    ((addr) & 0x1f) | R128_PLL_WR_EN);			\
+	R128_WRITE(R128_CLOCK_CNTL_DATA, (val));			\
+} while (0)
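PLL registers such as R128_MCLK_CNTL sit behind the CLOCK_CNTL_INDEX/DATA pair rather than being directly addressable in the MMIO aperture, which is why the macro above first writes the masked index with R128_PLL_WR_EN set and then pushes the value through the data port. A minimal usage sketch follows; the forced-clock value is purely illustrative and not taken from the driver's reset path.

	/* Illustrative only: keep the command-processor clocks forced on. */
	R128_WRITE_PLL(R128_MCLK_CNTL,
		       R128_FORCE_GCP | R128_FORCE_PIPE3D_CP | R128_FORCE_RCP);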
+
+#define CCE_PACKET0( reg, n )		(R128_CCE_PACKET0 |		\
+					 ((n) << 16) | ((reg) >> 2))
+#define CCE_PACKET1( reg0, reg1 )	(R128_CCE_PACKET1 |		\
+					 (((reg1) >> 2) << 11) | ((reg0) >> 2))
+#define CCE_PACKET2()			(R128_CCE_PACKET2)
+#define CCE_PACKET3( pkt, n )		(R128_CCE_PACKET3 |		\
+					 (pkt) | ((n) << 16))
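As used later in r128_state.c, the count field in these headers is one less than the number of data dwords that follow (CCE_PACKET0(R128_AUX1_SC_LEFT, 3) is trailed by four scissor values, for example). Two worked encodings, computed by hand from the definitions above and given only as a cross-check:

	u32 hdr0 = CCE_PACKET0(R128_WINDOW_XY_OFFSET, 0);
		/* = 0x00000000 | (0 << 16) | (0x1bcc >> 2) = 0x000006f3 */
	u32 hdr3 = CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4);
		/* = 0xC0000000 | 0x00009A00 | (4 << 16)    = 0xC0049A00 */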
+
+static __inline__ void r128_update_ring_snapshot(drm_r128_private_t * dev_priv)
+{
+	drm_r128_ring_buffer_t *ring = &dev_priv->ring;
+	ring->space = (GET_RING_HEAD(dev_priv) - ring->tail) * sizeof(u32);
+	if (ring->space <= 0)
+		ring->space += ring->size;
+}
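GET_RING_HEAD() and ring->tail are dword indices, so the difference is scaled by sizeof(u32) to give free space in bytes, and a non-positive result is wrapped by adding the ring size. A numerical sketch with assumed values:

	/* Assume a 16 KiB ring (4096 dwords), head == 100, tail == 3000:
	 *
	 *	space  = (100 - 3000) * sizeof(u32) = -11600
	 *	space += 16384                      =   4784 bytes free
	 *
	 * i.e. about 1196 more dwords can be queued before the write
	 * pointer catches up with the CCE's read pointer.
	 */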
+
+/* ================================================================
+ * Misc helper macros
+ */
+
+#define RING_SPACE_TEST_WITH_RETURN( dev_priv )				\
+do {									\
+	drm_r128_ring_buffer_t *ring = &dev_priv->ring; int i;		\
+	if ( ring->space < ring->high_mark ) {				\
+		for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) {	\
+			r128_update_ring_snapshot( dev_priv );		\
+			if ( ring->space >= ring->high_mark )		\
+				goto __ring_space_done;			\
+			DRM_UDELAY(1);				\
+		}							\
+		DRM_ERROR( "ring space check failed!\n" );		\
+		return -EBUSY;				\
+	}								\
+ __ring_space_done:							\
+	;								\
+} while (0)
+
+#define VB_AGE_TEST_WITH_RETURN( dev_priv )				\
+do {									\
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;		\
+	if ( sarea_priv->last_dispatch >= R128_MAX_VB_AGE ) {		\
+		int __ret = r128_do_cce_idle( dev_priv );		\
+		if ( __ret ) return __ret;				\
+		sarea_priv->last_dispatch = 0;				\
+		r128_freelist_reset( dev );				\
+	}								\
+} while (0)
+
+#define R128_WAIT_UNTIL_PAGE_FLIPPED() do {				\
+	OUT_RING( CCE_PACKET0( R128_WAIT_UNTIL, 0 ) );			\
+	OUT_RING( R128_EVENT_CRTC_OFFSET );				\
+} while (0)
+
+/* ================================================================
+ * Ring control
+ */
+
+#define R128_VERBOSE	0
+
+#define RING_LOCALS							\
+	int write, _nr; unsigned int tail_mask; volatile u32 *ring;
+
+#define BEGIN_RING( n ) do {						\
+	if ( R128_VERBOSE ) {						\
+		DRM_INFO( "BEGIN_RING( %d )\n", (n));			\
+	}								\
+	if ( dev_priv->ring.space <= (n) * sizeof(u32) ) {		\
+		COMMIT_RING();						\
+		r128_wait_ring( dev_priv, (n) * sizeof(u32) );		\
+	}								\
+	_nr = n; dev_priv->ring.space -= (n) * sizeof(u32);		\
+	ring = dev_priv->ring.start;					\
+	write = dev_priv->ring.tail;					\
+	tail_mask = dev_priv->ring.tail_mask;				\
+} while (0)
+
+/* You can set this to zero if you want.  If the card locks up, you'll
+ * need to keep this set.  It works around a bug in early revs of the
+ * Rage 128 chipset, where the CCE would read 32 dwords past the end of
+ * the ring buffer before wrapping around.
+ */
+#define R128_BROKEN_CCE	1
+
+#define ADVANCE_RING() do {						\
+	if ( R128_VERBOSE ) {						\
+		DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n",	\
+			  write, dev_priv->ring.tail );			\
+	}								\
+	if ( R128_BROKEN_CCE && write < 32 ) {				\
+		memcpy( dev_priv->ring.end,				\
+			dev_priv->ring.start,				\
+			write * sizeof(u32) );				\
+	}								\
+	if (((dev_priv->ring.tail + _nr) & tail_mask) != write) {	\
+		DRM_ERROR(						\
+			"ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n",	\
+			((dev_priv->ring.tail + _nr) & tail_mask),	\
+			write, __LINE__);				\
+	} else								\
+		dev_priv->ring.tail = write;				\
+} while (0)
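The memcpy in ADVANCE_RING() is what the R128_BROKEN_CCE note above refers to: whenever a submission wraps and leaves fewer than 32 dwords at the start of the ring, those dwords are mirrored past ring.end, so the CCE's 32-dword over-read at the wrap point only ever sees freshly written packets. A sketch of the layout, with the small write count assumed for illustration:

	/* write == 5 after wrapping:
	 *
	 *	ring.start[0..4]  --memcpy-->  ring.end[0..4]
	 *
	 * so reads up to 32 dwords beyond the nominal end stay valid.
	 */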
+
+#define COMMIT_RING() do {						\
+	if ( R128_VERBOSE ) {						\
+		DRM_INFO( "COMMIT_RING() tail=0x%06x\n",		\
+			dev_priv->ring.tail );				\
+	}								\
+	DRM_MEMORYBARRIER();						\
+	R128_WRITE( R128_PM4_BUFFER_DL_WPTR, dev_priv->ring.tail );	\
+	R128_READ( R128_PM4_BUFFER_DL_WPTR );				\
+} while (0)
+
+#define OUT_RING( x ) do {						\
+	if ( R128_VERBOSE ) {						\
+		DRM_INFO( "   OUT_RING( 0x%08x ) at 0x%x\n",		\
+			   (unsigned int)(x), write );			\
+	}								\
+	ring[write++] = cpu_to_le32( x );				\
+	write &= tail_mask;						\
+} while (0)
+
+#endif				/* __R128_DRV_H__ */
diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
new file mode 100644
index 000000000000..d3cb676eee84
--- /dev/null
+++ b/drivers/gpu/drm/r128/r128_ioc32.c
@@ -0,0 +1,221 @@
+/**
+ * \file r128_ioc32.c
+ *
+ * 32-bit ioctl compatibility routines for the R128 DRM.
+ *
+ * \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich
+ *
+ * Copyright (C) Paul Mackerras 2005
+ * Copyright (C) Egbert Eich 2003,2004
+ * Copyright (C) Dave Airlie 2005
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <linux/compat.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "r128_drm.h"
+
+typedef struct drm_r128_init32 {
+	int func;
+	unsigned int sarea_priv_offset;
+	int is_pci;
+	int cce_mode;
+	int cce_secure;
+	int ring_size;
+	int usec_timeout;
+
+	unsigned int fb_bpp;
+	unsigned int front_offset, front_pitch;
+	unsigned int back_offset, back_pitch;
+	unsigned int depth_bpp;
+	unsigned int depth_offset, depth_pitch;
+	unsigned int span_offset;
+
+	unsigned int fb_offset;
+	unsigned int mmio_offset;
+	unsigned int ring_offset;
+	unsigned int ring_rptr_offset;
+	unsigned int buffers_offset;
+	unsigned int agp_textures_offset;
+} drm_r128_init32_t;
+
+static int compat_r128_init(struct file *file, unsigned int cmd,
+			    unsigned long arg)
+{
+	drm_r128_init32_t init32;
+	drm_r128_init_t __user *init;
+
+	if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
+		return -EFAULT;
+
+	init = compat_alloc_user_space(sizeof(*init));
+	if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
+	    || __put_user(init32.func, &init->func)
+	    || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
+	    || __put_user(init32.is_pci, &init->is_pci)
+	    || __put_user(init32.cce_mode, &init->cce_mode)
+	    || __put_user(init32.cce_secure, &init->cce_secure)
+	    || __put_user(init32.ring_size, &init->ring_size)
+	    || __put_user(init32.usec_timeout, &init->usec_timeout)
+	    || __put_user(init32.fb_bpp, &init->fb_bpp)
+	    || __put_user(init32.front_offset, &init->front_offset)
+	    || __put_user(init32.front_pitch, &init->front_pitch)
+	    || __put_user(init32.back_offset, &init->back_offset)
+	    || __put_user(init32.back_pitch, &init->back_pitch)
+	    || __put_user(init32.depth_bpp, &init->depth_bpp)
+	    || __put_user(init32.depth_offset, &init->depth_offset)
+	    || __put_user(init32.depth_pitch, &init->depth_pitch)
+	    || __put_user(init32.span_offset, &init->span_offset)
+	    || __put_user(init32.fb_offset, &init->fb_offset)
+	    || __put_user(init32.mmio_offset, &init->mmio_offset)
+	    || __put_user(init32.ring_offset, &init->ring_offset)
+	    || __put_user(init32.ring_rptr_offset, &init->ring_rptr_offset)
+	    || __put_user(init32.buffers_offset, &init->buffers_offset)
+	    || __put_user(init32.agp_textures_offset,
+			  &init->agp_textures_offset))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_R128_INIT, (unsigned long)init);
+}
+
+typedef struct drm_r128_depth32 {
+	int func;
+	int n;
+	u32 x;
+	u32 y;
+	u32 buffer;
+	u32 mask;
+} drm_r128_depth32_t;
+
+static int compat_r128_depth(struct file *file, unsigned int cmd,
+			     unsigned long arg)
+{
+	drm_r128_depth32_t depth32;
+	drm_r128_depth_t __user *depth;
+
+	if (copy_from_user(&depth32, (void __user *)arg, sizeof(depth32)))
+		return -EFAULT;
+
+	depth = compat_alloc_user_space(sizeof(*depth));
+	if (!access_ok(VERIFY_WRITE, depth, sizeof(*depth))
+	    || __put_user(depth32.func, &depth->func)
+	    || __put_user(depth32.n, &depth->n)
+	    || __put_user((int __user *)(unsigned long)depth32.x, &depth->x)
+	    || __put_user((int __user *)(unsigned long)depth32.y, &depth->y)
+	    || __put_user((unsigned int __user *)(unsigned long)depth32.buffer,
+			  &depth->buffer)
+	    || __put_user((unsigned char __user *)(unsigned long)depth32.mask,
+			  &depth->mask))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_R128_DEPTH, (unsigned long)depth);
+
+}
+
+typedef struct drm_r128_stipple32 {
+	u32 mask;
+} drm_r128_stipple32_t;
+
+static int compat_r128_stipple(struct file *file, unsigned int cmd,
+			       unsigned long arg)
+{
+	drm_r128_stipple32_t stipple32;
+	drm_r128_stipple_t __user *stipple;
+
+	if (copy_from_user(&stipple32, (void __user *)arg, sizeof(stipple32)))
+		return -EFAULT;
+
+	stipple = compat_alloc_user_space(sizeof(*stipple));
+	if (!access_ok(VERIFY_WRITE, stipple, sizeof(*stipple))
+	    || __put_user((unsigned int __user *)(unsigned long)stipple32.mask,
+			  &stipple->mask))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple);
+}
+
+typedef struct drm_r128_getparam32 {
+	int param;
+	u32 value;
+} drm_r128_getparam32_t;
+
+static int compat_r128_getparam(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	drm_r128_getparam32_t getparam32;
+	drm_r128_getparam_t __user *getparam;
+
+	if (copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32)))
+		return -EFAULT;
+
+	getparam = compat_alloc_user_space(sizeof(*getparam));
+	if (!access_ok(VERIFY_WRITE, getparam, sizeof(*getparam))
+	    || __put_user(getparam32.param, &getparam->param)
+	    || __put_user((void __user *)(unsigned long)getparam32.value,
+			  &getparam->value))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
+}
+
+drm_ioctl_compat_t *r128_compat_ioctls[] = {
+	[DRM_R128_INIT] = compat_r128_init,
+	[DRM_R128_DEPTH] = compat_r128_depth,
+	[DRM_R128_STIPPLE] = compat_r128_stipple,
+	[DRM_R128_GETPARAM] = compat_r128_getparam,
+};
+
+/**
+ * Called whenever a 32-bit process running under a 64-bit kernel
+ * performs an ioctl on /dev/dri/card<n>.
+ *
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ */
+long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+	drm_ioctl_compat_t *fn = NULL;
+	int ret;
+
+	if (nr < DRM_COMMAND_BASE)
+		return drm_compat_ioctl(filp, cmd, arg);
+
+	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
+		fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
+
+	lock_kernel();		/* XXX for now */
+	if (fn != NULL)
+		ret = (*fn) (filp, cmd, arg);
+	else
+		ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
+	unlock_kernel();
+
+	return ret;
+}
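Driver-private compat handlers are looked up by subtracting DRM_COMMAND_BASE from the ioctl number; core DRM ioctls and driver ioctls without a 32-bit wrapper fall back to the generic paths. A symbolic sketch of the lookup, assuming a stipple ioctl arriving from 32-bit userspace:

	unsigned int nr = DRM_IOCTL_NR(cmd);	/* DRM_COMMAND_BASE + DRM_R128_STIPPLE */
	drm_ioctl_compat_t *fn =
		r128_compat_ioctls[nr - DRM_COMMAND_BASE];	/* compat_r128_stipple */
	/* fn == NULL for unwrapped driver ioctls -> plain drm_ioctl(). */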
diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
new file mode 100644
index 000000000000..c76fdca7662d
--- /dev/null
+++ b/drivers/gpu/drm/r128/r128_irq.c
@@ -0,0 +1,101 @@
+/* r128_irq.c -- IRQ handling for the ATI Rage 128 -*- linux-c -*- */
+/*
+ * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Keith Whitwell <keith@tungstengraphics.com>
+ *    Eric Anholt <anholt@FreeBSD.org>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "r128_drm.h"
+#include "r128_drv.h"
+
+irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
+	int status;
+
+	status = R128_READ(R128_GEN_INT_STATUS);
+
+	/* VBLANK interrupt */
+	if (status & R128_CRTC_VBLANK_INT) {
+		R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
+		atomic_inc(&dev->vbl_received);
+		DRM_WAKEUP(&dev->vbl_queue);
+		drm_vbl_send_signals(dev);
+		return IRQ_HANDLED;
+	}
+	return IRQ_NONE;
+}
+
+int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
+{
+	unsigned int cur_vblank;
+	int ret = 0;
+
+	/* Assume that the user has missed the current sequence number
+	 * by about a day rather than wanting to wait for years of
+	 * vertical blanks...
+	 */
+	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
+		    (((cur_vblank = atomic_read(&dev->vbl_received))
+		      - *sequence) <= (1 << 23)));
+
+	*sequence = cur_vblank;
+
+	return ret;
+}
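The condition relies on unsigned wraparound: differences of up to 1 << 23 counts are treated as already reached, so a stale sequence number slightly behind the counter completes immediately even across a 32-bit wrap. A worked example with assumed values:

	/* *sequence == 0xfffffff0, cur_vblank == 0x00000010:
	 *
	 *	0x00000010 - 0xfffffff0 == 0x00000020  (<= 1 << 23)
	 *
	 * so the wait returns; only a gap larger than ~8.4 million
	 * vblanks is interpreted as "not yet reached".
	 */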
+
+void r128_driver_irq_preinstall(struct drm_device * dev)
+{
+	drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
+
+	/* Disable *all* interrupts */
+	R128_WRITE(R128_GEN_INT_CNTL, 0);
+	/* Clear vblank bit if it's already high */
+	R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
+}
+
+void r128_driver_irq_postinstall(struct drm_device * dev)
+{
+	drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
+
+	/* Turn on VBL interrupt */
+	R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
+}
+
+void r128_driver_irq_uninstall(struct drm_device * dev)
+{
+	drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
+	if (!dev_priv)
+		return;
+
+	/* Disable *all* interrupts */
+	R128_WRITE(R128_GEN_INT_CNTL, 0);
+}
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
new file mode 100644
index 000000000000..51a9afce7b9b
--- /dev/null
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -0,0 +1,1681 @@
+/* r128_state.c -- State support for r128 -*- linux-c -*-
+ * Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com
+ */
+/*
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Gareth Hughes <gareth@valinux.com>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "r128_drm.h"
+#include "r128_drv.h"
+
+/* ================================================================
+ * CCE hardware state programming functions
+ */
+
+static void r128_emit_clip_rects(drm_r128_private_t * dev_priv,
+				 struct drm_clip_rect * boxes, int count)
+{
+	u32 aux_sc_cntl = 0x00000000;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING((count < 3 ? count : 3) * 5 + 2);
+
+	if (count >= 1) {
+		OUT_RING(CCE_PACKET0(R128_AUX1_SC_LEFT, 3));
+		OUT_RING(boxes[0].x1);
+		OUT_RING(boxes[0].x2 - 1);
+		OUT_RING(boxes[0].y1);
+		OUT_RING(boxes[0].y2 - 1);
+
+		aux_sc_cntl |= (R128_AUX1_SC_EN | R128_AUX1_SC_MODE_OR);
+	}
+	if (count >= 2) {
+		OUT_RING(CCE_PACKET0(R128_AUX2_SC_LEFT, 3));
+		OUT_RING(boxes[1].x1);
+		OUT_RING(boxes[1].x2 - 1);
+		OUT_RING(boxes[1].y1);
+		OUT_RING(boxes[1].y2 - 1);
+
+		aux_sc_cntl |= (R128_AUX2_SC_EN | R128_AUX2_SC_MODE_OR);
+	}
+	if (count >= 3) {
+		OUT_RING(CCE_PACKET0(R128_AUX3_SC_LEFT, 3));
+		OUT_RING(boxes[2].x1);
+		OUT_RING(boxes[2].x2 - 1);
+		OUT_RING(boxes[2].y1);
+		OUT_RING(boxes[2].y2 - 1);
+
+		aux_sc_cntl |= (R128_AUX3_SC_EN | R128_AUX3_SC_MODE_OR);
+	}
+
+	OUT_RING(CCE_PACKET0(R128_AUX_SC_CNTL, 0));
+	OUT_RING(aux_sc_cntl);
+
+	ADVANCE_RING();
+}
+
+static __inline__ void r128_emit_core(drm_r128_private_t * dev_priv)
+{
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(2);
+
+	OUT_RING(CCE_PACKET0(R128_SCALE_3D_CNTL, 0));
+	OUT_RING(ctx->scale_3d_cntl);
+
+	ADVANCE_RING();
+}
+
+static __inline__ void r128_emit_context(drm_r128_private_t * dev_priv)
+{
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(13);
+
+	OUT_RING(CCE_PACKET0(R128_DST_PITCH_OFFSET_C, 11));
+	OUT_RING(ctx->dst_pitch_offset_c);
+	OUT_RING(ctx->dp_gui_master_cntl_c);
+	OUT_RING(ctx->sc_top_left_c);
+	OUT_RING(ctx->sc_bottom_right_c);
+	OUT_RING(ctx->z_offset_c);
+	OUT_RING(ctx->z_pitch_c);
+	OUT_RING(ctx->z_sten_cntl_c);
+	OUT_RING(ctx->tex_cntl_c);
+	OUT_RING(ctx->misc_3d_state_cntl_reg);
+	OUT_RING(ctx->texture_clr_cmp_clr_c);
+	OUT_RING(ctx->texture_clr_cmp_msk_c);
+	OUT_RING(ctx->fog_color_c);
+
+	ADVANCE_RING();
+}
+
+static __inline__ void r128_emit_setup(drm_r128_private_t * dev_priv)
+{
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(3);
+
+	OUT_RING(CCE_PACKET1(R128_SETUP_CNTL, R128_PM4_VC_FPU_SETUP));
+	OUT_RING(ctx->setup_cntl);
+	OUT_RING(ctx->pm4_vc_fpu_setup);
+
+	ADVANCE_RING();
+}
+
+static __inline__ void r128_emit_masks(drm_r128_private_t * dev_priv)
+{
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(5);
+
+	OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
+	OUT_RING(ctx->dp_write_mask);
+
+	OUT_RING(CCE_PACKET0(R128_STEN_REF_MASK_C, 1));
+	OUT_RING(ctx->sten_ref_mask_c);
+	OUT_RING(ctx->plane_3d_mask_c);
+
+	ADVANCE_RING();
+}
+
+static __inline__ void r128_emit_window(drm_r128_private_t * dev_priv)
+{
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(2);
+
+	OUT_RING(CCE_PACKET0(R128_WINDOW_XY_OFFSET, 0));
+	OUT_RING(ctx->window_xy_offset);
+
+	ADVANCE_RING();
+}
+
+static __inline__ void r128_emit_tex0(drm_r128_private_t * dev_priv)
+{
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
+	drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[0];
+	int i;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(7 + R128_MAX_TEXTURE_LEVELS);
+
+	OUT_RING(CCE_PACKET0(R128_PRIM_TEX_CNTL_C,
+			     2 + R128_MAX_TEXTURE_LEVELS));
+	OUT_RING(tex->tex_cntl);
+	OUT_RING(tex->tex_combine_cntl);
+	OUT_RING(ctx->tex_size_pitch_c);
+	for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) {
+		OUT_RING(tex->tex_offset[i]);
+	}
+
+	OUT_RING(CCE_PACKET0(R128_CONSTANT_COLOR_C, 1));
+	OUT_RING(ctx->constant_color_c);
+	OUT_RING(tex->tex_border_color);
+
+	ADVANCE_RING();
+}
+
+static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv)
+{
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1];
+	int i;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(5 + R128_MAX_TEXTURE_LEVELS);
+
+	OUT_RING(CCE_PACKET0(R128_SEC_TEX_CNTL_C, 1 + R128_MAX_TEXTURE_LEVELS));
+	OUT_RING(tex->tex_cntl);
+	OUT_RING(tex->tex_combine_cntl);
+	for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) {
+		OUT_RING(tex->tex_offset[i]);
+	}
+
+	OUT_RING(CCE_PACKET0(R128_SEC_TEXTURE_BORDER_COLOR_C, 0));
+	OUT_RING(tex->tex_border_color);
+
+	ADVANCE_RING();
+}
+
+static void r128_emit_state(drm_r128_private_t * dev_priv)
+{
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	unsigned int dirty = sarea_priv->dirty;
+
+	DRM_DEBUG("dirty=0x%08x\n", dirty);
+
+	if (dirty & R128_UPLOAD_CORE) {
+		r128_emit_core(dev_priv);
+		sarea_priv->dirty &= ~R128_UPLOAD_CORE;
+	}
+
+	if (dirty & R128_UPLOAD_CONTEXT) {
+		r128_emit_context(dev_priv);
+		sarea_priv->dirty &= ~R128_UPLOAD_CONTEXT;
+	}
+
+	if (dirty & R128_UPLOAD_SETUP) {
+		r128_emit_setup(dev_priv);
+		sarea_priv->dirty &= ~R128_UPLOAD_SETUP;
+	}
+
+	if (dirty & R128_UPLOAD_MASKS) {
+		r128_emit_masks(dev_priv);
+		sarea_priv->dirty &= ~R128_UPLOAD_MASKS;
+	}
+
+	if (dirty & R128_UPLOAD_WINDOW) {
+		r128_emit_window(dev_priv);
+		sarea_priv->dirty &= ~R128_UPLOAD_WINDOW;
+	}
+
+	if (dirty & R128_UPLOAD_TEX0) {
+		r128_emit_tex0(dev_priv);
+		sarea_priv->dirty &= ~R128_UPLOAD_TEX0;
+	}
+
+	if (dirty & R128_UPLOAD_TEX1) {
+		r128_emit_tex1(dev_priv);
+		sarea_priv->dirty &= ~R128_UPLOAD_TEX1;
+	}
+
+	/* Turn off the texture cache flushing */
+	sarea_priv->context_state.tex_cntl_c &= ~R128_TEX_CACHE_FLUSH;
+
+	sarea_priv->dirty &= ~R128_REQUIRE_QUIESCENCE;
+}
+
+#if R128_PERFORMANCE_BOXES
+/* ================================================================
+ * Performance monitoring functions
+ */
+
+static void r128_clear_box(drm_r128_private_t * dev_priv,
+			   int x, int y, int w, int h, int r, int g, int b)
+{
+	u32 pitch, offset;
+	u32 fb_bpp, color;
+	RING_LOCALS;
+
+	switch (dev_priv->fb_bpp) {
+	case 16:
+		fb_bpp = R128_GMC_DST_16BPP;
+		color = (((r & 0xf8) << 8) |
+			 ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
+		break;
+	case 24:
+		fb_bpp = R128_GMC_DST_24BPP;
+		color = ((r << 16) | (g << 8) | b);
+		break;
+	case 32:
+		fb_bpp = R128_GMC_DST_32BPP;
+		color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
+		break;
+	default:
+		return;
+	}
+
+	offset = dev_priv->back_offset;
+	pitch = dev_priv->back_pitch >> 3;
+
+	BEGIN_RING(6);
+
+	OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
+	OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
+		 R128_GMC_BRUSH_SOLID_COLOR |
+		 fb_bpp |
+		 R128_GMC_SRC_DATATYPE_COLOR |
+		 R128_ROP3_P |
+		 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_AUX_CLIP_DIS);
+
+	OUT_RING((pitch << 21) | (offset >> 5));
+	OUT_RING(color);
+
+	OUT_RING((x << 16) | y);
+	OUT_RING((w << 16) | h);
+
+	ADVANCE_RING();
+}
+
+static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
+{
+	if (atomic_read(&dev_priv->idle_count) == 0) {
+		r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
+	} else {
+		atomic_set(&dev_priv->idle_count, 0);
+	}
+}
+
+#endif
+
+/* ================================================================
+ * CCE command dispatch functions
+ */
+
+static void r128_print_dirty(const char *msg, unsigned int flags)
+{
+	DRM_INFO("%s: (0x%x) %s%s%s%s%s%s%s%s%s\n",
+		 msg,
+		 flags,
+		 (flags & R128_UPLOAD_CORE) ? "core, " : "",
+		 (flags & R128_UPLOAD_CONTEXT) ? "context, " : "",
+		 (flags & R128_UPLOAD_SETUP) ? "setup, " : "",
+		 (flags & R128_UPLOAD_TEX0) ? "tex0, " : "",
+		 (flags & R128_UPLOAD_TEX1) ? "tex1, " : "",
+		 (flags & R128_UPLOAD_MASKS) ? "masks, " : "",
+		 (flags & R128_UPLOAD_WINDOW) ? "window, " : "",
+		 (flags & R128_UPLOAD_CLIPRECTS) ? "cliprects, " : "",
+		 (flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : "");
+}
+
+static void r128_cce_dispatch_clear(struct drm_device * dev,
+				    drm_r128_clear_t * clear)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	int nbox = sarea_priv->nbox;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
+	unsigned int flags = clear->flags;
+	int i;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	if (dev_priv->page_flipping && dev_priv->current_page == 1) {
+		unsigned int tmp = flags;
+
+		flags &= ~(R128_FRONT | R128_BACK);
+		if (tmp & R128_FRONT)
+			flags |= R128_BACK;
+		if (tmp & R128_BACK)
+			flags |= R128_FRONT;
+	}
+
+	for (i = 0; i < nbox; i++) {
+		int x = pbox[i].x1;
+		int y = pbox[i].y1;
+		int w = pbox[i].x2 - x;
+		int h = pbox[i].y2 - y;
+
+		DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n",
+			  pbox[i].x1, pbox[i].y1, pbox[i].x2,
+			  pbox[i].y2, flags);
+
+		if (flags & (R128_FRONT | R128_BACK)) {
+			BEGIN_RING(2);
+
+			OUT_RING(CCE_PACKET0(R128_DP_WRITE_MASK, 0));
+			OUT_RING(clear->color_mask);
+
+			ADVANCE_RING();
+		}
+
+		if (flags & R128_FRONT) {
+			BEGIN_RING(6);
+
+			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
+			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
+				 R128_GMC_BRUSH_SOLID_COLOR |
+				 (dev_priv->color_fmt << 8) |
+				 R128_GMC_SRC_DATATYPE_COLOR |
+				 R128_ROP3_P |
+				 R128_GMC_CLR_CMP_CNTL_DIS |
+				 R128_GMC_AUX_CLIP_DIS);
+
+			OUT_RING(dev_priv->front_pitch_offset_c);
+			OUT_RING(clear->clear_color);
+
+			OUT_RING((x << 16) | y);
+			OUT_RING((w << 16) | h);
+
+			ADVANCE_RING();
+		}
+
+		if (flags & R128_BACK) {
+			BEGIN_RING(6);
+
+			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
+			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
+				 R128_GMC_BRUSH_SOLID_COLOR |
+				 (dev_priv->color_fmt << 8) |
+				 R128_GMC_SRC_DATATYPE_COLOR |
+				 R128_ROP3_P |
+				 R128_GMC_CLR_CMP_CNTL_DIS |
+				 R128_GMC_AUX_CLIP_DIS);
+
+			OUT_RING(dev_priv->back_pitch_offset_c);
+			OUT_RING(clear->clear_color);
+
+			OUT_RING((x << 16) | y);
+			OUT_RING((w << 16) | h);
+
+			ADVANCE_RING();
+		}
+
+		if (flags & R128_DEPTH) {
+			BEGIN_RING(6);
+
+			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
+			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
+				 R128_GMC_BRUSH_SOLID_COLOR |
+				 (dev_priv->depth_fmt << 8) |
+				 R128_GMC_SRC_DATATYPE_COLOR |
+				 R128_ROP3_P |
+				 R128_GMC_CLR_CMP_CNTL_DIS |
+				 R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);
+
+			OUT_RING(dev_priv->depth_pitch_offset_c);
+			OUT_RING(clear->clear_depth);
+
+			OUT_RING((x << 16) | y);
+			OUT_RING((w << 16) | h);
+
+			ADVANCE_RING();
+		}
+	}
+}
+
+static void r128_cce_dispatch_swap(struct drm_device * dev)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	int nbox = sarea_priv->nbox;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
+	int i;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+#if R128_PERFORMANCE_BOXES
+	/* Do some trivial performance monitoring...
+	 */
+	r128_cce_performance_boxes(dev_priv);
+#endif
+
+	for (i = 0; i < nbox; i++) {
+		int x = pbox[i].x1;
+		int y = pbox[i].y1;
+		int w = pbox[i].x2 - x;
+		int h = pbox[i].y2 - y;
+
+		BEGIN_RING(7);
+
+		OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
+		OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
+			 R128_GMC_DST_PITCH_OFFSET_CNTL |
+			 R128_GMC_BRUSH_NONE |
+			 (dev_priv->color_fmt << 8) |
+			 R128_GMC_SRC_DATATYPE_COLOR |
+			 R128_ROP3_S |
+			 R128_DP_SRC_SOURCE_MEMORY |
+			 R128_GMC_CLR_CMP_CNTL_DIS |
+			 R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS);
+
+		/* Make this work even if front & back are flipped:
+		 */
+		if (dev_priv->current_page == 0) {
+			OUT_RING(dev_priv->back_pitch_offset_c);
+			OUT_RING(dev_priv->front_pitch_offset_c);
+		} else {
+			OUT_RING(dev_priv->front_pitch_offset_c);
+			OUT_RING(dev_priv->back_pitch_offset_c);
+		}
+
+		OUT_RING((x << 16) | y);
+		OUT_RING((x << 16) | y);
+		OUT_RING((w << 16) | h);
+
+		ADVANCE_RING();
+	}
+
+	/* Increment the frame counter.  The client-side 3D driver must
+	 * throttle the framerate by waiting for this value before
+	 * performing the swapbuffer ioctl.
+	 */
+	dev_priv->sarea_priv->last_frame++;
+
+	BEGIN_RING(2);
+
+	OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
+	OUT_RING(dev_priv->sarea_priv->last_frame);
+
+	ADVANCE_RING();
+}
+
+static void r128_cce_dispatch_flip(struct drm_device * dev)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+	DRM_DEBUG("page=%d pfCurrentPage=%d\n",
+		  dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage);
+
+#if R128_PERFORMANCE_BOXES
+	/* Do some trivial performance monitoring...
+	 */
+	r128_cce_performance_boxes(dev_priv);
+#endif
+
+	BEGIN_RING(4);
+
+	R128_WAIT_UNTIL_PAGE_FLIPPED();
+	OUT_RING(CCE_PACKET0(R128_CRTC_OFFSET, 0));
+
+	if (dev_priv->current_page == 0) {
+		OUT_RING(dev_priv->back_offset);
+	} else {
+		OUT_RING(dev_priv->front_offset);
+	}
+
+	ADVANCE_RING();
+
+	/* Increment the frame counter.  The client-side 3D driver must
+	 * throttle the framerate by waiting for this value before
+	 * performing the swapbuffer ioctl.
+	 */
+	dev_priv->sarea_priv->last_frame++;
+	dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page =
+	    1 - dev_priv->current_page;
+
+	BEGIN_RING(2);
+
+	OUT_RING(CCE_PACKET0(R128_LAST_FRAME_REG, 0));
+	OUT_RING(dev_priv->sarea_priv->last_frame);
+
+	ADVANCE_RING();
+}
+
+static void r128_cce_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	int format = sarea_priv->vc_format;
+	int offset = buf->bus_address;
+	int size = buf->used;
+	int prim = buf_priv->prim;
+	int i = 0;
+	RING_LOCALS;
+	DRM_DEBUG("buf=%d nbox=%d\n", buf->idx, sarea_priv->nbox);
+
+	if (0)
+		r128_print_dirty("dispatch_vertex", sarea_priv->dirty);
+
+	if (buf->used) {
+		buf_priv->dispatched = 1;
+
+		if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) {
+			r128_emit_state(dev_priv);
+		}
+
+		do {
+			/* Emit the next set of up to three cliprects */
+			if (i < sarea_priv->nbox) {
+				r128_emit_clip_rects(dev_priv,
+						     &sarea_priv->boxes[i],
+						     sarea_priv->nbox - i);
+			}
+
+			/* Emit the vertex buffer rendering commands */
+			BEGIN_RING(5);
+
+			OUT_RING(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM, 3));
+			OUT_RING(offset);
+			OUT_RING(size);
+			OUT_RING(format);
+			OUT_RING(prim | R128_CCE_VC_CNTL_PRIM_WALK_LIST |
+				 (size << R128_CCE_VC_CNTL_NUM_SHIFT));
+
+			ADVANCE_RING();
+
+			i += 3;
+		} while (i < sarea_priv->nbox);
+	}
+
+	if (buf_priv->discard) {
+		buf_priv->age = dev_priv->sarea_priv->last_dispatch;
+
+		/* Emit the vertex buffer age */
+		BEGIN_RING(2);
+
+		OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
+		OUT_RING(buf_priv->age);
+
+		ADVANCE_RING();
+
+		buf->pending = 1;
+		buf->used = 0;
+		/* FIXME: Check dispatched field */
+		buf_priv->dispatched = 0;
+	}
+
+	dev_priv->sarea_priv->last_dispatch++;
+
+	sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
+	sarea_priv->nbox = 0;
+}
+
+static void r128_cce_dispatch_indirect(struct drm_device * dev,
+				       struct drm_buf * buf, int start, int end)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
+	RING_LOCALS;
+	DRM_DEBUG("indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);
+
+	if (start != end) {
+		int offset = buf->bus_address + start;
+		int dwords = (end - start + 3) / sizeof(u32);
+
+		/* Indirect buffer data must be an even number of
+		 * dwords, so if we've been given an odd number we must
+		 * pad the data with a Type-2 CCE packet.
+		 */
+		if (dwords & 1) {
+			u32 *data = (u32 *)
+			    ((char *)dev->agp_buffer_map->handle
+			     + buf->offset + start);
+			data[dwords++] = cpu_to_le32(R128_CCE_PACKET2);
+		}
+
+		buf_priv->dispatched = 1;
+
+		/* Fire off the indirect buffer */
+		BEGIN_RING(3);
+
+		OUT_RING(CCE_PACKET0(R128_PM4_IW_INDOFF, 1));
+		OUT_RING(offset);
+		OUT_RING(dwords);
+
+		ADVANCE_RING();
+	}
+
+	if (buf_priv->discard) {
+		buf_priv->age = dev_priv->sarea_priv->last_dispatch;
+
+		/* Emit the indirect buffer age */
+		BEGIN_RING(2);
+
+		OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
+		OUT_RING(buf_priv->age);
+
+		ADVANCE_RING();
+
+		buf->pending = 1;
+		buf->used = 0;
+		/* FIXME: Check dispatched field */
+		buf_priv->dispatched = 0;
+	}
+
+	dev_priv->sarea_priv->last_dispatch++;
+}
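The Type-2 padding above keeps every indirect submission an even number of dwords. A sketch with assumed sizes:

	/* end - start == 26 bytes of client packets:
	 *
	 *	dwords = (26 + 3) / sizeof(u32) = 7	(odd)
	 *	data[7] = cpu_to_le32(R128_CCE_PACKET2);
	 *	dwords = 8				(even, as required)
	 */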
+
+static void r128_cce_dispatch_indices(struct drm_device * dev,
+				      struct drm_buf * buf,
+				      int start, int end, int count)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_buf_priv_t *buf_priv = buf->dev_private;
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	int format = sarea_priv->vc_format;
+	int offset = dev->agp_buffer_map->offset - dev_priv->cce_buffers_offset;
+	int prim = buf_priv->prim;
+	u32 *data;
+	int dwords;
+	int i = 0;
+	RING_LOCALS;
+	DRM_DEBUG("indices: s=%d e=%d c=%d\n", start, end, count);
+
+	if (0)
+		r128_print_dirty("dispatch_indices", sarea_priv->dirty);
+
+	if (start != end) {
+		buf_priv->dispatched = 1;
+
+		if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) {
+			r128_emit_state(dev_priv);
+		}
+
+		dwords = (end - start + 3) / sizeof(u32);
+
+		data = (u32 *) ((char *)dev->agp_buffer_map->handle
+				+ buf->offset + start);
+
+		data[0] = cpu_to_le32(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM,
+						  dwords - 2));
+
+		data[1] = cpu_to_le32(offset);
+		data[2] = cpu_to_le32(R128_MAX_VB_VERTS);
+		data[3] = cpu_to_le32(format);
+		data[4] = cpu_to_le32((prim | R128_CCE_VC_CNTL_PRIM_WALK_IND |
+				       (count << 16)));
+
+		if (count & 0x1) {
+#ifdef __LITTLE_ENDIAN
+			data[dwords - 1] &= 0x0000ffff;
+#else
+			data[dwords - 1] &= 0xffff0000;
+#endif
+		}
+
+		do {
+			/* Emit the next set of up to three cliprects */
+			if (i < sarea_priv->nbox) {
+				r128_emit_clip_rects(dev_priv,
+						     &sarea_priv->boxes[i],
+						     sarea_priv->nbox - i);
+			}
+
+			r128_cce_dispatch_indirect(dev, buf, start, end);
+
+			i += 3;
+		} while (i < sarea_priv->nbox);
+	}
+
+	if (buf_priv->discard) {
+		buf_priv->age = dev_priv->sarea_priv->last_dispatch;
+
+		/* Emit the vertex buffer age */
+		BEGIN_RING(2);
+
+		OUT_RING(CCE_PACKET0(R128_LAST_DISPATCH_REG, 0));
+		OUT_RING(buf_priv->age);
+
+		ADVANCE_RING();
+
+		buf->pending = 1;
+		/* FIXME: Check dispatched field */
+		buf_priv->dispatched = 0;
+	}
+
+	dev_priv->sarea_priv->last_dispatch++;
+
+	sarea_priv->dirty &= ~R128_UPLOAD_CLIPRECTS;
+	sarea_priv->nbox = 0;
+}
+
+static int r128_cce_dispatch_blit(struct drm_device * dev,
+				  struct drm_file *file_priv,
+				  drm_r128_blit_t * blit)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_r128_buf_priv_t *buf_priv;
+	u32 *data;
+	int dword_shift, dwords;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	/* The compiler won't optimize away a division by a variable,
+	 * even if the only legal values are powers of two.  Thus, we'll
+	 * use a shift instead.
+	 */
+	switch (blit->format) {
+	case R128_DATATYPE_ARGB8888:
+		dword_shift = 0;
+		break;
+	case R128_DATATYPE_ARGB1555:
+	case R128_DATATYPE_RGB565:
+	case R128_DATATYPE_ARGB4444:
+	case R128_DATATYPE_YVYU422:
+	case R128_DATATYPE_VYUY422:
+		dword_shift = 1;
+		break;
+	case R128_DATATYPE_CI8:
+	case R128_DATATYPE_RGB8:
+		dword_shift = 2;
+		break;
+	default:
+		DRM_ERROR("invalid blit format %d\n", blit->format);
+		return -EINVAL;
+	}
+
+	/* Flush the pixel cache, and mark the contents as Read Invalid.
+	 * This ensures no pixel data gets mixed up with the texture
+	 * data from the host data blit; otherwise part of the texture
+	 * image may be corrupted.
+	 */
+	BEGIN_RING(2);
+
+	OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
+	OUT_RING(R128_PC_RI_GUI | R128_PC_FLUSH_GUI);
+
+	ADVANCE_RING();
+
+	/* Dispatch the indirect buffer.
+	 */
+	buf = dma->buflist[blit->idx];
+	buf_priv = buf->dev_private;
+
+	if (buf->file_priv != file_priv) {
+		DRM_ERROR("process %d using buffer owned by %p\n",
+			  DRM_CURRENTPID, buf->file_priv);
+		return -EINVAL;
+	}
+	if (buf->pending) {
+		DRM_ERROR("sending pending buffer %d\n", blit->idx);
+		return -EINVAL;
+	}
+
+	buf_priv->discard = 1;
+
+	dwords = (blit->width * blit->height) >> dword_shift;
+
+	data = (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
+
+	data[0] = cpu_to_le32(CCE_PACKET3(R128_CNTL_HOSTDATA_BLT, dwords + 6));
+	data[1] = cpu_to_le32((R128_GMC_DST_PITCH_OFFSET_CNTL |
+			       R128_GMC_BRUSH_NONE |
+			       (blit->format << 8) |
+			       R128_GMC_SRC_DATATYPE_COLOR |
+			       R128_ROP3_S |
+			       R128_DP_SRC_SOURCE_HOST_DATA |
+			       R128_GMC_CLR_CMP_CNTL_DIS |
+			       R128_GMC_AUX_CLIP_DIS | R128_GMC_WR_MSK_DIS));
+
+	data[2] = cpu_to_le32((blit->pitch << 21) | (blit->offset >> 5));
+	data[3] = cpu_to_le32(0xffffffff);
+	data[4] = cpu_to_le32(0xffffffff);
+	data[5] = cpu_to_le32((blit->y << 16) | blit->x);
+	data[6] = cpu_to_le32((blit->height << 16) | blit->width);
+	data[7] = cpu_to_le32(dwords);
+
+	buf->used = (dwords + 8) * sizeof(u32);
+
+	r128_cce_dispatch_indirect(dev, buf, 0, buf->used);
+
+	/* Flush the pixel cache after the blit completes.  This ensures
+	 * the texture data is written out to memory before rendering
+	 * continues.
+	 */
+	BEGIN_RING(2);
+
+	OUT_RING(CCE_PACKET0(R128_PC_GUI_CTLSTAT, 0));
+	OUT_RING(R128_PC_FLUSH_GUI);
+
+	ADVANCE_RING();
+
+	return 0;
+}
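The dword_shift chosen at the top of this function converts the pixel count straight into dwords without a division. A size sketch with assumed dimensions:

	/* 64x64 blit in R128_DATATYPE_RGB565 (dword_shift == 1, i.e. two
	 * pixels per dword):
	 *
	 *	dwords    = (64 * 64) >> 1 = 2048
	 *	buf->used = (2048 + 8) * sizeof(u32) = 8224 bytes
	 *
	 * where the extra 8 dwords are the HOSTDATA_BLT header filled in
	 * as data[0..7] above.
	 */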
+
+/* ================================================================
+ * Tiled depth buffer management
+ *
+ * FIXME: These should all set the destination write mask for when we
+ * have hardware stencil support.
+ */
+
+static int r128_cce_dispatch_write_span(struct drm_device * dev,
+					drm_r128_depth_t * depth)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	int count, x, y;
+	u32 *buffer;
+	u8 *mask;
+	int i, buffer_size, mask_size;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	count = depth->n;
+	if (count > 4096 || count <= 0)
+		return -EMSGSIZE;
+
+	if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
+		return -EFAULT;
+	}
+	if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
+		return -EFAULT;
+	}
+
+	buffer_size = depth->n * sizeof(u32);
+	buffer = drm_alloc(buffer_size, DRM_MEM_BUFS);
+	if (buffer == NULL)
+		return -ENOMEM;
+	if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
+		drm_free(buffer, buffer_size, DRM_MEM_BUFS);
+		return -EFAULT;
+	}
+
+	mask_size = depth->n * sizeof(u8);
+	if (depth->mask) {
+		mask = drm_alloc(mask_size, DRM_MEM_BUFS);
+		if (mask == NULL) {
+			drm_free(buffer, buffer_size, DRM_MEM_BUFS);
+			return -ENOMEM;
+		}
+		if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
+			drm_free(buffer, buffer_size, DRM_MEM_BUFS);
+			drm_free(mask, mask_size, DRM_MEM_BUFS);
+			return -EFAULT;
+		}
+
+		for (i = 0; i < count; i++, x++) {
+			if (mask[i]) {
+				BEGIN_RING(6);
+
+				OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
+				OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
+					 R128_GMC_BRUSH_SOLID_COLOR |
+					 (dev_priv->depth_fmt << 8) |
+					 R128_GMC_SRC_DATATYPE_COLOR |
+					 R128_ROP3_P |
+					 R128_GMC_CLR_CMP_CNTL_DIS |
+					 R128_GMC_WR_MSK_DIS);
+
+				OUT_RING(dev_priv->depth_pitch_offset_c);
+				OUT_RING(buffer[i]);
+
+				OUT_RING((x << 16) | y);
+				OUT_RING((1 << 16) | 1);
+
+				ADVANCE_RING();
+			}
+		}
+
+		drm_free(mask, mask_size, DRM_MEM_BUFS);
+	} else {
+		for (i = 0; i < count; i++, x++) {
+			BEGIN_RING(6);
+
+			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
+			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
+				 R128_GMC_BRUSH_SOLID_COLOR |
+				 (dev_priv->depth_fmt << 8) |
+				 R128_GMC_SRC_DATATYPE_COLOR |
+				 R128_ROP3_P |
+				 R128_GMC_CLR_CMP_CNTL_DIS |
+				 R128_GMC_WR_MSK_DIS);
+
+			OUT_RING(dev_priv->depth_pitch_offset_c);
+			OUT_RING(buffer[i]);
+
+			OUT_RING((x << 16) | y);
+			OUT_RING((1 << 16) | 1);
+
+			ADVANCE_RING();
+		}
+	}
+
+	drm_free(buffer, buffer_size, DRM_MEM_BUFS);
+
+	return 0;
+}
+
+static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
+					  drm_r128_depth_t * depth)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	int count, *x, *y;
+	u32 *buffer;
+	u8 *mask;
+	int i, xbuf_size, ybuf_size, buffer_size, mask_size;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	count = depth->n;
+	if (count > 4096 || count <= 0)
+		return -EMSGSIZE;
+
+	xbuf_size = count * sizeof(*x);
+	ybuf_size = count * sizeof(*y);
+	x = drm_alloc(xbuf_size, DRM_MEM_BUFS);
+	if (x == NULL) {
+		return -ENOMEM;
+	}
+	y = drm_alloc(ybuf_size, DRM_MEM_BUFS);
+	if (y == NULL) {
+		drm_free(x, xbuf_size, DRM_MEM_BUFS);
+		return -ENOMEM;
+	}
+	if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
+		drm_free(x, xbuf_size, DRM_MEM_BUFS);
+		drm_free(y, ybuf_size, DRM_MEM_BUFS);
+		return -EFAULT;
+	}
+	if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) {
+		drm_free(x, xbuf_size, DRM_MEM_BUFS);
+		drm_free(y, ybuf_size, DRM_MEM_BUFS);
+		return -EFAULT;
+	}
+
+	buffer_size = depth->n * sizeof(u32);
+	buffer = drm_alloc(buffer_size, DRM_MEM_BUFS);
+	if (buffer == NULL) {
+		drm_free(x, xbuf_size, DRM_MEM_BUFS);
+		drm_free(y, ybuf_size, DRM_MEM_BUFS);
+		return -ENOMEM;
+	}
+	if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
+		drm_free(x, xbuf_size, DRM_MEM_BUFS);
+		drm_free(y, ybuf_size, DRM_MEM_BUFS);
+		drm_free(buffer, buffer_size, DRM_MEM_BUFS);
+		return -EFAULT;
+	}
+
+	if (depth->mask) {
+		mask_size = depth->n * sizeof(u8);
+		mask = drm_alloc(mask_size, DRM_MEM_BUFS);
+		if (mask == NULL) {
+			drm_free(x, xbuf_size, DRM_MEM_BUFS);
+			drm_free(y, ybuf_size, DRM_MEM_BUFS);
+			drm_free(buffer, buffer_size, DRM_MEM_BUFS);
+			return -ENOMEM;
+		}
+		if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
+			drm_free(x, xbuf_size, DRM_MEM_BUFS);
+			drm_free(y, ybuf_size, DRM_MEM_BUFS);
+			drm_free(buffer, buffer_size, DRM_MEM_BUFS);
+			drm_free(mask, mask_size, DRM_MEM_BUFS);
+			return -EFAULT;
+		}
+
+		for (i = 0; i < count; i++) {
+			if (mask[i]) {
+				BEGIN_RING(6);
+
+				OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
+				OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
+					 R128_GMC_BRUSH_SOLID_COLOR |
+					 (dev_priv->depth_fmt << 8) |
+					 R128_GMC_SRC_DATATYPE_COLOR |
+					 R128_ROP3_P |
+					 R128_GMC_CLR_CMP_CNTL_DIS |
+					 R128_GMC_WR_MSK_DIS);
+
+				OUT_RING(dev_priv->depth_pitch_offset_c);
+				OUT_RING(buffer[i]);
+
+				OUT_RING((x[i] << 16) | y[i]);
+				OUT_RING((1 << 16) | 1);
+
+				ADVANCE_RING();
+			}
+		}
+
+		drm_free(mask, mask_size, DRM_MEM_BUFS);
+	} else {
+		for (i = 0; i < count; i++) {
+			BEGIN_RING(6);
+
+			OUT_RING(CCE_PACKET3(R128_CNTL_PAINT_MULTI, 4));
+			OUT_RING(R128_GMC_DST_PITCH_OFFSET_CNTL |
+				 R128_GMC_BRUSH_SOLID_COLOR |
+				 (dev_priv->depth_fmt << 8) |
+				 R128_GMC_SRC_DATATYPE_COLOR |
+				 R128_ROP3_P |
+				 R128_GMC_CLR_CMP_CNTL_DIS |
+				 R128_GMC_WR_MSK_DIS);
+
+			OUT_RING(dev_priv->depth_pitch_offset_c);
+			OUT_RING(buffer[i]);
+
+			OUT_RING((x[i] << 16) | y[i]);
+			OUT_RING((1 << 16) | 1);
+
+			ADVANCE_RING();
+		}
+	}
+
+	drm_free(x, xbuf_size, DRM_MEM_BUFS);
+	drm_free(y, ybuf_size, DRM_MEM_BUFS);
+	drm_free(buffer, buffer_size, DRM_MEM_BUFS);
+
+	return 0;
+}
+
+static int r128_cce_dispatch_read_span(struct drm_device * dev,
+				       drm_r128_depth_t * depth)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	int count, x, y;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	count = depth->n;
+	if (count > 4096 || count <= 0)
+		return -EMSGSIZE;
+
+	if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
+		return -EFAULT;
+	}
+	if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
+		return -EFAULT;
+	}
+
+	BEGIN_RING(7);
+
+	OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
+	OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
+		 R128_GMC_DST_PITCH_OFFSET_CNTL |
+		 R128_GMC_BRUSH_NONE |
+		 (dev_priv->depth_fmt << 8) |
+		 R128_GMC_SRC_DATATYPE_COLOR |
+		 R128_ROP3_S |
+		 R128_DP_SRC_SOURCE_MEMORY |
+		 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);
+
+	OUT_RING(dev_priv->depth_pitch_offset_c);
+	OUT_RING(dev_priv->span_pitch_offset_c);
+
+	OUT_RING((x << 16) | y);
+	OUT_RING((0 << 16) | 0);
+	OUT_RING((count << 16) | 1);
+
+	ADVANCE_RING();
+
+	return 0;
+}
+
+static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
+					 drm_r128_depth_t * depth)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	int count, *x, *y;
+	int i, xbuf_size, ybuf_size;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	count = depth->n;
+	if (count > 4096 || count <= 0)
+		return -EMSGSIZE;
+
+	if (count > dev_priv->depth_pitch) {
+		count = dev_priv->depth_pitch;
+	}
+
+	xbuf_size = count * sizeof(*x);
+	ybuf_size = count * sizeof(*y);
+	x = drm_alloc(xbuf_size, DRM_MEM_BUFS);
+	if (x == NULL) {
+		return -ENOMEM;
+	}
+	y = drm_alloc(ybuf_size, DRM_MEM_BUFS);
+	if (y == NULL) {
+		drm_free(x, xbuf_size, DRM_MEM_BUFS);
+		return -ENOMEM;
+	}
+	if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
+		drm_free(x, xbuf_size, DRM_MEM_BUFS);
+		drm_free(y, ybuf_size, DRM_MEM_BUFS);
+		return -EFAULT;
+	}
+	if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) {
+		drm_free(x, xbuf_size, DRM_MEM_BUFS);
+		drm_free(y, ybuf_size, DRM_MEM_BUFS);
+		return -EFAULT;
+	}
+
+	for (i = 0; i < count; i++) {
+		BEGIN_RING(7);
+
+		OUT_RING(CCE_PACKET3(R128_CNTL_BITBLT_MULTI, 5));
+		OUT_RING(R128_GMC_SRC_PITCH_OFFSET_CNTL |
+			 R128_GMC_DST_PITCH_OFFSET_CNTL |
+			 R128_GMC_BRUSH_NONE |
+			 (dev_priv->depth_fmt << 8) |
+			 R128_GMC_SRC_DATATYPE_COLOR |
+			 R128_ROP3_S |
+			 R128_DP_SRC_SOURCE_MEMORY |
+			 R128_GMC_CLR_CMP_CNTL_DIS | R128_GMC_WR_MSK_DIS);
+
+		OUT_RING(dev_priv->depth_pitch_offset_c);
+		OUT_RING(dev_priv->span_pitch_offset_c);
+
+		OUT_RING((x[i] << 16) | y[i]);
+		OUT_RING((i << 16) | 0);
+		OUT_RING((1 << 16) | 1);
+
+		ADVANCE_RING();
+	}
+
+	drm_free(x, xbuf_size, DRM_MEM_BUFS);
+	drm_free(y, ybuf_size, DRM_MEM_BUFS);
+
+	return 0;
+}
+
+/* ================================================================
+ * Polygon stipple
+ */
+
+static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	int i;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(33);
+
+	OUT_RING(CCE_PACKET0(R128_BRUSH_DATA0, 31));
+	for (i = 0; i < 32; i++) {
+		OUT_RING(stipple[i]);
+	}
+
+	ADVANCE_RING();
+}
+
+/* ================================================================
+ * IOCTL functions
+ */
+
+static int r128_cce_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_r128_clear_t *clear = data;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+	if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
+		sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;
+
+	r128_cce_dispatch_clear(dev, clear);
+	COMMIT_RING();
+
+	/* Make sure we restore the 3D state next time.
+	 */
+	dev_priv->sarea_priv->dirty |= R128_UPLOAD_CONTEXT | R128_UPLOAD_MASKS;
+
+	return 0;
+}
+
+static int r128_do_init_pageflip(struct drm_device * dev)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	dev_priv->crtc_offset = R128_READ(R128_CRTC_OFFSET);
+	dev_priv->crtc_offset_cntl = R128_READ(R128_CRTC_OFFSET_CNTL);
+
+	R128_WRITE(R128_CRTC_OFFSET, dev_priv->front_offset);
+	R128_WRITE(R128_CRTC_OFFSET_CNTL,
+		   dev_priv->crtc_offset_cntl | R128_CRTC_OFFSET_FLIP_CNTL);
+
+	dev_priv->page_flipping = 1;
+	dev_priv->current_page = 0;
+	dev_priv->sarea_priv->pfCurrentPage = dev_priv->current_page;
+
+	return 0;
+}
+
+static int r128_do_cleanup_pageflip(struct drm_device * dev)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	R128_WRITE(R128_CRTC_OFFSET, dev_priv->crtc_offset);
+	R128_WRITE(R128_CRTC_OFFSET_CNTL, dev_priv->crtc_offset_cntl);
+
+	if (dev_priv->current_page != 0) {
+		r128_cce_dispatch_flip(dev);
+		COMMIT_RING();
+	}
+
+	dev_priv->page_flipping = 0;
+	return 0;
+}
+
+/* Swapping and flipping are different operations, so they need different
+ * ioctls.  They can and should be intermixed to support multiple 3D
+ * windows.
+ */
+
+static int r128_cce_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+	if (!dev_priv->page_flipping)
+		r128_do_init_pageflip(dev);
+
+	r128_cce_dispatch_flip(dev);
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int r128_cce_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+	if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
+		sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;
+
+	r128_cce_dispatch_swap(dev);
+	dev_priv->sarea_priv->dirty |= (R128_UPLOAD_CONTEXT |
+					R128_UPLOAD_MASKS);
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_r128_buf_priv_t *buf_priv;
+	drm_r128_vertex_t *vertex = data;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
+		  DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);
+
+	if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
+		DRM_ERROR("buffer index %d (of %d max)\n",
+			  vertex->idx, dma->buf_count - 1);
+		return -EINVAL;
+	}
+	if (vertex->prim < 0 ||
+	    vertex->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
+		DRM_ERROR("buffer prim %d\n", vertex->prim);
+		return -EINVAL;
+	}
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+	VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+	buf = dma->buflist[vertex->idx];
+	buf_priv = buf->dev_private;
+
+	if (buf->file_priv != file_priv) {
+		DRM_ERROR("process %d using buffer owned by %p\n",
+			  DRM_CURRENTPID, buf->file_priv);
+		return -EINVAL;
+	}
+	if (buf->pending) {
+		DRM_ERROR("sending pending buffer %d\n", vertex->idx);
+		return -EINVAL;
+	}
+
+	buf->used = vertex->count;
+	buf_priv->prim = vertex->prim;
+	buf_priv->discard = vertex->discard;
+
+	r128_cce_dispatch_vertex(dev, buf);
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_r128_buf_priv_t *buf_priv;
+	drm_r128_indices_t *elts = data;
+	int count;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID,
+		  elts->idx, elts->start, elts->end, elts->discard);
+
+	if (elts->idx < 0 || elts->idx >= dma->buf_count) {
+		DRM_ERROR("buffer index %d (of %d max)\n",
+			  elts->idx, dma->buf_count - 1);
+		return -EINVAL;
+	}
+	if (elts->prim < 0 ||
+	    elts->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
+		DRM_ERROR("buffer prim %d\n", elts->prim);
+		return -EINVAL;
+	}
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+	VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+	buf = dma->buflist[elts->idx];
+	buf_priv = buf->dev_private;
+
+	if (buf->file_priv != file_priv) {
+		DRM_ERROR("process %d using buffer owned by %p\n",
+			  DRM_CURRENTPID, buf->file_priv);
+		return -EINVAL;
+	}
+	if (buf->pending) {
+		DRM_ERROR("sending pending buffer %d\n", elts->idx);
+		return -EINVAL;
+	}
+
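+	/* Illustrative note (not from the original source): start and end are
+	 * byte offsets into the DMA buffer and the indices are 16-bit values,
+	 * hence the count computation below.  The start offset is pulled back
+	 * by R128_INDEX_PRIM_OFFSET, presumably to cover the primitive header
+	 * that precedes the indices; the alignment and "no header" checks
+	 * below guard that assumption.
+	 */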
+	count = (elts->end - elts->start) / sizeof(u16);
+	elts->start -= R128_INDEX_PRIM_OFFSET;
+
+	if (elts->start & 0x7) {
+		DRM_ERROR("misaligned buffer 0x%x\n", elts->start);
+		return -EINVAL;
+	}
+	if (elts->start < buf->used) {
+		DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);
+		return -EINVAL;
+	}
+
+	buf->used = elts->end;
+	buf_priv->prim = elts->prim;
+	buf_priv->discard = elts->discard;
+
+	r128_cce_dispatch_indices(dev, buf, elts->start, elts->end, count);
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int r128_cce_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_blit_t *blit = data;
+	int ret;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, blit->idx);
+
+	if (blit->idx < 0 || blit->idx >= dma->buf_count) {
+		DRM_ERROR("buffer index %d (of %d max)\n",
+			  blit->idx, dma->buf_count - 1);
+		return -EINVAL;
+	}
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+	VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+	ret = r128_cce_dispatch_blit(dev, file_priv, blit);
+
+	COMMIT_RING();
+	return ret;
+}
+
+static int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_depth_t *depth = data;
+	int ret;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+	ret = -EINVAL;
+	switch (depth->func) {
+	case R128_WRITE_SPAN:
+		ret = r128_cce_dispatch_write_span(dev, depth);
+		break;
+	case R128_WRITE_PIXELS:
+		ret = r128_cce_dispatch_write_pixels(dev, depth);
+		break;
+	case R128_READ_SPAN:
+		ret = r128_cce_dispatch_read_span(dev, depth);
+		break;
+	case R128_READ_PIXELS:
+		ret = r128_cce_dispatch_read_pixels(dev, depth);
+		break;
+	}
+
+	COMMIT_RING();
+	return ret;
+}
+
+static int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_stipple_t *stipple = data;
+	u32 mask[32];
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
+		return -EFAULT;
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+	r128_cce_dispatch_stipple(dev, mask);
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_r128_buf_priv_t *buf_priv;
+	drm_r128_indirect_t *indirect = data;
+#if 0
+	RING_LOCALS;
+#endif
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
+		  indirect->idx, indirect->start, indirect->end,
+		  indirect->discard);
+
+	if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
+		DRM_ERROR("buffer index %d (of %d max)\n",
+			  indirect->idx, dma->buf_count - 1);
+		return -EINVAL;
+	}
+
+	buf = dma->buflist[indirect->idx];
+	buf_priv = buf->dev_private;
+
+	if (buf->file_priv != file_priv) {
+		DRM_ERROR("process %d using buffer owned by %p\n",
+			  DRM_CURRENTPID, buf->file_priv);
+		return -EINVAL;
+	}
+	if (buf->pending) {
+		DRM_ERROR("sending pending buffer %d\n", indirect->idx);
+		return -EINVAL;
+	}
+
+	if (indirect->start < buf->used) {
+		DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
+			  indirect->start, buf->used);
+		return -EINVAL;
+	}
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+	VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+	buf->used = indirect->end;
+	buf_priv->discard = indirect->discard;
+
+#if 0
+	/* Wait for the 3D stream to idle before the indirect buffer
+	 * containing 2D acceleration commands is processed.
+	 */
+	BEGIN_RING(2);
+	RADEON_WAIT_UNTIL_3D_IDLE();
+	ADVANCE_RING();
+#endif
+
+	/* Dispatch the indirect buffer full of commands from the
+	 * X server.  This is insecure and is thus only available to
+	 * privileged clients.
+	 */
+	r128_cce_dispatch_indirect(dev, buf, indirect->start, indirect->end);
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_r128_private_t *dev_priv = dev->dev_private;
+	drm_r128_getparam_t *param = data;
+	int value;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
+
+	switch (param->param) {
+	case R128_PARAM_IRQ_NR:
+		value = dev->irq;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
+		DRM_ERROR("copy_to_user\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+void r128_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+{
+	if (dev->dev_private) {
+		drm_r128_private_t *dev_priv = dev->dev_private;
+		if (dev_priv->page_flipping) {
+			r128_do_cleanup_pageflip(dev);
+		}
+	}
+}
+
+void r128_driver_lastclose(struct drm_device * dev)
+{
+	r128_do_cleanup_cce(dev);
+}
+
+struct drm_ioctl_desc r128_ioctls[] = {
+	DRM_IOCTL_DEF(DRM_R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_R128_RESET, r128_engine_reset, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_R128_SWAP, r128_cce_swap, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_R128_FLIP, r128_cce_flip, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_R128_CLEAR, r128_cce_clear, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_R128_VERTEX, r128_cce_vertex, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_R128_INDICES, r128_cce_indices, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_R128_BLIT, r128_cce_blit, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_R128_DEPTH, r128_cce_depth, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_R128_GETPARAM, r128_getparam, DRM_AUTH),
+};
+
+int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
new file mode 100644
index 000000000000..feb521ebc393
--- /dev/null
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
+
+radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
+
+obj-$(CONFIG_DRM_RADEON) += radeon.o
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
new file mode 100644
index 000000000000..702df45320f7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -0,0 +1,1071 @@
+/* r300_cmdbuf.c -- Command buffer emission for R300 -*- linux-c -*-
+ *
+ * Copyright (C) The Weather Channel, Inc.  2002.
+ * Copyright (C) 2004 Nicolai Haehnle.
+ * All Rights Reserved.
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Nicolai Haehnle <prefect_@gmx.net>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "radeon_drm.h"
+#include "radeon_drv.h"
+#include "r300_reg.h"
+
+#define R300_SIMULTANEOUS_CLIPRECTS		4
+
+/* Values for R300_RE_CLIPRECT_CNTL depending on the number of cliprects
+ */
+static const int r300_cliprect_cntl[4] = {
+	0xAAAA,
+	0xEEEE,
+	0xFEFE,
+	0xFFFE
+};
+
+/**
+ * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
+ * buffer, starting with index n.
+ */
+static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
+			       drm_radeon_kcmd_buffer_t *cmdbuf, int n)
+{
+	struct drm_clip_rect box;
+	int nr;
+	int i;
+	RING_LOCALS;
+
+	nr = cmdbuf->nbox - n;
+	if (nr > R300_SIMULTANEOUS_CLIPRECTS)
+		nr = R300_SIMULTANEOUS_CLIPRECTS;
+
+	DRM_DEBUG("%i cliprects\n", nr);
+
+	if (nr) {
+		BEGIN_RING(6 + nr * 2);
+		OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
+
+		for (i = 0; i < nr; ++i) {
+			if (DRM_COPY_FROM_USER_UNCHECKED
+			    (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
+				DRM_ERROR("copy cliprect faulted\n");
+				return -EFAULT;
+			}
+
+			if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
+				box.x1 &= R300_CLIPRECT_MASK;
+				box.y1 &= R300_CLIPRECT_MASK;
+				box.x2 &= R300_CLIPRECT_MASK;
+				box.y2 &= R300_CLIPRECT_MASK;
+			} else {
+				box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) &
+					R300_CLIPRECT_MASK;
+				box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) &
+					R300_CLIPRECT_MASK;
+				box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) &
+					R300_CLIPRECT_MASK;
+				box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) &
+					R300_CLIPRECT_MASK;
+			}
+			OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
+				 (box.y1 << R300_CLIPRECT_Y_SHIFT));
+			OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
+				 (box.y2 << R300_CLIPRECT_Y_SHIFT));
+
+		}
+
+		OUT_RING_REG(R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr - 1]);
+
+		/* TODO/SECURITY: Force scissors to a safe value, otherwise the
+		 * client might be able to trample over memory.
+		 * The impact should be very limited, but I'd rather be safe than
+		 * sorry.
+		 */
+		OUT_RING(CP_PACKET0(R300_RE_SCISSORS_TL, 1));
+		OUT_RING(0);
+		OUT_RING(R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK);
+		ADVANCE_RING();
+	} else {
+		/* Why we allow zero cliprect rendering:
+		 * There are some commands in a command buffer that must be submitted
+		 * even when there are no cliprects, e.g. DMA buffer discard
+		 * or state setting (though state setting could be avoided by
+		 * simulating a loss of context).
+		 *
+		 * Now since the cmdbuf interface is so chaotic right now (and is
+		 * bound to remain that way for a bit until things settle down),
+		 * it is basically impossible to filter out the commands that are
+		 * necessary and those that aren't.
+		 *
+		 * So I choose the safe way and don't do any filtering at all;
+		 * instead, I simply set up the engine so that all rendering
+		 * can't produce any fragments.
+		 */
+		BEGIN_RING(2);
+		OUT_RING_REG(R300_RE_CLIPRECT_CNTL, 0);
+		ADVANCE_RING();
+	}
+
+	return 0;
+}
+
+static u8 r300_reg_flags[0x10000 >> 2];
+
+void r300_init_reg_flags(struct drm_device *dev)
+{
+	int i;
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	memset(r300_reg_flags, 0, 0x10000 >> 2);
+#define ADD_RANGE_MARK(reg, count, mark) \
+		for (i = ((reg) >> 2); i < ((reg) >> 2) + (count); i++) \
+			r300_reg_flags[i] |= (mark);
+
+#define MARK_SAFE		1
+#define MARK_CHECK_OFFSET	2
+
+#define ADD_RANGE(reg, count)	ADD_RANGE_MARK(reg, count, MARK_SAFE)
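+/* Worked example (illustrative only): ADD_RANGE(R300_VAP_CNTL, 1) marks the
+ * single DWORD register at 0x2080 as MARK_SAFE, i.e. it does
+ * r300_reg_flags[R300_VAP_CNTL >> 2] |= MARK_SAFE, so r300_check_range()
+ * will later let unprivileged packet0 writes to it through without an
+ * offset check.
+ */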
+
+	/* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
+	ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
+	ADD_RANGE(R300_VAP_CNTL, 1);
+	ADD_RANGE(R300_SE_VTE_CNTL, 2);
+	ADD_RANGE(0x2134, 2);
+	ADD_RANGE(R300_VAP_CNTL_STATUS, 1);
+	ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2);
+	ADD_RANGE(0x21DC, 1);
+	ADD_RANGE(R300_VAP_UNKNOWN_221C, 1);
+	ADD_RANGE(R300_VAP_CLIP_X_0, 4);
+	ADD_RANGE(R300_VAP_PVS_WAITIDLE, 1);
+	ADD_RANGE(R300_VAP_UNKNOWN_2288, 1);
+	ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
+	ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
+	ADD_RANGE(R300_GB_ENABLE, 1);
+	ADD_RANGE(R300_GB_MSPOS0, 5);
+	ADD_RANGE(R300_TX_CNTL, 1);
+	ADD_RANGE(R300_TX_ENABLE, 1);
+	ADD_RANGE(0x4200, 4);
+	ADD_RANGE(0x4214, 1);
+	ADD_RANGE(R300_RE_POINTSIZE, 1);
+	ADD_RANGE(0x4230, 3);
+	ADD_RANGE(R300_RE_LINE_CNT, 1);
+	ADD_RANGE(R300_RE_UNK4238, 1);
+	ADD_RANGE(0x4260, 3);
+	ADD_RANGE(R300_RE_SHADE, 4);
+	ADD_RANGE(R300_RE_POLYGON_MODE, 5);
+	ADD_RANGE(R300_RE_ZBIAS_CNTL, 1);
+	ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4);
+	ADD_RANGE(R300_RE_OCCLUSION_CNTL, 1);
+	ADD_RANGE(R300_RE_CULL_CNTL, 1);
+	ADD_RANGE(0x42C0, 2);
+	ADD_RANGE(R300_RS_CNTL_0, 2);
+
+	ADD_RANGE(R300_SC_HYPERZ, 2);
+	ADD_RANGE(0x43E8, 1);
+
+	ADD_RANGE(0x46A4, 5);
+
+	ADD_RANGE(R300_RE_FOG_STATE, 1);
+	ADD_RANGE(R300_FOG_COLOR_R, 3);
+	ADD_RANGE(R300_PP_ALPHA_TEST, 2);
+	ADD_RANGE(0x4BD8, 1);
+	ADD_RANGE(R300_PFS_PARAM_0_X, 64);
+	ADD_RANGE(0x4E00, 1);
+	ADD_RANGE(R300_RB3D_CBLEND, 2);
+	ADD_RANGE(R300_RB3D_COLORMASK, 1);
+	ADD_RANGE(R300_RB3D_BLEND_COLOR, 3);
+	ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET);	/* check offset */
+	ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
+	ADD_RANGE(0x4E50, 9);
+	ADD_RANGE(0x4E88, 1);
+	ADD_RANGE(0x4EA0, 2);
+	ADD_RANGE(R300_ZB_CNTL, 3);
+	ADD_RANGE(R300_ZB_FORMAT, 4);
+	ADD_RANGE_MARK(R300_ZB_DEPTHOFFSET, 1, MARK_CHECK_OFFSET);	/* check offset */
+	ADD_RANGE(R300_ZB_DEPTHPITCH, 1);
+	ADD_RANGE(R300_ZB_DEPTHCLEARVALUE, 1);
+	ADD_RANGE(R300_ZB_ZMASK_OFFSET, 13);
+
+	ADD_RANGE(R300_TX_FILTER_0, 16);
+	ADD_RANGE(R300_TX_FILTER1_0, 16);
+	ADD_RANGE(R300_TX_SIZE_0, 16);
+	ADD_RANGE(R300_TX_FORMAT_0, 16);
+	ADD_RANGE(R300_TX_PITCH_0, 16);
+	/* Texture offset is dangerous and needs more checking */
+	ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
+	ADD_RANGE(R300_TX_CHROMA_KEY_0, 16);
+	ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);
+
+	/* Registers written sporadically as primitives are emitted */
+	ADD_RANGE(R300_ZB_ZCACHE_CTLSTAT, 1);
+	ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
+	ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
+	ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
+		ADD_RANGE(R500_VAP_INDEX_OFFSET, 1);
+		ADD_RANGE(R500_US_CONFIG, 2);
+		ADD_RANGE(R500_US_CODE_ADDR, 3);
+		ADD_RANGE(R500_US_FC_CTRL, 1);
+		ADD_RANGE(R500_RS_IP_0, 16);
+		ADD_RANGE(R500_RS_INST_0, 16);
+		ADD_RANGE(R500_RB3D_COLOR_CLEAR_VALUE_AR, 2);
+		ADD_RANGE(R500_RB3D_CONSTANT_COLOR_AR, 2);
+		ADD_RANGE(R500_ZB_FIFO_SIZE, 2);
+	} else {
+		ADD_RANGE(R300_PFS_CNTL_0, 3);
+		ADD_RANGE(R300_PFS_NODE_0, 4);
+		ADD_RANGE(R300_PFS_TEXI_0, 64);
+		ADD_RANGE(R300_PFS_INSTR0_0, 64);
+		ADD_RANGE(R300_PFS_INSTR1_0, 64);
+		ADD_RANGE(R300_PFS_INSTR2_0, 64);
+		ADD_RANGE(R300_PFS_INSTR3_0, 64);
+		ADD_RANGE(R300_RS_INTERP_0, 8);
+		ADD_RANGE(R300_RS_ROUTE_0, 8);
+
+	}
+}
+
+static __inline__ int r300_check_range(unsigned reg, int count)
+{
+	int i;
+	if (reg & ~0xffff)
+		return -1;
+	for (i = (reg >> 2); i < (reg >> 2) + count; i++)
+		if (r300_reg_flags[i] != MARK_SAFE)
+			return 1;
+	return 0;
+}
+
+static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
+							  dev_priv,
+							  drm_radeon_kcmd_buffer_t
+							  * cmdbuf,
+							  drm_r300_cmd_header_t
+							  header)
+{
+	int reg;
+	int sz;
+	int i;
+	int values[64];
+	RING_LOCALS;
+
+	sz = header.packet0.count;
+	reg = (header.packet0.reghi << 8) | header.packet0.reglo;
+
+	if ((sz > 64) || (sz < 0)) {
+		DRM_ERROR
+		    ("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
+		     reg, sz);
+		return -EINVAL;
+	}
+	for (i = 0; i < sz; i++) {
+		values[i] = ((int *)cmdbuf->buf)[i];
+		switch (r300_reg_flags[(reg >> 2) + i]) {
+		case MARK_SAFE:
+			break;
+		case MARK_CHECK_OFFSET:
+			if (!radeon_check_offset(dev_priv, (u32) values[i])) {
+				DRM_ERROR
+				    ("Offset failed range check (reg=%04x sz=%d)\n",
+				     reg, sz);
+				return -EINVAL;
+			}
+			break;
+		default:
+			DRM_ERROR("Register %04x failed check as flag=%02x\n",
+				  reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
+			return -EINVAL;
+		}
+	}
+
+	BEGIN_RING(1 + sz);
+	OUT_RING(CP_PACKET0(reg, sz - 1));
+	OUT_RING_TABLE(values, sz);
+	ADVANCE_RING();
+
+	cmdbuf->buf += sz * 4;
+	cmdbuf->bufsz -= sz * 4;
+
+	return 0;
+}
+
+/**
+ * Emits a packet0 setting arbitrary registers.
+ * Called by r300_do_cp_cmdbuf.
+ *
+ * Note that checks are performed on contents and addresses of the registers
+ */
+static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
+					drm_radeon_kcmd_buffer_t *cmdbuf,
+					drm_r300_cmd_header_t header)
+{
+	int reg;
+	int sz;
+	RING_LOCALS;
+
+	sz = header.packet0.count;
+	reg = (header.packet0.reghi << 8) | header.packet0.reglo;
+
+	if (!sz)
+		return 0;
+
+	if (sz * 4 > cmdbuf->bufsz)
+		return -EINVAL;
+
+	if (reg + sz * 4 >= 0x10000) {
+		DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg,
+			  sz);
+		return -EINVAL;
+	}
+
+	if (r300_check_range(reg, sz)) {
+		/* go and check everything */
+		return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf,
+							   header);
+	}
+	/* the rest of the data is safe to emit, whatever the values the user passed */
+
+	BEGIN_RING(1 + sz);
+	OUT_RING(CP_PACKET0(reg, sz - 1));
+	OUT_RING_TABLE((int *)cmdbuf->buf, sz);
+	ADVANCE_RING();
+
+	cmdbuf->buf += sz * 4;
+	cmdbuf->bufsz -= sz * 4;
+
+	return 0;
+}
+
+/**
+ * Uploads user-supplied vertex program instructions or parameters onto
+ * the graphics card.
+ * Called by r300_do_cp_cmdbuf.
+ */
+static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
+				    drm_radeon_kcmd_buffer_t *cmdbuf,
+				    drm_r300_cmd_header_t header)
+{
+	int sz;
+	int addr;
+	RING_LOCALS;
+
+	sz = header.vpu.count;
+	addr = (header.vpu.adrhi << 8) | header.vpu.adrlo;
+
+	if (!sz)
+		return 0;
+	if (sz * 16 > cmdbuf->bufsz)
+		return -EINVAL;
+
+	BEGIN_RING(5 + sz * 4);
+	/* Wait for VAP to come to senses.. */
+	/* There is no need to emit this more than once (only before the VAP is
+	 * programmed), but that optimization is left for later.
+	 */
+	OUT_RING_REG(R300_VAP_PVS_WAITIDLE, 0);
+	OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
+	OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
+	OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4);
+
+	ADVANCE_RING();
+
+	cmdbuf->buf += sz * 16;
+	cmdbuf->bufsz -= sz * 16;
+
+	return 0;
+}
+
+/**
+ * Emit a clear packet from userspace.
+ * Called by r300_emit_packet3.
+ */
+static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
+				      drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	RING_LOCALS;
+
+	if (8 * 4 > cmdbuf->bufsz)
+		return -EINVAL;
+
+	BEGIN_RING(10);
+	OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
+	OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING |
+		 (1 << R300_PRIM_NUM_VERTICES_SHIFT));
+	OUT_RING_TABLE((int *)cmdbuf->buf, 8);
+	ADVANCE_RING();
+
+	cmdbuf->buf += 8 * 4;
+	cmdbuf->bufsz -= 8 * 4;
+
+	return 0;
+}
+
+static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
+					       drm_radeon_kcmd_buffer_t *cmdbuf,
+					       u32 header)
+{
+	int count, i, k;
+#define MAX_ARRAY_PACKET  64
+	u32 payload[MAX_ARRAY_PACKET];
+	u32 narrays;
+	RING_LOCALS;
+
+	count = (header >> 16) & 0x3fff;
+
+	if ((count + 1) > MAX_ARRAY_PACKET) {
+		DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
+			  count);
+		return -EINVAL;
+	}
+	memset(payload, 0, MAX_ARRAY_PACKET * 4);
+	memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4);
+
+	/* carefully check packet contents */
+
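+	/* Assumed packet layout, inferred from the checks below: payload[0]
+	 * is the number of vertex arrays (narrays), followed by groups of one
+	 * packed attribute/stride word and one or two array offsets, since
+	 * arrays are submitted in pairs.  Every offset must pass
+	 * radeon_check_offset() before the packet is echoed to the ring.
+	 */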
+	narrays = payload[0];
+	k = 0;
+	i = 1;
+	while ((k < narrays) && (i < (count + 1))) {
+		i++;		/* skip attribute field */
+		if (!radeon_check_offset(dev_priv, payload[i])) {
+			DRM_ERROR
+			    ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
+			     k, i);
+			return -EINVAL;
+		}
+		k++;
+		i++;
+		if (k == narrays)
+			break;
+		/* have one more to process, they come in pairs */
+		if (!radeon_check_offset(dev_priv, payload[i])) {
+			DRM_ERROR
+			    ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
+			     k, i);
+			return -EINVAL;
+		}
+		k++;
+		i++;
+	}
+	/* do the counts match what we expect ? */
+	if ((k != narrays) || (i != (count + 1))) {
+		DRM_ERROR
+		    ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
+		     k, i, narrays, count + 1);
+		return -EINVAL;
+	}
+
+	/* all clear, output packet */
+
+	BEGIN_RING(count + 2);
+	OUT_RING(header);
+	OUT_RING_TABLE(payload, count + 1);
+	ADVANCE_RING();
+
+	cmdbuf->buf += (count + 2) * 4;
+	cmdbuf->bufsz -= (count + 2) * 4;
+
+	return 0;
+}
+
+static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
+					     drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	u32 *cmd = (u32 *) cmdbuf->buf;
+	int count, ret;
+	RING_LOCALS;
+
+	count = (cmd[0] >> 16) & 0x3fff;
+
+	if (cmd[0] & 0x8000) {
+		u32 offset;
+
+		if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
+			      | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+			offset = cmd[2] << 10;
+			ret = !radeon_check_offset(dev_priv, offset);
+			if (ret) {
+				DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
+				return -EINVAL;
+			}
+		}
+
+		if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
+		    (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+			offset = cmd[3] << 10;
+			ret = !radeon_check_offset(dev_priv, offset);
+			if (ret) {
+				DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
+				return -EINVAL;
+			}
+
+		}
+	}
+
+	BEGIN_RING(count + 2);
+	OUT_RING(cmd[0]);
+	OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
+	ADVANCE_RING();
+
+	cmdbuf->buf += (count + 2) * 4;
+	cmdbuf->bufsz -= (count + 2) * 4;
+
+	return 0;
+}
+
+static __inline__ int r300_emit_indx_buffer(drm_radeon_private_t *dev_priv,
+					     drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	u32 *cmd = (u32 *) cmdbuf->buf;
+	int count, ret;
+	RING_LOCALS;
+
+	count = (cmd[0] >> 16) & 0x3fff;
+
+	if ((cmd[1] & 0x8000ffff) != 0x80000810) {
+		DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
+		return -EINVAL;
+	}
+	ret = !radeon_check_offset(dev_priv, cmd[2]);
+	if (ret) {
+		DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
+		return -EINVAL;
+	}
+
+	BEGIN_RING(count + 2);
+	OUT_RING(cmd[0]);
+	OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
+	ADVANCE_RING();
+
+	cmdbuf->buf += (count + 2) * 4;
+	cmdbuf->bufsz -= (count + 2) * 4;
+
+	return 0;
+}
+
+static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
+					    drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	u32 header;
+	int count;
+	RING_LOCALS;
+
+	if (4 > cmdbuf->bufsz)
+		return -EINVAL;
+
+	/* Fixme !! This simply emits a packet without much checking.
+	   We need to be smarter. */
+
+	/* obtain first word - actual packet3 header */
+	header = *(u32 *) cmdbuf->buf;
+
+	/* Is it packet 3 ? */
+	if ((header >> 30) != 0x3) {
+		DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
+		return -EINVAL;
+	}
+
+	count = (header >> 16) & 0x3fff;
+
+	/* Check again now that we know how much data to expect */
+	if ((count + 2) * 4 > cmdbuf->bufsz) {
+		DRM_ERROR
+		    ("Expected packet3 of length %d but have only %d bytes left\n",
+		     (count + 2) * 4, cmdbuf->bufsz);
+		return -EINVAL;
+	}
+
+	/* Is it a packet type we know about ? */
+	switch (header & 0xff00) {
+	case RADEON_3D_LOAD_VBPNTR:	/* load vertex array pointers */
+		return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header);
+
+	case RADEON_CNTL_BITBLT_MULTI:
+		return r300_emit_bitblt_multi(dev_priv, cmdbuf);
+
+	case RADEON_CP_INDX_BUFFER:	/* DRAW_INDX_2 without INDX_BUFFER seems to lock up the gpu */
+		return r300_emit_indx_buffer(dev_priv, cmdbuf);
+	case RADEON_CP_3D_DRAW_IMMD_2:	/* triggers drawing using in-packet vertex data */
+	case RADEON_CP_3D_DRAW_VBUF_2:	/* triggers drawing of vertex buffers setup elsewhere */
+	case RADEON_CP_3D_DRAW_INDX_2:	/* triggers drawing using indices to vertex buffer */
+	case RADEON_WAIT_FOR_IDLE:
+	case RADEON_CP_NOP:
+		/* these packets are safe */
+		break;
+	default:
+		DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
+		return -EINVAL;
+	}
+
+	BEGIN_RING(count + 2);
+	OUT_RING(header);
+	OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
+	ADVANCE_RING();
+
+	cmdbuf->buf += (count + 2) * 4;
+	cmdbuf->bufsz -= (count + 2) * 4;
+
+	return 0;
+}
+
+/**
+ * Emit a rendering packet3 from userspace.
+ * Called by r300_do_cp_cmdbuf.
+ */
+static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
+					drm_radeon_kcmd_buffer_t *cmdbuf,
+					drm_r300_cmd_header_t header)
+{
+	int n;
+	int ret;
+	char *orig_buf = cmdbuf->buf;
+	int orig_bufsz = cmdbuf->bufsz;
+
+	/* This is a do-while-loop so that we run the interior at least once,
+	 * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale.
+	 */
+	n = 0;
+	do {
+		if (cmdbuf->nbox > R300_SIMULTANEOUS_CLIPRECTS) {
+			ret = r300_emit_cliprects(dev_priv, cmdbuf, n);
+			if (ret)
+				return ret;
+
+			cmdbuf->buf = orig_buf;
+			cmdbuf->bufsz = orig_bufsz;
+		}
+
+		switch (header.packet3.packet) {
+		case R300_CMD_PACKET3_CLEAR:
+			DRM_DEBUG("R300_CMD_PACKET3_CLEAR\n");
+			ret = r300_emit_clear(dev_priv, cmdbuf);
+			if (ret) {
+				DRM_ERROR("r300_emit_clear failed\n");
+				return ret;
+			}
+			break;
+
+		case R300_CMD_PACKET3_RAW:
+			DRM_DEBUG("R300_CMD_PACKET3_RAW\n");
+			ret = r300_emit_raw_packet3(dev_priv, cmdbuf);
+			if (ret) {
+				DRM_ERROR("r300_emit_raw_packet3 failed\n");
+				return ret;
+			}
+			break;
+
+		default:
+			DRM_ERROR("bad packet3 type %i at %p\n",
+				  header.packet3.packet,
+				  cmdbuf->buf - sizeof(header));
+			return -EINVAL;
+		}
+
+		n += R300_SIMULTANEOUS_CLIPRECTS;
+	} while (n < cmdbuf->nbox);
+
+	return 0;
+}
+
+/* Some of the R300 chips seem to be extremely touchy about the two registers
+ * that are configured in r300_pacify.
+ * Among the worst offenders seems to be the R300 ND (0x4E44): When userspace
+ * sends a command buffer that contains only state setting commands and a
+ * vertex program/parameter upload sequence, this will eventually lead to a
+ * lockup, unless the sequence is bracketed by calls to r300_pacify.
+ * So we should take great care to *always* call r300_pacify before
+ * *anything* 3D related, and again afterwards. This is what the
+ * call bracket in r300_do_cp_cmdbuf is for.
+ */
+
+/**
+ * Emit the sequence to pacify R300.
+ */
+static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
+{
+	RING_LOCALS;
+
+	BEGIN_RING(6);
+	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	OUT_RING(R300_RB3D_DSTCACHE_UNKNOWN_0A);
+	OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));
+	OUT_RING(R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE|
+		 R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE);
+	OUT_RING(CP_PACKET3(RADEON_CP_NOP, 0));
+	OUT_RING(0x0);
+	ADVANCE_RING();
+}
+
+/**
+ * Called by r300_do_cp_cmdbuf to update the internal buffer age and state.
+ * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
+ * be careful about how this function is called.
+ */
+static void r300_discard_buffer(struct drm_device * dev, struct drm_buf * buf)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
+
+	buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;
+	buf->pending = 1;
+	buf->used = 0;
+}
+
+static void r300_cmd_wait(drm_radeon_private_t * dev_priv,
+			  drm_r300_cmd_header_t header)
+{
+	u32 wait_until;
+	RING_LOCALS;
+
+	if (!header.wait.flags)
+		return;
+
+	wait_until = 0;
+
+	switch (header.wait.flags) {
+	case R300_WAIT_2D:
+		wait_until = RADEON_WAIT_2D_IDLE;
+		break;
+	case R300_WAIT_3D:
+		wait_until = RADEON_WAIT_3D_IDLE;
+		break;
+	case R300_NEW_WAIT_2D_3D:
+		wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_3D_IDLE;
+		break;
+	case R300_NEW_WAIT_2D_2D_CLEAN:
+		wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
+		break;
+	case R300_NEW_WAIT_3D_3D_CLEAN:
+		wait_until = RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
+		break;
+	case R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN:
+		wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
+		wait_until |= RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
+		break;
+	default:
+		return;
+	}
+
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+	OUT_RING(wait_until);
+	ADVANCE_RING();
+}
+
+static int r300_scratch(drm_radeon_private_t *dev_priv,
+			drm_radeon_kcmd_buffer_t *cmdbuf,
+			drm_r300_cmd_header_t header)
+{
+	u32 *ref_age_base;
+	u32 i, buf_idx, h_pending;
+	RING_LOCALS;
+
+	if (cmdbuf->bufsz <
+	    (sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) {
+		return -EINVAL;
+	}
+
+	if (header.scratch.reg >= 5) {
+		return -EINVAL;
+	}
+
+	dev_priv->scratch_ages[header.scratch.reg]++;
+
+	ref_age_base =  (u32 *)(unsigned long)*((uint64_t *)cmdbuf->buf);
+
+	cmdbuf->buf += sizeof(u64);
+	cmdbuf->bufsz -= sizeof(u64);
+
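+	/* Sketch of the assumed layout, inferred from the accesses below:
+	 * ref_age_base points at a user-space array of per-buffer pairs of
+	 * u32s, [scratch age, pending count].  For each buffer index we write
+	 * the new scratch age and decrement the pending count, rejecting
+	 * entries whose pending count is already zero.
+	 */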
+	for (i = 0; i < header.scratch.n_bufs; i++) {
+		buf_idx = *(u32 *)cmdbuf->buf;
+		buf_idx *= 2; /* 8 bytes per buf */
+
+		if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) {
+			return -EINVAL;
+		}
+
+		if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) {
+			return -EINVAL;
+		}
+
+		if (h_pending == 0) {
+			return -EINVAL;
+		}
+
+		h_pending--;
+
+		if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) {
+			return -EINVAL;
+		}
+
+		cmdbuf->buf += sizeof(buf_idx);
+		cmdbuf->bufsz -= sizeof(buf_idx);
+	}
+
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0));
+	OUT_RING(dev_priv->scratch_ages[header.scratch.reg]);
+	ADVANCE_RING();
+
+	return 0;
+}
+
+/**
+ * Uploads user-supplied r500 fragment program instructions or constants onto
+ * the graphics card.
+ * Called by r300_do_cp_cmdbuf.
+ */
+static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
+				       drm_radeon_kcmd_buffer_t *cmdbuf,
+				       drm_r300_cmd_header_t header)
+{
+	int sz;
+	int addr;
+	int type;
+	int clamp;
+	int stride;
+	RING_LOCALS;
+
+	sz = header.r500fp.count;
+	/* address is 9 bits 0 - 8, bit 1 of flags is part of address */
+	addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo;
+
+	type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE);
+	clamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);
+
+	addr |= (type << 16);
+	addr |= (clamp << 17);
+
+	stride = type ? 4 : 6;
+
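+	/* Assumed meaning of the stride above: constant uploads move 4 DWORDs
+	 * (one vec4) per element, while program uploads appear to move 6
+	 * DWORDs per instruction; this is a guess based on the ring arithmetic
+	 * below rather than on documentation.
+	 */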
+	DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type);
+	if (!sz)
+		return 0;
+	if (sz * stride * 4 > cmdbuf->bufsz)
+		return -EINVAL;
+
+	BEGIN_RING(3 + sz * stride);
+	OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr);
+	OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1));
+	OUT_RING_TABLE((int *)cmdbuf->buf, sz * stride);
+
+	ADVANCE_RING();
+
+	cmdbuf->buf += sz * stride * 4;
+	cmdbuf->bufsz -= sz * stride * 4;
+
+	return 0;
+}
+
+
+/**
+ * Parses and validates a user-supplied command buffer and emits appropriate
+ * commands on the DMA ring buffer.
+ * Called by the ioctl handler function radeon_cp_cmdbuf.
+ */
+int r300_do_cp_cmdbuf(struct drm_device *dev,
+		      struct drm_file *file_priv,
+		      drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf = NULL;
+	int emit_dispatch_age = 0;
+	int ret = 0;
+
+	DRM_DEBUG("\n");
+
+	/* See the comment above r300_pacify for why this call must be here,
+	 * and what the cleanup gotos are for. */
+	r300_pacify(dev_priv);
+
+	if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {
+		ret = r300_emit_cliprects(dev_priv, cmdbuf, 0);
+		if (ret)
+			goto cleanup;
+	}
+
+	while (cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) {
+		int idx;
+		drm_r300_cmd_header_t header;
+
+		header.u = *(unsigned int *)cmdbuf->buf;
+
+		cmdbuf->buf += sizeof(header);
+		cmdbuf->bufsz -= sizeof(header);
+
+		switch (header.header.cmd_type) {
+		case R300_CMD_PACKET0:
+			DRM_DEBUG("R300_CMD_PACKET0\n");
+			ret = r300_emit_packet0(dev_priv, cmdbuf, header);
+			if (ret) {
+				DRM_ERROR("r300_emit_packet0 failed\n");
+				goto cleanup;
+			}
+			break;
+
+		case R300_CMD_VPU:
+			DRM_DEBUG("R300_CMD_VPU\n");
+			ret = r300_emit_vpu(dev_priv, cmdbuf, header);
+			if (ret) {
+				DRM_ERROR("r300_emit_vpu failed\n");
+				goto cleanup;
+			}
+			break;
+
+		case R300_CMD_PACKET3:
+			DRM_DEBUG("R300_CMD_PACKET3\n");
+			ret = r300_emit_packet3(dev_priv, cmdbuf, header);
+			if (ret) {
+				DRM_ERROR("r300_emit_packet3 failed\n");
+				goto cleanup;
+			}
+			break;
+
+		case R300_CMD_END3D:
+			DRM_DEBUG("R300_CMD_END3D\n");
+			/* TODO:
+			   Ideally the userspace driver should not need to issue
+			   this call, i.e. the drm driver should issue it
+			   automatically and prevent lockups.
+
+			   In practice, we do not understand why this call is
+			   needed or what it does (beyond some vague guesses
+			   that it has to do with cache coherence), so the
+			   userspace driver issues it.
+
+			   Once we know which uses prevent lockups, the code
+			   could be moved into the kernel and the userspace
+			   driver would no longer need this command.
+
+			   Note that issuing this command does not hurt anything
+			   except, possibly, performance */
+			r300_pacify(dev_priv);
+			break;
+
+		case R300_CMD_CP_DELAY:
+			/* simple enough, we can do it here */
+			DRM_DEBUG("R300_CMD_CP_DELAY\n");
+			{
+				int i;
+				RING_LOCALS;
+
+				BEGIN_RING(header.delay.count);
+				for (i = 0; i < header.delay.count; i++)
+					OUT_RING(RADEON_CP_PACKET2);
+				ADVANCE_RING();
+			}
+			break;
+
+		case R300_CMD_DMA_DISCARD:
+			DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
+			idx = header.dma.buf_idx;
+			if (idx < 0 || idx >= dma->buf_count) {
+				DRM_ERROR("buffer index %d (of %d max)\n",
+					  idx, dma->buf_count - 1);
+				ret = -EINVAL;
+				goto cleanup;
+			}
+
+			buf = dma->buflist[idx];
+			if (buf->file_priv != file_priv || buf->pending) {
+				DRM_ERROR("bad buffer %p %p %d\n",
+					  buf->file_priv, file_priv,
+					  buf->pending);
+				ret = -EINVAL;
+				goto cleanup;
+			}
+
+			emit_dispatch_age = 1;
+			r300_discard_buffer(dev, buf);
+			break;
+
+		case R300_CMD_WAIT:
+			DRM_DEBUG("R300_CMD_WAIT\n");
+			r300_cmd_wait(dev_priv, header);
+			break;
+
+		case R300_CMD_SCRATCH:
+			DRM_DEBUG("R300_CMD_SCRATCH\n");
+			ret = r300_scratch(dev_priv, cmdbuf, header);
+			if (ret) {
+				DRM_ERROR("r300_scratch failed\n");
+				goto cleanup;
+			}
+			break;
+
+		case R300_CMD_R500FP:
+			if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV515) {
+				DRM_ERROR("Calling r500 command on r300 card\n");
+				ret = -EINVAL;
+				goto cleanup;
+			}
+			DRM_DEBUG("R300_CMD_R500FP\n");
+			ret = r300_emit_r500fp(dev_priv, cmdbuf, header);
+			if (ret) {
+				DRM_ERROR("r300_emit_r500fp failed\n");
+				goto cleanup;
+			}
+			break;
+		default:
+			DRM_ERROR("bad cmd_type %i at %p\n",
+				  header.header.cmd_type,
+				  cmdbuf->buf - sizeof(header));
+			ret = -EINVAL;
+			goto cleanup;
+		}
+	}
+
+	DRM_DEBUG("END\n");
+
+cleanup:
+	r300_pacify(dev_priv);
+
+	/* We emit the vertex buffer age here, outside the pacifier "brackets"
+	 * for two reasons:
+	 *  (1) This may coalesce multiple age emissions into a single one and
+	 *  (2) more importantly, some chips lock up hard when scratch registers
+	 *      are written inside the pacifier bracket.
+	 */
+	if (emit_dispatch_age) {
+		RING_LOCALS;
+
+		/* Emit the vertex buffer age */
+		BEGIN_RING(2);
+		RADEON_DISPATCH_AGE(dev_priv->sarea_priv->last_dispatch);
+		ADVANCE_RING();
+	}
+
+	COMMIT_RING();
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
new file mode 100644
index 000000000000..a6802f26afc4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -0,0 +1,1772 @@
+/**************************************************************************
+
+Copyright (C) 2004-2005 Nicolai Haehnle et al.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+on the rights to use, copy, modify, merge, publish, distribute, sub
+license, and/or sell copies of the Software, and to permit persons to whom
+the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice (including the next
+paragraph) shall be included in all copies or substantial portions of the
+Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#ifndef _R300_REG_H
+#define _R300_REG_H
+
+#define R300_MC_INIT_MISC_LAT_TIMER	0x180
+#	define R300_MC_MISC__MC_CPR_INIT_LAT_SHIFT	0
+#	define R300_MC_MISC__MC_VF_INIT_LAT_SHIFT	4
+#	define R300_MC_MISC__MC_DISP0R_INIT_LAT_SHIFT	8
+#	define R300_MC_MISC__MC_DISP1R_INIT_LAT_SHIFT	12
+#	define R300_MC_MISC__MC_FIXED_INIT_LAT_SHIFT	16
+#	define R300_MC_MISC__MC_E2R_INIT_LAT_SHIFT	20
+#	define R300_MC_MISC__MC_SAME_PAGE_PRIO_SHIFT	24
+#	define R300_MC_MISC__MC_GLOBW_INIT_LAT_SHIFT	28
+
+#define R300_MC_INIT_GFX_LAT_TIMER	0x154
+#	define R300_MC_MISC__MC_G3D0R_INIT_LAT_SHIFT	0
+#	define R300_MC_MISC__MC_G3D1R_INIT_LAT_SHIFT	4
+#	define R300_MC_MISC__MC_G3D2R_INIT_LAT_SHIFT	8
+#	define R300_MC_MISC__MC_G3D3R_INIT_LAT_SHIFT	12
+#	define R300_MC_MISC__MC_TX0R_INIT_LAT_SHIFT	16
+#	define R300_MC_MISC__MC_TX1R_INIT_LAT_SHIFT	20
+#	define R300_MC_MISC__MC_GLOBR_INIT_LAT_SHIFT	24
+#	define R300_MC_MISC__MC_GLOBW_FULL_LAT_SHIFT	28
+
+/*
+ * This file contains registers and constants for the R300. They have been
+ * found mostly by examining command buffers captured using glxtest, as well
+ * as by extrapolating some known registers and constants from the R200.
+ * I am fairly certain that they are correct unless stated otherwise
+ * in comments.
+ */
+
+#define R300_SE_VPORT_XSCALE                0x1D98
+#define R300_SE_VPORT_XOFFSET               0x1D9C
+#define R300_SE_VPORT_YSCALE                0x1DA0
+#define R300_SE_VPORT_YOFFSET               0x1DA4
+#define R300_SE_VPORT_ZSCALE                0x1DA8
+#define R300_SE_VPORT_ZOFFSET               0x1DAC
+
+
+/*
+ * Vertex Array Processing (VAP) Control
+ * Stolen from r200 code from Christoph Brill (It's a guess!)
+ */
+#define R300_VAP_CNTL	0x2080
+
+/* This register is written directly and also starts the data section
+ * in many 3d CP_PACKET3's.
+ */
+#define R300_VAP_VF_CNTL	0x2084
+#	define	R300_VAP_VF_CNTL__PRIM_TYPE__SHIFT              0
+#	define  R300_VAP_VF_CNTL__PRIM_NONE                     (0<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_POINTS                   (1<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_LINES                    (2<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_LINE_STRIP               (3<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_TRIANGLES                (4<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_TRIANGLE_FAN             (5<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_TRIANGLE_STRIP           (6<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_LINE_LOOP                (12<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_QUADS                    (13<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_QUAD_STRIP               (14<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_POLYGON                  (15<<0)
+
+#	define	R300_VAP_VF_CNTL__PRIM_WALK__SHIFT              4
+	/* State based - direct writes to registers trigger vertex
+           generation */
+#	define	R300_VAP_VF_CNTL__PRIM_WALK_STATE_BASED         (0<<4)
+#	define	R300_VAP_VF_CNTL__PRIM_WALK_INDICES             (1<<4)
+#	define	R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST         (2<<4)
+#	define	R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED     (3<<4)
+
+	/* I don't think I saw these three used.. */
+#	define	R300_VAP_VF_CNTL__COLOR_ORDER__SHIFT            6
+#	define	R300_VAP_VF_CNTL__TCL_OUTPUT_CTL_ENA__SHIFT     9
+#	define	R300_VAP_VF_CNTL__PROG_STREAM_ENA__SHIFT        10
+
+	/* index size - when not set the indices are assumed to be 16 bit */
+#	define	R300_VAP_VF_CNTL__INDEX_SIZE_32bit              (1<<11)
+	/* number of vertices */
+#	define	R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT           16
+
+/* BEGIN: Wild guesses */
+#define R300_VAP_OUTPUT_VTX_FMT_0           0x2090
+#       define R300_VAP_OUTPUT_VTX_FMT_0__POS_PRESENT     (1<<0)
+#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_PRESENT   (1<<1)
+#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_1_PRESENT (1<<2)  /* GUESS */
+#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_2_PRESENT (1<<3)  /* GUESS */
+#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_3_PRESENT (1<<4)  /* GUESS */
+#       define R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT (1<<16) /* GUESS */
+
+#define R300_VAP_OUTPUT_VTX_FMT_1           0x2094
+	/* each of the following is 3 bits wide, specifies number
+	   of components */
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
+/* END: Wild guesses */
+
+#define R300_SE_VTE_CNTL                  0x20b0
+#	define     R300_VPORT_X_SCALE_ENA                0x00000001
+#	define     R300_VPORT_X_OFFSET_ENA               0x00000002
+#	define     R300_VPORT_Y_SCALE_ENA                0x00000004
+#	define     R300_VPORT_Y_OFFSET_ENA               0x00000008
+#	define     R300_VPORT_Z_SCALE_ENA                0x00000010
+#	define     R300_VPORT_Z_OFFSET_ENA               0x00000020
+#	define     R300_VTX_XY_FMT                       0x00000100
+#	define     R300_VTX_Z_FMT                        0x00000200
+#	define     R300_VTX_W0_FMT                       0x00000400
+#	define     R300_VTX_W0_NORMALIZE                 0x00000800
+#	define     R300_VTX_ST_DENORMALIZED              0x00001000
+
+/* BEGIN: Vertex data assembly - lots of uncertainties */
+
+/* gap */
+
+#define R300_VAP_CNTL_STATUS              0x2140
+#	define R300_VC_NO_SWAP                  (0 << 0)
+#	define R300_VC_16BIT_SWAP               (1 << 0)
+#	define R300_VC_32BIT_SWAP               (2 << 0)
+#	define R300_VAP_TCL_BYPASS		(1 << 8)
+
+/* gap */
+
+/* Where do we get our vertex data?
+ *
+ * Vertex data comes either from immediate mode registers or from
+ * vertex arrays.
+ * There appears to be no mixed mode (though we can force the pitch of
+ * vertex arrays to 0, effectively reusing the same element over and over
+ * again).
+ *
+ * Immediate mode is controlled by the INPUT_CNTL registers. I am not sure
+ * if these registers influence vertex array processing.
+ *
+ * Vertex arrays are controlled via the 3D_LOAD_VBPNTR packet3.
+ *
+ * In both cases, vertex attributes are then passed through INPUT_ROUTE.
+ *
+ * Beginning with INPUT_ROUTE_0_0 is a list of WORDs that route vertex data
+ * into the vertex processor's input registers.
+ * The first word routes the first input, the second word the second, etc.
+ * The corresponding input is routed into the register with the given index.
+ * The list is ended by a word with INPUT_ROUTE_END set.
+ *
+ * Always set COMPONENTS_4 in immediate mode.
+ */
+
+#define R300_VAP_INPUT_ROUTE_0_0            0x2150
+#       define R300_INPUT_ROUTE_COMPONENTS_1     (0 << 0)
+#       define R300_INPUT_ROUTE_COMPONENTS_2     (1 << 0)
+#       define R300_INPUT_ROUTE_COMPONENTS_3     (2 << 0)
+#       define R300_INPUT_ROUTE_COMPONENTS_4     (3 << 0)
+#       define R300_INPUT_ROUTE_COMPONENTS_RGBA  (4 << 0) /* GUESS */
+#       define R300_VAP_INPUT_ROUTE_IDX_SHIFT    8
+#       define R300_VAP_INPUT_ROUTE_IDX_MASK     (31 << 8) /* GUESS */
+#       define R300_VAP_INPUT_ROUTE_END          (1 << 13)
+#       define R300_INPUT_ROUTE_IMMEDIATE_MODE   (0 << 14) /* GUESS */
+#       define R300_INPUT_ROUTE_FLOAT            (1 << 14) /* GUESS */
+#       define R300_INPUT_ROUTE_UNSIGNED_BYTE    (2 << 14) /* GUESS */
+#       define R300_INPUT_ROUTE_FLOAT_COLOR      (3 << 14) /* GUESS */
+#define R300_VAP_INPUT_ROUTE_0_1            0x2154
+#define R300_VAP_INPUT_ROUTE_0_2            0x2158
+#define R300_VAP_INPUT_ROUTE_0_3            0x215C
+#define R300_VAP_INPUT_ROUTE_0_4            0x2160
+#define R300_VAP_INPUT_ROUTE_0_5            0x2164
+#define R300_VAP_INPUT_ROUTE_0_6            0x2168
+#define R300_VAP_INPUT_ROUTE_0_7            0x216C
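+/* Illustrative example built from the guesses above: a 4-component float
+ * attribute routed into vertex input register 2 that also terminates the
+ * route list would be written roughly as
+ *   R300_INPUT_ROUTE_COMPONENTS_4 | (2 << R300_VAP_INPUT_ROUTE_IDX_SHIFT) |
+ *   R300_INPUT_ROUTE_FLOAT | R300_VAP_INPUT_ROUTE_END
+ * Treat this as a sketch; several of the bits involved are marked GUESS.
+ */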
+
+/* gap */
+
+/* Notes:
+ *  - always set up to produce at least two attributes:
+ *    if vertex program uses only position, fglrx will set normal, too
+ *  - INPUT_CNTL_0_COLOR and INPUT_CNTL_COLOR bits are always equal.
+ */
+#define R300_VAP_INPUT_CNTL_0               0x2180
+#       define R300_INPUT_CNTL_0_COLOR           0x00000001
+#define R300_VAP_INPUT_CNTL_1               0x2184
+#       define R300_INPUT_CNTL_POS               0x00000001
+#       define R300_INPUT_CNTL_NORMAL            0x00000002
+#       define R300_INPUT_CNTL_COLOR             0x00000004
+#       define R300_INPUT_CNTL_TC0               0x00000400
+#       define R300_INPUT_CNTL_TC1               0x00000800
+#       define R300_INPUT_CNTL_TC2               0x00001000 /* GUESS */
+#       define R300_INPUT_CNTL_TC3               0x00002000 /* GUESS */
+#       define R300_INPUT_CNTL_TC4               0x00004000 /* GUESS */
+#       define R300_INPUT_CNTL_TC5               0x00008000 /* GUESS */
+#       define R300_INPUT_CNTL_TC6               0x00010000 /* GUESS */
+#       define R300_INPUT_CNTL_TC7               0x00020000 /* GUESS */
+
+/* gap */
+
+/* Words parallel to INPUT_ROUTE_0; All words that are active in INPUT_ROUTE_0
+ * are set to a swizzling bit pattern, other words are 0.
+ *
+ * In immediate mode, the pattern is always set to xyzw. In vertex array
+ * mode, the swizzling pattern is e.g. used to set zw components in texture
+ * coordinates with only two components.
+ */
+#define R300_VAP_INPUT_ROUTE_1_0            0x21E0
+#       define R300_INPUT_ROUTE_SELECT_X    0
+#       define R300_INPUT_ROUTE_SELECT_Y    1
+#       define R300_INPUT_ROUTE_SELECT_Z    2
+#       define R300_INPUT_ROUTE_SELECT_W    3
+#       define R300_INPUT_ROUTE_SELECT_ZERO 4
+#       define R300_INPUT_ROUTE_SELECT_ONE  5
+#       define R300_INPUT_ROUTE_SELECT_MASK 7
+#       define R300_INPUT_ROUTE_X_SHIFT     0
+#       define R300_INPUT_ROUTE_Y_SHIFT     3
+#       define R300_INPUT_ROUTE_Z_SHIFT     6
+#       define R300_INPUT_ROUTE_W_SHIFT     9
+#       define R300_INPUT_ROUTE_ENABLE      (15 << 12)
+#define R300_VAP_INPUT_ROUTE_1_1            0x21E4
+#define R300_VAP_INPUT_ROUTE_1_2            0x21E8
+#define R300_VAP_INPUT_ROUTE_1_3            0x21EC
+#define R300_VAP_INPUT_ROUTE_1_4            0x21F0
+#define R300_VAP_INPUT_ROUTE_1_5            0x21F4
+#define R300_VAP_INPUT_ROUTE_1_6            0x21F8
+#define R300_VAP_INPUT_ROUTE_1_7            0x21FC
+
+/* END: Vertex data assembly */
+
+/* gap */
+
+/* BEGIN: Upload vertex program and data */
+
+/*
+ * The programmable vertex shader unit has a memory bank of unknown size
+ * that can be written to in 16 byte units by writing the address into
+ * UPLOAD_ADDRESS, followed by data in UPLOAD_DATA (multiples of 4 DWORDs).
+ *
+ * Pointers into the memory bank are always in multiples of 16 bytes.
+ *
+ * The memory bank is divided into areas with fixed meaning.
+ *
+ * Starting at address UPLOAD_PROGRAM: Vertex program instructions.
+ * Native limits reported by drivers from ATI suggest size 256 (i.e. 4KB),
+ * whereas the difference between known addresses suggests size 512.
+ *
+ * Starting at address UPLOAD_PARAMETERS: Vertex program parameters.
+ * Native reported limits and the VPI layout suggest size 256, whereas
+ * the difference between known addresses suggests size 512.
+ *
+ * At address UPLOAD_POINTSIZE is a vector (0, 0, ps, 0), where ps is the
+ * floating point pointsize. The exact purpose of this state is uncertain,
+ * as there is also the R300_RE_POINTSIZE register.
+ *
+ * Multiple vertex programs and parameter sets can be loaded at once,
+ * which could explain the size discrepancy.
+ */
+#define R300_VAP_PVS_UPLOAD_ADDRESS         0x2200
+#       define R300_PVS_UPLOAD_PROGRAM           0x00000000
+#       define R300_PVS_UPLOAD_PARAMETERS        0x00000200
+#       define R300_PVS_UPLOAD_POINTSIZE         0x00000406
+
+/* gap */
+
+#define R300_VAP_PVS_UPLOAD_DATA            0x2208
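+/* For an in-tree example of this upload sequence, see r300_emit_vpu() in
+ * r300_cmdbuf.c: it waits on R300_VAP_PVS_WAITIDLE, writes the target address
+ * to R300_VAP_PVS_UPLOAD_ADDRESS and then streams the data, four DWORDs per
+ * vector, through R300_VAP_PVS_UPLOAD_DATA.
+ */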
+
+/* END: Upload vertex program and data */
+
+/* gap */
+
+/* I do not know the purpose of this register. However, I do know that
+ * it is set to 221C_CLEAR for clear operations and to 221C_NORMAL
+ * for normal rendering.
+ */
+#define R300_VAP_UNKNOWN_221C               0x221C
+#       define R300_221C_NORMAL                  0x00000000
+#       define R300_221C_CLEAR                   0x0001C000
+
+/* These seem to be per-pixel and per-vertex X and Y clipping planes. The first
+ * plane is per-pixel and the second plane is per-vertex.
+ *
+ * This was determined by experimentation alone but I believe it is correct.
+ *
+ * These registers are called X_QUAD0_1_FL to X_QUAD0_4_FL by glxtest.
+ */
+#define R300_VAP_CLIP_X_0                   0x2220
+#define R300_VAP_CLIP_X_1                   0x2224
+#define R300_VAP_CLIP_Y_0                   0x2228
+#define R300_VAP_CLIP_Y_1                   0x2230
+
+/* gap */
+
+/* Sometimes, END_OF_PKT and 0x2284=0 are the only commands sent between
+ * rendering commands and overwriting vertex program parameters.
+ * Therefore, I suspect writing zero to 0x2284 synchronizes the engine and
+ * avoids bugs caused by still running shaders reading bad data from memory.
+ */
+#define R300_VAP_PVS_WAITIDLE               0x2284 /* GUESS */
+
+/* Absolutely no clue what this register is about. */
+#define R300_VAP_UNKNOWN_2288               0x2288
+#       define R300_2288_R300                    0x00750000 /* -- nh */
+#       define R300_2288_RV350                   0x0000FFFF /* -- Vladimir */
+
+/* gap */
+
+/* Addresses are relative to the vertex program instruction area of the
+ * memory bank. PROGRAM_END points to the last instruction of the active
+ * program
+ *
+ * The meaning of the two UNKNOWN fields is obviously not known. However,
+ * experiments so far have shown that both *must* point to an instruction
+ * inside the vertex program, otherwise the GPU locks up.
+ *
+ * fglrx usually sets CNTL_3_UNKNOWN to the end of the program and
+ * R300_PVS_CNTL_1_POS_END_SHIFT to the instruction where the last write to
+ * position takes place.
+ *
+ * Most likely this is used to skip the rest of the program when a group of
+ * vertices is not visible.  For some reason this "section" sometimes accepts
+ * other instructions that have no relationship to position calculations.
+ */
+#define R300_VAP_PVS_CNTL_1                 0x22D0
+#       define R300_PVS_CNTL_1_PROGRAM_START_SHIFT   0
+#       define R300_PVS_CNTL_1_POS_END_SHIFT         10
+#       define R300_PVS_CNTL_1_PROGRAM_END_SHIFT     20
+/* Addresses are relative to the vertex program parameters area. */
+#define R300_VAP_PVS_CNTL_2                 0x22D4
+#       define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0
+#       define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT  16
+#define R300_VAP_PVS_CNTL_3	           0x22D8
+#       define R300_PVS_CNTL_3_PROGRAM_UNKNOWN_SHIFT 10
+#       define R300_PVS_CNTL_3_PROGRAM_UNKNOWN2_SHIFT 0
+
+/* The entire range from 0x2300 to 0x24AC inclusive seems to be used for
+ * immediate vertices.
+ */
+#define R300_VAP_VTX_COLOR_R                0x2464
+#define R300_VAP_VTX_COLOR_G                0x2468
+#define R300_VAP_VTX_COLOR_B                0x246C
+#define R300_VAP_VTX_POS_0_X_1              0x2490 /* used for glVertex2*() */
+#define R300_VAP_VTX_POS_0_Y_1              0x2494
+#define R300_VAP_VTX_COLOR_PKD              0x249C /* RGBA */
+#define R300_VAP_VTX_POS_0_X_2              0x24A0 /* used for glVertex3*() */
+#define R300_VAP_VTX_POS_0_Y_2              0x24A4
+#define R300_VAP_VTX_POS_0_Z_2              0x24A8
+/* write 0 to indicate end of packet? */
+#define R300_VAP_VTX_END_OF_PKT             0x24AC
+
+/* gap */
+
+/* These are values from r300_reg/r300_reg.h - they are known to be correct
+ * and are here so we can use one register file instead of several
+ * - Vladimir
+ */
+#define R300_GB_VAP_RASTER_VTX_FMT_0	0x4000
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__POS_PRESENT	(1<<0)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_0_PRESENT	(1<<1)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_1_PRESENT	(1<<2)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_2_PRESENT	(1<<3)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_3_PRESENT	(1<<4)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_SPACE	(0xf<<5)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__PT_SIZE_PRESENT	(0x1<<16)
+
+#define R300_GB_VAP_RASTER_VTX_FMT_1	0x4004
+	/* each of the following is 3 bits wide, specifies number
+	   of components */
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT	0
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT	3
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT	6
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT	9
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT	12
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT	15
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT	18
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT	21
+
+/* UNK30 seems to enable point-to-quad transformation on textures
+ * (or something closely related to that).
+ * This bit is rather fatal at the moment due to shortcomings on the pixel
+ * shader side.
+ */
+#define R300_GB_ENABLE	0x4008
+#	define R300_GB_POINT_STUFF_ENABLE	(1<<0)
+#	define R300_GB_LINE_STUFF_ENABLE	(1<<1)
+#	define R300_GB_TRIANGLE_STUFF_ENABLE	(1<<2)
+#	define R300_GB_STENCIL_AUTO_ENABLE	(1<<4)
+#	define R300_GB_UNK31			(1<<31)
+	/* each of the following is 2 bits wide */
+#define R300_GB_TEX_REPLICATE	0
+#define R300_GB_TEX_ST		1
+#define R300_GB_TEX_STR		2
+#	define R300_GB_TEX0_SOURCE_SHIFT	16
+#	define R300_GB_TEX1_SOURCE_SHIFT	18
+#	define R300_GB_TEX2_SOURCE_SHIFT	20
+#	define R300_GB_TEX3_SOURCE_SHIFT	22
+#	define R300_GB_TEX4_SOURCE_SHIFT	24
+#	define R300_GB_TEX5_SOURCE_SHIFT	26
+#	define R300_GB_TEX6_SOURCE_SHIFT	28
+#	define R300_GB_TEX7_SOURCE_SHIFT	30
+
+/* MSPOS - positions for multisample antialiasing (?) */
+#define R300_GB_MSPOS0	0x4010
+	/* shifts - each of the fields is 4 bits */
+#	define R300_GB_MSPOS0__MS_X0_SHIFT	0
+#	define R300_GB_MSPOS0__MS_Y0_SHIFT	4
+#	define R300_GB_MSPOS0__MS_X1_SHIFT	8
+#	define R300_GB_MSPOS0__MS_Y1_SHIFT	12
+#	define R300_GB_MSPOS0__MS_X2_SHIFT	16
+#	define R300_GB_MSPOS0__MS_Y2_SHIFT	20
+#	define R300_GB_MSPOS0__MSBD0_Y		24
+#	define R300_GB_MSPOS0__MSBD0_X		28
+
+#define R300_GB_MSPOS1	0x4014
+#	define R300_GB_MSPOS1__MS_X3_SHIFT	0
+#	define R300_GB_MSPOS1__MS_Y3_SHIFT	4
+#	define R300_GB_MSPOS1__MS_X4_SHIFT	8
+#	define R300_GB_MSPOS1__MS_Y4_SHIFT	12
+#	define R300_GB_MSPOS1__MS_X5_SHIFT	16
+#	define R300_GB_MSPOS1__MS_Y5_SHIFT	20
+#	define R300_GB_MSPOS1__MSBD1		24
+
+
+#define R300_GB_TILE_CONFIG	0x4018
+#	define R300_GB_TILE_ENABLE	(1<<0)
+#	define R300_GB_TILE_PIPE_COUNT_RV300	0
+#	define R300_GB_TILE_PIPE_COUNT_R300	(3<<1)
+#	define R300_GB_TILE_PIPE_COUNT_R420	(7<<1)
+#	define R300_GB_TILE_PIPE_COUNT_RV410	(3<<1)
+#	define R300_GB_TILE_SIZE_8		0
+#	define R300_GB_TILE_SIZE_16		(1<<4)
+#	define R300_GB_TILE_SIZE_32		(2<<4)
+#	define R300_GB_SUPER_SIZE_1		(0<<6)
+#	define R300_GB_SUPER_SIZE_2		(1<<6)
+#	define R300_GB_SUPER_SIZE_4		(2<<6)
+#	define R300_GB_SUPER_SIZE_8		(3<<6)
+#	define R300_GB_SUPER_SIZE_16		(4<<6)
+#	define R300_GB_SUPER_SIZE_32		(5<<6)
+#	define R300_GB_SUPER_SIZE_64		(6<<6)
+#	define R300_GB_SUPER_SIZE_128		(7<<6)
+#	define R300_GB_SUPER_X_SHIFT		9	/* 3 bits wide */
+#	define R300_GB_SUPER_Y_SHIFT		12	/* 3 bits wide */
+#	define R300_GB_SUPER_TILE_A		0
+#	define R300_GB_SUPER_TILE_B		(1<<15)
+#	define R300_GB_SUBPIXEL_1_12		0
+#	define R300_GB_SUBPIXEL_1_16		(1<<16)
+
+#define R300_GB_FIFO_SIZE	0x4024
+	/* each of the following is 2 bits wide */
+#define R300_GB_FIFO_SIZE_32	0
+#define R300_GB_FIFO_SIZE_64	1
+#define R300_GB_FIFO_SIZE_128	2
+#define R300_GB_FIFO_SIZE_256	3
+#	define R300_SC_IFIFO_SIZE_SHIFT	0
+#	define R300_SC_TZFIFO_SIZE_SHIFT	2
+#	define R300_SC_BFIFO_SIZE_SHIFT	4
+
+#	define R300_US_OFIFO_SIZE_SHIFT	12
+#	define R300_US_WFIFO_SIZE_SHIFT	14
+	/* the following use the same constants as above, but the meaning
+	   is times 2 (i.e. instead of 32 words it means 64) */
+#	define R300_RS_TFIFO_SIZE_SHIFT	6
+#	define R300_RS_CFIFO_SIZE_SHIFT	8
+#	define R300_US_RAM_SIZE_SHIFT		10
+	/* watermarks, 3 bits wide */
+#	define R300_RS_HIGHWATER_COL_SHIFT	16
+#	define R300_RS_HIGHWATER_TEX_SHIFT	19
+#	define R300_OFIFO_HIGHWATER_SHIFT	22	/* two bits only */
+#	define R300_CUBE_FIFO_HIGHWATER_COL_SHIFT	24
+
+#define R300_GB_SELECT	0x401C
+#	define R300_GB_FOG_SELECT_C0A		0
+#	define R300_GB_FOG_SELECT_C1A		1
+#	define R300_GB_FOG_SELECT_C2A		2
+#	define R300_GB_FOG_SELECT_C3A		3
+#	define R300_GB_FOG_SELECT_1_1_W	4
+#	define R300_GB_FOG_SELECT_Z		5
+#	define R300_GB_DEPTH_SELECT_Z		0
+#	define R300_GB_DEPTH_SELECT_1_1_W	(1<<3)
+#	define R300_GB_W_SELECT_1_W		0
+#	define R300_GB_W_SELECT_1		(1<<4)
+
+#define R300_GB_AA_CONFIG		0x4020
+#	define R300_AA_DISABLE			0x00
+#	define R300_AA_ENABLE			0x01
+#	define R300_AA_SUBSAMPLES_2		0
+#	define R300_AA_SUBSAMPLES_3		(1<<1)
+#	define R300_AA_SUBSAMPLES_4		(2<<1)
+#	define R300_AA_SUBSAMPLES_6		(3<<1)
+
+/* gap */
+
+/* Zero to flush caches. */
+#define R300_TX_CNTL                        0x4100
+#define R300_TX_FLUSH                       0x0
+
+/* The upper enable bits are guessed, based on fglrx reported limits. */
+#define R300_TX_ENABLE                      0x4104
+#       define R300_TX_ENABLE_0                  (1 << 0)
+#       define R300_TX_ENABLE_1                  (1 << 1)
+#       define R300_TX_ENABLE_2                  (1 << 2)
+#       define R300_TX_ENABLE_3                  (1 << 3)
+#       define R300_TX_ENABLE_4                  (1 << 4)
+#       define R300_TX_ENABLE_5                  (1 << 5)
+#       define R300_TX_ENABLE_6                  (1 << 6)
+#       define R300_TX_ENABLE_7                  (1 << 7)
+#       define R300_TX_ENABLE_8                  (1 << 8)
+#       define R300_TX_ENABLE_9                  (1 << 9)
+#       define R300_TX_ENABLE_10                 (1 << 10)
+#       define R300_TX_ENABLE_11                 (1 << 11)
+#       define R300_TX_ENABLE_12                 (1 << 12)
+#       define R300_TX_ENABLE_13                 (1 << 13)
+#       define R300_TX_ENABLE_14                 (1 << 14)
+#       define R300_TX_ENABLE_15                 (1 << 15)
+
+/* The pointsize is given in multiples of 6. The pointsize can be
+ * enormous: Clear() renders a single point that fills the entire
+ * framebuffer.
+ */
+#define R300_RE_POINTSIZE                   0x421C
+#       define R300_POINTSIZE_Y_SHIFT            0
+#       define R300_POINTSIZE_Y_MASK             (0xFFFF << 0) /* GUESS */
+#       define R300_POINTSIZE_X_SHIFT            16
+#       define R300_POINTSIZE_X_MASK             (0xFFFF << 16) /* GUESS */
+#       define R300_POINTSIZE_MAX             (R300_POINTSIZE_Y_MASK / 6)
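+
+/* Illustrative sketch (ours, not from the original header): packing a point
+ * size given in pixels; per the comment above the hardware field is the
+ * size multiplied by 6, replicated here into the X and Y halves.
+ */
+static inline u32 r300_example_pointsize(u32 size_in_pixels)
+{
+	u32 v = size_in_pixels * 6;
+
+	return ((v << R300_POINTSIZE_X_SHIFT) & R300_POINTSIZE_X_MASK) |
+	       ((v << R300_POINTSIZE_Y_SHIFT) & R300_POINTSIZE_Y_MASK);
+}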
+
+/* The line width is given in multiples of 6.
+ * In default mode lines are classified as vertical lines.
+ * HO: horizontal
+ * VE: vertical or horizontal
+ * HO & VE: no classification
+ */
+#define R300_RE_LINE_CNT                      0x4234
+#       define R300_LINESIZE_SHIFT            0
+#       define R300_LINESIZE_MASK             (0xFFFF << 0) /* GUESS */
+#       define R300_LINESIZE_MAX             (R300_LINESIZE_MASK / 6)
+#       define R300_LINE_CNT_HO               (1 << 16)
+#       define R300_LINE_CNT_VE               (1 << 17)
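+
+/* Illustrative sketch (ours): the line width uses the same times-6 encoding
+ * as the point size above; the default (vertical) classification is kept.
+ */
+static inline u32 r300_example_linesize(u32 width_in_pixels)
+{
+	return (width_in_pixels * 6) & R300_LINESIZE_MASK;
+}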
+
+/* Some sort of scale or clamp value for texcoordless textures. */
+#define R300_RE_UNK4238                       0x4238
+
+/* Something shade related */
+#define R300_RE_SHADE                         0x4274
+
+#define R300_RE_SHADE_MODEL                   0x4278
+#	define R300_RE_SHADE_MODEL_SMOOTH     0x3aaaa
+#	define R300_RE_SHADE_MODEL_FLAT       0x39595
+
+/* Dangerous */
+#define R300_RE_POLYGON_MODE                  0x4288
+#	define R300_PM_ENABLED                (1 << 0)
+#	define R300_PM_FRONT_POINT            (0 << 0)
+#	define R300_PM_BACK_POINT             (0 << 0)
+#	define R300_PM_FRONT_LINE             (1 << 4)
+#	define R300_PM_FRONT_FILL             (1 << 5)
+#	define R300_PM_BACK_LINE              (1 << 7)
+#	define R300_PM_BACK_FILL              (1 << 8)
+
+/* Fog parameters */
+#define R300_RE_FOG_SCALE                     0x4294
+#define R300_RE_FOG_START                     0x4298
+
+/* Not sure why there are duplicates of the factor and constant values.
+ * My best guess so far is that there are separate zbiases for test and write.
+ * Ordering might be wrong.
+ * Some of the tests indicate that fgl has a fallback implementation of zbias
+ * via pixel shaders.
+ */
+#define R300_RE_ZBIAS_CNTL                    0x42A0 /* GUESS */
+#define R300_RE_ZBIAS_T_FACTOR                0x42A4
+#define R300_RE_ZBIAS_T_CONSTANT              0x42A8
+#define R300_RE_ZBIAS_W_FACTOR                0x42AC
+#define R300_RE_ZBIAS_W_CONSTANT              0x42B0
+
+/* This register needs to be set to (1<<1) for RV350 to correctly
+ * perform depth test (see --vb-triangles in r300_demo)
+ * Don't know about other chips. - Vladimir
+ * This is set to 3 when GL_POLYGON_OFFSET_FILL is on.
+ * My guess is that there are two bits for each zbias primitive
+ * (FILL, LINE, POINT).
+ *  One to enable depth test and one for depth write.
+ * Yet this doesn't explain why depth writes work ...
+ */
+#define R300_RE_OCCLUSION_CNTL		    0x42B4
+#	define R300_OCCLUSION_ON		(1<<1)
+
+#define R300_RE_CULL_CNTL                   0x42B8
+#       define R300_CULL_FRONT                   (1 << 0)
+#       define R300_CULL_BACK                    (1 << 1)
+#       define R300_FRONT_FACE_CCW               (0 << 2)
+#       define R300_FRONT_FACE_CW                (1 << 2)
+
+
+/* BEGIN: Rasterization / Interpolators - many guesses */
+
+/* 0_UNKNOWN_18 has always been set except for clear operations.
+ * TC_CNT is the number of incoming texture coordinate sets (i.e. it depends
+ * on the vertex program, *not* the fragment program)
+ */
+#define R300_RS_CNTL_0                      0x4300
+#       define R300_RS_CNTL_TC_CNT_SHIFT         2
+#       define R300_RS_CNTL_TC_CNT_MASK          (7 << 2)
+	/* number of color interpolators used */
+#	define R300_RS_CNTL_CI_CNT_SHIFT         7
+#       define R300_RS_CNTL_0_UNKNOWN_18         (1 << 18)
+	/* Guess: RS_CNTL_1 holds the index of the highest used RS_ROUTE_n
+	   register. */
+#define R300_RS_CNTL_1                      0x4304
+
+/* gap */
+
+/* Only used for texture coordinates.
+ * Use the source field to route texture coordinate input from the
+ * vertex program to the desired interpolator. Note that the source
+ * field is relative to the outputs the vertex program *actually*
+ * writes. If a vertex program only writes texcoord[1], this will
+ * be source index 0.
+ * Set INTERP_USED on all interpolators that produce data used by
+ * the fragment program. INTERP_USED looks like a swizzling mask,
+ * but I haven't seen it used that way.
+ *
+ * Note: The _UNKNOWN constants are always set in their respective
+ * register. I don't know if this is necessary.
+ */
+#define R300_RS_INTERP_0                    0x4310
+#define R300_RS_INTERP_1                    0x4314
+#       define R300_RS_INTERP_1_UNKNOWN          0x40
+#define R300_RS_INTERP_2                    0x4318
+#       define R300_RS_INTERP_2_UNKNOWN          0x80
+#define R300_RS_INTERP_3                    0x431C
+#       define R300_RS_INTERP_3_UNKNOWN          0xC0
+#define R300_RS_INTERP_4                    0x4320
+#define R300_RS_INTERP_5                    0x4324
+#define R300_RS_INTERP_6                    0x4328
+#define R300_RS_INTERP_7                    0x432C
+#       define R300_RS_INTERP_SRC_SHIFT          2
+#       define R300_RS_INTERP_SRC_MASK           (7 << 2)
+#       define R300_RS_INTERP_USED               0x00D10000
+
+/* These DWORDs control how vertex data is routed into fragment program
+ * registers, after interpolators.
+ */
+#define R300_RS_ROUTE_0                     0x4330
+#define R300_RS_ROUTE_1                     0x4334
+#define R300_RS_ROUTE_2                     0x4338
+#define R300_RS_ROUTE_3                     0x433C /* GUESS */
+#define R300_RS_ROUTE_4                     0x4340 /* GUESS */
+#define R300_RS_ROUTE_5                     0x4344 /* GUESS */
+#define R300_RS_ROUTE_6                     0x4348 /* GUESS */
+#define R300_RS_ROUTE_7                     0x434C /* GUESS */
+#       define R300_RS_ROUTE_SOURCE_INTERP_0     0
+#       define R300_RS_ROUTE_SOURCE_INTERP_1     1
+#       define R300_RS_ROUTE_SOURCE_INTERP_2     2
+#       define R300_RS_ROUTE_SOURCE_INTERP_3     3
+#       define R300_RS_ROUTE_SOURCE_INTERP_4     4
+#       define R300_RS_ROUTE_SOURCE_INTERP_5     5 /* GUESS */
+#       define R300_RS_ROUTE_SOURCE_INTERP_6     6 /* GUESS */
+#       define R300_RS_ROUTE_SOURCE_INTERP_7     7 /* GUESS */
+#       define R300_RS_ROUTE_ENABLE              (1 << 3) /* GUESS */
+#       define R300_RS_ROUTE_DEST_SHIFT          6
+#       define R300_RS_ROUTE_DEST_MASK           (31 << 6) /* GUESS */
+
+/* Special handling for color: When the fragment program uses color,
+ * the ROUTE_0_COLOR bit is set and ROUTE_0_COLOR_DEST contains the
+ * color register index.
+ *
+ * Apparently you may set the R300_RS_ROUTE_0_COLOR bit, but not provide any
+ * R300_RS_ROUTE_0_COLOR_DEST value; this setup is used for clearing the state.
+ * See r300_ioctl.c:r300EmitClearState. I'm not sure if this setup is strictly
+ * correct or not. - Oliver.
+ */
+#       define R300_RS_ROUTE_0_COLOR             (1 << 14)
+#       define R300_RS_ROUTE_0_COLOR_DEST_SHIFT  17
+#       define R300_RS_ROUTE_0_COLOR_DEST_MASK   (31 << 17) /* GUESS */
+/* As above, but for secondary color */
+#		define R300_RS_ROUTE_1_COLOR1            (1 << 14)
+#		define R300_RS_ROUTE_1_COLOR1_DEST_SHIFT 17
+#		define R300_RS_ROUTE_1_COLOR1_DEST_MASK  (31 << 17)
+#		define R300_RS_ROUTE_1_UNKNOWN11         (1 << 11)
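+
+/* Illustrative sketch (not part of the original header; note that several
+ * of the fields used here are marked GUESS above): a minimal RS_ROUTE_0
+ * value routing texcoord interpolator 0 into fragment program register
+ * tex_fp_reg and primary color into color_fp_reg.
+ */
+static inline u32 r300_example_rs_route_0(u32 tex_fp_reg, u32 color_fp_reg)
+{
+	return R300_RS_ROUTE_SOURCE_INTERP_0 |
+	       R300_RS_ROUTE_ENABLE |
+	       (tex_fp_reg << R300_RS_ROUTE_DEST_SHIFT) |
+	       R300_RS_ROUTE_0_COLOR |
+	       (color_fp_reg << R300_RS_ROUTE_0_COLOR_DEST_SHIFT);
+}
+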
+/* END: Rasterization / Interpolators - many guesses */
+
+/* Hierarchical Z Enable */
+#define R300_SC_HYPERZ                   0x43a4
+#	define R300_SC_HYPERZ_DISABLE     (0 << 0)
+#	define R300_SC_HYPERZ_ENABLE      (1 << 0)
+#	define R300_SC_HYPERZ_MIN         (0 << 1)
+#	define R300_SC_HYPERZ_MAX         (1 << 1)
+#	define R300_SC_HYPERZ_ADJ_256     (0 << 2)
+#	define R300_SC_HYPERZ_ADJ_128     (1 << 2)
+#	define R300_SC_HYPERZ_ADJ_64      (2 << 2)
+#	define R300_SC_HYPERZ_ADJ_32      (3 << 2)
+#	define R300_SC_HYPERZ_ADJ_16      (4 << 2)
+#	define R300_SC_HYPERZ_ADJ_8       (5 << 2)
+#	define R300_SC_HYPERZ_ADJ_4       (6 << 2)
+#	define R300_SC_HYPERZ_ADJ_2       (7 << 2)
+#	define R300_SC_HYPERZ_HZ_Z0MIN_NO (0 << 5)
+#	define R300_SC_HYPERZ_HZ_Z0MIN    (1 << 5)
+#	define R300_SC_HYPERZ_HZ_Z0MAX_NO (0 << 6)
+#	define R300_SC_HYPERZ_HZ_Z0MAX    (1 << 6)
+
+#define R300_SC_EDGERULE                 0x43a8
+
+/* BEGIN: Scissors and cliprects */
+
+/* There are four clipping rectangles. Their corner coordinates are inclusive.
+ * Every pixel is assigned a number between 0 and 15 by setting bits 0-3 depending
+ * on whether the pixel is inside cliprects 0-3, respectively. For example,
+ * if a pixel is inside cliprects 0 and 1, but outside 2 and 3, it is assigned
+ * the number 3 (binary 0011).
+ * Iff the bit corresponding to the pixel's number in RE_CLIPRECT_CNTL is set,
+ * the pixel is rasterized.
+ *
+ * In addition to this, there is a scissors rectangle. Only pixels inside the
+ * scissors rectangle are drawn. (coordinates are inclusive)
+ *
+ * For some reason, the top-left corner of the framebuffer is at (1440, 1440)
+ * for the purpose of clipping and scissors.
+ */
+#define R300_RE_CLIPRECT_TL_0               0x43B0
+#define R300_RE_CLIPRECT_BR_0               0x43B4
+#define R300_RE_CLIPRECT_TL_1               0x43B8
+#define R300_RE_CLIPRECT_BR_1               0x43BC
+#define R300_RE_CLIPRECT_TL_2               0x43C0
+#define R300_RE_CLIPRECT_BR_2               0x43C4
+#define R300_RE_CLIPRECT_TL_3               0x43C8
+#define R300_RE_CLIPRECT_BR_3               0x43CC
+#       define R300_CLIPRECT_OFFSET              1440
+#       define R300_CLIPRECT_MASK                0x1FFF
+#       define R300_CLIPRECT_X_SHIFT             0
+#       define R300_CLIPRECT_X_MASK              (0x1FFF << 0)
+#       define R300_CLIPRECT_Y_SHIFT             13
+#       define R300_CLIPRECT_Y_MASK              (0x1FFF << 13)
+#define R300_RE_CLIPRECT_CNTL               0x43D0
+#       define R300_CLIP_OUT                     (1 << 0)
+#       define R300_CLIP_0                       (1 << 1)
+#       define R300_CLIP_1                       (1 << 2)
+#       define R300_CLIP_10                      (1 << 3)
+#       define R300_CLIP_2                       (1 << 4)
+#       define R300_CLIP_20                      (1 << 5)
+#       define R300_CLIP_21                      (1 << 6)
+#       define R300_CLIP_210                     (1 << 7)
+#       define R300_CLIP_3                       (1 << 8)
+#       define R300_CLIP_30                      (1 << 9)
+#       define R300_CLIP_31                      (1 << 10)
+#       define R300_CLIP_310                     (1 << 11)
+#       define R300_CLIP_32                      (1 << 12)
+#       define R300_CLIP_320                     (1 << 13)
+#       define R300_CLIP_321                     (1 << 14)
+#       define R300_CLIP_3210                    (1 << 15)
+
+/* gap */
+
+#define R300_RE_SCISSORS_TL                 0x43E0
+#define R300_RE_SCISSORS_BR                 0x43E4
+#       define R300_SCISSORS_OFFSET              1440
+#       define R300_SCISSORS_X_SHIFT             0
+#       define R300_SCISSORS_X_MASK              (0x1FFF << 0)
+#       define R300_SCISSORS_Y_SHIFT             13
+#       define R300_SCISSORS_Y_MASK              (0x1FFF << 13)
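+
+/* Illustrative sketch (ours, not from the original header): packing one
+ * scissors corner, applying the 1440 offset described above.
+ */
+static inline u32 r300_example_scissors_corner(u32 x, u32 y)
+{
+	return (((x + R300_SCISSORS_OFFSET) << R300_SCISSORS_X_SHIFT) &
+		R300_SCISSORS_X_MASK) |
+	       (((y + R300_SCISSORS_OFFSET) << R300_SCISSORS_Y_SHIFT) &
+		R300_SCISSORS_Y_MASK);
+}
+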
+/* END: Scissors and cliprects */
+
+/* BEGIN: Texture specification */
+
+/*
+ * The texture specification dwords are grouped by meaning and not by texture
+ * unit. This means that e.g. the offset for texture image unit N is found in
+ * register TX_OFFSET_0 + (4*N)
+ */
+#define R300_TX_FILTER_0                    0x4400
+#       define R300_TX_REPEAT                    0
+#       define R300_TX_MIRRORED                  1
+#       define R300_TX_CLAMP                     4
+#       define R300_TX_CLAMP_TO_EDGE             2
+#       define R300_TX_CLAMP_TO_BORDER           6
+#       define R300_TX_WRAP_S_SHIFT              0
+#       define R300_TX_WRAP_S_MASK               (7 << 0)
+#       define R300_TX_WRAP_T_SHIFT              3
+#       define R300_TX_WRAP_T_MASK               (7 << 3)
+#       define R300_TX_WRAP_Q_SHIFT              6
+#       define R300_TX_WRAP_Q_MASK               (7 << 6)
+#       define R300_TX_MAG_FILTER_NEAREST        (1 << 9)
+#       define R300_TX_MAG_FILTER_LINEAR         (2 << 9)
+#       define R300_TX_MAG_FILTER_MASK           (3 << 9)
+#       define R300_TX_MIN_FILTER_NEAREST        (1 << 11)
+#       define R300_TX_MIN_FILTER_LINEAR         (2 << 11)
+#	define R300_TX_MIN_FILTER_NEAREST_MIP_NEAREST       (5  <<  11)
+#	define R300_TX_MIN_FILTER_NEAREST_MIP_LINEAR        (9  <<  11)
+#	define R300_TX_MIN_FILTER_LINEAR_MIP_NEAREST        (6  <<  11)
+#	define R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR         (10 <<  11)
+
+/* NOTE: NEAREST doesn't seem to exist.
+ * I'm not setting MAG_FILTER_MASK and (3 << 11) for all
+ * anisotropy modes because that would void the selected mag filter.
+ */
+#	define R300_TX_MIN_FILTER_ANISO_NEAREST             (0 << 13)
+#	define R300_TX_MIN_FILTER_ANISO_LINEAR              (0 << 13)
+#	define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST (1 << 13)
+#	define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR  (2 << 13)
+#       define R300_TX_MIN_FILTER_MASK   ( (15 << 11) | (3 << 13) )
+#	define R300_TX_MAX_ANISO_1_TO_1  (0 << 21)
+#	define R300_TX_MAX_ANISO_2_TO_1  (2 << 21)
+#	define R300_TX_MAX_ANISO_4_TO_1  (4 << 21)
+#	define R300_TX_MAX_ANISO_8_TO_1  (6 << 21)
+#	define R300_TX_MAX_ANISO_16_TO_1 (8 << 21)
+#	define R300_TX_MAX_ANISO_MASK    (14 << 21)
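+
+/* Illustrative sketch (ours): because texture state is grouped by meaning
+ * rather than by unit (see the comment at the start of this section), the
+ * filter word for texture unit N lives at R300_TX_FILTER_0 + 4*N; this is
+ * a plain GL_REPEAT/GL_LINEAR setup.
+ */
+static inline void r300_example_tx_filter(u32 unit, u32 *reg, u32 *val)
+{
+	*reg = R300_TX_FILTER_0 + 4 * unit;
+	*val = (R300_TX_REPEAT << R300_TX_WRAP_S_SHIFT) |
+	       (R300_TX_REPEAT << R300_TX_WRAP_T_SHIFT) |
+	       R300_TX_MAG_FILTER_LINEAR |
+	       R300_TX_MIN_FILTER_LINEAR;
+}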
+
+#define R300_TX_FILTER1_0                      0x4440
+#	define R300_CHROMA_KEY_MODE_DISABLE    0
+#	define R300_CHROMA_KEY_FORCE	       1
+#	define R300_CHROMA_KEY_BLEND           2
+#	define R300_MC_ROUND_NORMAL            (0<<2)
+#	define R300_MC_ROUND_MPEG4             (1<<2)
+#	define R300_LOD_BIAS_MASK	    0x1fff
+#	define R300_EDGE_ANISO_EDGE_DIAG       (0<<13)
+#	define R300_EDGE_ANISO_EDGE_ONLY       (1<<13)
+#	define R300_MC_COORD_TRUNCATE_DISABLE  (0<<14)
+#	define R300_MC_COORD_TRUNCATE_MPEG     (1<<14)
+#	define R300_TX_TRI_PERF_0_8            (0<<15)
+#	define R300_TX_TRI_PERF_1_8            (1<<15)
+#	define R300_TX_TRI_PERF_1_4            (2<<15)
+#	define R300_TX_TRI_PERF_3_8            (3<<15)
+#	define R300_ANISO_THRESHOLD_MASK       (7<<17)
+
+#define R300_TX_SIZE_0                      0x4480
+#       define R300_TX_WIDTHMASK_SHIFT           0
+#       define R300_TX_WIDTHMASK_MASK            (2047 << 0)
+#       define R300_TX_HEIGHTMASK_SHIFT          11
+#       define R300_TX_HEIGHTMASK_MASK           (2047 << 11)
+#       define R300_TX_UNK23                     (1 << 23)
+#       define R300_TX_MAX_MIP_LEVEL_SHIFT       26
+#       define R300_TX_MAX_MIP_LEVEL_MASK        (0xf << 26)
+#       define R300_TX_SIZE_PROJECTED            (1<<30)
+#       define R300_TX_SIZE_TXPITCH_EN           (1<<31)
+#define R300_TX_FORMAT_0                    0x44C0
+	/* The interpretation of the format word by Wladimir van der Laan */
+	/* The X, Y, Z and W refer to the layout of the components.
+	   They are given meanings as R, G, B and Alpha by the swizzle
+	   specification */
+#	define R300_TX_FORMAT_X8		    0x0
+#	define R300_TX_FORMAT_X16		    0x1
+#	define R300_TX_FORMAT_Y4X4		    0x2
+#	define R300_TX_FORMAT_Y8X8		    0x3
+#	define R300_TX_FORMAT_Y16X16		    0x4
+#	define R300_TX_FORMAT_Z3Y3X2		    0x5
+#	define R300_TX_FORMAT_Z5Y6X5		    0x6
+#	define R300_TX_FORMAT_Z6Y5X5		    0x7
+#	define R300_TX_FORMAT_Z11Y11X10		    0x8
+#	define R300_TX_FORMAT_Z10Y11X11		    0x9
+#	define R300_TX_FORMAT_W4Z4Y4X4		    0xA
+#	define R300_TX_FORMAT_W1Z5Y5X5		    0xB
+#	define R300_TX_FORMAT_W8Z8Y8X8		    0xC
+#	define R300_TX_FORMAT_W2Z10Y10X10	    0xD
+#	define R300_TX_FORMAT_W16Z16Y16X16	    0xE
+#	define R300_TX_FORMAT_DXT1		    0xF
+#	define R300_TX_FORMAT_DXT3		    0x10
+#	define R300_TX_FORMAT_DXT5		    0x11
+#	define R300_TX_FORMAT_D3DMFT_CxV8U8	    0x12     /* no swizzle */
+#	define R300_TX_FORMAT_A8R8G8B8		    0x13     /* no swizzle */
+#	define R300_TX_FORMAT_B8G8_B8G8		    0x14     /* no swizzle */
+#	define R300_TX_FORMAT_G8R8_G8B8		    0x15     /* no swizzle */
+	/* 0x16 - some 16 bit green format.. ?? */
+#	define R300_TX_FORMAT_UNK25		   (1 << 25) /* no swizzle */
+#	define R300_TX_FORMAT_CUBIC_MAP		   (1 << 26)
+
+	/* gap */
+	/* Floating point formats */
+	/* Note - hardware supports both 16 and 32 bit floating point */
+#	define R300_TX_FORMAT_FL_I16		    0x18
+#	define R300_TX_FORMAT_FL_I16A16		    0x19
+#	define R300_TX_FORMAT_FL_R16G16B16A16	    0x1A
+#	define R300_TX_FORMAT_FL_I32		    0x1B
+#	define R300_TX_FORMAT_FL_I32A32		    0x1C
+#	define R300_TX_FORMAT_FL_R32G32B32A32	    0x1D
+	/* alpha modes, convenience mostly */
+	/* if you have alpha, pick the constant appropriate to the
+	   number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc.) */
+#	define R300_TX_FORMAT_ALPHA_1CH		    0x000
+#	define R300_TX_FORMAT_ALPHA_2CH		    0x200
+#	define R300_TX_FORMAT_ALPHA_4CH		    0x600
+#	define R300_TX_FORMAT_ALPHA_NONE	    0xA00
+	/* Swizzling */
+	/* constants */
+#	define R300_TX_FORMAT_X		0
+#	define R300_TX_FORMAT_Y		1
+#	define R300_TX_FORMAT_Z		2
+#	define R300_TX_FORMAT_W		3
+#	define R300_TX_FORMAT_ZERO	4
+#	define R300_TX_FORMAT_ONE	5
+	/* 2.0*Z, everything above 1.0 is set to 0.0 */
+#	define R300_TX_FORMAT_CUT_Z	6
+	/* 2.0*W, everything above 1.0 is set to 0.0 */
+#	define R300_TX_FORMAT_CUT_W	7
+
+#	define R300_TX_FORMAT_B_SHIFT	18
+#	define R300_TX_FORMAT_G_SHIFT	15
+#	define R300_TX_FORMAT_R_SHIFT	12
+#	define R300_TX_FORMAT_A_SHIFT	9
+	/* Convenience macro to take care of layout and swizzling */
+#	define R300_EASY_TX_FORMAT(B, G, R, A, FMT)	(		\
+		((R300_TX_FORMAT_##B)<<R300_TX_FORMAT_B_SHIFT)		\
+		| ((R300_TX_FORMAT_##G)<<R300_TX_FORMAT_G_SHIFT)	\
+		| ((R300_TX_FORMAT_##R)<<R300_TX_FORMAT_R_SHIFT)	\
+		| ((R300_TX_FORMAT_##A)<<R300_TX_FORMAT_A_SHIFT)	\
+		| (R300_TX_FORMAT_##FMT)				\
+		)
+	/* These can be ORed with the result of R300_EASY_TX_FORMAT().
+	   We don't really know what they do. Take values from a
+	   constant color ? */
+#	define R300_TX_FORMAT_CONST_X		(1<<5)
+#	define R300_TX_FORMAT_CONST_Y		(2<<5)
+#	define R300_TX_FORMAT_CONST_Z		(4<<5)
+#	define R300_TX_FORMAT_CONST_W		(8<<5)
+
+#	define R300_TX_FORMAT_YUV_MODE		0x00800000
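+
+/* Illustrative usage sketch (ours, not from the original header): one
+ * plausible format word for an ARGB8888-style texture, selecting the
+ * X/Y/Z/W bytes of the W8Z8Y8X8 layout as B/G/R/A; the exact swizzle for
+ * a given GL format is a guess.
+ */
+static inline u32 r300_example_tx_format_argb8888(void)
+{
+	return R300_EASY_TX_FORMAT(X, Y, Z, W, W8Z8Y8X8);
+}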
+
+#define R300_TX_PITCH_0			    0x4500 /* obviously missing in the gap */
+#define R300_TX_OFFSET_0                    0x4540
+	/* BEGIN: Guess from R200 */
+#       define R300_TXO_ENDIAN_NO_SWAP           (0 << 0)
+#       define R300_TXO_ENDIAN_BYTE_SWAP         (1 << 0)
+#       define R300_TXO_ENDIAN_WORD_SWAP         (2 << 0)
+#       define R300_TXO_ENDIAN_HALFDW_SWAP       (3 << 0)
+#       define R300_TXO_MACRO_TILE               (1 << 2)
+#       define R300_TXO_MICRO_TILE               (1 << 3)
+#       define R300_TXO_OFFSET_MASK              0xffffffe0
+#       define R300_TXO_OFFSET_SHIFT             5
+	/* END: Guess from R200 */
+
+/* 32 bit chroma key */
+#define R300_TX_CHROMA_KEY_0                      0x4580
+/* ff00ff00 == { 0, 1.0, 0, 1.0 } */
+#define R300_TX_BORDER_COLOR_0              0x45C0
+
+/* END: Texture specification */
+
+/* BEGIN: Fragment program instruction set */
+
+/* Fragment programs are written directly into register space.
+ * There are separate instruction streams for texture instructions and ALU
+ * instructions.
+ * In order to synchronize these streams, the program is divided into up
+ * to 4 nodes. Each node begins with a number of TEX operations, followed
+ * by a number of ALU operations.
+ * The first node can have zero TEX ops; all subsequent nodes must have at
+ * least one TEX op.
+ * All nodes must have at least one ALU op.
+ *
+ * The index of the last node is stored in PFS_CNTL_0: A value of 0 means
+ * 1 node, a value of 3 means 4 nodes.
+ * The total number of instructions is defined in PFS_CNTL_2. The offsets are
+ * offsets into the respective instruction streams, while *_END points to the
+ * last instruction relative to this offset.
+ */
+#define R300_PFS_CNTL_0                     0x4600
+#       define R300_PFS_CNTL_LAST_NODES_SHIFT    0
+#       define R300_PFS_CNTL_LAST_NODES_MASK     (3 << 0)
+#       define R300_PFS_CNTL_FIRST_NODE_HAS_TEX  (1 << 3)
+#define R300_PFS_CNTL_1                     0x4604
+/* There is an unshifted value here which has so far always been equal to the
+ * index of the highest used temporary register.
+ */
+#define R300_PFS_CNTL_2                     0x4608
+#       define R300_PFS_CNTL_ALU_OFFSET_SHIFT    0
+#       define R300_PFS_CNTL_ALU_OFFSET_MASK     (63 << 0)
+#       define R300_PFS_CNTL_ALU_END_SHIFT       6
+#       define R300_PFS_CNTL_ALU_END_MASK        (63 << 6)
+#       define R300_PFS_CNTL_TEX_OFFSET_SHIFT    12
+#       define R300_PFS_CNTL_TEX_OFFSET_MASK     (31 << 12) /* GUESS */
+#       define R300_PFS_CNTL_TEX_END_SHIFT       18
+#       define R300_PFS_CNTL_TEX_END_MASK        (31 << 18) /* GUESS */
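+
+/* Illustrative sketch (ours): PFS_CNTL_0/2 for a single-node program with
+ * alu_cnt ALU and tex_cnt TEX instructions, both streams starting at
+ * offset 0; the *_END fields hold the index of the last instruction,
+ * hence the "- 1".
+ */
+static inline void r300_example_pfs_cntl(u32 alu_cnt, u32 tex_cnt,
+					 u32 *cntl0, u32 *cntl2)
+{
+	*cntl0 = (0 << R300_PFS_CNTL_LAST_NODES_SHIFT) |
+		 (tex_cnt ? R300_PFS_CNTL_FIRST_NODE_HAS_TEX : 0);
+	*cntl2 = (0 << R300_PFS_CNTL_ALU_OFFSET_SHIFT) |
+		 ((alu_cnt - 1) << R300_PFS_CNTL_ALU_END_SHIFT) |
+		 (0 << R300_PFS_CNTL_TEX_OFFSET_SHIFT) |
+		 ((tex_cnt ? tex_cnt - 1 : 0) << R300_PFS_CNTL_TEX_END_SHIFT);
+}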
+
+/* gap */
+
+/* Nodes are stored backwards. The last active node is always stored in
+ * PFS_NODE_3.
+ * Example: In a 2-node program, NODE_0 and NODE_1 are set to 0. The
+ * first node is stored in NODE_2, the second node is stored in NODE_3.
+ *
+ * Offsets are relative to the master offset from PFS_CNTL_2.
+ */
+#define R300_PFS_NODE_0                     0x4610
+#define R300_PFS_NODE_1                     0x4614
+#define R300_PFS_NODE_2                     0x4618
+#define R300_PFS_NODE_3                     0x461C
+#       define R300_PFS_NODE_ALU_OFFSET_SHIFT    0
+#       define R300_PFS_NODE_ALU_OFFSET_MASK     (63 << 0)
+#       define R300_PFS_NODE_ALU_END_SHIFT       6
+#       define R300_PFS_NODE_ALU_END_MASK        (63 << 6)
+#       define R300_PFS_NODE_TEX_OFFSET_SHIFT    12
+#       define R300_PFS_NODE_TEX_OFFSET_MASK     (31 << 12)
+#       define R300_PFS_NODE_TEX_END_SHIFT       17
+#       define R300_PFS_NODE_TEX_END_MASK        (31 << 17)
+#		define R300_PFS_NODE_OUTPUT_COLOR        (1 << 22)
+#		define R300_PFS_NODE_OUTPUT_DEPTH        (1 << 23)
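+
+/* Illustrative sketch (ours, not from the original header): per the
+ * "stored backwards" rule above, a single-node program describes itself
+ * entirely in PFS_NODE_3 and leaves NODE_0..NODE_2 at zero.
+ */
+static inline u32 r300_example_pfs_last_node(u32 alu_cnt, u32 tex_cnt)
+{
+	return (0 << R300_PFS_NODE_ALU_OFFSET_SHIFT) |
+	       ((alu_cnt - 1) << R300_PFS_NODE_ALU_END_SHIFT) |
+	       (0 << R300_PFS_NODE_TEX_OFFSET_SHIFT) |
+	       ((tex_cnt ? tex_cnt - 1 : 0) << R300_PFS_NODE_TEX_END_SHIFT) |
+	       R300_PFS_NODE_OUTPUT_COLOR;
+}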
+
+/* TEX
+ * As far as I can tell, texture instructions cannot write into output
+ * registers directly. A subsequent ALU instruction is always necessary,
+ * even if it's just MAD o0, r0, 1, 0
+ */
+#define R300_PFS_TEXI_0                     0x4620
+#	define R300_FPITX_SRC_SHIFT              0
+#	define R300_FPITX_SRC_MASK               (31 << 0)
+	/* GUESS */
+#	define R300_FPITX_SRC_CONST              (1 << 5)
+#	define R300_FPITX_DST_SHIFT              6
+#	define R300_FPITX_DST_MASK               (31 << 6)
+#	define R300_FPITX_IMAGE_SHIFT            11
+	/* GUESS based on layout and native limits */
+#       define R300_FPITX_IMAGE_MASK             (15 << 11)
+/* Unsure if these are opcodes, or some kind of bitfield, but this is how
+ * they were set when I checked
+ */
+#	define R300_FPITX_OPCODE_SHIFT		15
+#		define R300_FPITX_OP_TEX	1
+#		define R300_FPITX_OP_KIL	2
+#		define R300_FPITX_OP_TXP	3
+#		define R300_FPITX_OP_TXB	4
+#	define R300_FPITX_OPCODE_MASK           (7 << 15)
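+
+/* Illustrative sketch (ours): encoding a plain TEX instruction that samples
+ * texture image 'image' with the coordinate in register 'src' and writes
+ * the result to temporary 'dst', assuming the (partly guessed) fields above.
+ */
+static inline u32 r300_example_tex_instr(u32 dst, u32 src, u32 image)
+{
+	return (src << R300_FPITX_SRC_SHIFT) |
+	       (dst << R300_FPITX_DST_SHIFT) |
+	       (image << R300_FPITX_IMAGE_SHIFT) |
+	       (R300_FPITX_OP_TEX << R300_FPITX_OPCODE_SHIFT);
+}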
+
+/* ALU
+ * The ALU instruction register blocks are enumerated according to the order
+ * in which fglrx emits them. I assume there is space for 64 instructions, since
+ * each block has space for a maximum of 64 DWORDs, and this matches reported
+ * native limits.
+ *
+ * The basic functional block seems to be one MAD for each color and alpha,
+ * and an adder that adds all components after the MUL.
+ *  - ADD, MUL, MAD etc.: use MAD with appropriate neutral operands
+ *  - DP4: Use OUTC_DP4, OUTA_DP4
+ *  - DP3: Use OUTC_DP3, OUTA_DP4, appropriate alpha operands
+ *  - DPH: Use OUTC_DP4, OUTA_DP4, appropriate alpha operands
+ *  - CMPH: If ARG2 > 0.5, return ARG0, else return ARG1
+ *  - CMP: If ARG2 < 0, return ARG1, else return ARG0
+ *  - FLR: use FRC+MAD
+ *  - XPD: use MAD+MAD
+ *  - SGE, SLT: use MAD+CMP
+ *  - RSQ: use ABS modifier for argument
+ *  - Use OUTC_REPL_ALPHA to write results of an alpha-only operation
+ *    (e.g. RCP) into color register
+ *  - apparently, there's no quick DST operation
+ *  - fglrx set FPI2_UNKNOWN_31 on a "MAD fragment.color, tmp0, tmp1, tmp2"
+ *  - fglrx set FPI2_UNKNOWN_31 on a "MAX r2, r1, c0"
+ *  - fglrx once set FPI0_UNKNOWN_31 on a "FRC r1, r1"
+ *
+ * Operand selection
+ * First stage selects three sources from the available registers and
+ * constant parameters. This is defined in INSTR1 (color) and INSTR3 (alpha).
+ * fglrx sorts the three source fields: Registers before constants,
+ * lower indices before higher indices; I do not know whether this is
+ * necessary.
+ *
+ * fglrx fills unused sources with "read constant 0"
+ * According to specs, you cannot select more than two different constants.
+ *
+ * Second stage selects the operands from the sources. This is defined in
+ * INSTR0 (color) and INSTR2 (alpha). You can also select the special constants
+ * zero and one.
+ * Swizzling and negation happens in this stage, as well.
+ *
+ * Important: Color and alpha seem to be mostly separate, i.e. their source
+ * selection appears to be fully independent (the register storage is probably
+ * physically split into a color and an alpha section).
+ * However (because of the apparent physical split), there is some interaction
+ * WRT swizzling. If, for example, you want to load an R component into an
+ * Alpha operand, this R component is taken from a *color* source, not from
+ * an alpha source. The corresponding register doesn't even have to appear in
+ * the alpha sources list. (I hope this all makes sense to you)
+ *
+ * Destination selection
+ * The destination register index is in FPI1 (color) and FPI3 (alpha)
+ * together with enable bits.
+ * There are separate enable bits for writing into temporary registers
+ * (DSTC_REG_* /DSTA_REG) and program output registers (DSTC_OUTPUT_*
+ * /DSTA_OUTPUT). You can write to both at once, or not write at all (the
+ * same index must be used for both).
+ *
+ * Note: There is a special form for LRP
+ *  - Argument order is the same as in ARB_fragment_program.
+ *  - Operation is MAD
+ *  - ARG1 is set to ARGC_SRC1C_LRP/ARGC_SRC1A_LRP
+ *  - Set FPI0/FPI2_SPECIAL_LRP
+ * Arbitrary LRP (including support for swizzling) requires vanilla MAD+MAD
+ */
+#define R300_PFS_INSTR1_0                   0x46C0
+#       define R300_FPI1_SRC0C_SHIFT             0
+#       define R300_FPI1_SRC0C_MASK              (31 << 0)
+#       define R300_FPI1_SRC0C_CONST             (1 << 5)
+#       define R300_FPI1_SRC1C_SHIFT             6
+#       define R300_FPI1_SRC1C_MASK              (31 << 6)
+#       define R300_FPI1_SRC1C_CONST             (1 << 11)
+#       define R300_FPI1_SRC2C_SHIFT             12
+#       define R300_FPI1_SRC2C_MASK              (31 << 12)
+#       define R300_FPI1_SRC2C_CONST             (1 << 17)
+#       define R300_FPI1_SRC_MASK                0x0003ffff
+#       define R300_FPI1_DSTC_SHIFT              18
+#       define R300_FPI1_DSTC_MASK               (31 << 18)
+#		define R300_FPI1_DSTC_REG_MASK_SHIFT     23
+#       define R300_FPI1_DSTC_REG_X              (1 << 23)
+#       define R300_FPI1_DSTC_REG_Y              (1 << 24)
+#       define R300_FPI1_DSTC_REG_Z              (1 << 25)
+#		define R300_FPI1_DSTC_OUTPUT_MASK_SHIFT  26
+#       define R300_FPI1_DSTC_OUTPUT_X           (1 << 26)
+#       define R300_FPI1_DSTC_OUTPUT_Y           (1 << 27)
+#       define R300_FPI1_DSTC_OUTPUT_Z           (1 << 28)
+
+#define R300_PFS_INSTR3_0                   0x47C0
+#       define R300_FPI3_SRC0A_SHIFT             0
+#       define R300_FPI3_SRC0A_MASK              (31 << 0)
+#       define R300_FPI3_SRC0A_CONST             (1 << 5)
+#       define R300_FPI3_SRC1A_SHIFT             6
+#       define R300_FPI3_SRC1A_MASK              (31 << 6)
+#       define R300_FPI3_SRC1A_CONST             (1 << 11)
+#       define R300_FPI3_SRC2A_SHIFT             12
+#       define R300_FPI3_SRC2A_MASK              (31 << 12)
+#       define R300_FPI3_SRC2A_CONST             (1 << 17)
+#       define R300_FPI3_SRC_MASK                0x0003ffff
+#       define R300_FPI3_DSTA_SHIFT              18
+#       define R300_FPI3_DSTA_MASK               (31 << 18)
+#       define R300_FPI3_DSTA_REG                (1 << 23)
+#       define R300_FPI3_DSTA_OUTPUT             (1 << 24)
+#		define R300_FPI3_DSTA_DEPTH              (1 << 27)
+
+#define R300_PFS_INSTR0_0                   0x48C0
+#       define R300_FPI0_ARGC_SRC0C_XYZ          0
+#       define R300_FPI0_ARGC_SRC0C_XXX          1
+#       define R300_FPI0_ARGC_SRC0C_YYY          2
+#       define R300_FPI0_ARGC_SRC0C_ZZZ          3
+#       define R300_FPI0_ARGC_SRC1C_XYZ          4
+#       define R300_FPI0_ARGC_SRC1C_XXX          5
+#       define R300_FPI0_ARGC_SRC1C_YYY          6
+#       define R300_FPI0_ARGC_SRC1C_ZZZ          7
+#       define R300_FPI0_ARGC_SRC2C_XYZ          8
+#       define R300_FPI0_ARGC_SRC2C_XXX          9
+#       define R300_FPI0_ARGC_SRC2C_YYY          10
+#       define R300_FPI0_ARGC_SRC2C_ZZZ          11
+#       define R300_FPI0_ARGC_SRC0A              12
+#       define R300_FPI0_ARGC_SRC1A              13
+#       define R300_FPI0_ARGC_SRC2A              14
+#       define R300_FPI0_ARGC_SRC1C_LRP          15
+#       define R300_FPI0_ARGC_ZERO               20
+#       define R300_FPI0_ARGC_ONE                21
+	/* GUESS */
+#       define R300_FPI0_ARGC_HALF               22
+#       define R300_FPI0_ARGC_SRC0C_YZX          23
+#       define R300_FPI0_ARGC_SRC1C_YZX          24
+#       define R300_FPI0_ARGC_SRC2C_YZX          25
+#       define R300_FPI0_ARGC_SRC0C_ZXY          26
+#       define R300_FPI0_ARGC_SRC1C_ZXY          27
+#       define R300_FPI0_ARGC_SRC2C_ZXY          28
+#       define R300_FPI0_ARGC_SRC0CA_WZY         29
+#       define R300_FPI0_ARGC_SRC1CA_WZY         30
+#       define R300_FPI0_ARGC_SRC2CA_WZY         31
+
+#       define R300_FPI0_ARG0C_SHIFT             0
+#       define R300_FPI0_ARG0C_MASK              (31 << 0)
+#       define R300_FPI0_ARG0C_NEG               (1 << 5)
+#       define R300_FPI0_ARG0C_ABS               (1 << 6)
+#       define R300_FPI0_ARG1C_SHIFT             7
+#       define R300_FPI0_ARG1C_MASK              (31 << 7)
+#       define R300_FPI0_ARG1C_NEG               (1 << 12)
+#       define R300_FPI0_ARG1C_ABS               (1 << 13)
+#       define R300_FPI0_ARG2C_SHIFT             14
+#       define R300_FPI0_ARG2C_MASK              (31 << 14)
+#       define R300_FPI0_ARG2C_NEG               (1 << 19)
+#       define R300_FPI0_ARG2C_ABS               (1 << 20)
+#       define R300_FPI0_SPECIAL_LRP             (1 << 21)
+#       define R300_FPI0_OUTC_MAD                (0 << 23)
+#       define R300_FPI0_OUTC_DP3                (1 << 23)
+#       define R300_FPI0_OUTC_DP4                (2 << 23)
+#       define R300_FPI0_OUTC_MIN                (4 << 23)
+#       define R300_FPI0_OUTC_MAX                (5 << 23)
+#       define R300_FPI0_OUTC_CMPH               (7 << 23)
+#       define R300_FPI0_OUTC_CMP                (8 << 23)
+#       define R300_FPI0_OUTC_FRC                (9 << 23)
+#       define R300_FPI0_OUTC_REPL_ALPHA         (10 << 23)
+#       define R300_FPI0_OUTC_SAT                (1 << 30)
+#       define R300_FPI0_INSERT_NOP              (1 << 31)
+
+#define R300_PFS_INSTR2_0                   0x49C0
+#       define R300_FPI2_ARGA_SRC0C_X            0
+#       define R300_FPI2_ARGA_SRC0C_Y            1
+#       define R300_FPI2_ARGA_SRC0C_Z            2
+#       define R300_FPI2_ARGA_SRC1C_X            3
+#       define R300_FPI2_ARGA_SRC1C_Y            4
+#       define R300_FPI2_ARGA_SRC1C_Z            5
+#       define R300_FPI2_ARGA_SRC2C_X            6
+#       define R300_FPI2_ARGA_SRC2C_Y            7
+#       define R300_FPI2_ARGA_SRC2C_Z            8
+#       define R300_FPI2_ARGA_SRC0A              9
+#       define R300_FPI2_ARGA_SRC1A              10
+#       define R300_FPI2_ARGA_SRC2A              11
+#       define R300_FPI2_ARGA_SRC1A_LRP          15
+#       define R300_FPI2_ARGA_ZERO               16
+#       define R300_FPI2_ARGA_ONE                17
+	/* GUESS */
+#       define R300_FPI2_ARGA_HALF               18
+#       define R300_FPI2_ARG0A_SHIFT             0
+#       define R300_FPI2_ARG0A_MASK              (31 << 0)
+#       define R300_FPI2_ARG0A_NEG               (1 << 5)
+	/* GUESS */
+#	define R300_FPI2_ARG0A_ABS		 (1 << 6)
+#       define R300_FPI2_ARG1A_SHIFT             7
+#       define R300_FPI2_ARG1A_MASK              (31 << 7)
+#       define R300_FPI2_ARG1A_NEG               (1 << 12)
+	/* GUESS */
+#	define R300_FPI2_ARG1A_ABS		 (1 << 13)
+#       define R300_FPI2_ARG2A_SHIFT             14
+#       define R300_FPI2_ARG2A_MASK              (31 << 14)
+#       define R300_FPI2_ARG2A_NEG               (1 << 19)
+	/* GUESS */
+#	define R300_FPI2_ARG2A_ABS		 (1 << 20)
+#       define R300_FPI2_SPECIAL_LRP             (1 << 21)
+#       define R300_FPI2_OUTA_MAD                (0 << 23)
+#       define R300_FPI2_OUTA_DP4                (1 << 23)
+#       define R300_FPI2_OUTA_MIN                (2 << 23)
+#       define R300_FPI2_OUTA_MAX                (3 << 23)
+#       define R300_FPI2_OUTA_CMP                (6 << 23)
+#       define R300_FPI2_OUTA_FRC                (7 << 23)
+#       define R300_FPI2_OUTA_EX2                (8 << 23)
+#       define R300_FPI2_OUTA_LG2                (9 << 23)
+#       define R300_FPI2_OUTA_RCP                (10 << 23)
+#       define R300_FPI2_OUTA_RSQ                (11 << 23)
+#       define R300_FPI2_OUTA_SAT                (1 << 30)
+#       define R300_FPI2_UNKNOWN_31              (1 << 31)
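+
+/* Illustrative sketch (ours, not from the original header): the four dwords
+ * of the "MAD o0, r0, 1, 0" move mentioned in the TEX note above, i.e. copy
+ * temporary 0 to output 0.  The field usage follows the partly guessed
+ * layout documented above.
+ */
+static inline void r300_example_fp_mov(u32 *fpi0, u32 *fpi1,
+				       u32 *fpi2, u32 *fpi3)
+{
+	/* color: out0.xyz = temp0.xyz * 1 + 0 */
+	*fpi1 = (0 << R300_FPI1_SRC0C_SHIFT) |
+		(0 << R300_FPI1_DSTC_SHIFT) |
+		R300_FPI1_DSTC_OUTPUT_X |
+		R300_FPI1_DSTC_OUTPUT_Y |
+		R300_FPI1_DSTC_OUTPUT_Z;
+	*fpi0 = R300_FPI0_OUTC_MAD |
+		(R300_FPI0_ARGC_SRC0C_XYZ << R300_FPI0_ARG0C_SHIFT) |
+		(R300_FPI0_ARGC_ONE << R300_FPI0_ARG1C_SHIFT) |
+		(R300_FPI0_ARGC_ZERO << R300_FPI0_ARG2C_SHIFT);
+	/* alpha: out0.w = temp0.w * 1 + 0 */
+	*fpi3 = (0 << R300_FPI3_SRC0A_SHIFT) |
+		(0 << R300_FPI3_DSTA_SHIFT) |
+		R300_FPI3_DSTA_OUTPUT;
+	*fpi2 = R300_FPI2_OUTA_MAD |
+		(R300_FPI2_ARGA_SRC0A << R300_FPI2_ARG0A_SHIFT) |
+		(R300_FPI2_ARGA_ONE << R300_FPI2_ARG1A_SHIFT) |
+		(R300_FPI2_ARGA_ZERO << R300_FPI2_ARG2A_SHIFT);
+}
+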
+/* END: Fragment program instruction set */
+
+/* Fog state and color */
+#define R300_RE_FOG_STATE                   0x4BC0
+#       define R300_FOG_ENABLE                   (1 << 0)
+#	define R300_FOG_MODE_LINEAR              (0 << 1)
+#	define R300_FOG_MODE_EXP                 (1 << 1)
+#	define R300_FOG_MODE_EXP2                (2 << 1)
+#	define R300_FOG_MODE_MASK                (3 << 1)
+#define R300_FOG_COLOR_R                    0x4BC8
+#define R300_FOG_COLOR_G                    0x4BCC
+#define R300_FOG_COLOR_B                    0x4BD0
+
+#define R300_PP_ALPHA_TEST                  0x4BD4
+#       define R300_REF_ALPHA_MASK               0x000000ff
+#       define R300_ALPHA_TEST_FAIL              (0 << 8)
+#       define R300_ALPHA_TEST_LESS              (1 << 8)
+#       define R300_ALPHA_TEST_LEQUAL            (3 << 8)
+#       define R300_ALPHA_TEST_EQUAL             (2 << 8)
+#       define R300_ALPHA_TEST_GEQUAL            (6 << 8)
+#       define R300_ALPHA_TEST_GREATER           (4 << 8)
+#       define R300_ALPHA_TEST_NEQUAL            (5 << 8)
+#       define R300_ALPHA_TEST_PASS              (7 << 8)
+#       define R300_ALPHA_TEST_OP_MASK           (7 << 8)
+#       define R300_ALPHA_TEST_ENABLE            (1 << 11)
+
+/* gap */
+
+/* Fragment program parameters in 7.16 floating point */
+#define R300_PFS_PARAM_0_X                  0x4C00
+#define R300_PFS_PARAM_0_Y                  0x4C04
+#define R300_PFS_PARAM_0_Z                  0x4C08
+#define R300_PFS_PARAM_0_W                  0x4C0C
+/* GUESS: PARAM_31 is last, based on native limits reported by fglrx */
+#define R300_PFS_PARAM_31_X                 0x4DF0
+#define R300_PFS_PARAM_31_Y                 0x4DF4
+#define R300_PFS_PARAM_31_Z                 0x4DF8
+#define R300_PFS_PARAM_31_W                 0x4DFC
+
+/* Notes:
+ * - AFAIK fglrx always sets BLEND_UNKNOWN when blending is used in
+ *   the application
+ * - AFAIK fglrx always sets BLEND_NO_SEPARATE when CBLEND and ABLEND
+ *   are set to the same function (both registers are always set up
+ *   completely in any case)
+ * - Most blend flags are simply copied from R200 and not tested yet
+ */
+#define R300_RB3D_CBLEND                    0x4E04
+#define R300_RB3D_ABLEND                    0x4E08
+/* the following only appear in CBLEND */
+#       define R300_BLEND_ENABLE                     (1 << 0)
+#       define R300_BLEND_UNKNOWN                    (3 << 1)
+#       define R300_BLEND_NO_SEPARATE                (1 << 3)
+/* the following are shared between CBLEND and ABLEND */
+#       define R300_FCN_MASK                         (3  << 12)
+#       define R300_COMB_FCN_ADD_CLAMP               (0  << 12)
+#       define R300_COMB_FCN_ADD_NOCLAMP             (1  << 12)
+#       define R300_COMB_FCN_SUB_CLAMP               (2  << 12)
+#       define R300_COMB_FCN_SUB_NOCLAMP             (3  << 12)
+#       define R300_COMB_FCN_MIN                     (4  << 12)
+#       define R300_COMB_FCN_MAX                     (5  << 12)
+#       define R300_COMB_FCN_RSUB_CLAMP              (6  << 12)
+#       define R300_COMB_FCN_RSUB_NOCLAMP            (7  << 12)
+#       define R300_BLEND_GL_ZERO                    (32)
+#       define R300_BLEND_GL_ONE                     (33)
+#       define R300_BLEND_GL_SRC_COLOR               (34)
+#       define R300_BLEND_GL_ONE_MINUS_SRC_COLOR     (35)
+#       define R300_BLEND_GL_DST_COLOR               (36)
+#       define R300_BLEND_GL_ONE_MINUS_DST_COLOR     (37)
+#       define R300_BLEND_GL_SRC_ALPHA               (38)
+#       define R300_BLEND_GL_ONE_MINUS_SRC_ALPHA     (39)
+#       define R300_BLEND_GL_DST_ALPHA               (40)
+#       define R300_BLEND_GL_ONE_MINUS_DST_ALPHA     (41)
+#       define R300_BLEND_GL_SRC_ALPHA_SATURATE      (42)
+#       define R300_BLEND_GL_CONST_COLOR             (43)
+#       define R300_BLEND_GL_ONE_MINUS_CONST_COLOR   (44)
+#       define R300_BLEND_GL_CONST_ALPHA             (45)
+#       define R300_BLEND_GL_ONE_MINUS_CONST_ALPHA   (46)
+#       define R300_BLEND_MASK                       (63)
+#       define R300_SRC_BLEND_SHIFT                  (16)
+#       define R300_DST_BLEND_SHIFT                  (24)
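+
+/* Illustrative sketch (ours): a CBLEND value for the classic GL_SRC_ALPHA /
+ * GL_ONE_MINUS_SRC_ALPHA blend, written with the (largely R200-derived)
+ * defines above; ABLEND would carry the same function and factors without
+ * the CBLEND-only enable bits.
+ */
+static inline u32 r300_example_cblend_alpha(void)
+{
+	return R300_BLEND_ENABLE | R300_BLEND_UNKNOWN |
+	       R300_BLEND_NO_SEPARATE | R300_COMB_FCN_ADD_CLAMP |
+	       (R300_BLEND_GL_SRC_ALPHA << R300_SRC_BLEND_SHIFT) |
+	       (R300_BLEND_GL_ONE_MINUS_SRC_ALPHA << R300_DST_BLEND_SHIFT);
+}
+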
+#define R300_RB3D_BLEND_COLOR               0x4E10
+#define R300_RB3D_COLORMASK                 0x4E0C
+#       define R300_COLORMASK0_B                 (1<<0)
+#       define R300_COLORMASK0_G                 (1<<1)
+#       define R300_COLORMASK0_R                 (1<<2)
+#       define R300_COLORMASK0_A                 (1<<3)
+
+/* gap */
+
+#define R300_RB3D_COLOROFFSET0              0x4E28
+#       define R300_COLOROFFSET_MASK             0xFFFFFFF0 /* GUESS */
+#define R300_RB3D_COLOROFFSET1              0x4E2C /* GUESS */
+#define R300_RB3D_COLOROFFSET2              0x4E30 /* GUESS */
+#define R300_RB3D_COLOROFFSET3              0x4E34 /* GUESS */
+
+/* gap */
+
+/* Bit 16: Larger tiles
+ * Bit 17: 4x2 tiles
+ * Bit 18: Extremely weird tile-like mode, but with some pixels duplicated?
+ */
+#define R300_RB3D_COLORPITCH0               0x4E38
+#       define R300_COLORPITCH_MASK              0x00001FF8 /* GUESS */
+#       define R300_COLOR_TILE_ENABLE            (1 << 16) /* GUESS */
+#       define R300_COLOR_MICROTILE_ENABLE       (1 << 17) /* GUESS */
+#       define R300_COLOR_ENDIAN_NO_SWAP         (0 << 18) /* GUESS */
+#       define R300_COLOR_ENDIAN_WORD_SWAP       (1 << 18) /* GUESS */
+#       define R300_COLOR_ENDIAN_DWORD_SWAP      (2 << 18) /* GUESS */
+#       define R300_COLOR_FORMAT_RGB565          (2 << 22)
+#       define R300_COLOR_FORMAT_ARGB8888        (3 << 22)
+#define R300_RB3D_COLORPITCH1               0x4E3C /* GUESS */
+#define R300_RB3D_COLORPITCH2               0x4E40 /* GUESS */
+#define R300_RB3D_COLORPITCH3               0x4E44 /* GUESS */
+
+/* gap */
+
+/* Guess by Vladimir.
+ * Set to 0A before 3D operations, set to 02 afterwards.
+ */
+/*#define R300_RB3D_DSTCACHE_CTLSTAT          0x4E4C*/
+#       define R300_RB3D_DSTCACHE_UNKNOWN_02             0x00000002
+#       define R300_RB3D_DSTCACHE_UNKNOWN_0A             0x0000000A
+
+/* gap */
+/* There seems to be no "write only" setting, so use Z-test = ALWAYS
+ * for this.
+ * Bit (1<<8) is the "test" bit, so plain write is 6. - vd
+ */
+#define R300_ZB_CNTL                             0x4F00
+#	define R300_STENCIL_ENABLE		 (1 << 0)
+#	define R300_Z_ENABLE		         (1 << 1)
+#	define R300_Z_WRITE_ENABLE		 (1 << 2)
+#	define R300_Z_SIGNED_COMPARE		 (1 << 3)
+#	define R300_STENCIL_FRONT_BACK		 (1 << 4)
+
+#define R300_ZB_ZSTENCILCNTL                   0x4f04
+	/* functions */
+#	define R300_ZS_NEVER			0
+#	define R300_ZS_LESS			1
+#	define R300_ZS_LEQUAL			2
+#	define R300_ZS_EQUAL			3
+#	define R300_ZS_GEQUAL			4
+#	define R300_ZS_GREATER			5
+#	define R300_ZS_NOTEQUAL			6
+#	define R300_ZS_ALWAYS			7
+#       define R300_ZS_MASK                     7
+	/* operations */
+#	define R300_ZS_KEEP			0
+#	define R300_ZS_ZERO			1
+#	define R300_ZS_REPLACE			2
+#	define R300_ZS_INCR			3
+#	define R300_ZS_DECR			4
+#	define R300_ZS_INVERT			5
+#	define R300_ZS_INCR_WRAP		6
+#	define R300_ZS_DECR_WRAP		7
+#	define R300_Z_FUNC_SHIFT		0
+	/* front and back refer to operations done for front
+	   and back faces, i.e. separate stencil function support */
+#	define R300_S_FRONT_FUNC_SHIFT	        3
+#	define R300_S_FRONT_SFAIL_OP_SHIFT	6
+#	define R300_S_FRONT_ZPASS_OP_SHIFT	9
+#	define R300_S_FRONT_ZFAIL_OP_SHIFT      12
+#	define R300_S_BACK_FUNC_SHIFT           15
+#	define R300_S_BACK_SFAIL_OP_SHIFT       18
+#	define R300_S_BACK_ZPASS_OP_SHIFT       21
+#	define R300_S_BACK_ZFAIL_OP_SHIFT       24
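+
+/* Illustrative sketch (ours): the "write only" depth setup described above;
+ * enable Z and Z writes in ZB_CNTL (value 6) and use an ALWAYS compare in
+ * ZB_ZSTENCILCNTL so every fragment passes the test.
+ */
+static inline void r300_example_z_write_only(u32 *zb_cntl, u32 *zstencil)
+{
+	*zb_cntl = R300_Z_ENABLE | R300_Z_WRITE_ENABLE;
+	*zstencil = R300_ZS_ALWAYS << R300_Z_FUNC_SHIFT;
+}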
+
+#define R300_ZB_STENCILREFMASK                        0x4f08
+#	define R300_STENCILREF_SHIFT       0
+#	define R300_STENCILREF_MASK        0x000000ff
+#	define R300_STENCILMASK_SHIFT      8
+#	define R300_STENCILMASK_MASK       0x0000ff00
+#	define R300_STENCILWRITEMASK_SHIFT 16
+#	define R300_STENCILWRITEMASK_MASK  0x00ff0000
+
+/* gap */
+
+#define R300_ZB_FORMAT                             0x4f10
+#	define R300_DEPTHFORMAT_16BIT_INT_Z   (0 << 0)
+#	define R300_DEPTHFORMAT_16BIT_13E3    (1 << 0)
+#	define R300_DEPTHFORMAT_24BIT_INT_Z_8BIT_STENCIL   (2 << 0)
+/* reserved up to (15 << 0) */
+#	define R300_INVERT_13E3_LEADING_ONES  (0 << 4)
+#	define R300_INVERT_13E3_LEADING_ZEROS (1 << 4)
+
+#define R300_ZB_ZTOP                             0x4F14
+#	define R300_ZTOP_DISABLE                 (0 << 0)
+#	define R300_ZTOP_ENABLE                  (1 << 0)
+
+/* gap */
+
+#define R300_ZB_ZCACHE_CTLSTAT            0x4f18
+#       define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_NO_EFFECT      (0 << 0)
+#       define R300_ZB_ZCACHE_CTLSTAT_ZC_FLUSH_FLUSH_AND_FREE (1 << 0)
+#       define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_NO_EFFECT       (0 << 1)
+#       define R300_ZB_ZCACHE_CTLSTAT_ZC_FREE_FREE            (1 << 1)
+#       define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_IDLE            (0 << 31)
+#       define R300_ZB_ZCACHE_CTLSTAT_ZC_BUSY_BUSY            (1 << 31)
+
+#define R300_ZB_BW_CNTL                     0x4f1c
+#	define R300_HIZ_DISABLE                              (0 << 0)
+#	define R300_HIZ_ENABLE                               (1 << 0)
+#	define R300_HIZ_MIN                                  (0 << 1)
+#	define R300_HIZ_MAX                                  (1 << 1)
+#	define R300_FAST_FILL_DISABLE                        (0 << 2)
+#	define R300_FAST_FILL_ENABLE                         (1 << 2)
+#	define R300_RD_COMP_DISABLE                          (0 << 3)
+#	define R300_RD_COMP_ENABLE                           (1 << 3)
+#	define R300_WR_COMP_DISABLE                          (0 << 4)
+#	define R300_WR_COMP_ENABLE                           (1 << 4)
+#	define R300_ZB_CB_CLEAR_RMW                          (0 << 5)
+#	define R300_ZB_CB_CLEAR_CACHE_LINEAR                 (1 << 5)
+#	define R300_FORCE_COMPRESSED_STENCIL_VALUE_DISABLE   (0 << 6)
+#	define R300_FORCE_COMPRESSED_STENCIL_VALUE_ENABLE    (1 << 6)
+
+#	define R500_ZEQUAL_OPTIMIZE_ENABLE                   (0 << 7)
+#	define R500_ZEQUAL_OPTIMIZE_DISABLE                  (1 << 7)
+#	define R500_SEQUAL_OPTIMIZE_ENABLE                   (0 << 8)
+#	define R500_SEQUAL_OPTIMIZE_DISABLE                  (1 << 8)
+
+#	define R500_BMASK_ENABLE                             (0 << 10)
+#	define R500_BMASK_DISABLE                            (1 << 10)
+#	define R500_HIZ_EQUAL_REJECT_DISABLE                 (0 << 11)
+#	define R500_HIZ_EQUAL_REJECT_ENABLE                  (1 << 11)
+#	define R500_HIZ_FP_EXP_BITS_DISABLE                  (0 << 12)
+#	define R500_HIZ_FP_EXP_BITS_1                        (1 << 12)
+#	define R500_HIZ_FP_EXP_BITS_2                        (2 << 12)
+#	define R500_HIZ_FP_EXP_BITS_3                        (3 << 12)
+#	define R500_HIZ_FP_EXP_BITS_4                        (4 << 12)
+#	define R500_HIZ_FP_EXP_BITS_5                        (5 << 12)
+#	define R500_HIZ_FP_INVERT_LEADING_ONES               (0 << 15)
+#	define R500_HIZ_FP_INVERT_LEADING_ZEROS              (1 << 15)
+#	define R500_TILE_OVERWRITE_RECOMPRESSION_ENABLE      (0 << 16)
+#	define R500_TILE_OVERWRITE_RECOMPRESSION_DISABLE     (1 << 16)
+#	define R500_CONTIGUOUS_6XAA_SAMPLES_ENABLE           (0 << 17)
+#	define R500_CONTIGUOUS_6XAA_SAMPLES_DISABLE          (1 << 17)
+#	define R500_PEQ_PACKING_DISABLE                      (0 << 18)
+#	define R500_PEQ_PACKING_ENABLE                       (1 << 18)
+#	define R500_COVERED_PTR_MASKING_DISABLE              (0 << 18)
+#	define R500_COVERED_PTR_MASKING_ENABLE               (1 << 18)
+
+
+/* gap */
+
+/* Z Buffer Address Offset.
+ * Bits 31 to 5 are used for aligned Z buffer address offset for macro tiles.
+ */
+#define R300_ZB_DEPTHOFFSET               0x4f20
+
+/* Z Buffer Pitch and Endian Control */
+#define R300_ZB_DEPTHPITCH                0x4f24
+#       define R300_DEPTHPITCH_MASK              0x00003FFC
+#       define R300_DEPTHMACROTILE_DISABLE      (0 << 16)
+#       define R300_DEPTHMACROTILE_ENABLE       (1 << 16)
+#       define R300_DEPTHMICROTILE_LINEAR       (0 << 17)
+#       define R300_DEPTHMICROTILE_TILED        (1 << 17)
+#       define R300_DEPTHMICROTILE_TILED_SQUARE (2 << 17)
+#       define R300_DEPTHENDIAN_NO_SWAP         (0 << 18)
+#       define R300_DEPTHENDIAN_WORD_SWAP       (1 << 18)
+#       define R300_DEPTHENDIAN_DWORD_SWAP      (2 << 18)
+#       define R300_DEPTHENDIAN_HALF_DWORD_SWAP (3 << 18)
+
+/* Z Buffer Clear Value */
+#define R300_ZB_DEPTHCLEARVALUE                  0x4f28
+
+#define R300_ZB_ZMASK_OFFSET			 0x4f30
+#define R300_ZB_ZMASK_PITCH			 0x4f34
+#define R300_ZB_ZMASK_WRINDEX			 0x4f38
+#define R300_ZB_ZMASK_DWORD			 0x4f3c
+#define R300_ZB_ZMASK_RDINDEX			 0x4f40
+
+/* Hierarchical Z Memory Offset */
+#define R300_ZB_HIZ_OFFSET                       0x4f44
+
+/* Hierarchical Z Write Index */
+#define R300_ZB_HIZ_WRINDEX                      0x4f48
+
+/* Hierarchical Z Data */
+#define R300_ZB_HIZ_DWORD                        0x4f4c
+
+/* Hierarchical Z Read Index */
+#define R300_ZB_HIZ_RDINDEX                      0x4f50
+
+/* Hierarchical Z Pitch */
+#define R300_ZB_HIZ_PITCH                        0x4f54
+
+/* Z Buffer Z Pass Counter Data */
+#define R300_ZB_ZPASS_DATA                       0x4f58
+
+/* Z Buffer Z Pass Counter Address */
+#define R300_ZB_ZPASS_ADDR                       0x4f5c
+
+/* Depth buffer X and Y coordinate offset */
+#define R300_ZB_DEPTHXY_OFFSET                   0x4f60
+#	define R300_DEPTHX_OFFSET_SHIFT  1
+#	define R300_DEPTHX_OFFSET_MASK   0x000007FE
+#	define R300_DEPTHY_OFFSET_SHIFT  17
+#	define R300_DEPTHY_OFFSET_MASK   0x07FE0000
+
+/* Sets the fifo sizes */
+#define R500_ZB_FIFO_SIZE                        0x4fd0
+#	define R500_OP_FIFO_SIZE_FULL   (0 << 0)
+#	define R500_OP_FIFO_SIZE_HALF   (1 << 0)
+#	define R500_OP_FIFO_SIZE_QUATER (2 << 0)
+#	define R500_OP_FIFO_SIZE_EIGTHS (4 << 0)
+
+/* Stencil Reference Value and Mask for backfacing quads */
+/* R300_ZB_STENCILREFMASK handles front face */
+#define R500_ZB_STENCILREFMASK_BF                0x4fd4
+#	define R500_STENCILREF_SHIFT       0
+#	define R500_STENCILREF_MASK        0x000000ff
+#	define R500_STENCILMASK_SHIFT      8
+#	define R500_STENCILMASK_MASK       0x0000ff00
+#	define R500_STENCILWRITEMASK_SHIFT 16
+#	define R500_STENCILWRITEMASK_MASK  0x00ff0000
+
+/* BEGIN: Vertex program instruction set */
+
+/* Every instruction is four dwords long:
+ *  DWORD 0: output and opcode
+ *  DWORD 1: first argument
+ *  DWORD 2: second argument
+ *  DWORD 3: third argument
+ *
+ * Notes:
+ *  - ABS r, a is implemented as MAX r, a, -a
+ *  - MOV is implemented as ADD to zero
+ *  - XPD is implemented as MUL + MAD
+ *  - FLR is implemented as FRC + ADD
+ *  - apparently, fglrx tries to schedule instructions so that there is at
+ *    least one instruction between the write to a temporary and the first
+ *    read from said temporary; however, violations of this scheduling are
+ *    allowed
+ *  - register indices seem to be unrelated to OpenGL aliasing to
+ *    conventional state
+ *  - only one attribute and one parameter can be loaded at a time; however,
+ *    the same attribute/parameter can be used for more than one argument
+ *  - the second software argument for POW is the third hardware argument
+ *    (no idea why)
+ *  - MAD with only temporaries as input seems to use VPI_OUT_SELECT_MAD_2
+ *
+ * There is some magic surrounding LIT:
+ *   The single argument is replicated across all three inputs, but swizzled:
+ *     First argument: xyzy
+ *     Second argument: xyzx
+ *     Third argument: xyzw
+ *   Whenever the result is used later in the fragment program, fglrx forces
+ *   x and w to be 1.0 in the input selection; I don't know whether this is
+ *   strictly necessary
+ */
+#define R300_VPI_OUT_OP_DOT                     (1 << 0)
+#define R300_VPI_OUT_OP_MUL                     (2 << 0)
+#define R300_VPI_OUT_OP_ADD                     (3 << 0)
+#define R300_VPI_OUT_OP_MAD                     (4 << 0)
+#define R300_VPI_OUT_OP_DST                     (5 << 0)
+#define R300_VPI_OUT_OP_FRC                     (6 << 0)
+#define R300_VPI_OUT_OP_MAX                     (7 << 0)
+#define R300_VPI_OUT_OP_MIN                     (8 << 0)
+#define R300_VPI_OUT_OP_SGE                     (9 << 0)
+#define R300_VPI_OUT_OP_SLT                     (10 << 0)
+	/* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, vector(scalar, vector) */
+#define R300_VPI_OUT_OP_UNK12                   (12 << 0)
+#define R300_VPI_OUT_OP_ARL                     (13 << 0)
+#define R300_VPI_OUT_OP_EXP                     (65 << 0)
+#define R300_VPI_OUT_OP_LOG                     (66 << 0)
+	/* Used in fog computations, scalar(scalar) */
+#define R300_VPI_OUT_OP_UNK67                   (67 << 0)
+#define R300_VPI_OUT_OP_LIT                     (68 << 0)
+#define R300_VPI_OUT_OP_POW                     (69 << 0)
+#define R300_VPI_OUT_OP_RCP                     (70 << 0)
+#define R300_VPI_OUT_OP_RSQ                     (72 << 0)
+	/* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, scalar(scalar) */
+#define R300_VPI_OUT_OP_UNK73                   (73 << 0)
+#define R300_VPI_OUT_OP_EX2                     (75 << 0)
+#define R300_VPI_OUT_OP_LG2                     (76 << 0)
+#define R300_VPI_OUT_OP_MAD_2                   (128 << 0)
+	/* all temps, vector(scalar, vector, vector) */
+#define R300_VPI_OUT_OP_UNK129                  (129 << 0)
+
+#define R300_VPI_OUT_REG_CLASS_TEMPORARY        (0 << 8)
+#define R300_VPI_OUT_REG_CLASS_ADDR             (1 << 8)
+#define R300_VPI_OUT_REG_CLASS_RESULT           (2 << 8)
+#define R300_VPI_OUT_REG_CLASS_MASK             (31 << 8)
+
+#define R300_VPI_OUT_REG_INDEX_SHIFT            13
+	/* GUESS based on fglrx native limits */
+#define R300_VPI_OUT_REG_INDEX_MASK             (31 << 13)
+
+#define R300_VPI_OUT_WRITE_X                    (1 << 20)
+#define R300_VPI_OUT_WRITE_Y                    (1 << 21)
+#define R300_VPI_OUT_WRITE_Z                    (1 << 22)
+#define R300_VPI_OUT_WRITE_W                    (1 << 23)
+
+#define R300_VPI_IN_REG_CLASS_TEMPORARY         (0 << 0)
+#define R300_VPI_IN_REG_CLASS_ATTRIBUTE         (1 << 0)
+#define R300_VPI_IN_REG_CLASS_PARAMETER         (2 << 0)
+#define R300_VPI_IN_REG_CLASS_NONE              (9 << 0)
+#define R300_VPI_IN_REG_CLASS_MASK              (31 << 0)
+
+#define R300_VPI_IN_REG_INDEX_SHIFT             5
+	/* GUESS based on fglrx native limits */
+#define R300_VPI_IN_REG_INDEX_MASK              (255 << 5)
+
+/* The R300 can select components from the input register arbitrarily.
+ * Use the following constants, shifted by the component shift you
+ * want to select
+ */
+#define R300_VPI_IN_SELECT_X    0
+#define R300_VPI_IN_SELECT_Y    1
+#define R300_VPI_IN_SELECT_Z    2
+#define R300_VPI_IN_SELECT_W    3
+#define R300_VPI_IN_SELECT_ZERO 4
+#define R300_VPI_IN_SELECT_ONE  5
+#define R300_VPI_IN_SELECT_MASK 7
+
+#define R300_VPI_IN_X_SHIFT                     13
+#define R300_VPI_IN_Y_SHIFT                     16
+#define R300_VPI_IN_Z_SHIFT                     19
+#define R300_VPI_IN_W_SHIFT                     22
+
+#define R300_VPI_IN_NEG_X                       (1 << 25)
+#define R300_VPI_IN_NEG_Y                       (1 << 26)
+#define R300_VPI_IN_NEG_Z                       (1 << 27)
+#define R300_VPI_IN_NEG_W                       (1 << 28)
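+
+/* Illustrative sketch only: going by the (partly GUESSed) field layouts
+ * above, a "MOV dst, src" between temporaries -- which the notes above say
+ * is implemented as an ADD with zero -- could be packed roughly as
+ *
+ *   out  = R300_VPI_OUT_OP_ADD
+ *        | R300_VPI_OUT_REG_CLASS_TEMPORARY
+ *        | (dst << R300_VPI_OUT_REG_INDEX_SHIFT)
+ *        | R300_VPI_OUT_WRITE_X | R300_VPI_OUT_WRITE_Y
+ *        | R300_VPI_OUT_WRITE_Z | R300_VPI_OUT_WRITE_W;
+ *   src0 = R300_VPI_IN_REG_CLASS_TEMPORARY
+ *        | (src << R300_VPI_IN_REG_INDEX_SHIFT)
+ *        | (R300_VPI_IN_SELECT_X << R300_VPI_IN_X_SHIFT)
+ *        | (R300_VPI_IN_SELECT_Y << R300_VPI_IN_Y_SHIFT)
+ *        | (R300_VPI_IN_SELECT_Z << R300_VPI_IN_Z_SHIFT)
+ *        | (R300_VPI_IN_SELECT_W << R300_VPI_IN_W_SHIFT);
+ *   src1 = R300_VPI_IN_REG_CLASS_NONE
+ *        | (R300_VPI_IN_SELECT_ZERO << R300_VPI_IN_X_SHIFT)
+ *        | (R300_VPI_IN_SELECT_ZERO << R300_VPI_IN_Y_SHIFT)
+ *        | (R300_VPI_IN_SELECT_ZERO << R300_VPI_IN_Z_SHIFT)
+ *        | (R300_VPI_IN_SELECT_ZERO << R300_VPI_IN_W_SHIFT);
+ *
+ * dst and src are hypothetical temporary register indices; this is an
+ * interpretation of the definitions above, not a verified encoding.
+ */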
+/* END: Vertex program instruction set */
+
+/* BEGIN: Packet 3 commands */
+
+/* A primitive emission dword. */
+#define R300_PRIM_TYPE_NONE                     (0 << 0)
+#define R300_PRIM_TYPE_POINT                    (1 << 0)
+#define R300_PRIM_TYPE_LINE                     (2 << 0)
+#define R300_PRIM_TYPE_LINE_STRIP               (3 << 0)
+#define R300_PRIM_TYPE_TRI_LIST                 (4 << 0)
+#define R300_PRIM_TYPE_TRI_FAN                  (5 << 0)
+#define R300_PRIM_TYPE_TRI_STRIP                (6 << 0)
+#define R300_PRIM_TYPE_TRI_TYPE2                (7 << 0)
+#define R300_PRIM_TYPE_RECT_LIST                (8 << 0)
+#define R300_PRIM_TYPE_3VRT_POINT_LIST          (9 << 0)
+#define R300_PRIM_TYPE_3VRT_LINE_LIST           (10 << 0)
+	/* GUESS (based on r200) */
+#define R300_PRIM_TYPE_POINT_SPRITES            (11 << 0)
+#define R300_PRIM_TYPE_LINE_LOOP                (12 << 0)
+#define R300_PRIM_TYPE_QUADS                    (13 << 0)
+#define R300_PRIM_TYPE_QUAD_STRIP               (14 << 0)
+#define R300_PRIM_TYPE_POLYGON                  (15 << 0)
+#define R300_PRIM_TYPE_MASK                     0xF
+#define R300_PRIM_WALK_IND                      (1 << 4)
+#define R300_PRIM_WALK_LIST                     (2 << 4)
+#define R300_PRIM_WALK_RING                     (3 << 4)
+#define R300_PRIM_WALK_MASK                     (3 << 4)
+	/* GUESS (based on r200) */
+#define R300_PRIM_COLOR_ORDER_BGRA              (0 << 6)
+#define R300_PRIM_COLOR_ORDER_RGBA              (1 << 6)
+#define R300_PRIM_NUM_VERTICES_SHIFT            16
+#define R300_PRIM_NUM_VERTICES_MASK             0xffff
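+
+/* Illustrative sketch only: an indexed triangle-list draw of N vertices
+ * would presumably be described by a primitive emission dword along the
+ * lines of
+ *
+ *   R300_PRIM_TYPE_TRI_LIST | R300_PRIM_WALK_IND |
+ *   (N << R300_PRIM_NUM_VERTICES_SHIFT)
+ *
+ * where N is a hypothetical vertex count; this is an interpretation of the
+ * (partly GUESSed) fields above, not a verified encoding.
+ */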
+
+/* Draw a primitive from vertex data in arrays loaded via 3D_LOAD_VBPNTR.
+ * Two parameter dwords:
+ * 0. The first parameter appears to always be 0.
+ * 1. The second parameter is a standard primitive emission dword.
+ */
+#define R300_PACKET3_3D_DRAW_VBUF           0x00002800
+
+/* Specify the full set of vertex arrays as (address, stride).
+ * The first parameter is the number of vertex arrays specified.
+ * The rest of the command is a variable length list of blocks, where
+ * each block is three dwords long and specifies two arrays.
+ * The first dword of a block is split into two words: the less significant
+ * word refers to the first array, the more significant word to the second
+ * array in the block.
+ * The low byte of each word contains the size of an array entry in dwords;
+ * the high byte contains the stride of the array.
+ * The second dword of a block contains the pointer to the first array,
+ * the third dword of a block contains the pointer to the second array.
+ * Note that if the total number of arrays is odd, the third dword of
+ * the last block is omitted.
+ */
+#define R300_PACKET3_3D_LOAD_VBPNTR         0x00002F00
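+
+/* Illustrative sketch only: following the description above, the payload
+ * for three hypothetical arrays A, B and C would be expected to look like
+ *
+ *   dword 0: 3                                 (number of arrays)
+ *   dword 1: (strideB << 24) | (sizeB << 16) |
+ *            (strideA << 8)  |  sizeA          (first block: A and B)
+ *   dword 2: pointer to array A
+ *   dword 3: pointer to array B
+ *   dword 4: (strideC << 8)  |  sizeC          (last block: C only)
+ *   dword 5: pointer to array C
+ *
+ * with the second pointer dword of the last block omitted because the
+ * total number of arrays is odd.
+ */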
+
+#define R300_PACKET3_INDX_BUFFER            0x00003300
+#    define R300_EB_UNK1_SHIFT                      24
+#    define R300_EB_UNK1                    (0x80<<24)
+#    define R300_EB_UNK2                        0x0810
+#define R300_PACKET3_3D_DRAW_VBUF_2         0x00003400
+#define R300_PACKET3_3D_DRAW_INDX_2         0x00003600
+
+/* END: Packet 3 commands */
+
+
+/* Color formats for 2d packets
+ */
+#define R300_CP_COLOR_FORMAT_CI8	2
+#define R300_CP_COLOR_FORMAT_ARGB1555	3
+#define R300_CP_COLOR_FORMAT_RGB565	4
+#define R300_CP_COLOR_FORMAT_ARGB8888	6
+#define R300_CP_COLOR_FORMAT_RGB332	7
+#define R300_CP_COLOR_FORMAT_RGB8	9
+#define R300_CP_COLOR_FORMAT_ARGB4444	15
+
+/*
+ * CP type-3 packets
+ */
+#define R300_CP_CMD_BITBLT_MULTI	0xC0009B00
+
+#define R500_VAP_INDEX_OFFSET		0x208c
+
+#define R500_GA_US_VECTOR_INDEX         0x4250
+#define R500_GA_US_VECTOR_DATA          0x4254
+
+#define R500_RS_IP_0                    0x4074
+#define R500_RS_INST_0                  0x4320
+
+#define R500_US_CONFIG                  0x4600
+
+#define R500_US_FC_CTRL			0x4624
+#define R500_US_CODE_ADDR		0x4630
+
+#define R500_RB3D_COLOR_CLEAR_VALUE_AR  0x46c0
+#define R500_RB3D_CONSTANT_COLOR_AR     0x4ef8
+
+#endif /* _R300_REG_H */
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
new file mode 100644
index 000000000000..f0de81a5689d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -0,0 +1,1773 @@
+/* radeon_cp.c -- CP support for Radeon -*- linux-c -*- */
+/*
+ * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * Copyright 2007 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Kevin E. Martin <martin@valinux.com>
+ *    Gareth Hughes <gareth@valinux.com>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "radeon_drm.h"
+#include "radeon_drv.h"
+#include "r300_reg.h"
+
+#include "radeon_microcode.h"
+
+#define RADEON_FIFO_DEBUG	0
+
+static int radeon_do_cleanup_cp(struct drm_device * dev);
+
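+/* The memory controller registers on these chips are reached indirectly:
+ * the register number is written to an index register and the value is
+ * then read back from the corresponding data register.  The helpers below
+ * wrap that sequence for the R5xx, RS480 and RS690 families.
+ */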
+static u32 R500_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
+{
+	u32 ret;
+	RADEON_WRITE(R520_MC_IND_INDEX, 0x7f0000 | (addr & 0xff));
+	ret = RADEON_READ(R520_MC_IND_DATA);
+	RADEON_WRITE(R520_MC_IND_INDEX, 0);
+	return ret;
+}
+
+static u32 RS480_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
+{
+	u32 ret;
+	RADEON_WRITE(RS480_NB_MC_INDEX, addr & 0xff);
+	ret = RADEON_READ(RS480_NB_MC_DATA);
+	RADEON_WRITE(RS480_NB_MC_INDEX, 0xff);
+	return ret;
+}
+
+static u32 RS690_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
+{
+	u32 ret;
+	RADEON_WRITE(RS690_MC_INDEX, (addr & RS690_MC_INDEX_MASK));
+	ret = RADEON_READ(RS690_MC_DATA);
+	RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_MASK);
+	return ret;
+}
+
+static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
+{
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
+		return RS690_READ_MCIND(dev_priv, addr);
+	else
+		return RS480_READ_MCIND(dev_priv, addr);
+}
+
+u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv)
+{
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
+		return R500_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
+		return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
+		return R500_READ_MCIND(dev_priv, R520_MC_FB_LOCATION);
+	else
+		return RADEON_READ(RADEON_MC_FB_LOCATION);
+}
+
+static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc)
+{
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
+		R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
+		RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
+		R500_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc);
+	else
+		RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc);
+}
+
+static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc)
+{
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
+		R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
+		RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc);
+	else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
+		R500_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc);
+	else
+		RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc);
+}
+
+static void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base)
+{
+	u32 agp_base_hi = upper_32_bits(agp_base);
+	u32 agp_base_lo = agp_base & 0xffffffff;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) {
+		R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo);
+		R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi);
+	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) {
+		RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo);
+		RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi);
+	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) {
+		R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo);
+		R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi);
+	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480) {
+		RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
+		RADEON_WRITE(RS480_AGP_BASE_2, 0);
+	} else {
+		RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200)
+			RADEON_WRITE(RADEON_AGP_BASE_2, agp_base_hi);
+	}
+}
+
+static int RADEON_READ_PLL(struct drm_device * dev, int addr)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX, addr & 0x1f);
+	return RADEON_READ(RADEON_CLOCK_CNTL_DATA);
+}
+
+static u32 RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr)
+{
+	RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff);
+	return RADEON_READ(RADEON_PCIE_DATA);
+}
+
+#if RADEON_FIFO_DEBUG
+static void radeon_status(drm_radeon_private_t * dev_priv)
+{
+	printk("%s:\n", __func__);
+	printk("RBBM_STATUS = 0x%08x\n",
+	       (unsigned int)RADEON_READ(RADEON_RBBM_STATUS));
+	printk("CP_RB_RTPR = 0x%08x\n",
+	       (unsigned int)RADEON_READ(RADEON_CP_RB_RPTR));
+	printk("CP_RB_WTPR = 0x%08x\n",
+	       (unsigned int)RADEON_READ(RADEON_CP_RB_WPTR));
+	printk("AIC_CNTL = 0x%08x\n",
+	       (unsigned int)RADEON_READ(RADEON_AIC_CNTL));
+	printk("AIC_STAT = 0x%08x\n",
+	       (unsigned int)RADEON_READ(RADEON_AIC_STAT));
+	printk("AIC_PT_BASE = 0x%08x\n",
+	       (unsigned int)RADEON_READ(RADEON_AIC_PT_BASE));
+	printk("TLB_ADDR = 0x%08x\n",
+	       (unsigned int)RADEON_READ(RADEON_AIC_TLB_ADDR));
+	printk("TLB_DATA = 0x%08x\n",
+	       (unsigned int)RADEON_READ(RADEON_AIC_TLB_DATA));
+}
+#endif
+
+/* ================================================================
+ * Engine, FIFO control
+ */
+
+static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv)
+{
+	u32 tmp;
+	int i;
+
+	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {
+		tmp = RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT);
+		tmp |= RADEON_RB3D_DC_FLUSH_ALL;
+		RADEON_WRITE(RADEON_RB3D_DSTCACHE_CTLSTAT, tmp);
+
+		for (i = 0; i < dev_priv->usec_timeout; i++) {
+			if (!(RADEON_READ(RADEON_RB3D_DSTCACHE_CTLSTAT)
+			      & RADEON_RB3D_DC_BUSY)) {
+				return 0;
+			}
+			DRM_UDELAY(1);
+		}
+	} else {
+		/* 3D */
+		tmp = RADEON_READ(R300_RB3D_DSTCACHE_CTLSTAT);
+		tmp |= RADEON_RB3D_DC_FLUSH_ALL;
+		RADEON_WRITE(R300_RB3D_DSTCACHE_CTLSTAT, tmp);
+
+		/* 2D */
+		tmp = RADEON_READ(R300_DSTCACHE_CTLSTAT);
+		tmp |= RADEON_RB3D_DC_FLUSH_ALL;
+		RADEON_WRITE(R300_DSTCACHE_CTLSTAT, tmp);
+
+		for (i = 0; i < dev_priv->usec_timeout; i++) {
+			if (!(RADEON_READ(R300_DSTCACHE_CTLSTAT)
+			  & RADEON_RB3D_DC_BUSY)) {
+				return 0;
+			}
+			DRM_UDELAY(1);
+		}
+	}
+
+#if RADEON_FIFO_DEBUG
+	DRM_ERROR("failed!\n");
+	radeon_status(dev_priv);
+#endif
+	return -EBUSY;
+}
+
+static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries)
+{
+	int i;
+
+	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+	for (i = 0; i < dev_priv->usec_timeout; i++) {
+		int slots = (RADEON_READ(RADEON_RBBM_STATUS)
+			     & RADEON_RBBM_FIFOCNT_MASK);
+		if (slots >= entries)
+			return 0;
+		DRM_UDELAY(1);
+	}
+
+#if RADEON_FIFO_DEBUG
+	DRM_ERROR("failed!\n");
+	radeon_status(dev_priv);
+#endif
+	return -EBUSY;
+}
+
+static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
+{
+	int i, ret;
+
+	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+	ret = radeon_do_wait_for_fifo(dev_priv, 64);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < dev_priv->usec_timeout; i++) {
+		if (!(RADEON_READ(RADEON_RBBM_STATUS)
+		      & RADEON_RBBM_ACTIVE)) {
+			radeon_do_pixcache_flush(dev_priv);
+			return 0;
+		}
+		DRM_UDELAY(1);
+	}
+
+#if RADEON_FIFO_DEBUG
+	DRM_ERROR("failed!\n");
+	radeon_status(dev_priv);
+#endif
+	return -EBUSY;
+}
+
+static void radeon_init_pipes(drm_radeon_private_t *dev_priv)
+{
+	uint32_t gb_tile_config, gb_pipe_sel = 0;
+
+	/* RS4xx/RS6xx/R4xx/R5xx */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R420) {
+		gb_pipe_sel = RADEON_READ(R400_GB_PIPE_SELECT);
+		dev_priv->num_gb_pipes = ((gb_pipe_sel >> 12) & 0x3) + 1;
+	} else {
+		/* R3xx */
+		if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
+		    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350)) {
+			dev_priv->num_gb_pipes = 2;
+		} else {
+			/* R3Vxx */
+			dev_priv->num_gb_pipes = 1;
+		}
+	}
+	DRM_INFO("Num pipes: %d\n", dev_priv->num_gb_pipes);
+
+	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16 /*| R300_SUBPIXEL_1_16*/);
+
+	switch (dev_priv->num_gb_pipes) {
+	case 2: gb_tile_config |= R300_PIPE_COUNT_R300; break;
+	case 3: gb_tile_config |= R300_PIPE_COUNT_R420_3P; break;
+	case 4: gb_tile_config |= R300_PIPE_COUNT_R420; break;
+	default:
+	case 1: gb_tile_config |= R300_PIPE_COUNT_RV350; break;
+	}
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
+		RADEON_WRITE_PLL(R500_DYN_SCLK_PWMEM_PIPE, (1 | ((gb_pipe_sel >> 8) & 0xf) << 4));
+		RADEON_WRITE(R500_SU_REG_DEST, ((1 << dev_priv->num_gb_pipes) - 1));
+	}
+	RADEON_WRITE(R300_GB_TILE_CONFIG, gb_tile_config);
+	radeon_do_wait_for_idle(dev_priv);
+	RADEON_WRITE(R300_DST_PIPE_CONFIG, RADEON_READ(R300_DST_PIPE_CONFIG) | R300_PIPE_AUTO_CONFIG);
+	RADEON_WRITE(R300_RB2D_DSTCACHE_MODE, (RADEON_READ(R300_RB2D_DSTCACHE_MODE) |
+					       R300_DC_AUTOFLUSH_ENABLE |
+					       R300_DC_DC_DISABLE_IGNORE_PE));
+}
+
+/* ================================================================
+ * CP control, initialization
+ */
+
+/* Load the microcode for the CP */
+static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv)
+{
+	int i;
+	DRM_DEBUG("\n");
+
+	radeon_do_wait_for_idle(dev_priv);
+
+	RADEON_WRITE(RADEON_CP_ME_RAM_ADDR, 0);
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R100) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV100) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV200) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS100) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS200)) {
+		DRM_INFO("Loading R100 Microcode\n");
+		for (i = 0; i < 256; i++) {
+			RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
+				     R100_cp_microcode[i][1]);
+			RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
+				     R100_cp_microcode[i][0]);
+		}
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R200) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV250) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV280) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS300)) {
+		DRM_INFO("Loading R200 Microcode\n");
+		for (i = 0; i < 256; i++) {
+			RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
+				     R200_cp_microcode[i][1]);
+			RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
+				     R200_cp_microcode[i][0]);
+		}
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R300) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
+		DRM_INFO("Loading R300 Microcode\n");
+		for (i = 0; i < 256; i++) {
+			RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
+				     R300_cp_microcode[i][1]);
+			RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
+				     R300_cp_microcode[i][0]);
+		}
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV410)) {
+		DRM_INFO("Loading R400 Microcode\n");
+		for (i = 0; i < 256; i++) {
+			RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
+				     R420_cp_microcode[i][1]);
+			RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
+				     R420_cp_microcode[i][0]);
+		}
+	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) {
+		DRM_INFO("Loading RS690 Microcode\n");
+		for (i = 0; i < 256; i++) {
+			RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
+				     RS690_cp_microcode[i][1]);
+			RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
+				     RS690_cp_microcode[i][0]);
+		}
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R520) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R580) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV560) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV570)) {
+		DRM_INFO("Loading R500 Microcode\n");
+		for (i = 0; i < 256; i++) {
+			RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
+				     R520_cp_microcode[i][1]);
+			RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
+				     R520_cp_microcode[i][0]);
+		}
+	}
+}
+
+/* Flush any pending commands to the CP.  This should only be used just
+ * prior to a wait for idle, as it informs the engine that the command
+ * stream is ending.
+ */
+static void radeon_do_cp_flush(drm_radeon_private_t * dev_priv)
+{
+	DRM_DEBUG("\n");
+#if 0
+	u32 tmp;
+
+	tmp = RADEON_READ(RADEON_CP_RB_WPTR) | (1 << 31);
+	RADEON_WRITE(RADEON_CP_RB_WPTR, tmp);
+#endif
+}
+
+/* Wait for the CP to go idle.
+ */
+int radeon_do_cp_idle(drm_radeon_private_t * dev_priv)
+{
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(6);
+
+	RADEON_PURGE_CACHE();
+	RADEON_PURGE_ZCACHE();
+	RADEON_WAIT_UNTIL_IDLE();
+
+	ADVANCE_RING();
+	COMMIT_RING();
+
+	return radeon_do_wait_for_idle(dev_priv);
+}
+
+/* Start the Command Processor.
+ */
+static void radeon_do_cp_start(drm_radeon_private_t * dev_priv)
+{
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	radeon_do_wait_for_idle(dev_priv);
+
+	RADEON_WRITE(RADEON_CP_CSQ_CNTL, dev_priv->cp_mode);
+
+	dev_priv->cp_running = 1;
+
+	BEGIN_RING(6);
+
+	RADEON_PURGE_CACHE();
+	RADEON_PURGE_ZCACHE();
+	RADEON_WAIT_UNTIL_IDLE();
+
+	ADVANCE_RING();
+	COMMIT_RING();
+}
+
+/* Reset the Command Processor.  This will not flush any pending
+ * commands, so you must wait for the CP command stream to complete
+ * before calling this routine.
+ */
+static void radeon_do_cp_reset(drm_radeon_private_t * dev_priv)
+{
+	u32 cur_read_ptr;
+	DRM_DEBUG("\n");
+
+	cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR);
+	RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr);
+	SET_RING_HEAD(dev_priv, cur_read_ptr);
+	dev_priv->ring.tail = cur_read_ptr;
+}
+
+/* Stop the Command Processor.  This will not flush any pending
+ * commands, so you must flush the command stream and wait for the CP
+ * to go idle before calling this routine.
+ */
+static void radeon_do_cp_stop(drm_radeon_private_t * dev_priv)
+{
+	DRM_DEBUG("\n");
+
+	RADEON_WRITE(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS);
+
+	dev_priv->cp_running = 0;
+}
+
+/* Reset the engine.  This will stop the CP if it is running.
+ */
+static int radeon_do_engine_reset(struct drm_device * dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	u32 clock_cntl_index = 0, mclk_cntl = 0, rbbm_soft_reset;
+	DRM_DEBUG("\n");
+
+	radeon_do_pixcache_flush(dev_priv);
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) {
+		/* may need something similar for newer chips */
+		clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX);
+		mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL);
+
+		RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl |
+						    RADEON_FORCEON_MCLKA |
+						    RADEON_FORCEON_MCLKB |
+						    RADEON_FORCEON_YCLKA |
+						    RADEON_FORCEON_YCLKB |
+						    RADEON_FORCEON_MC |
+						    RADEON_FORCEON_AIC));
+	}
+
+	rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET);
+
+	RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset |
+					      RADEON_SOFT_RESET_CP |
+					      RADEON_SOFT_RESET_HI |
+					      RADEON_SOFT_RESET_SE |
+					      RADEON_SOFT_RESET_RE |
+					      RADEON_SOFT_RESET_PP |
+					      RADEON_SOFT_RESET_E2 |
+					      RADEON_SOFT_RESET_RB));
+	RADEON_READ(RADEON_RBBM_SOFT_RESET);
+	RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset &
+					      ~(RADEON_SOFT_RESET_CP |
+						RADEON_SOFT_RESET_HI |
+						RADEON_SOFT_RESET_SE |
+						RADEON_SOFT_RESET_RE |
+						RADEON_SOFT_RESET_PP |
+						RADEON_SOFT_RESET_E2 |
+						RADEON_SOFT_RESET_RB)));
+	RADEON_READ(RADEON_RBBM_SOFT_RESET);
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV410) {
+		RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl);
+		RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index);
+		RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset);
+	}
+
+	/* setup the raster pipes */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R300)
+	    radeon_init_pipes(dev_priv);
+
+	/* Reset the CP ring */
+	radeon_do_cp_reset(dev_priv);
+
+	/* The CP is no longer running after an engine reset */
+	dev_priv->cp_running = 0;
+
+	/* Reset any pending vertex, indirect buffers */
+	radeon_freelist_reset(dev);
+
+	return 0;
+}
+
+static void radeon_cp_init_ring_buffer(struct drm_device * dev,
+				       drm_radeon_private_t * dev_priv)
+{
+	u32 ring_start, cur_read_ptr;
+	u32 tmp;
+
+	/* Initialize the memory controller.  With the new memory map, the fb
+	 * location is not changed; it should have been properly initialized
+	 * already.  Part of the problem is that the code below is bogus: it
+	 * assumes the GART is always appended to the fb, which is not
+	 * necessarily the case.
+	 */
+	if (!dev_priv->new_memmap)
+		radeon_write_fb_location(dev_priv,
+			     ((dev_priv->gart_vm_start - 1) & 0xffff0000)
+			     | (dev_priv->fb_location >> 16));
+
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		radeon_write_agp_base(dev_priv, dev->agp->base);
+
+		radeon_write_agp_location(dev_priv,
+			     (((dev_priv->gart_vm_start - 1 +
+				dev_priv->gart_size) & 0xffff0000) |
+			      (dev_priv->gart_vm_start >> 16)));
+
+		ring_start = (dev_priv->cp_ring->offset
+			      - dev->agp->base
+			      + dev_priv->gart_vm_start);
+	} else
+#endif
+		ring_start = (dev_priv->cp_ring->offset
+			      - (unsigned long)dev->sg->virtual
+			      + dev_priv->gart_vm_start);
+
+	RADEON_WRITE(RADEON_CP_RB_BASE, ring_start);
+
+	/* Set the write pointer delay */
+	RADEON_WRITE(RADEON_CP_RB_WPTR_DELAY, 0);
+
+	/* Initialize the ring buffer's read and write pointers */
+	cur_read_ptr = RADEON_READ(RADEON_CP_RB_RPTR);
+	RADEON_WRITE(RADEON_CP_RB_WPTR, cur_read_ptr);
+	SET_RING_HEAD(dev_priv, cur_read_ptr);
+	dev_priv->ring.tail = cur_read_ptr;
+
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
+			     dev_priv->ring_rptr->offset
+			     - dev->agp->base + dev_priv->gart_vm_start);
+	} else
+#endif
+	{
+		struct drm_sg_mem *entry = dev->sg;
+		unsigned long tmp_ofs, page_ofs;
+
+		tmp_ofs = dev_priv->ring_rptr->offset -
+				(unsigned long)dev->sg->virtual;
+		page_ofs = tmp_ofs >> PAGE_SHIFT;
+
+		RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, entry->busaddr[page_ofs]);
+		DRM_DEBUG("ring rptr: offset=0x%08lx handle=0x%08lx\n",
+			  (unsigned long)entry->busaddr[page_ofs],
+			  entry->handle + tmp_ofs);
+	}
+
+	/* Set ring buffer size */
+#ifdef __BIG_ENDIAN
+	RADEON_WRITE(RADEON_CP_RB_CNTL,
+		     RADEON_BUF_SWAP_32BIT |
+		     (dev_priv->ring.fetch_size_l2ow << 18) |
+		     (dev_priv->ring.rptr_update_l2qw << 8) |
+		     dev_priv->ring.size_l2qw);
+#else
+	RADEON_WRITE(RADEON_CP_RB_CNTL,
+		     (dev_priv->ring.fetch_size_l2ow << 18) |
+		     (dev_priv->ring.rptr_update_l2qw << 8) |
+		     dev_priv->ring.size_l2qw);
+#endif
+
+	/* Start with assuming that writeback doesn't work */
+	dev_priv->writeback_works = 0;
+
+	/* Initialize the scratch register pointer.  This will cause
+	 * the scratch register values to be written out to memory
+	 * whenever they are updated.
+	 *
+	 * We simply put this behind the ring read pointer; this works
+	 * with PCI GART as well as (whatever kind of) AGP GART.
+	 */
+	RADEON_WRITE(RADEON_SCRATCH_ADDR, RADEON_READ(RADEON_CP_RB_RPTR_ADDR)
+		     + RADEON_SCRATCH_REG_OFFSET);
+
+	dev_priv->scratch = ((__volatile__ u32 *)
+			     dev_priv->ring_rptr->handle +
+			     (RADEON_SCRATCH_REG_OFFSET / sizeof(u32)));
+
+	RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7);
+
+	/* Turn on bus mastering */
+	tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+	RADEON_WRITE(RADEON_BUS_CNTL, tmp);
+
+	dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0;
+	RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);
+
+	dev_priv->sarea_priv->last_dispatch = dev_priv->scratch[1] = 0;
+	RADEON_WRITE(RADEON_LAST_DISPATCH_REG,
+		     dev_priv->sarea_priv->last_dispatch);
+
+	dev_priv->sarea_priv->last_clear = dev_priv->scratch[2] = 0;
+	RADEON_WRITE(RADEON_LAST_CLEAR_REG, dev_priv->sarea_priv->last_clear);
+
+	radeon_do_wait_for_idle(dev_priv);
+
+	/* Sync everything up */
+	RADEON_WRITE(RADEON_ISYNC_CNTL,
+		     (RADEON_ISYNC_ANY2D_IDLE3D |
+		      RADEON_ISYNC_ANY3D_IDLE2D |
+		      RADEON_ISYNC_WAIT_IDLEGUI |
+		      RADEON_ISYNC_CPSCRATCH_IDLEGUI));
+
+}
+
+static void radeon_test_writeback(drm_radeon_private_t * dev_priv)
+{
+	u32 tmp;
+
+	/* Writeback doesn't seem to work everywhere; test it here and possibly
+	 * enable it if it appears to work.
+	 */
+	DRM_WRITE32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1), 0);
+	RADEON_WRITE(RADEON_SCRATCH_REG1, 0xdeadbeef);
+
+	for (tmp = 0; tmp < dev_priv->usec_timeout; tmp++) {
+		if (DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1)) ==
+		    0xdeadbeef)
+			break;
+		DRM_UDELAY(1);
+	}
+
+	if (tmp < dev_priv->usec_timeout) {
+		dev_priv->writeback_works = 1;
+		DRM_INFO("writeback test succeeded in %d usecs\n", tmp);
+	} else {
+		dev_priv->writeback_works = 0;
+		DRM_INFO("writeback test failed\n");
+	}
+	if (radeon_no_wb == 1) {
+		dev_priv->writeback_works = 0;
+		DRM_INFO("writeback forced off\n");
+	}
+
+	if (!dev_priv->writeback_works) {
+		/* Disable writeback to avoid unnecessary bus master transfer */
+		RADEON_WRITE(RADEON_CP_RB_CNTL, RADEON_READ(RADEON_CP_RB_CNTL) |
+			     RADEON_RB_NO_UPDATE);
+		RADEON_WRITE(RADEON_SCRATCH_UMSK, 0);
+	}
+}
+
+/* Enable or disable IGP GART on the chip */
+static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
+{
+	u32 temp;
+
+	if (on) {
+		DRM_DEBUG("programming igp gart %08X %08lX %08X\n",
+			  dev_priv->gart_vm_start,
+			  (long)dev_priv->gart_info.bus_addr,
+			  dev_priv->gart_size);
+
+		temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL);
+		if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
+			IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN |
+							     RS690_BLOCK_GFX_D3_EN));
+		else
+			IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
+
+		IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN |
+							       RS480_VA_SIZE_32MB));
+
+		temp = IGP_READ_MCIND(dev_priv, RS480_GART_FEATURE_ID);
+		IGP_WRITE_MCIND(RS480_GART_FEATURE_ID, (RS480_HANG_EN |
+							RS480_TLB_ENABLE |
+							RS480_GTW_LAC_EN |
+							RS480_1LEVEL_GART));
+
+		temp = dev_priv->gart_info.bus_addr & 0xfffff000;
+		temp |= (upper_32_bits(dev_priv->gart_info.bus_addr) & 0xff) << 4;
+		IGP_WRITE_MCIND(RS480_GART_BASE, temp);
+
+		temp = IGP_READ_MCIND(dev_priv, RS480_AGP_MODE_CNTL);
+		IGP_WRITE_MCIND(RS480_AGP_MODE_CNTL, ((1 << RS480_REQ_TYPE_SNOOP_SHIFT) |
+						      RS480_REQ_TYPE_SNOOP_DIS));
+
+		radeon_write_agp_base(dev_priv, dev_priv->gart_vm_start);
+
+		dev_priv->gart_size = 32*1024*1024;
+		temp = (((dev_priv->gart_vm_start - 1 + dev_priv->gart_size) &
+			 0xffff0000) | (dev_priv->gart_vm_start >> 16));
+
+		radeon_write_agp_location(dev_priv, temp);
+
+		temp = IGP_READ_MCIND(dev_priv, RS480_AGP_ADDRESS_SPACE_SIZE);
+		IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN |
+							       RS480_VA_SIZE_32MB));
+
+		do {
+			temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL);
+			if ((temp & RS480_GART_CACHE_INVALIDATE) == 0)
+				break;
+			DRM_UDELAY(1);
+		} while (1);
+
+		IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL,
+				RS480_GART_CACHE_INVALIDATE);
+
+		do {
+			temp = IGP_READ_MCIND(dev_priv, RS480_GART_CACHE_CNTRL);
+			if ((temp & RS480_GART_CACHE_INVALIDATE) == 0)
+				break;
+			DRM_UDELAY(1);
+		} while (1);
+
+		IGP_WRITE_MCIND(RS480_GART_CACHE_CNTRL, 0);
+	} else {
+		IGP_WRITE_MCIND(RS480_AGP_ADDRESS_SPACE_SIZE, 0);
+	}
+}
+
+static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
+{
+	u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL);
+	if (on) {
+
+		DRM_DEBUG("programming pcie %08X %08lX %08X\n",
+			  dev_priv->gart_vm_start,
+			  (long)dev_priv->gart_info.bus_addr,
+			  dev_priv->gart_size);
+		RADEON_WRITE_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO,
+				  dev_priv->gart_vm_start);
+		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_BASE,
+				  dev_priv->gart_info.bus_addr);
+		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_START_LO,
+				  dev_priv->gart_vm_start);
+		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_END_LO,
+				  dev_priv->gart_vm_start +
+				  dev_priv->gart_size - 1);
+
+		radeon_write_agp_location(dev_priv, 0xffffffc0); /* ?? */
+
+		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
+				  RADEON_PCIE_TX_GART_EN);
+	} else {
+		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,
+				  tmp & ~RADEON_PCIE_TX_GART_EN);
+	}
+}
+
+/* Enable or disable PCI GART on the chip */
+static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
+{
+	u32 tmp;
+
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+	    (dev_priv->flags & RADEON_IS_IGPGART)) {
+		radeon_set_igpgart(dev_priv, on);
+		return;
+	}
+
+	if (dev_priv->flags & RADEON_IS_PCIE) {
+		radeon_set_pciegart(dev_priv, on);
+		return;
+	}
+
+	tmp = RADEON_READ(RADEON_AIC_CNTL);
+
+	if (on) {
+		RADEON_WRITE(RADEON_AIC_CNTL,
+			     tmp | RADEON_PCIGART_TRANSLATE_EN);
+
+		/* set PCI GART page-table base address
+		 */
+		RADEON_WRITE(RADEON_AIC_PT_BASE, dev_priv->gart_info.bus_addr);
+
+		/* set address range for PCI address translate
+		 */
+		RADEON_WRITE(RADEON_AIC_LO_ADDR, dev_priv->gart_vm_start);
+		RADEON_WRITE(RADEON_AIC_HI_ADDR, dev_priv->gart_vm_start
+			     + dev_priv->gart_size - 1);
+
+		/* Turn off AGP aperture -- is this required for PCI GART?
+		 */
+		radeon_write_agp_location(dev_priv, 0xffffffc0);
+		RADEON_WRITE(RADEON_AGP_COMMAND, 0);	/* clear AGP_COMMAND */
+	} else {
+		RADEON_WRITE(RADEON_AIC_CNTL,
+			     tmp & ~RADEON_PCIGART_TRANSLATE_EN);
+	}
+}
+
+static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	DRM_DEBUG("\n");
+
+	/* If we require the new memory map but don't have it, fail. */
+	if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
+		DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
+		radeon_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+
+	if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP)) {
+		DRM_DEBUG("Forcing AGP card to PCI mode\n");
+		dev_priv->flags &= ~RADEON_IS_AGP;
+	} else if (!(dev_priv->flags & (RADEON_IS_AGP | RADEON_IS_PCI | RADEON_IS_PCIE))
+		   && !init->is_pci) {
+		DRM_DEBUG("Restoring AGP flag\n");
+		dev_priv->flags |= RADEON_IS_AGP;
+	}
+
+	if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) {
+		DRM_ERROR("PCI GART memory not allocated!\n");
+		radeon_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+
+	dev_priv->usec_timeout = init->usec_timeout;
+	if (dev_priv->usec_timeout < 1 ||
+	    dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
+		DRM_DEBUG("TIMEOUT problem!\n");
+		radeon_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+
+	/* Enable vblank on CRTC1 for older X servers
+	 */
+	dev_priv->vblank_crtc = DRM_RADEON_VBLANK_CRTC1;
+
+	switch(init->func) {
+	case RADEON_INIT_R200_CP:
+		dev_priv->microcode_version = UCODE_R200;
+		break;
+	case RADEON_INIT_R300_CP:
+		dev_priv->microcode_version = UCODE_R300;
+		break;
+	default:
+		dev_priv->microcode_version = UCODE_R100;
+	}
+
+	dev_priv->do_boxes = 0;
+	dev_priv->cp_mode = init->cp_mode;
+
+	/* We don't support anything other than bus-mastering ring mode,
+	 * but the ring can be in either AGP or PCI space for the ring
+	 * read pointer.
+	 */
+	if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) &&
+	    (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
+		DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
+		radeon_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+
+	switch (init->fb_bpp) {
+	case 16:
+		dev_priv->color_fmt = RADEON_COLOR_FORMAT_RGB565;
+		break;
+	case 32:
+	default:
+		dev_priv->color_fmt = RADEON_COLOR_FORMAT_ARGB8888;
+		break;
+	}
+	dev_priv->front_offset = init->front_offset;
+	dev_priv->front_pitch = init->front_pitch;
+	dev_priv->back_offset = init->back_offset;
+	dev_priv->back_pitch = init->back_pitch;
+
+	switch (init->depth_bpp) {
+	case 16:
+		dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_16BIT_INT_Z;
+		break;
+	case 32:
+	default:
+		dev_priv->depth_fmt = RADEON_DEPTH_FORMAT_24BIT_INT_Z;
+		break;
+	}
+	dev_priv->depth_offset = init->depth_offset;
+	dev_priv->depth_pitch = init->depth_pitch;
+
+	/* Hardware state for depth clears.  Remove this if/when we no
+	 * longer clear the depth buffer with a 3D rectangle.  Hard-code
+	 * all values to prevent unwanted 3D state from slipping through
+	 * and screwing with the clear operation.
+	 */
+	dev_priv->depth_clear.rb3d_cntl = (RADEON_PLANE_MASK_ENABLE |
+					   (dev_priv->color_fmt << 10) |
+					   (dev_priv->microcode_version ==
+					    UCODE_R100 ? RADEON_ZBLOCK16 : 0));
+
+	dev_priv->depth_clear.rb3d_zstencilcntl =
+	    (dev_priv->depth_fmt |
+	     RADEON_Z_TEST_ALWAYS |
+	     RADEON_STENCIL_TEST_ALWAYS |
+	     RADEON_STENCIL_S_FAIL_REPLACE |
+	     RADEON_STENCIL_ZPASS_REPLACE |
+	     RADEON_STENCIL_ZFAIL_REPLACE | RADEON_Z_WRITE_ENABLE);
+
+	dev_priv->depth_clear.se_cntl = (RADEON_FFACE_CULL_CW |
+					 RADEON_BFACE_SOLID |
+					 RADEON_FFACE_SOLID |
+					 RADEON_FLAT_SHADE_VTX_LAST |
+					 RADEON_DIFFUSE_SHADE_FLAT |
+					 RADEON_ALPHA_SHADE_FLAT |
+					 RADEON_SPECULAR_SHADE_FLAT |
+					 RADEON_FOG_SHADE_FLAT |
+					 RADEON_VTX_PIX_CENTER_OGL |
+					 RADEON_ROUND_MODE_TRUNC |
+					 RADEON_ROUND_PREC_8TH_PIX);
+
+	dev_priv->ring_offset = init->ring_offset;
+	dev_priv->ring_rptr_offset = init->ring_rptr_offset;
+	dev_priv->buffers_offset = init->buffers_offset;
+	dev_priv->gart_textures_offset = init->gart_textures_offset;
+
+	dev_priv->sarea = drm_getsarea(dev);
+	if (!dev_priv->sarea) {
+		DRM_ERROR("could not find sarea!\n");
+		radeon_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+
+	dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
+	if (!dev_priv->cp_ring) {
+		DRM_ERROR("could not find cp ring region!\n");
+		radeon_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+	dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
+	if (!dev_priv->ring_rptr) {
+		DRM_ERROR("could not find ring read pointer!\n");
+		radeon_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+	dev->agp_buffer_token = init->buffers_offset;
+	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
+	if (!dev->agp_buffer_map) {
+		DRM_ERROR("could not find dma buffer region!\n");
+		radeon_do_cleanup_cp(dev);
+		return -EINVAL;
+	}
+
+	if (init->gart_textures_offset) {
+		dev_priv->gart_textures =
+		    drm_core_findmap(dev, init->gart_textures_offset);
+		if (!dev_priv->gart_textures) {
+			DRM_ERROR("could not find GART texture region!\n");
+			radeon_do_cleanup_cp(dev);
+			return -EINVAL;
+		}
+	}
+
+	dev_priv->sarea_priv =
+	    (drm_radeon_sarea_t *) ((u8 *) dev_priv->sarea->handle +
+				    init->sarea_priv_offset);
+
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		drm_core_ioremap(dev_priv->cp_ring, dev);
+		drm_core_ioremap(dev_priv->ring_rptr, dev);
+		drm_core_ioremap(dev->agp_buffer_map, dev);
+		if (!dev_priv->cp_ring->handle ||
+		    !dev_priv->ring_rptr->handle ||
+		    !dev->agp_buffer_map->handle) {
+			DRM_ERROR("could not find ioremap agp regions!\n");
+			radeon_do_cleanup_cp(dev);
+			return -EINVAL;
+		}
+	} else
+#endif
+	{
+		dev_priv->cp_ring->handle = (void *)dev_priv->cp_ring->offset;
+		dev_priv->ring_rptr->handle =
+		    (void *)dev_priv->ring_rptr->offset;
+		dev->agp_buffer_map->handle =
+		    (void *)dev->agp_buffer_map->offset;
+
+		DRM_DEBUG("dev_priv->cp_ring->handle %p\n",
+			  dev_priv->cp_ring->handle);
+		DRM_DEBUG("dev_priv->ring_rptr->handle %p\n",
+			  dev_priv->ring_rptr->handle);
+		DRM_DEBUG("dev->agp_buffer_map->handle %p\n",
+			  dev->agp_buffer_map->handle);
+	}
+
+	dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16;
+	dev_priv->fb_size =
+		((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000)
+		- dev_priv->fb_location;
+
+	dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) |
+					((dev_priv->front_offset
+					  + dev_priv->fb_location) >> 10));
+
+	dev_priv->back_pitch_offset = (((dev_priv->back_pitch / 64) << 22) |
+				       ((dev_priv->back_offset
+					 + dev_priv->fb_location) >> 10));
+
+	dev_priv->depth_pitch_offset = (((dev_priv->depth_pitch / 64) << 22) |
+					((dev_priv->depth_offset
+					  + dev_priv->fb_location) >> 10));
+
+	dev_priv->gart_size = init->gart_size;
+
+	/* Now let's set the memory map ... */
+	if (dev_priv->new_memmap) {
+		u32 base = 0;
+
+		DRM_INFO("Setting GART location based on new memory map\n");
+
+		/* If using AGP, try to locate the AGP aperture at the same
+		 * location in the card and on the bus, though we have to
+		 * align it down.
+		 */
+#if __OS_HAS_AGP
+		if (dev_priv->flags & RADEON_IS_AGP) {
+			base = dev->agp->base;
+			/* Check if valid */
+			if ((base + dev_priv->gart_size - 1) >= dev_priv->fb_location &&
+			    base < (dev_priv->fb_location + dev_priv->fb_size - 1)) {
+				DRM_INFO("Can't use AGP base @0x%08lx, won't fit\n",
+					 dev->agp->base);
+				base = 0;
+			}
+		}
+#endif
+		/* If not or if AGP is at 0 (Macs), try to put it elsewhere */
+		if (base == 0) {
+			base = dev_priv->fb_location + dev_priv->fb_size;
+			if (base < dev_priv->fb_location ||
+			    ((base + dev_priv->gart_size) & 0xfffffffful) < base)
+				base = dev_priv->fb_location
+					- dev_priv->gart_size;
+		}
+		dev_priv->gart_vm_start = base & 0xffc00000u;
+		if (dev_priv->gart_vm_start != base)
+			DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n",
+				 base, dev_priv->gart_vm_start);
+	} else {
+		DRM_INFO("Setting GART location based on old memory map\n");
+		dev_priv->gart_vm_start = dev_priv->fb_location +
+			RADEON_READ(RADEON_CONFIG_APER_SIZE);
+	}
+
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP)
+		dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
+						 - dev->agp->base
+						 + dev_priv->gart_vm_start);
+	else
+#endif
+		dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
+					- (unsigned long)dev->sg->virtual
+					+ dev_priv->gart_vm_start);
+
+	DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size);
+	DRM_DEBUG("dev_priv->gart_vm_start 0x%x\n", dev_priv->gart_vm_start);
+	DRM_DEBUG("dev_priv->gart_buffers_offset 0x%lx\n",
+		  dev_priv->gart_buffers_offset);
+
+	dev_priv->ring.start = (u32 *) dev_priv->cp_ring->handle;
+	dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
+			      + init->ring_size / sizeof(u32));
+	dev_priv->ring.size = init->ring_size;
+	dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+
+	dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
+	dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8);
+
+	dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
+	dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16);
+	dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
+
+	dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
+
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		/* Turn off PCI GART */
+		radeon_set_pcigart(dev_priv, 0);
+	} else
+#endif
+	{
+		dev_priv->gart_info.table_mask = DMA_BIT_MASK(32);
+		/* if we have an offset set from userspace */
+		if (dev_priv->pcigart_offset_set) {
+			dev_priv->gart_info.bus_addr =
+			    dev_priv->pcigart_offset + dev_priv->fb_location;
+			dev_priv->gart_info.mapping.offset =
+			    dev_priv->pcigart_offset + dev_priv->fb_aper_offset;
+			dev_priv->gart_info.mapping.size =
+			    dev_priv->gart_info.table_size;
+
+			drm_core_ioremap_wc(&dev_priv->gart_info.mapping, dev);
+			dev_priv->gart_info.addr =
+			    dev_priv->gart_info.mapping.handle;
+
+			if (dev_priv->flags & RADEON_IS_PCIE)
+				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCIE;
+			else
+				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
+			dev_priv->gart_info.gart_table_location =
+			    DRM_ATI_GART_FB;
+
+			DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
+				  dev_priv->gart_info.addr,
+				  dev_priv->pcigart_offset);
+		} else {
+			if (dev_priv->flags & RADEON_IS_IGPGART)
+				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_IGP;
+			else
+				dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
+			dev_priv->gart_info.gart_table_location =
+			    DRM_ATI_GART_MAIN;
+			dev_priv->gart_info.addr = NULL;
+			dev_priv->gart_info.bus_addr = 0;
+			if (dev_priv->flags & RADEON_IS_PCIE) {
+				DRM_ERROR
+				    ("Cannot use PCI Express without GART in FB memory\n");
+				radeon_do_cleanup_cp(dev);
+				return -EINVAL;
+			}
+		}
+
+		if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
+			DRM_ERROR("failed to init PCI GART!\n");
+			radeon_do_cleanup_cp(dev);
+			return -ENOMEM;
+		}
+
+		/* Turn on PCI GART */
+		radeon_set_pcigart(dev_priv, 1);
+	}
+
+	radeon_cp_load_microcode(dev_priv);
+	radeon_cp_init_ring_buffer(dev, dev_priv);
+
+	dev_priv->last_buf = 0;
+
+	radeon_do_engine_reset(dev);
+	radeon_test_writeback(dev_priv);
+
+	return 0;
+}
+
+static int radeon_do_cleanup_cp(struct drm_device * dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	/* Make sure interrupts are disabled here because the uninstall ioctl
+	 * may not have been called from userspace; after dev_private
+	 * is freed, it's too late.
+	 */
+	if (dev->irq_enabled)
+		drm_irq_uninstall(dev);
+
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		if (dev_priv->cp_ring != NULL) {
+			drm_core_ioremapfree(dev_priv->cp_ring, dev);
+			dev_priv->cp_ring = NULL;
+		}
+		if (dev_priv->ring_rptr != NULL) {
+			drm_core_ioremapfree(dev_priv->ring_rptr, dev);
+			dev_priv->ring_rptr = NULL;
+		}
+		if (dev->agp_buffer_map != NULL) {
+			drm_core_ioremapfree(dev->agp_buffer_map, dev);
+			dev->agp_buffer_map = NULL;
+		}
+	} else
+#endif
+	{
+
+		if (dev_priv->gart_info.bus_addr) {
+			/* Turn off PCI GART */
+			radeon_set_pcigart(dev_priv, 0);
+			if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info))
+				DRM_ERROR("failed to cleanup PCI GART!\n");
+		}
+
+		if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB)
+		{
+			drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev);
+			dev_priv->gart_info.addr = 0;
+		}
+	}
+	/* only clear to the start of flags */
+	memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags));
+
+	return 0;
+}
+
+/* This code will reinit the Radeon CP hardware after a resume from disk.
+ * AFAIK, it would be very difficult to pickle the state at suspend time, so
+ * here we make sure that all Radeon hardware initialisation is re-done without
+ * affecting running applications.
+ *
+ * Charl P. Botha <http://cpbotha.net>
+ */
+static int radeon_do_resume_cp(struct drm_device * dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	if (!dev_priv) {
+		DRM_ERROR("Called with no initialization\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG("Starting radeon_do_resume_cp()\n");
+
+#if __OS_HAS_AGP
+	if (dev_priv->flags & RADEON_IS_AGP) {
+		/* Turn off PCI GART */
+		radeon_set_pcigart(dev_priv, 0);
+	} else
+#endif
+	{
+		/* Turn on PCI GART */
+		radeon_set_pcigart(dev_priv, 1);
+	}
+
+	radeon_cp_load_microcode(dev_priv);
+	radeon_cp_init_ring_buffer(dev, dev_priv);
+
+	radeon_do_engine_reset(dev);
+	radeon_enable_interrupt(dev);
+
+	DRM_DEBUG("radeon_do_resume_cp() complete\n");
+
+	return 0;
+}
+
+int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_init_t *init = data;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (init->func == RADEON_INIT_R300_CP)
+		r300_init_reg_flags(dev);
+
+	switch (init->func) {
+	case RADEON_INIT_CP:
+	case RADEON_INIT_R200_CP:
+	case RADEON_INIT_R300_CP:
+		return radeon_do_init_cp(dev, init);
+	case RADEON_CLEANUP_CP:
+		return radeon_do_cleanup_cp(dev);
+	}
+
+	return -EINVAL;
+}
+
+int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (dev_priv->cp_running) {
+		DRM_DEBUG("while CP running\n");
+		return 0;
+	}
+	if (dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS) {
+		DRM_DEBUG("called with bogus CP mode (%d)\n",
+			  dev_priv->cp_mode);
+		return 0;
+	}
+
+	radeon_do_cp_start(dev_priv);
+
+	return 0;
+}
+
+/* Stop the CP.  The engine must have been idled before calling this
+ * routine.
+ */
+int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_cp_stop_t *stop = data;
+	int ret;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (!dev_priv->cp_running)
+		return 0;
+
+	/* Flush any pending CP commands.  This ensures any outstanding
+	 * commands are executed by the engine before we turn it off.
+	 */
+	if (stop->flush) {
+		radeon_do_cp_flush(dev_priv);
+	}
+
+	/* If we fail to make the engine go idle, we return an error
+	 * code so that the DRM ioctl wrapper can try again.
+	 */
+	if (stop->idle) {
+		ret = radeon_do_cp_idle(dev_priv);
+		if (ret)
+			return ret;
+	}
+
+	/* Finally, we can turn off the CP.  If the engine isn't idle,
+	 * we will get some dropped triangles as they won't be fully
+	 * rendered before the CP is shut down.
+	 */
+	radeon_do_cp_stop(dev_priv);
+
+	/* Reset the engine */
+	radeon_do_engine_reset(dev);
+
+	return 0;
+}
+
+void radeon_do_release(struct drm_device * dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	int i, ret;
+
+	if (dev_priv) {
+		if (dev_priv->cp_running) {
+			/* Stop the cp */
+			while ((ret = radeon_do_cp_idle(dev_priv)) != 0) {
+				DRM_DEBUG("radeon_do_cp_idle %d\n", ret);
+#ifdef __linux__
+				schedule();
+#else
+				tsleep(&ret, PZERO, "rdnrel", 1);
+#endif
+			}
+			radeon_do_cp_stop(dev_priv);
+			radeon_do_engine_reset(dev);
+		}
+
+		/* Disable *all* interrupts */
+		if (dev_priv->mmio)	/* remove this after permanent addmaps */
+			RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
+
+		if (dev_priv->mmio) {	/* remove all surfaces */
+			for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+				RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * i, 0);
+				RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND +
+					     16 * i, 0);
+				RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND +
+					     16 * i, 0);
+			}
+		}
+
+		/* Free memory heap structures */
+		radeon_mem_takedown(&(dev_priv->gart_heap));
+		radeon_mem_takedown(&(dev_priv->fb_heap));
+
+		/* deallocate kernel resources */
+		radeon_do_cleanup_cp(dev);
+	}
+}
+
+/* Just reset the CP ring.  Called as part of an X Server engine reset.
+ */
+int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (!dev_priv) {
+		DRM_DEBUG("called before init done\n");
+		return -EINVAL;
+	}
+
+	radeon_do_cp_reset(dev_priv);
+
+	/* The CP is no longer running after an engine reset */
+	dev_priv->cp_running = 0;
+
+	return 0;
+}
+
+int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	return radeon_do_cp_idle(dev_priv);
+}
+
+/* Added by Charl P. Botha to call radeon_do_resume_cp().
+ */
+int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+
+	return radeon_do_resume_cp(dev);
+}
+
+int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	return radeon_do_engine_reset(dev);
+}
+
+/* ================================================================
+ * Fullscreen mode
+ */
+
+/* KW: Deprecated to say the least:
+ */
+int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	return 0;
+}
+
+/* ================================================================
+ * Freelist management
+ */
+
+/* Original comment: FIXME: ROTATE_BUFS is a hack to cycle through
+ *   bufs until freelist code is used.  Note this hides a problem with
+ *   the scratch register (used to keep track of last buffer
+ *   completed) being written to before the last buffer has actually
+ *   completed rendering.
+ *
+ * KW:  It's also a good way to find free buffers quickly.
+ *
+ * KW: Ideally this loop wouldn't exist, and freelist_get wouldn't
+ * sleep.  However, bugs in older versions of radeon_accel.c mean that
+ * we essentially have to do this, else old clients will break.
+ *
+ * However, it does leave open a potential deadlock where all the
+ * buffers are held by other clients, which can't release them because
+ * they can't get the lock.
+ */
+
+struct drm_buf *radeon_freelist_get(struct drm_device * dev)
+{
+	struct drm_device_dma *dma = dev->dma;
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_buf_priv_t *buf_priv;
+	struct drm_buf *buf;
+	int i, t;
+	int start;
+
+	if (++dev_priv->last_buf >= dma->buf_count)
+		dev_priv->last_buf = 0;
+
+	start = dev_priv->last_buf;
+
+	for (t = 0; t < dev_priv->usec_timeout; t++) {
+		u32 done_age = GET_SCRATCH(1);
+		DRM_DEBUG("done_age = %d\n", done_age);
+		for (i = start; i < dma->buf_count; i++) {
+			buf = dma->buflist[i];
+			buf_priv = buf->dev_private;
+			if (buf->file_priv == NULL || (buf->pending &&
+						       buf_priv->age <=
+						       done_age)) {
+				dev_priv->stats.requested_bufs++;
+				buf->pending = 0;
+				return buf;
+			}
+			start = 0;
+		}
+
+		if (t) {
+			DRM_UDELAY(1);
+			dev_priv->stats.freelist_loops++;
+		}
+	}
+
+	DRM_DEBUG("returning NULL!\n");
+	return NULL;
+}
+
+#if 0
+struct drm_buf *radeon_freelist_get(struct drm_device * dev)
+{
+	struct drm_device_dma *dma = dev->dma;
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_buf_priv_t *buf_priv;
+	struct drm_buf *buf;
+	int i, t;
+	int start;
+	u32 done_age = DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1));
+
+	if (++dev_priv->last_buf >= dma->buf_count)
+		dev_priv->last_buf = 0;
+
+	start = dev_priv->last_buf;
+	dev_priv->stats.freelist_loops++;
+
+	for (t = 0; t < 2; t++) {
+		for (i = start; i < dma->buf_count; i++) {
+			buf = dma->buflist[i];
+			buf_priv = buf->dev_private;
+			if (buf->file_priv == 0 || (buf->pending &&
+						    buf_priv->age <=
+						    done_age)) {
+				dev_priv->stats.requested_bufs++;
+				buf->pending = 0;
+				return buf;
+			}
+		}
+		start = 0;
+	}
+
+	return NULL;
+}
+#endif
+
+void radeon_freelist_reset(struct drm_device * dev)
+{
+	struct drm_device_dma *dma = dev->dma;
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	int i;
+
+	dev_priv->last_buf = 0;
+	for (i = 0; i < dma->buf_count; i++) {
+		struct drm_buf *buf = dma->buflist[i];
+		drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
+		buf_priv->age = 0;
+	}
+}
+
+/* ================================================================
+ * CP command submission
+ */
+
+int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n)
+{
+	drm_radeon_ring_buffer_t *ring = &dev_priv->ring;
+	int i;
+	u32 last_head = GET_RING_HEAD(dev_priv);
+
+	for (i = 0; i < dev_priv->usec_timeout; i++) {
+		u32 head = GET_RING_HEAD(dev_priv);
+
+		ring->space = (head - ring->tail) * sizeof(u32);
+		if (ring->space <= 0)
+			ring->space += ring->size;
+		if (ring->space > n)
+			return 0;
+
+		dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+		if (head != last_head)
+			i = 0;
+		last_head = head;
+
+		DRM_UDELAY(1);
+	}
+
+	/* FIXME: This return value is ignored in the BEGIN_RING macro! */
+#if RADEON_FIFO_DEBUG
+	radeon_status(dev_priv);
+	DRM_ERROR("failed!\n");
+#endif
+	return -EBUSY;
+}
+
+static int radeon_cp_get_buffers(struct drm_device *dev,
+				 struct drm_file *file_priv,
+				 struct drm_dma * d)
+{
+	int i;
+	struct drm_buf *buf;
+
+	for (i = d->granted_count; i < d->request_count; i++) {
+		buf = radeon_freelist_get(dev);
+		if (!buf)
+			return -EBUSY;	/* NOTE: broken client */
+
+		buf->file_priv = file_priv;
+
+		if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
+				     sizeof(buf->idx)))
+			return -EFAULT;
+		if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
+				     sizeof(buf->total)))
+			return -EFAULT;
+
+		d->granted_count++;
+	}
+	return 0;
+}
+
+int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	int ret = 0;
+	struct drm_dma *d = data;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	/* Please don't send us buffers.
+	 */
+	if (d->send_count != 0) {
+		DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
+			  DRM_CURRENTPID, d->send_count);
+		return -EINVAL;
+	}
+
+	/* We'll send you buffers.
+	 */
+	if (d->request_count < 0 || d->request_count > dma->buf_count) {
+		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
+			  DRM_CURRENTPID, d->request_count, dma->buf_count);
+		return -EINVAL;
+	}
+
+	d->granted_count = 0;
+
+	if (d->request_count) {
+		ret = radeon_cp_get_buffers(dev, file_priv, d);
+	}
+
+	return ret;
+}
+
+int radeon_driver_load(struct drm_device *dev, unsigned long flags)
+{
+	drm_radeon_private_t *dev_priv;
+	int ret = 0;
+
+	dev_priv = drm_alloc(sizeof(drm_radeon_private_t), DRM_MEM_DRIVER);
+	if (dev_priv == NULL)
+		return -ENOMEM;
+
+	memset(dev_priv, 0, sizeof(drm_radeon_private_t));
+	dev->dev_private = (void *)dev_priv;
+	dev_priv->flags = flags;
+
+	switch (flags & RADEON_FAMILY_MASK) {
+	case CHIP_R100:
+	case CHIP_RV200:
+	case CHIP_R200:
+	case CHIP_R300:
+	case CHIP_R350:
+	case CHIP_R420:
+	case CHIP_RV410:
+	case CHIP_RV515:
+	case CHIP_R520:
+	case CHIP_RV570:
+	case CHIP_R580:
+		dev_priv->flags |= RADEON_HAS_HIERZ;
+		break;
+	default:
+		/* all other chips have no hierarchical z buffer */
+		break;
+	}
+
+	if (drm_device_is_agp(dev))
+		dev_priv->flags |= RADEON_IS_AGP;
+	else if (drm_device_is_pcie(dev))
+		dev_priv->flags |= RADEON_IS_PCIE;
+	else
+		dev_priv->flags |= RADEON_IS_PCI;
+
+	DRM_DEBUG("%s card detected\n",
+		  ((dev_priv->flags & RADEON_IS_AGP) ? "AGP" : (((dev_priv->flags & RADEON_IS_PCIE) ? "PCIE" : "PCI"))));
+	return ret;
+}
+
+/* Create mappings for registers and framebuffer so userland doesn't necessarily
+ * have to find them.
+ */
+int radeon_driver_firstopen(struct drm_device *dev)
+{
+	int ret;
+	drm_local_map_t *map;
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
+
+	ret = drm_addmap(dev, drm_get_resource_start(dev, 2),
+			 drm_get_resource_len(dev, 2), _DRM_REGISTERS,
+			 _DRM_READ_ONLY, &dev_priv->mmio);
+	if (ret != 0)
+		return ret;
+
+	dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0);
+	ret = drm_addmap(dev, dev_priv->fb_aper_offset,
+			 drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER,
+			 _DRM_WRITE_COMBINING, &map);
+	if (ret != 0)
+		return ret;
+
+	return 0;
+}
+
+int radeon_driver_unload(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	DRM_DEBUG("\n");
+	drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
+
+	dev->dev_private = NULL;
+	return 0;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
new file mode 100644
index 000000000000..349ac3d3b848
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -0,0 +1,126 @@
+/**
+ * \file radeon_drv.c
+ * ATI Radeon driver
+ *
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "radeon_drm.h"
+#include "radeon_drv.h"
+
+#include "drm_pciids.h"
+
+int radeon_no_wb;
+
+MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
+module_param_named(no_wb, radeon_no_wb, int, 0444);
+
+static int dri_library_name(struct drm_device *dev, char *buf)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	int family = dev_priv->flags & RADEON_FAMILY_MASK;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+		        (family < CHIP_R200) ? "radeon" :
+		        ((family < CHIP_R300) ? "r200" :
+		        "r300"));
+}
+
+static struct pci_device_id pciidlist[] = {
+	radeon_PCI_IDS
+};
+
+static struct drm_driver driver = {
+	.driver_features =
+	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
+	    DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED |
+	    DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2,
+	.dev_priv_size = sizeof(drm_radeon_buf_priv_t),
+	.load = radeon_driver_load,
+	.firstopen = radeon_driver_firstopen,
+	.open = radeon_driver_open,
+	.preclose = radeon_driver_preclose,
+	.postclose = radeon_driver_postclose,
+	.lastclose = radeon_driver_lastclose,
+	.unload = radeon_driver_unload,
+	.vblank_wait = radeon_driver_vblank_wait,
+	.vblank_wait2 = radeon_driver_vblank_wait2,
+	.dri_library_name = dri_library_name,
+	.irq_preinstall = radeon_driver_irq_preinstall,
+	.irq_postinstall = radeon_driver_irq_postinstall,
+	.irq_uninstall = radeon_driver_irq_uninstall,
+	.irq_handler = radeon_driver_irq_handler,
+	.reclaim_buffers = drm_core_reclaim_buffers,
+	.get_map_ofs = drm_core_get_map_ofs,
+	.get_reg_ofs = drm_core_get_reg_ofs,
+	.ioctls = radeon_ioctls,
+	.dma_ioctl = radeon_cp_buffers,
+	.fops = {
+		 .owner = THIS_MODULE,
+		 .open = drm_open,
+		 .release = drm_release,
+		 .ioctl = drm_ioctl,
+		 .mmap = drm_mmap,
+		 .poll = drm_poll,
+		 .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+		 .compat_ioctl = radeon_compat_ioctl,
+#endif
+	},
+
+	.pci_driver = {
+		 .name = DRIVER_NAME,
+		 .id_table = pciidlist,
+	},
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static int __init radeon_init(void)
+{
+	driver.num_ioctls = radeon_max_ioctl;
+	return drm_init(&driver);
+}
+
+static void __exit radeon_exit(void)
+{
+	drm_exit(&driver);
+}
+
+module_init(radeon_init);
+module_exit(radeon_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
new file mode 100644
index 000000000000..3f0eca957aa7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -0,0 +1,1406 @@
+/* radeon_drv.h -- Private header for radeon driver -*- linux-c -*-
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Kevin E. Martin <martin@valinux.com>
+ *    Gareth Hughes <gareth@valinux.com>
+ */
+
+#ifndef __RADEON_DRV_H__
+#define __RADEON_DRV_H__
+
+/* General customization:
+ */
+
+#define DRIVER_AUTHOR		"Gareth Hughes, Keith Whitwell, others."
+
+#define DRIVER_NAME		"radeon"
+#define DRIVER_DESC		"ATI Radeon"
+#define DRIVER_DATE		"20080528"
+
+/* Interface history:
+ *
+ * 1.1 - ??
+ * 1.2 - Add vertex2 ioctl (keith)
+ *     - Add stencil capability to clear ioctl (gareth, keith)
+ *     - Increase MAX_TEXTURE_LEVELS (brian)
+ * 1.3 - Add cmdbuf ioctl (keith)
+ *     - Add support for new radeon packets (keith)
+ *     - Add getparam ioctl (keith)
+ *     - Add flip-buffers ioctl, deprecate fullscreen foo (keith).
+ * 1.4 - Add scratch registers to get_param ioctl.
+ * 1.5 - Add r200 packets to cmdbuf ioctl
+ *     - Add r200 function to init ioctl
+ *     - Add 'scalar2' instruction to cmdbuf
+ * 1.6 - Add static GART memory manager
+ *       Add irq handler (won't be turned on unless X server knows to)
+ *       Add irq ioctls and irq_active getparam.
+ *       Add wait command for cmdbuf ioctl
+ *       Add GART offset query for getparam
+ * 1.7 - Add support for cube map registers: R200_PP_CUBIC_FACES_[0..5]
+ *       and R200_PP_CUBIC_OFFSET_F1_[0..5].
+ *       Added packets R200_EMIT_PP_CUBIC_FACES_[0..5] and
+ *       R200_EMIT_PP_CUBIC_OFFSETS_[0..5].  (brian)
+ * 1.8 - Remove need to call cleanup ioctls on last client exit (keith)
+ *       Add 'GET' queries for starting additional clients on different VT's.
+ * 1.9 - Add DRM_IOCTL_RADEON_CP_RESUME ioctl.
+ *       Add texture rectangle support for r100.
+ * 1.10- Add SETPARAM ioctl; first parameter to set is FB_LOCATION, which
+ *       clients use to tell the DRM where they think the framebuffer is
+ *       located in the card's address space
+ * 1.11- Add packet R200_EMIT_RB3D_BLENDCOLOR to support GL_EXT_blend_color
+ *       and GL_EXT_blend_[func|equation]_separate on r200
+ * 1.12- Add R300 CP microcode support - this just loads the CP on r300
+ *       (No 3D support yet - just microcode loading).
+ * 1.13- Add packet R200_EMIT_TCL_POINT_SPRITE_CNTL for ARB_point_parameters
+ *     - Add hyperz support, add hyperz flags to clear ioctl.
+ * 1.14- Add support for color tiling
+ *     - Add R100/R200 surface allocation/free support
+ * 1.15- Add support for texture micro tiling
+ *     - Add support for r100 cube maps
+ * 1.16- Add R200_EMIT_PP_TRI_PERF_CNTL packet to support brilinear
+ *       texture filtering on r200
+ * 1.17- Add initial support for R300 (3D).
+ * 1.18- Add support for GL_ATI_fragment_shader, new packets
+ *       R200_EMIT_PP_AFS_0/1, R200_EMIT_PP_TXCTLALL_0-5 (replaces
+ *       R200_EMIT_PP_TXFILTER_0-5, 2 more regs) and R200_EMIT_ATF_TFACTOR
+ *       (replaces R200_EMIT_TFACTOR_0 (8 consts instead of 6))
+ * 1.19- Add support for gart table in FB memory and PCIE r300
+ * 1.20- Add support for r300 texrect
+ * 1.21- Add support for card type getparam
+ * 1.22- Add support for texture cache flushes (R300_TX_CNTL)
+ * 1.23- Add new radeon memory map work from benh
+ * 1.24- Add general-purpose packet for manipulating scratch registers (r300)
+ * 1.25- Add support for r200 vertex programs (R200_EMIT_VAP_PVS_CNTL,
+ *       new packet type)
+ * 1.26- Add support for variable size PCI(E) gart aperture
+ * 1.27- Add support for IGP GART
+ * 1.28- Add support for VBL on CRTC2
+ * 1.29- R500 3D cmd buffer support
+ */
+#define DRIVER_MAJOR		1
+#define DRIVER_MINOR		29
+#define DRIVER_PATCHLEVEL	0
+
+/*
+ * Radeon chip families
+ */
+enum radeon_family {
+	CHIP_R100,
+	CHIP_RV100,
+	CHIP_RS100,
+	CHIP_RV200,
+	CHIP_RS200,
+	CHIP_R200,
+	CHIP_RV250,
+	CHIP_RS300,
+	CHIP_RV280,
+	CHIP_R300,
+	CHIP_R350,
+	CHIP_RV350,
+	CHIP_RV380,
+	CHIP_R420,
+	CHIP_RV410,
+	CHIP_RS480,
+	CHIP_RS690,
+	CHIP_RV515,
+	CHIP_R520,
+	CHIP_RV530,
+	CHIP_RV560,
+	CHIP_RV570,
+	CHIP_R580,
+	CHIP_LAST,
+};
+
+enum radeon_cp_microcode_version {
+	UCODE_R100,
+	UCODE_R200,
+	UCODE_R300,
+};
+
+/*
+ * Chip flags
+ */
+enum radeon_chip_flags {
+	RADEON_FAMILY_MASK = 0x0000ffffUL,
+	RADEON_FLAGS_MASK = 0xffff0000UL,
+	RADEON_IS_MOBILITY = 0x00010000UL,
+	RADEON_IS_IGP = 0x00020000UL,
+	RADEON_SINGLE_CRTC = 0x00040000UL,
+	RADEON_IS_AGP = 0x00080000UL,
+	RADEON_HAS_HIERZ = 0x00100000UL,
+	RADEON_IS_PCIE = 0x00200000UL,
+	RADEON_NEW_MEMMAP = 0x00400000UL,
+	RADEON_IS_PCI = 0x00800000UL,
+	RADEON_IS_IGPGART = 0x01000000UL,
+};
+
+#define GET_RING_HEAD(dev_priv)	(dev_priv->writeback_works ? \
+        DRM_READ32(  (dev_priv)->ring_rptr, 0 ) : RADEON_READ(RADEON_CP_RB_RPTR))
+#define SET_RING_HEAD(dev_priv,val)	DRM_WRITE32( (dev_priv)->ring_rptr, 0, (val) )
+
+typedef struct drm_radeon_freelist {
+	unsigned int age;
+	struct drm_buf *buf;
+	struct drm_radeon_freelist *next;
+	struct drm_radeon_freelist *prev;
+} drm_radeon_freelist_t;
+
+typedef struct drm_radeon_ring_buffer {
+	u32 *start;
+	u32 *end;
+	int size;
+	int size_l2qw;
+
+	int rptr_update; /* Double Words */
+	int rptr_update_l2qw; /* log2 Quad Words */
+
+	int fetch_size; /* Double Words */
+	int fetch_size_l2ow; /* log2 Oct Words */
+
+	u32 tail;
+	u32 tail_mask;
+	int space;
+
+	int high_mark;
+} drm_radeon_ring_buffer_t;
+
+typedef struct drm_radeon_depth_clear_t {
+	u32 rb3d_cntl;
+	u32 rb3d_zstencilcntl;
+	u32 se_cntl;
+} drm_radeon_depth_clear_t;
+
+struct drm_radeon_driver_file_fields {
+	int64_t radeon_fb_delta;
+};
+
+struct mem_block {
+	struct mem_block *next;
+	struct mem_block *prev;
+	int start;
+	int size;
+	struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
+};
+
+struct radeon_surface {
+	int refcount;
+	u32 lower;
+	u32 upper;
+	u32 flags;
+};
+
+struct radeon_virt_surface {
+	int surface_index;
+	u32 lower;
+	u32 upper;
+	u32 flags;
+	struct drm_file *file_priv;
+};
+
+typedef struct drm_radeon_private {
+	drm_radeon_ring_buffer_t ring;
+	drm_radeon_sarea_t *sarea_priv;
+
+	u32 fb_location;
+	u32 fb_size;
+	int new_memmap;
+
+	int gart_size;
+	u32 gart_vm_start;
+	unsigned long gart_buffers_offset;
+
+	int cp_mode;
+	int cp_running;
+
+	drm_radeon_freelist_t *head;
+	drm_radeon_freelist_t *tail;
+	int last_buf;
+	volatile u32 *scratch;
+	int writeback_works;
+
+	int usec_timeout;
+
+	int microcode_version;
+
+	struct {
+		u32 boxes;
+		int freelist_timeouts;
+		int freelist_loops;
+		int requested_bufs;
+		int last_frame_reads;
+		int last_clear_reads;
+		int clears;
+		int texture_uploads;
+	} stats;
+
+	int do_boxes;
+	int page_flipping;
+
+	u32 color_fmt;
+	unsigned int front_offset;
+	unsigned int front_pitch;
+	unsigned int back_offset;
+	unsigned int back_pitch;
+
+	u32 depth_fmt;
+	unsigned int depth_offset;
+	unsigned int depth_pitch;
+
+	u32 front_pitch_offset;
+	u32 back_pitch_offset;
+	u32 depth_pitch_offset;
+
+	drm_radeon_depth_clear_t depth_clear;
+
+	unsigned long ring_offset;
+	unsigned long ring_rptr_offset;
+	unsigned long buffers_offset;
+	unsigned long gart_textures_offset;
+
+	drm_local_map_t *sarea;
+	drm_local_map_t *mmio;
+	drm_local_map_t *cp_ring;
+	drm_local_map_t *ring_rptr;
+	drm_local_map_t *gart_textures;
+
+	struct mem_block *gart_heap;
+	struct mem_block *fb_heap;
+
+	/* SW interrupt */
+	wait_queue_head_t swi_queue;
+	atomic_t swi_emitted;
+	int vblank_crtc;
+	uint32_t irq_enable_reg;
+	int irq_enabled;
+	uint32_t r500_disp_irq_reg;
+
+	struct radeon_surface surfaces[RADEON_MAX_SURFACES];
+	struct radeon_virt_surface virt_surfaces[2 * RADEON_MAX_SURFACES];
+
+	unsigned long pcigart_offset;
+	unsigned int pcigart_offset_set;
+	struct drm_ati_pcigart_info gart_info;
+
+	u32 scratch_ages[5];
+
+	/* starting from here on, data is preserved across an open */
+	uint32_t flags;		/* see radeon_chip_flags */
+	unsigned long fb_aper_offset;
+
+	int num_gb_pipes;
+} drm_radeon_private_t;
+
+typedef struct drm_radeon_buf_priv {
+	u32 age;
+} drm_radeon_buf_priv_t;
+
+typedef struct drm_radeon_kcmd_buffer {
+	int bufsz;
+	char *buf;
+	int nbox;
+	struct drm_clip_rect __user *boxes;
+} drm_radeon_kcmd_buffer_t;
+
+extern int radeon_no_wb;
+extern struct drm_ioctl_desc radeon_ioctls[];
+extern int radeon_max_ioctl;
+
+/* Check whether the given hardware address is inside the framebuffer or the
+ * GART area.
+ */
+static __inline__ int radeon_check_offset(drm_radeon_private_t *dev_priv,
+					  u64 off)
+{
+	u32 fb_start = dev_priv->fb_location;
+	u32 fb_end = fb_start + dev_priv->fb_size - 1;
+	u32 gart_start = dev_priv->gart_vm_start;
+	u32 gart_end = gart_start + dev_priv->gart_size - 1;
+
+	return ((off >= fb_start && off <= fb_end) ||
+		(off >= gart_start && off <= gart_end));
+}
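+
+/* Usage sketch (illustrative only): a command-stream checker would validate a
+ * user-supplied address before emitting it, along the lines of
+ *
+ *	if (!radeon_check_offset(dev_priv, (u64)offset))
+ *		return -EINVAL;
+ *
+ * where "offset" stands for a hypothetical address pulled from the command
+ * buffer; only addresses inside the framebuffer or GART windows pass.
+ */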
+
+				/* radeon_cp.c */
+extern int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv);
+
+extern void radeon_freelist_reset(struct drm_device * dev);
+extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
+
+extern int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n);
+
+extern int radeon_do_cp_idle(drm_radeon_private_t * dev_priv);
+
+extern int radeon_driver_preinit(struct drm_device *dev, unsigned long flags);
+extern int radeon_presetup(struct drm_device *dev);
+extern int radeon_driver_postcleanup(struct drm_device *dev);
+
+extern int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern void radeon_mem_takedown(struct mem_block **heap);
+extern void radeon_mem_release(struct drm_file *file_priv,
+			       struct mem_block *heap);
+
+				/* radeon_irq.c */
+extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv);
+
+extern void radeon_do_release(struct drm_device * dev);
+extern int radeon_driver_vblank_wait(struct drm_device * dev,
+				     unsigned int *sequence);
+extern int radeon_driver_vblank_wait2(struct drm_device * dev,
+				      unsigned int *sequence);
+extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
+extern void radeon_driver_irq_preinstall(struct drm_device * dev);
+extern void radeon_driver_irq_postinstall(struct drm_device * dev);
+extern void radeon_driver_irq_uninstall(struct drm_device * dev);
+extern void radeon_enable_interrupt(struct drm_device *dev);
+extern int radeon_vblank_crtc_get(struct drm_device *dev);
+extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value);
+
+extern int radeon_driver_load(struct drm_device *dev, unsigned long flags);
+extern int radeon_driver_unload(struct drm_device *dev);
+extern int radeon_driver_firstopen(struct drm_device *dev);
+extern void radeon_driver_preclose(struct drm_device * dev, struct drm_file *file_priv);
+extern void radeon_driver_postclose(struct drm_device * dev, struct drm_file * filp);
+extern void radeon_driver_lastclose(struct drm_device * dev);
+extern int radeon_driver_open(struct drm_device * dev, struct drm_file * filp_priv);
+extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
+				unsigned long arg);
+
+/* r300_cmdbuf.c */
+extern void r300_init_reg_flags(struct drm_device *dev);
+
+extern int r300_do_cp_cmdbuf(struct drm_device * dev,
+			     struct drm_file *file_priv,
+			     drm_radeon_kcmd_buffer_t * cmdbuf);
+
+/* Flags for stats.boxes
+ */
+#define RADEON_BOX_DMA_IDLE      0x1
+#define RADEON_BOX_RING_FULL     0x2
+#define RADEON_BOX_FLIP          0x4
+#define RADEON_BOX_WAIT_IDLE     0x8
+#define RADEON_BOX_TEXTURE_LOAD  0x10
+
+/* Register definitions, register access macros and drmAddMap constants
+ * for Radeon kernel driver.
+ */
+
+#define RADEON_AGP_COMMAND		0x0f60
+#define RADEON_AGP_COMMAND_PCI_CONFIG   0x0060	/* offset in PCI config */
+#	define RADEON_AGP_ENABLE	(1<<8)
+#define RADEON_AUX_SCISSOR_CNTL		0x26f0
+#	define RADEON_EXCLUSIVE_SCISSOR_0	(1 << 24)
+#	define RADEON_EXCLUSIVE_SCISSOR_1	(1 << 25)
+#	define RADEON_EXCLUSIVE_SCISSOR_2	(1 << 26)
+#	define RADEON_SCISSOR_0_ENABLE		(1 << 28)
+#	define RADEON_SCISSOR_1_ENABLE		(1 << 29)
+#	define RADEON_SCISSOR_2_ENABLE		(1 << 30)
+
+#define RADEON_BUS_CNTL			0x0030
+#	define RADEON_BUS_MASTER_DIS		(1 << 6)
+
+#define RADEON_CLOCK_CNTL_DATA		0x000c
+#	define RADEON_PLL_WR_EN			(1 << 7)
+#define RADEON_CLOCK_CNTL_INDEX		0x0008
+#define RADEON_CONFIG_APER_SIZE		0x0108
+#define RADEON_CONFIG_MEMSIZE		0x00f8
+#define RADEON_CRTC_OFFSET		0x0224
+#define RADEON_CRTC_OFFSET_CNTL		0x0228
+#	define RADEON_CRTC_TILE_EN		(1 << 15)
+#	define RADEON_CRTC_OFFSET_FLIP_CNTL	(1 << 16)
+#define RADEON_CRTC2_OFFSET		0x0324
+#define RADEON_CRTC2_OFFSET_CNTL	0x0328
+
+#define RADEON_PCIE_INDEX               0x0030
+#define RADEON_PCIE_DATA                0x0034
+#define RADEON_PCIE_TX_GART_CNTL	0x10
+#	define RADEON_PCIE_TX_GART_EN		(1 << 0)
+#	define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0 << 1)
+#	define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO  (1 << 1)
+#	define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD   (3 << 1)
+#	define RADEON_PCIE_TX_GART_MODE_32_128_CACHE	(0 << 3)
+#	define RADEON_PCIE_TX_GART_MODE_8_4_128_CACHE	(1 << 3)
+#	define RADEON_PCIE_TX_GART_CHK_RW_VALID_EN      (1 << 5)
+#	define RADEON_PCIE_TX_GART_INVALIDATE_TLB	(1 << 8)
+#define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11
+#define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12
+#define RADEON_PCIE_TX_GART_BASE	0x13
+#define RADEON_PCIE_TX_GART_START_LO	0x14
+#define RADEON_PCIE_TX_GART_START_HI	0x15
+#define RADEON_PCIE_TX_GART_END_LO	0x16
+#define RADEON_PCIE_TX_GART_END_HI	0x17
+
+#define RS480_NB_MC_INDEX               0x168
+#	define RS480_NB_MC_IND_WR_EN	(1 << 8)
+#define RS480_NB_MC_DATA                0x16c
+
+#define RS690_MC_INDEX                  0x78
+#   define RS690_MC_INDEX_MASK          0x1ff
+#   define RS690_MC_INDEX_WR_EN         (1 << 9)
+#   define RS690_MC_INDEX_WR_ACK        0x7f
+#define RS690_MC_DATA                   0x7c
+
+/* MC indirect registers */
+#define RS480_MC_MISC_CNTL              0x18
+#	define RS480_DISABLE_GTW	(1 << 1)
+/* switch between MCIND GART and MM GART registers. 0 = mmgart, 1 = mcind gart */
+#	define RS480_GART_INDEX_REG_EN	(1 << 12)
+#	define RS690_BLOCK_GFX_D3_EN	(1 << 14)
+#define RS480_K8_FB_LOCATION            0x1e
+#define RS480_GART_FEATURE_ID           0x2b
+#	define RS480_HANG_EN	        (1 << 11)
+#	define RS480_TLB_ENABLE	        (1 << 18)
+#	define RS480_P2P_ENABLE	        (1 << 19)
+#	define RS480_GTW_LAC_EN	        (1 << 25)
+#	define RS480_2LEVEL_GART	(0 << 30)
+#	define RS480_1LEVEL_GART	(1 << 30)
+#	define RS480_PDC_EN	        (1 << 31)
+#define RS480_GART_BASE                 0x2c
+#define RS480_GART_CACHE_CNTRL          0x2e
+#	define RS480_GART_CACHE_INVALIDATE (1 << 0) /* wait for it to clear */
+#define RS480_AGP_ADDRESS_SPACE_SIZE    0x38
+#	define RS480_GART_EN	        (1 << 0)
+#	define RS480_VA_SIZE_32MB	(0 << 1)
+#	define RS480_VA_SIZE_64MB	(1 << 1)
+#	define RS480_VA_SIZE_128MB	(2 << 1)
+#	define RS480_VA_SIZE_256MB	(3 << 1)
+#	define RS480_VA_SIZE_512MB	(4 << 1)
+#	define RS480_VA_SIZE_1GB	(5 << 1)
+#	define RS480_VA_SIZE_2GB	(6 << 1)
+#define RS480_AGP_MODE_CNTL             0x39
+#	define RS480_POST_GART_Q_SIZE	(1 << 18)
+#	define RS480_NONGART_SNOOP	(1 << 19)
+#	define RS480_AGP_RD_BUF_SIZE	(1 << 20)
+#	define RS480_REQ_TYPE_SNOOP_SHIFT 22
+#	define RS480_REQ_TYPE_SNOOP_MASK  0x3
+#	define RS480_REQ_TYPE_SNOOP_DIS	(1 << 24)
+#define RS480_MC_MISC_UMA_CNTL          0x5f
+#define RS480_MC_MCLK_CNTL              0x7a
+#define RS480_MC_UMA_DUALCH_CNTL        0x86
+
+#define RS690_MC_FB_LOCATION            0x100
+#define RS690_MC_AGP_LOCATION           0x101
+#define RS690_MC_AGP_BASE               0x102
+#define RS690_MC_AGP_BASE_2             0x103
+
+#define R520_MC_IND_INDEX 0x70
+#define R520_MC_IND_WR_EN (1 << 24)
+#define R520_MC_IND_DATA  0x74
+
+#define RV515_MC_FB_LOCATION 0x01
+#define RV515_MC_AGP_LOCATION 0x02
+#define RV515_MC_AGP_BASE     0x03
+#define RV515_MC_AGP_BASE_2   0x04
+
+#define R520_MC_FB_LOCATION 0x04
+#define R520_MC_AGP_LOCATION 0x05
+#define R520_MC_AGP_BASE     0x06
+#define R520_MC_AGP_BASE_2   0x07
+
+#define RADEON_MPP_TB_CONFIG		0x01c0
+#define RADEON_MEM_CNTL			0x0140
+#define RADEON_MEM_SDRAM_MODE_REG	0x0158
+#define RADEON_AGP_BASE_2		0x015c /* r200+ only */
+#define RS480_AGP_BASE_2		0x0164
+#define RADEON_AGP_BASE			0x0170
+
+/* pipe config regs */
+#define R400_GB_PIPE_SELECT             0x402c
+#define R500_DYN_SCLK_PWMEM_PIPE        0x000d /* PLL */
+#define R500_SU_REG_DEST                0x42c8
+#define R300_GB_TILE_CONFIG             0x4018
+#       define R300_ENABLE_TILING       (1 << 0)
+#       define R300_PIPE_COUNT_RV350    (0 << 1)
+#       define R300_PIPE_COUNT_R300     (3 << 1)
+#       define R300_PIPE_COUNT_R420_3P  (6 << 1)
+#       define R300_PIPE_COUNT_R420     (7 << 1)
+#       define R300_TILE_SIZE_8         (0 << 4)
+#       define R300_TILE_SIZE_16        (1 << 4)
+#       define R300_TILE_SIZE_32        (2 << 4)
+#       define R300_SUBPIXEL_1_12       (0 << 16)
+#       define R300_SUBPIXEL_1_16       (1 << 16)
+#define R300_DST_PIPE_CONFIG            0x170c
+#       define R300_PIPE_AUTO_CONFIG    (1 << 31)
+#define R300_RB2D_DSTCACHE_MODE         0x3428
+#       define R300_DC_AUTOFLUSH_ENABLE (1 << 8)
+#       define R300_DC_DC_DISABLE_IGNORE_PE (1 << 17)
+
+#define RADEON_RB3D_COLOROFFSET		0x1c40
+#define RADEON_RB3D_COLORPITCH		0x1c48
+
+#define	RADEON_SRC_X_Y			0x1590
+
+#define RADEON_DP_GUI_MASTER_CNTL	0x146c
+#	define RADEON_GMC_SRC_PITCH_OFFSET_CNTL	(1 << 0)
+#	define RADEON_GMC_DST_PITCH_OFFSET_CNTL	(1 << 1)
+#	define RADEON_GMC_BRUSH_SOLID_COLOR	(13 << 4)
+#	define RADEON_GMC_BRUSH_NONE		(15 << 4)
+#	define RADEON_GMC_DST_16BPP		(4 << 8)
+#	define RADEON_GMC_DST_24BPP		(5 << 8)
+#	define RADEON_GMC_DST_32BPP		(6 << 8)
+#	define RADEON_GMC_DST_DATATYPE_SHIFT	8
+#	define RADEON_GMC_SRC_DATATYPE_COLOR	(3 << 12)
+#	define RADEON_DP_SRC_SOURCE_MEMORY	(2 << 24)
+#	define RADEON_DP_SRC_SOURCE_HOST_DATA	(3 << 24)
+#	define RADEON_GMC_CLR_CMP_CNTL_DIS	(1 << 28)
+#	define RADEON_GMC_WR_MSK_DIS		(1 << 30)
+#	define RADEON_ROP3_S			0x00cc0000
+#	define RADEON_ROP3_P			0x00f00000
+#define RADEON_DP_WRITE_MASK		0x16cc
+#define RADEON_SRC_PITCH_OFFSET		0x1428
+#define RADEON_DST_PITCH_OFFSET		0x142c
+#define RADEON_DST_PITCH_OFFSET_C	0x1c80
+#	define RADEON_DST_TILE_LINEAR		(0 << 30)
+#	define RADEON_DST_TILE_MACRO		(1 << 30)
+#	define RADEON_DST_TILE_MICRO		(2 << 30)
+#	define RADEON_DST_TILE_BOTH		(3 << 30)
+
+#define RADEON_SCRATCH_REG0		0x15e0
+#define RADEON_SCRATCH_REG1		0x15e4
+#define RADEON_SCRATCH_REG2		0x15e8
+#define RADEON_SCRATCH_REG3		0x15ec
+#define RADEON_SCRATCH_REG4		0x15f0
+#define RADEON_SCRATCH_REG5		0x15f4
+#define RADEON_SCRATCH_UMSK		0x0770
+#define RADEON_SCRATCH_ADDR		0x0774
+
+#define RADEON_SCRATCHOFF( x )		(RADEON_SCRATCH_REG_OFFSET + 4*(x))
+
+#define GET_SCRATCH( x )	(dev_priv->writeback_works			\
+				? DRM_READ32( dev_priv->ring_rptr, RADEON_SCRATCHOFF(x) ) \
+				: RADEON_READ( RADEON_SCRATCH_REG0 + 4*(x) ) )
+
+#define RADEON_GEN_INT_CNTL		0x0040
+#	define RADEON_CRTC_VBLANK_MASK		(1 << 0)
+#	define RADEON_CRTC2_VBLANK_MASK		(1 << 9)
+#	define RADEON_GUI_IDLE_INT_ENABLE	(1 << 19)
+#	define RADEON_SW_INT_ENABLE		(1 << 25)
+
+#define RADEON_GEN_INT_STATUS		0x0044
+#	define RADEON_CRTC_VBLANK_STAT		(1 << 0)
+#	define RADEON_CRTC_VBLANK_STAT_ACK	(1 << 0)
+#	define RADEON_CRTC2_VBLANK_STAT		(1 << 9)
+#	define RADEON_CRTC2_VBLANK_STAT_ACK	(1 << 9)
+#	define RADEON_GUI_IDLE_INT_TEST_ACK     (1 << 19)
+#	define RADEON_SW_INT_TEST		(1 << 25)
+#	define RADEON_SW_INT_TEST_ACK		(1 << 25)
+#	define RADEON_SW_INT_FIRE		(1 << 26)
+
+#define RADEON_HOST_PATH_CNTL		0x0130
+#	define RADEON_HDP_SOFT_RESET		(1 << 26)
+#	define RADEON_HDP_WC_TIMEOUT_MASK	(7 << 28)
+#	define RADEON_HDP_WC_TIMEOUT_28BCLK	(7 << 28)
+
+#define RADEON_ISYNC_CNTL		0x1724
+#	define RADEON_ISYNC_ANY2D_IDLE3D	(1 << 0)
+#	define RADEON_ISYNC_ANY3D_IDLE2D	(1 << 1)
+#	define RADEON_ISYNC_TRIG2D_IDLE3D	(1 << 2)
+#	define RADEON_ISYNC_TRIG3D_IDLE2D	(1 << 3)
+#	define RADEON_ISYNC_WAIT_IDLEGUI	(1 << 4)
+#	define RADEON_ISYNC_CPSCRATCH_IDLEGUI	(1 << 5)
+
+#define RADEON_RBBM_GUICNTL		0x172c
+#	define RADEON_HOST_DATA_SWAP_NONE	(0 << 0)
+#	define RADEON_HOST_DATA_SWAP_16BIT	(1 << 0)
+#	define RADEON_HOST_DATA_SWAP_32BIT	(2 << 0)
+#	define RADEON_HOST_DATA_SWAP_HDW	(3 << 0)
+
+#define RADEON_MC_AGP_LOCATION		0x014c
+#define RADEON_MC_FB_LOCATION		0x0148
+#define RADEON_MCLK_CNTL		0x0012
+#	define RADEON_FORCEON_MCLKA		(1 << 16)
+#	define RADEON_FORCEON_MCLKB		(1 << 17)
+#	define RADEON_FORCEON_YCLKA		(1 << 18)
+#	define RADEON_FORCEON_YCLKB		(1 << 19)
+#	define RADEON_FORCEON_MC		(1 << 20)
+#	define RADEON_FORCEON_AIC		(1 << 21)
+
+#define RADEON_PP_BORDER_COLOR_0	0x1d40
+#define RADEON_PP_BORDER_COLOR_1	0x1d44
+#define RADEON_PP_BORDER_COLOR_2	0x1d48
+#define RADEON_PP_CNTL			0x1c38
+#	define RADEON_SCISSOR_ENABLE		(1 <<  1)
+#define RADEON_PP_LUM_MATRIX		0x1d00
+#define RADEON_PP_MISC			0x1c14
+#define RADEON_PP_ROT_MATRIX_0		0x1d58
+#define RADEON_PP_TXFILTER_0		0x1c54
+#define RADEON_PP_TXOFFSET_0		0x1c5c
+#define RADEON_PP_TXFILTER_1		0x1c6c
+#define RADEON_PP_TXFILTER_2		0x1c84
+
+#define R300_RB2D_DSTCACHE_CTLSTAT	0x342c /* use R300_DSTCACHE_CTLSTAT */
+#define R300_DSTCACHE_CTLSTAT		0x1714
+#	define R300_RB2D_DC_FLUSH		(3 << 0)
+#	define R300_RB2D_DC_FREE		(3 << 2)
+#	define R300_RB2D_DC_FLUSH_ALL		0xf
+#	define R300_RB2D_DC_BUSY		(1 << 31)
+#define RADEON_RB3D_CNTL		0x1c3c
+#	define RADEON_ALPHA_BLEND_ENABLE	(1 << 0)
+#	define RADEON_PLANE_MASK_ENABLE		(1 << 1)
+#	define RADEON_DITHER_ENABLE		(1 << 2)
+#	define RADEON_ROUND_ENABLE		(1 << 3)
+#	define RADEON_SCALE_DITHER_ENABLE	(1 << 4)
+#	define RADEON_DITHER_INIT		(1 << 5)
+#	define RADEON_ROP_ENABLE		(1 << 6)
+#	define RADEON_STENCIL_ENABLE		(1 << 7)
+#	define RADEON_Z_ENABLE			(1 << 8)
+#	define RADEON_ZBLOCK16			(1 << 15)
+#define RADEON_RB3D_DEPTHOFFSET		0x1c24
+#define RADEON_RB3D_DEPTHCLEARVALUE	0x3230
+#define RADEON_RB3D_DEPTHPITCH		0x1c28
+#define RADEON_RB3D_PLANEMASK		0x1d84
+#define RADEON_RB3D_STENCILREFMASK	0x1d7c
+#define RADEON_RB3D_ZCACHE_MODE		0x3250
+#define RADEON_RB3D_ZCACHE_CTLSTAT	0x3254
+#	define RADEON_RB3D_ZC_FLUSH		(1 << 0)
+#	define RADEON_RB3D_ZC_FREE		(1 << 2)
+#	define RADEON_RB3D_ZC_FLUSH_ALL		0x5
+#	define RADEON_RB3D_ZC_BUSY		(1 << 31)
+#define R300_ZB_ZCACHE_CTLSTAT                  0x4f18
+#	define R300_ZC_FLUSH		        (1 << 0)
+#	define R300_ZC_FREE		        (1 << 1)
+#	define R300_ZC_FLUSH_ALL		0x3
+#	define R300_ZC_BUSY		        (1 << 31)
+#define RADEON_RB3D_DSTCACHE_CTLSTAT	0x325c
+#	define RADEON_RB3D_DC_FLUSH		(3 << 0)
+#	define RADEON_RB3D_DC_FREE		(3 << 2)
+#	define RADEON_RB3D_DC_FLUSH_ALL		0xf
+#	define RADEON_RB3D_DC_BUSY		(1 << 31)
+#define R300_RB3D_DSTCACHE_CTLSTAT              0x4e4c
+#	define R300_RB3D_DC_FINISH		(1 << 4)
+#define RADEON_RB3D_ZSTENCILCNTL	0x1c2c
+#	define RADEON_Z_TEST_MASK		(7 << 4)
+#	define RADEON_Z_TEST_ALWAYS		(7 << 4)
+#	define RADEON_Z_HIERARCHY_ENABLE	(1 << 8)
+#	define RADEON_STENCIL_TEST_ALWAYS	(7 << 12)
+#	define RADEON_STENCIL_S_FAIL_REPLACE	(2 << 16)
+#	define RADEON_STENCIL_ZPASS_REPLACE	(2 << 20)
+#	define RADEON_STENCIL_ZFAIL_REPLACE	(2 << 24)
+#	define RADEON_Z_COMPRESSION_ENABLE	(1 << 28)
+#	define RADEON_FORCE_Z_DIRTY		(1 << 29)
+#	define RADEON_Z_WRITE_ENABLE		(1 << 30)
+#	define RADEON_Z_DECOMPRESSION_ENABLE	(1 << 31)
+#define RADEON_RBBM_SOFT_RESET		0x00f0
+#	define RADEON_SOFT_RESET_CP		(1 <<  0)
+#	define RADEON_SOFT_RESET_HI		(1 <<  1)
+#	define RADEON_SOFT_RESET_SE		(1 <<  2)
+#	define RADEON_SOFT_RESET_RE		(1 <<  3)
+#	define RADEON_SOFT_RESET_PP		(1 <<  4)
+#	define RADEON_SOFT_RESET_E2		(1 <<  5)
+#	define RADEON_SOFT_RESET_RB		(1 <<  6)
+#	define RADEON_SOFT_RESET_HDP		(1 <<  7)
+/*
+ *   6:0  Available slots in the FIFO
+ *   8    Host Interface active
+ *   9    CP request active
+ *   10   FIFO request active
+ *   11   Host Interface retry active
+ *   12   CP retry active
+ *   13   FIFO retry active
+ *   14   FIFO pipeline busy
+ *   15   Event engine busy
+ *   16   CP command stream busy
+ *   17   2D engine busy
+ *   18   2D portion of render backend busy
+ *   20   3D setup engine busy
+ *   26   GA engine busy
+ *   27   CBA 2D engine busy
+ *   31   2D engine busy or 3D engine busy or FIFO not empty or CP busy or
+ *           command stream queue not empty or Ring Buffer not empty
+ */
+#define RADEON_RBBM_STATUS		0x0e40
+/* Same as the previous RADEON_RBBM_STATUS; this is a mirror of that register.  */
+/* #define RADEON_RBBM_STATUS		0x1740 */
+/* bits 6:0 are dword slots available in the cmd fifo */
+#	define RADEON_RBBM_FIFOCNT_MASK		0x007f
+#	define RADEON_HIRQ_ON_RBB	(1 <<  8)
+#	define RADEON_CPRQ_ON_RBB	(1 <<  9)
+#	define RADEON_CFRQ_ON_RBB	(1 << 10)
+#	define RADEON_HIRQ_IN_RTBUF	(1 << 11)
+#	define RADEON_CPRQ_IN_RTBUF	(1 << 12)
+#	define RADEON_CFRQ_IN_RTBUF	(1 << 13)
+#	define RADEON_PIPE_BUSY		(1 << 14)
+#	define RADEON_ENG_EV_BUSY	(1 << 15)
+#	define RADEON_CP_CMDSTRM_BUSY	(1 << 16)
+#	define RADEON_E2_BUSY		(1 << 17)
+#	define RADEON_RB2D_BUSY		(1 << 18)
+#	define RADEON_RB3D_BUSY		(1 << 19) /* not used on r300 */
+#	define RADEON_VAP_BUSY		(1 << 20)
+#	define RADEON_RE_BUSY		(1 << 21) /* not used on r300 */
+#	define RADEON_TAM_BUSY		(1 << 22) /* not used on r300 */
+#	define RADEON_TDM_BUSY		(1 << 23) /* not used on r300 */
+#	define RADEON_PB_BUSY		(1 << 24) /* not used on r300 */
+#	define RADEON_TIM_BUSY		(1 << 25) /* not used on r300 */
+#	define RADEON_GA_BUSY		(1 << 26)
+#	define RADEON_CBA2D_BUSY	(1 << 27)
+#	define RADEON_RBBM_ACTIVE	(1 << 31)
+#define RADEON_RE_LINE_PATTERN		0x1cd0
+#define RADEON_RE_MISC			0x26c4
+#define RADEON_RE_TOP_LEFT		0x26c0
+#define RADEON_RE_WIDTH_HEIGHT		0x1c44
+#define RADEON_RE_STIPPLE_ADDR		0x1cc8
+#define RADEON_RE_STIPPLE_DATA		0x1ccc
+
+#define RADEON_SCISSOR_TL_0		0x1cd8
+#define RADEON_SCISSOR_BR_0		0x1cdc
+#define RADEON_SCISSOR_TL_1		0x1ce0
+#define RADEON_SCISSOR_BR_1		0x1ce4
+#define RADEON_SCISSOR_TL_2		0x1ce8
+#define RADEON_SCISSOR_BR_2		0x1cec
+#define RADEON_SE_COORD_FMT		0x1c50
+#define RADEON_SE_CNTL			0x1c4c
+#	define RADEON_FFACE_CULL_CW		(0 << 0)
+#	define RADEON_BFACE_SOLID		(3 << 1)
+#	define RADEON_FFACE_SOLID		(3 << 3)
+#	define RADEON_FLAT_SHADE_VTX_LAST	(3 << 6)
+#	define RADEON_DIFFUSE_SHADE_FLAT	(1 << 8)
+#	define RADEON_DIFFUSE_SHADE_GOURAUD	(2 << 8)
+#	define RADEON_ALPHA_SHADE_FLAT		(1 << 10)
+#	define RADEON_ALPHA_SHADE_GOURAUD	(2 << 10)
+#	define RADEON_SPECULAR_SHADE_FLAT	(1 << 12)
+#	define RADEON_SPECULAR_SHADE_GOURAUD	(2 << 12)
+#	define RADEON_FOG_SHADE_FLAT		(1 << 14)
+#	define RADEON_FOG_SHADE_GOURAUD		(2 << 14)
+#	define RADEON_VPORT_XY_XFORM_ENABLE	(1 << 24)
+#	define RADEON_VPORT_Z_XFORM_ENABLE	(1 << 25)
+#	define RADEON_VTX_PIX_CENTER_OGL	(1 << 27)
+#	define RADEON_ROUND_MODE_TRUNC		(0 << 28)
+#	define RADEON_ROUND_PREC_8TH_PIX	(1 << 30)
+#define RADEON_SE_CNTL_STATUS		0x2140
+#define RADEON_SE_LINE_WIDTH		0x1db8
+#define RADEON_SE_VPORT_XSCALE		0x1d98
+#define RADEON_SE_ZBIAS_FACTOR		0x1db0
+#define RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED 0x2210
+#define RADEON_SE_TCL_OUTPUT_VTX_FMT         0x2254
+#define RADEON_SE_TCL_VECTOR_INDX_REG        0x2200
+#       define RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT  16
+#       define RADEON_VEC_INDX_DWORD_COUNT_SHIFT     28
+#define RADEON_SE_TCL_VECTOR_DATA_REG       0x2204
+#define RADEON_SE_TCL_SCALAR_INDX_REG       0x2208
+#       define RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT  16
+#define RADEON_SE_TCL_SCALAR_DATA_REG       0x220C
+#define RADEON_SURFACE_ACCESS_FLAGS	0x0bf8
+#define RADEON_SURFACE_ACCESS_CLR	0x0bfc
+#define RADEON_SURFACE_CNTL		0x0b00
+#	define RADEON_SURF_TRANSLATION_DIS	(1 << 8)
+#	define RADEON_NONSURF_AP0_SWP_MASK	(3 << 20)
+#	define RADEON_NONSURF_AP0_SWP_LITTLE	(0 << 20)
+#	define RADEON_NONSURF_AP0_SWP_BIG16	(1 << 20)
+#	define RADEON_NONSURF_AP0_SWP_BIG32	(2 << 20)
+#	define RADEON_NONSURF_AP1_SWP_MASK	(3 << 22)
+#	define RADEON_NONSURF_AP1_SWP_LITTLE	(0 << 22)
+#	define RADEON_NONSURF_AP1_SWP_BIG16	(1 << 22)
+#	define RADEON_NONSURF_AP1_SWP_BIG32	(2 << 22)
+#define RADEON_SURFACE0_INFO		0x0b0c
+#	define RADEON_SURF_PITCHSEL_MASK	(0x1ff << 0)
+#	define RADEON_SURF_TILE_MODE_MASK	(3 << 16)
+#	define RADEON_SURF_TILE_MODE_MACRO	(0 << 16)
+#	define RADEON_SURF_TILE_MODE_MICRO	(1 << 16)
+#	define RADEON_SURF_TILE_MODE_32BIT_Z	(2 << 16)
+#	define RADEON_SURF_TILE_MODE_16BIT_Z	(3 << 16)
+#define RADEON_SURFACE0_LOWER_BOUND	0x0b04
+#define RADEON_SURFACE0_UPPER_BOUND	0x0b08
+#	define RADEON_SURF_ADDRESS_FIXED_MASK	(0x3ff << 0)
+#define RADEON_SURFACE1_INFO		0x0b1c
+#define RADEON_SURFACE1_LOWER_BOUND	0x0b14
+#define RADEON_SURFACE1_UPPER_BOUND	0x0b18
+#define RADEON_SURFACE2_INFO		0x0b2c
+#define RADEON_SURFACE2_LOWER_BOUND	0x0b24
+#define RADEON_SURFACE2_UPPER_BOUND	0x0b28
+#define RADEON_SURFACE3_INFO		0x0b3c
+#define RADEON_SURFACE3_LOWER_BOUND	0x0b34
+#define RADEON_SURFACE3_UPPER_BOUND	0x0b38
+#define RADEON_SURFACE4_INFO		0x0b4c
+#define RADEON_SURFACE4_LOWER_BOUND	0x0b44
+#define RADEON_SURFACE4_UPPER_BOUND	0x0b48
+#define RADEON_SURFACE5_INFO		0x0b5c
+#define RADEON_SURFACE5_LOWER_BOUND	0x0b54
+#define RADEON_SURFACE5_UPPER_BOUND	0x0b58
+#define RADEON_SURFACE6_INFO		0x0b6c
+#define RADEON_SURFACE6_LOWER_BOUND	0x0b64
+#define RADEON_SURFACE6_UPPER_BOUND	0x0b68
+#define RADEON_SURFACE7_INFO		0x0b7c
+#define RADEON_SURFACE7_LOWER_BOUND	0x0b74
+#define RADEON_SURFACE7_UPPER_BOUND	0x0b78
+#define RADEON_SW_SEMAPHORE		0x013c
+
+#define RADEON_WAIT_UNTIL		0x1720
+#	define RADEON_WAIT_CRTC_PFLIP		(1 << 0)
+#	define RADEON_WAIT_2D_IDLE		(1 << 14)
+#	define RADEON_WAIT_3D_IDLE		(1 << 15)
+#	define RADEON_WAIT_2D_IDLECLEAN		(1 << 16)
+#	define RADEON_WAIT_3D_IDLECLEAN		(1 << 17)
+#	define RADEON_WAIT_HOST_IDLECLEAN	(1 << 18)
+
+#define RADEON_RB3D_ZMASKOFFSET		0x3234
+#define RADEON_RB3D_ZSTENCILCNTL	0x1c2c
+#	define RADEON_DEPTH_FORMAT_16BIT_INT_Z	(0 << 0)
+#	define RADEON_DEPTH_FORMAT_24BIT_INT_Z	(2 << 0)
+
+/* CP registers */
+#define RADEON_CP_ME_RAM_ADDR		0x07d4
+#define RADEON_CP_ME_RAM_RADDR		0x07d8
+#define RADEON_CP_ME_RAM_DATAH		0x07dc
+#define RADEON_CP_ME_RAM_DATAL		0x07e0
+
+#define RADEON_CP_RB_BASE		0x0700
+#define RADEON_CP_RB_CNTL		0x0704
+#	define RADEON_BUF_SWAP_32BIT		(2 << 16)
+#	define RADEON_RB_NO_UPDATE		(1 << 27)
+#define RADEON_CP_RB_RPTR_ADDR		0x070c
+#define RADEON_CP_RB_RPTR		0x0710
+#define RADEON_CP_RB_WPTR		0x0714
+
+#define RADEON_CP_RB_WPTR_DELAY		0x0718
+#	define RADEON_PRE_WRITE_TIMER_SHIFT	0
+#	define RADEON_PRE_WRITE_LIMIT_SHIFT	23
+
+#define RADEON_CP_IB_BASE		0x0738
+
+#define RADEON_CP_CSQ_CNTL		0x0740
+#	define RADEON_CSQ_CNT_PRIMARY_MASK	(0xff << 0)
+#	define RADEON_CSQ_PRIDIS_INDDIS		(0 << 28)
+#	define RADEON_CSQ_PRIPIO_INDDIS		(1 << 28)
+#	define RADEON_CSQ_PRIBM_INDDIS		(2 << 28)
+#	define RADEON_CSQ_PRIPIO_INDBM		(3 << 28)
+#	define RADEON_CSQ_PRIBM_INDBM		(4 << 28)
+#	define RADEON_CSQ_PRIPIO_INDPIO		(15 << 28)
+
+#define RADEON_AIC_CNTL			0x01d0
+#	define RADEON_PCIGART_TRANSLATE_EN	(1 << 0)
+#define RADEON_AIC_STAT			0x01d4
+#define RADEON_AIC_PT_BASE		0x01d8
+#define RADEON_AIC_LO_ADDR		0x01dc
+#define RADEON_AIC_HI_ADDR		0x01e0
+#define RADEON_AIC_TLB_ADDR		0x01e4
+#define RADEON_AIC_TLB_DATA		0x01e8
+
+/* CP command packets */
+#define RADEON_CP_PACKET0		0x00000000
+#	define RADEON_ONE_REG_WR		(1 << 15)
+#define RADEON_CP_PACKET1		0x40000000
+#define RADEON_CP_PACKET2		0x80000000
+#define RADEON_CP_PACKET3		0xC0000000
+#       define RADEON_CP_NOP                    0x00001000
+#       define RADEON_CP_NEXT_CHAR              0x00001900
+#       define RADEON_CP_PLY_NEXTSCAN           0x00001D00
+#       define RADEON_CP_SET_SCISSORS           0x00001E00
+	     /* GEN_INDX_PRIM is unsupported starting with R300 */
+#	define RADEON_3D_RNDR_GEN_INDX_PRIM	0x00002300
+#	define RADEON_WAIT_FOR_IDLE		0x00002600
+#	define RADEON_3D_DRAW_VBUF		0x00002800
+#	define RADEON_3D_DRAW_IMMD		0x00002900
+#	define RADEON_3D_DRAW_INDX		0x00002A00
+#       define RADEON_CP_LOAD_PALETTE           0x00002C00
+#	define RADEON_3D_LOAD_VBPNTR		0x00002F00
+#	define RADEON_MPEG_IDCT_MACROBLOCK	0x00003000
+#	define RADEON_MPEG_IDCT_MACROBLOCK_REV	0x00003100
+#	define RADEON_3D_CLEAR_ZMASK		0x00003200
+#	define RADEON_CP_INDX_BUFFER		0x00003300
+#       define RADEON_CP_3D_DRAW_VBUF_2         0x00003400
+#       define RADEON_CP_3D_DRAW_IMMD_2         0x00003500
+#       define RADEON_CP_3D_DRAW_INDX_2         0x00003600
+#	define RADEON_3D_CLEAR_HIZ		0x00003700
+#       define RADEON_CP_3D_CLEAR_CMASK         0x00003802
+#	define RADEON_CNTL_HOSTDATA_BLT		0x00009400
+#	define RADEON_CNTL_PAINT_MULTI		0x00009A00
+#	define RADEON_CNTL_BITBLT_MULTI		0x00009B00
+#	define RADEON_CNTL_SET_SCISSORS		0xC0001E00
+
+#define RADEON_CP_PACKET_MASK		0xC0000000
+#define RADEON_CP_PACKET_COUNT_MASK	0x3fff0000
+#define RADEON_CP_PACKET0_REG_MASK	0x000007ff
+#define RADEON_CP_PACKET1_REG0_MASK	0x000007ff
+#define RADEON_CP_PACKET1_REG1_MASK	0x003ff800
+
+#define RADEON_VTX_Z_PRESENT			(1 << 31)
+#define RADEON_VTX_PKCOLOR_PRESENT		(1 << 3)
+
+#define RADEON_PRIM_TYPE_NONE			(0 << 0)
+#define RADEON_PRIM_TYPE_POINT			(1 << 0)
+#define RADEON_PRIM_TYPE_LINE			(2 << 0)
+#define RADEON_PRIM_TYPE_LINE_STRIP		(3 << 0)
+#define RADEON_PRIM_TYPE_TRI_LIST		(4 << 0)
+#define RADEON_PRIM_TYPE_TRI_FAN		(5 << 0)
+#define RADEON_PRIM_TYPE_TRI_STRIP		(6 << 0)
+#define RADEON_PRIM_TYPE_TRI_TYPE2		(7 << 0)
+#define RADEON_PRIM_TYPE_RECT_LIST		(8 << 0)
+#define RADEON_PRIM_TYPE_3VRT_POINT_LIST	(9 << 0)
+#define RADEON_PRIM_TYPE_3VRT_LINE_LIST		(10 << 0)
+#define RADEON_PRIM_TYPE_MASK                   0xf
+#define RADEON_PRIM_WALK_IND			(1 << 4)
+#define RADEON_PRIM_WALK_LIST			(2 << 4)
+#define RADEON_PRIM_WALK_RING			(3 << 4)
+#define RADEON_COLOR_ORDER_BGRA			(0 << 6)
+#define RADEON_COLOR_ORDER_RGBA			(1 << 6)
+#define RADEON_MAOS_ENABLE			(1 << 7)
+#define RADEON_VTX_FMT_R128_MODE		(0 << 8)
+#define RADEON_VTX_FMT_RADEON_MODE		(1 << 8)
+#define RADEON_NUM_VERTICES_SHIFT		16
+
+#define RADEON_COLOR_FORMAT_CI8		2
+#define RADEON_COLOR_FORMAT_ARGB1555	3
+#define RADEON_COLOR_FORMAT_RGB565	4
+#define RADEON_COLOR_FORMAT_ARGB8888	6
+#define RADEON_COLOR_FORMAT_RGB332	7
+#define RADEON_COLOR_FORMAT_RGB8	9
+#define RADEON_COLOR_FORMAT_ARGB4444	15
+
+#define RADEON_TXFORMAT_I8		0
+#define RADEON_TXFORMAT_AI88		1
+#define RADEON_TXFORMAT_RGB332		2
+#define RADEON_TXFORMAT_ARGB1555	3
+#define RADEON_TXFORMAT_RGB565		4
+#define RADEON_TXFORMAT_ARGB4444	5
+#define RADEON_TXFORMAT_ARGB8888	6
+#define RADEON_TXFORMAT_RGBA8888	7
+#define RADEON_TXFORMAT_Y8		8
+#define RADEON_TXFORMAT_VYUY422         10
+#define RADEON_TXFORMAT_YVYU422         11
+#define RADEON_TXFORMAT_DXT1            12
+#define RADEON_TXFORMAT_DXT23           14
+#define RADEON_TXFORMAT_DXT45           15
+
+#define R200_PP_TXCBLEND_0                0x2f00
+#define R200_PP_TXCBLEND_1                0x2f10
+#define R200_PP_TXCBLEND_2                0x2f20
+#define R200_PP_TXCBLEND_3                0x2f30
+#define R200_PP_TXCBLEND_4                0x2f40
+#define R200_PP_TXCBLEND_5                0x2f50
+#define R200_PP_TXCBLEND_6                0x2f60
+#define R200_PP_TXCBLEND_7                0x2f70
+#define R200_SE_TCL_LIGHT_MODEL_CTL_0     0x2268
+#define R200_PP_TFACTOR_0                 0x2ee0
+#define R200_SE_VTX_FMT_0                 0x2088
+#define R200_SE_VAP_CNTL                  0x2080
+#define R200_SE_TCL_MATRIX_SEL_0          0x2230
+#define R200_SE_TCL_TEX_PROC_CTL_2        0x22a8
+#define R200_SE_TCL_UCP_VERT_BLEND_CTL    0x22c0
+#define R200_PP_TXFILTER_5                0x2ca0
+#define R200_PP_TXFILTER_4                0x2c80
+#define R200_PP_TXFILTER_3                0x2c60
+#define R200_PP_TXFILTER_2                0x2c40
+#define R200_PP_TXFILTER_1                0x2c20
+#define R200_PP_TXFILTER_0                0x2c00
+#define R200_PP_TXOFFSET_5                0x2d78
+#define R200_PP_TXOFFSET_4                0x2d60
+#define R200_PP_TXOFFSET_3                0x2d48
+#define R200_PP_TXOFFSET_2                0x2d30
+#define R200_PP_TXOFFSET_1                0x2d18
+#define R200_PP_TXOFFSET_0                0x2d00
+
+#define R200_PP_CUBIC_FACES_0             0x2c18
+#define R200_PP_CUBIC_FACES_1             0x2c38
+#define R200_PP_CUBIC_FACES_2             0x2c58
+#define R200_PP_CUBIC_FACES_3             0x2c78
+#define R200_PP_CUBIC_FACES_4             0x2c98
+#define R200_PP_CUBIC_FACES_5             0x2cb8
+#define R200_PP_CUBIC_OFFSET_F1_0         0x2d04
+#define R200_PP_CUBIC_OFFSET_F2_0         0x2d08
+#define R200_PP_CUBIC_OFFSET_F3_0         0x2d0c
+#define R200_PP_CUBIC_OFFSET_F4_0         0x2d10
+#define R200_PP_CUBIC_OFFSET_F5_0         0x2d14
+#define R200_PP_CUBIC_OFFSET_F1_1         0x2d1c
+#define R200_PP_CUBIC_OFFSET_F2_1         0x2d20
+#define R200_PP_CUBIC_OFFSET_F3_1         0x2d24
+#define R200_PP_CUBIC_OFFSET_F4_1         0x2d28
+#define R200_PP_CUBIC_OFFSET_F5_1         0x2d2c
+#define R200_PP_CUBIC_OFFSET_F1_2         0x2d34
+#define R200_PP_CUBIC_OFFSET_F2_2         0x2d38
+#define R200_PP_CUBIC_OFFSET_F3_2         0x2d3c
+#define R200_PP_CUBIC_OFFSET_F4_2         0x2d40
+#define R200_PP_CUBIC_OFFSET_F5_2         0x2d44
+#define R200_PP_CUBIC_OFFSET_F1_3         0x2d4c
+#define R200_PP_CUBIC_OFFSET_F2_3         0x2d50
+#define R200_PP_CUBIC_OFFSET_F3_3         0x2d54
+#define R200_PP_CUBIC_OFFSET_F4_3         0x2d58
+#define R200_PP_CUBIC_OFFSET_F5_3         0x2d5c
+#define R200_PP_CUBIC_OFFSET_F1_4         0x2d64
+#define R200_PP_CUBIC_OFFSET_F2_4         0x2d68
+#define R200_PP_CUBIC_OFFSET_F3_4         0x2d6c
+#define R200_PP_CUBIC_OFFSET_F4_4         0x2d70
+#define R200_PP_CUBIC_OFFSET_F5_4         0x2d74
+#define R200_PP_CUBIC_OFFSET_F1_5         0x2d7c
+#define R200_PP_CUBIC_OFFSET_F2_5         0x2d80
+#define R200_PP_CUBIC_OFFSET_F3_5         0x2d84
+#define R200_PP_CUBIC_OFFSET_F4_5         0x2d88
+#define R200_PP_CUBIC_OFFSET_F5_5         0x2d8c
+
+#define R200_RE_AUX_SCISSOR_CNTL          0x26f0
+#define R200_SE_VTE_CNTL                  0x20b0
+#define R200_SE_TCL_OUTPUT_VTX_COMP_SEL   0x2250
+#define R200_PP_TAM_DEBUG3                0x2d9c
+#define R200_PP_CNTL_X                    0x2cc4
+#define R200_SE_VAP_CNTL_STATUS           0x2140
+#define R200_RE_SCISSOR_TL_0              0x1cd8
+#define R200_RE_SCISSOR_TL_1              0x1ce0
+#define R200_RE_SCISSOR_TL_2              0x1ce8
+#define R200_RB3D_DEPTHXY_OFFSET          0x1d60
+#define R200_RE_AUX_SCISSOR_CNTL          0x26f0
+#define R200_SE_VTX_STATE_CNTL            0x2180
+#define R200_RE_POINTSIZE                 0x2648
+#define R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0 0x2254
+
+#define RADEON_PP_TEX_SIZE_0                0x1d04	/* NPOT */
+#define RADEON_PP_TEX_SIZE_1                0x1d0c
+#define RADEON_PP_TEX_SIZE_2                0x1d14
+
+#define RADEON_PP_CUBIC_FACES_0             0x1d24
+#define RADEON_PP_CUBIC_FACES_1             0x1d28
+#define RADEON_PP_CUBIC_FACES_2             0x1d2c
+#define RADEON_PP_CUBIC_OFFSET_T0_0         0x1dd0	/* bits [31:5] */
+#define RADEON_PP_CUBIC_OFFSET_T1_0         0x1e00
+#define RADEON_PP_CUBIC_OFFSET_T2_0         0x1e14
+
+#define RADEON_SE_TCL_STATE_FLUSH           0x2284
+
+#define SE_VAP_CNTL__TCL_ENA_MASK                          0x00000001
+#define SE_VAP_CNTL__FORCE_W_TO_ONE_MASK                   0x00010000
+#define SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT                 0x00000012
+#define SE_VTE_CNTL__VTX_XY_FMT_MASK                       0x00000100
+#define SE_VTE_CNTL__VTX_Z_FMT_MASK                        0x00000200
+#define SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK                  0x00000001
+#define SE_VTX_FMT_0__VTX_W0_PRESENT_MASK                  0x00000002
+#define SE_VTX_FMT_0__VTX_COLOR_0_FMT__SHIFT               0x0000000b
+#define R200_3D_DRAW_IMMD_2      0xC0003500
+#define R200_SE_VTX_FMT_1                 0x208c
+#define R200_RE_CNTL                      0x1c50
+
+#define R200_RB3D_BLENDCOLOR              0x3218
+
+#define R200_SE_TCL_POINT_SPRITE_CNTL     0x22c4
+
+#define R200_PP_TRI_PERF 0x2cf8
+
+#define R200_PP_AFS_0                     0x2f80
+#define R200_PP_AFS_1                     0x2f00	/* same as txcblend_0 */
+
+#define R200_VAP_PVS_CNTL_1               0x22D0
+
+#define R500_D1CRTC_STATUS 0x609c
+#define R500_D2CRTC_STATUS 0x689c
+#define R500_CRTC_V_BLANK (1<<0)
+
+#define R500_D1CRTC_FRAME_COUNT 0x60a4
+#define R500_D2CRTC_FRAME_COUNT 0x68a4
+
+#define R500_D1MODE_V_COUNTER 0x6530
+#define R500_D2MODE_V_COUNTER 0x6d30
+
+#define R500_D1MODE_VBLANK_STATUS 0x6534
+#define R500_D2MODE_VBLANK_STATUS 0x6d34
+#define R500_VBLANK_OCCURED (1<<0)
+#define R500_VBLANK_ACK     (1<<4)
+#define R500_VBLANK_STAT    (1<<12)
+#define R500_VBLANK_INT     (1<<16)
+
+#define R500_DxMODE_INT_MASK 0x6540
+#define R500_D1MODE_INT_MASK (1<<0)
+#define R500_D2MODE_INT_MASK (1<<8)
+
+#define R500_DISP_INTERRUPT_STATUS 0x7edc
+#define R500_D1_VBLANK_INTERRUPT (1 << 4)
+#define R500_D2_VBLANK_INTERRUPT (1 << 5)
+
+/* Constants */
+#define RADEON_MAX_USEC_TIMEOUT		100000	/* 100 ms */
+
+#define RADEON_LAST_FRAME_REG		RADEON_SCRATCH_REG0
+#define RADEON_LAST_DISPATCH_REG	RADEON_SCRATCH_REG1
+#define RADEON_LAST_CLEAR_REG		RADEON_SCRATCH_REG2
+#define RADEON_LAST_SWI_REG		RADEON_SCRATCH_REG3
+#define RADEON_LAST_DISPATCH		1
+
+#define RADEON_MAX_VB_AGE		0x7fffffff
+#define RADEON_MAX_VB_VERTS		(0xffff)
+
+#define RADEON_RING_HIGH_MARK		128
+
+#define RADEON_PCIGART_TABLE_SIZE      (32*1024)
+
+#define RADEON_READ(reg)	DRM_READ32(  dev_priv->mmio, (reg) )
+#define RADEON_WRITE(reg,val)	DRM_WRITE32( dev_priv->mmio, (reg), (val) )
+#define RADEON_READ8(reg)	DRM_READ8(  dev_priv->mmio, (reg) )
+#define RADEON_WRITE8(reg,val)	DRM_WRITE8( dev_priv->mmio, (reg), (val) )
+
+#define RADEON_WRITE_PLL(addr, val)					\
+do {									\
+	RADEON_WRITE8(RADEON_CLOCK_CNTL_INDEX,				\
+		       ((addr) & 0x1f) | RADEON_PLL_WR_EN );		\
+	RADEON_WRITE(RADEON_CLOCK_CNTL_DATA, (val));			\
+} while (0)
+
+#define RADEON_WRITE_PCIE(addr, val)					\
+do {									\
+	RADEON_WRITE8(RADEON_PCIE_INDEX,				\
+			((addr) & 0xff));				\
+	RADEON_WRITE(RADEON_PCIE_DATA, (val));			\
+} while (0)
+
+#define R500_WRITE_MCIND(addr, val)					\
+do {								\
+	RADEON_WRITE(R520_MC_IND_INDEX, 0xff0000 | ((addr) & 0xff));	\
+	RADEON_WRITE(R520_MC_IND_DATA, (val));			\
+	RADEON_WRITE(R520_MC_IND_INDEX, 0);	\
+} while (0)
+
+#define RS480_WRITE_MCIND(addr, val)				\
+do {									\
+	RADEON_WRITE(RS480_NB_MC_INDEX,				\
+			((addr) & 0xff) | RS480_NB_MC_IND_WR_EN);	\
+	RADEON_WRITE(RS480_NB_MC_DATA, (val));			\
+	RADEON_WRITE(RS480_NB_MC_INDEX, 0xff);			\
+} while (0)
+
+#define RS690_WRITE_MCIND(addr, val)					\
+do {								\
+	RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_EN | ((addr) & RS690_MC_INDEX_MASK));	\
+	RADEON_WRITE(RS690_MC_DATA, val);			\
+	RADEON_WRITE(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK);	\
+} while (0)
+
+#define IGP_WRITE_MCIND(addr, val)				\
+do {									\
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)       \
+		RS690_WRITE_MCIND(addr, val);				\
+	else								\
+		RS480_WRITE_MCIND(addr, val);				\
+} while (0)
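+
+/* All of the PLL/PCIE/MCIND write helpers above follow the same indirect
+ * access pattern: select the target through an index port (with the block's
+ * write-enable bit where required), then write the payload through the
+ * matching data port.  A usage sketch (the value name is assumed):
+ *
+ *	RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl);
+ *
+ * expands to a RADEON_WRITE8 of the masked index with RADEON_PLL_WR_EN set,
+ * followed by a RADEON_WRITE of the 32-bit value.
+ */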
+
+#define CP_PACKET0( reg, n )						\
+	(RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))
+#define CP_PACKET0_TABLE( reg, n )					\
+	(RADEON_CP_PACKET0 | RADEON_ONE_REG_WR | ((n) << 16) | ((reg) >> 2))
+#define CP_PACKET1( reg0, reg1 )					\
+	(RADEON_CP_PACKET1 | (((reg1) >> 2) << 15) | ((reg0) >> 2))
+#define CP_PACKET2()							\
+	(RADEON_CP_PACKET2)
+#define CP_PACKET3( pkt, n )						\
+	(RADEON_CP_PACKET3 | (pkt) | ((n) << 16))
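+
+/* Worked example (for illustration): CP_PACKET0(RADEON_WAIT_UNTIL, 0) builds
+ * a type-0 packet header that writes a single dword to register 0x1720:
+ *
+ *	RADEON_CP_PACKET0 | (0 << 16) | (0x1720 >> 2)  ==  0x000005c8
+ *
+ * The count field encodes "dwords to follow minus one", so n = 0 still
+ * carries one register value; the engine-control macros below emit exactly
+ * this header followed by the wait flags.
+ */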
+
+/* ================================================================
+ * Engine control helper macros
+ */
+
+#define RADEON_WAIT_UNTIL_2D_IDLE() do {				\
+	OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );			\
+	OUT_RING( (RADEON_WAIT_2D_IDLECLEAN |				\
+		   RADEON_WAIT_HOST_IDLECLEAN) );			\
+} while (0)
+
+#define RADEON_WAIT_UNTIL_3D_IDLE() do {				\
+	OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );			\
+	OUT_RING( (RADEON_WAIT_3D_IDLECLEAN |				\
+		   RADEON_WAIT_HOST_IDLECLEAN) );			\
+} while (0)
+
+#define RADEON_WAIT_UNTIL_IDLE() do {					\
+	OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );			\
+	OUT_RING( (RADEON_WAIT_2D_IDLECLEAN |				\
+		   RADEON_WAIT_3D_IDLECLEAN |				\
+		   RADEON_WAIT_HOST_IDLECLEAN) );			\
+} while (0)
+
+#define RADEON_WAIT_UNTIL_PAGE_FLIPPED() do {				\
+	OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );			\
+	OUT_RING( RADEON_WAIT_CRTC_PFLIP );				\
+} while (0)
+
+#define RADEON_FLUSH_CACHE() do {					\
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {	\
+		OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));	\
+		OUT_RING(RADEON_RB3D_DC_FLUSH);				\
+	} else {                                                        \
+		OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));	\
+		OUT_RING(RADEON_RB3D_DC_FLUSH);				\
+	}                                                               \
+} while (0)
+
+#define RADEON_PURGE_CACHE() do {					\
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {	\
+		OUT_RING(CP_PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));	\
+		OUT_RING(RADEON_RB3D_DC_FLUSH_ALL);			\
+	} else {                                                        \
+		OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));	\
+		OUT_RING(RADEON_RB3D_DC_FLUSH_ALL);			\
+	}                                                               \
+} while (0)
+
+#define RADEON_FLUSH_ZCACHE() do {					\
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {	\
+		OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));	\
+		OUT_RING(RADEON_RB3D_ZC_FLUSH);				\
+	} else {                                                        \
+		OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));	\
+		OUT_RING(R300_ZC_FLUSH);				\
+	}                                                               \
+} while (0)
+
+#define RADEON_PURGE_ZCACHE() do {					\
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) <= CHIP_RV280) {	\
+		OUT_RING(CP_PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));	\
+		OUT_RING(RADEON_RB3D_ZC_FLUSH_ALL);			\
+	} else {                                                        \
+		OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));	\
+		OUT_RING(R300_ZC_FLUSH_ALL);				\
+	}                                                               \
+} while (0)
+
+/* ================================================================
+ * Misc helper macros
+ */
+
+/* Perfbox functionality only.
+ */
+#define RING_SPACE_TEST_WITH_RETURN( dev_priv )				\
+do {									\
+	if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE)) {		\
+		u32 head = GET_RING_HEAD( dev_priv );			\
+		if (head == dev_priv->ring.tail)			\
+			dev_priv->stats.boxes |= RADEON_BOX_DMA_IDLE;	\
+	}								\
+} while (0)
+
+#define VB_AGE_TEST_WITH_RETURN( dev_priv )				\
+do {									\
+	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;		\
+	if ( sarea_priv->last_dispatch >= RADEON_MAX_VB_AGE ) {		\
+		int __ret = radeon_do_cp_idle( dev_priv );		\
+		if ( __ret ) return __ret;				\
+		sarea_priv->last_dispatch = 0;				\
+		radeon_freelist_reset( dev );				\
+	}								\
+} while (0)
+
+#define RADEON_DISPATCH_AGE( age ) do {					\
+	OUT_RING( CP_PACKET0( RADEON_LAST_DISPATCH_REG, 0 ) );		\
+	OUT_RING( age );						\
+} while (0)
+
+#define RADEON_FRAME_AGE( age ) do {					\
+	OUT_RING( CP_PACKET0( RADEON_LAST_FRAME_REG, 0 ) );		\
+	OUT_RING( age );						\
+} while (0)
+
+#define RADEON_CLEAR_AGE( age ) do {					\
+	OUT_RING( CP_PACKET0( RADEON_LAST_CLEAR_REG, 0 ) );		\
+	OUT_RING( age );						\
+} while (0)
+
+/* ================================================================
+ * Ring control
+ */
+
+#define RADEON_VERBOSE	0
+
+#define RING_LOCALS	int write, _nr; unsigned int mask; u32 *ring;
+
+#define BEGIN_RING( n ) do {						\
+	if ( RADEON_VERBOSE ) {						\
+		DRM_INFO( "BEGIN_RING( %d )\n", (n));			\
+	}								\
+	if ( dev_priv->ring.space <= (n) * sizeof(u32) ) {		\
+                COMMIT_RING();						\
+		radeon_wait_ring( dev_priv, (n) * sizeof(u32) );	\
+	}								\
+	_nr = n; dev_priv->ring.space -= (n) * sizeof(u32);		\
+	ring = dev_priv->ring.start;					\
+	write = dev_priv->ring.tail;					\
+	mask = dev_priv->ring.tail_mask;				\
+} while (0)
+
+#define ADVANCE_RING() do {						\
+	if ( RADEON_VERBOSE ) {						\
+		DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n",	\
+			  write, dev_priv->ring.tail );			\
+	}								\
+	if (((dev_priv->ring.tail + _nr) & mask) != write) {		\
+		DRM_ERROR(						\
+			"ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n",	\
+			((dev_priv->ring.tail + _nr) & mask),		\
+			write, __LINE__);						\
+	} else								\
+		dev_priv->ring.tail = write;				\
+} while (0)
+
+#define COMMIT_RING() do {						\
+	/* Flush writes to ring */					\
+	DRM_MEMORYBARRIER();						\
+	GET_RING_HEAD( dev_priv );					\
+	RADEON_WRITE( RADEON_CP_RB_WPTR, dev_priv->ring.tail );		\
+	/* read from PCI bus to ensure correct posting */		\
+	RADEON_READ( RADEON_CP_RB_RPTR );				\
+} while (0)
+
+#define OUT_RING( x ) do {						\
+	if ( RADEON_VERBOSE ) {						\
+		DRM_INFO( "   OUT_RING( 0x%08x ) at 0x%x\n",		\
+			   (unsigned int)(x), write );			\
+	}								\
+	ring[write++] = (x);						\
+	write &= mask;							\
+} while (0)
+
+#define OUT_RING_REG( reg, val ) do {					\
+	OUT_RING( CP_PACKET0( reg, 0 ) );				\
+	OUT_RING( val );						\
+} while (0)
+
+#define OUT_RING_TABLE( tab, sz ) do {					\
+	int _size = (sz);					\
+	int *_tab = (int *)(tab);				\
+								\
+	if (write + _size > mask) {				\
+		int _i = (mask+1) - write;			\
+		_size -= _i;					\
+		while (_i > 0 ) {				\
+			*(int *)(ring + write) = *_tab++;	\
+			write++;				\
+			_i--;					\
+		}						\
+		write = 0;					\
+		_tab += _i;					\
+	}							\
+	while (_size > 0) {					\
+		*(ring + write) = *_tab++;			\
+		write++;					\
+		_size--;					\
+	}							\
+	write &= mask;						\
+} while (0)
+
+#endif				/* __RADEON_DRV_H__ */
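
As a quick orientation for the ring macros defined above, here is a hedged sketch of the calling pattern every emit path in this patch follows (compare radeon_emit_irq() in radeon_irq.c below); the function name is illustrative only, and the body is simply RADEON_DISPATCH_AGE() expanded by hand:

/* Sketch only: reserve space, emit one register write, publish the tail. */
static void radeon_example_emit_age(drm_radeon_private_t *dev_priv, u32 age)
{
	RING_LOCALS;			/* declares write, _nr, mask, ring */

	BEGIN_RING(2);			/* wait for 2 free dwords if needed */
	OUT_RING_REG(RADEON_LAST_DISPATCH_REG, age);	/* header + payload */
	ADVANCE_RING();			/* sanity-check, then move ring.tail */
	COMMIT_RING();			/* barrier + write RADEON_CP_RB_WPTR */
}
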
diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
new file mode 100644
index 000000000000..56decda2a71f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
@@ -0,0 +1,424 @@
+/**
+ * \file radeon_ioc32.c
+ *
+ * 32-bit ioctl compatibility routines for the Radeon DRM.
+ *
+ * \author Paul Mackerras <paulus@samba.org>
+ *
+ * Copyright (C) Paul Mackerras 2005
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include <linux/compat.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "radeon_drm.h"
+#include "radeon_drv.h"
+
+typedef struct drm_radeon_init32 {
+	int func;
+	u32 sarea_priv_offset;
+	int is_pci;
+	int cp_mode;
+	int gart_size;
+	int ring_size;
+	int usec_timeout;
+
+	unsigned int fb_bpp;
+	unsigned int front_offset, front_pitch;
+	unsigned int back_offset, back_pitch;
+	unsigned int depth_bpp;
+	unsigned int depth_offset, depth_pitch;
+
+	u32 fb_offset;
+	u32 mmio_offset;
+	u32 ring_offset;
+	u32 ring_rptr_offset;
+	u32 buffers_offset;
+	u32 gart_textures_offset;
+} drm_radeon_init32_t;
+
+static int compat_radeon_cp_init(struct file *file, unsigned int cmd,
+				 unsigned long arg)
+{
+	drm_radeon_init32_t init32;
+	drm_radeon_init_t __user *init;
+
+	if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
+		return -EFAULT;
+
+	init = compat_alloc_user_space(sizeof(*init));
+	if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
+	    || __put_user(init32.func, &init->func)
+	    || __put_user(init32.sarea_priv_offset, &init->sarea_priv_offset)
+	    || __put_user(init32.is_pci, &init->is_pci)
+	    || __put_user(init32.cp_mode, &init->cp_mode)
+	    || __put_user(init32.gart_size, &init->gart_size)
+	    || __put_user(init32.ring_size, &init->ring_size)
+	    || __put_user(init32.usec_timeout, &init->usec_timeout)
+	    || __put_user(init32.fb_bpp, &init->fb_bpp)
+	    || __put_user(init32.front_offset, &init->front_offset)
+	    || __put_user(init32.front_pitch, &init->front_pitch)
+	    || __put_user(init32.back_offset, &init->back_offset)
+	    || __put_user(init32.back_pitch, &init->back_pitch)
+	    || __put_user(init32.depth_bpp, &init->depth_bpp)
+	    || __put_user(init32.depth_offset, &init->depth_offset)
+	    || __put_user(init32.depth_pitch, &init->depth_pitch)
+	    || __put_user(init32.fb_offset, &init->fb_offset)
+	    || __put_user(init32.mmio_offset, &init->mmio_offset)
+	    || __put_user(init32.ring_offset, &init->ring_offset)
+	    || __put_user(init32.ring_rptr_offset, &init->ring_rptr_offset)
+	    || __put_user(init32.buffers_offset, &init->buffers_offset)
+	    || __put_user(init32.gart_textures_offset,
+			  &init->gart_textures_offset))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init);
+}
+
+typedef struct drm_radeon_clear32 {
+	unsigned int flags;
+	unsigned int clear_color;
+	unsigned int clear_depth;
+	unsigned int color_mask;
+	unsigned int depth_mask;	/* misnamed field:  should be stencil */
+	u32 depth_boxes;
+} drm_radeon_clear32_t;
+
+static int compat_radeon_cp_clear(struct file *file, unsigned int cmd,
+				  unsigned long arg)
+{
+	drm_radeon_clear32_t clr32;
+	drm_radeon_clear_t __user *clr;
+
+	if (copy_from_user(&clr32, (void __user *)arg, sizeof(clr32)))
+		return -EFAULT;
+
+	clr = compat_alloc_user_space(sizeof(*clr));
+	if (!access_ok(VERIFY_WRITE, clr, sizeof(*clr))
+	    || __put_user(clr32.flags, &clr->flags)
+	    || __put_user(clr32.clear_color, &clr->clear_color)
+	    || __put_user(clr32.clear_depth, &clr->clear_depth)
+	    || __put_user(clr32.color_mask, &clr->color_mask)
+	    || __put_user(clr32.depth_mask, &clr->depth_mask)
+	    || __put_user((void __user *)(unsigned long)clr32.depth_boxes,
+			  &clr->depth_boxes))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr);
+}
+
+typedef struct drm_radeon_stipple32 {
+	u32 mask;
+} drm_radeon_stipple32_t;
+
+static int compat_radeon_cp_stipple(struct file *file, unsigned int cmd,
+				    unsigned long arg)
+{
+	drm_radeon_stipple32_t __user *argp = (void __user *)arg;
+	drm_radeon_stipple_t __user *request;
+	u32 mask;
+
+	if (get_user(mask, &argp->mask))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user((unsigned int __user *)(unsigned long)mask,
+			  &request->mask))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request);
+}
+
+typedef struct drm_radeon_tex_image32 {
+	unsigned int x, y;	/* Blit coordinates */
+	unsigned int width, height;
+	u32 data;
+} drm_radeon_tex_image32_t;
+
+typedef struct drm_radeon_texture32 {
+	unsigned int offset;
+	int pitch;
+	int format;
+	int width;		/* Texture image coordinates */
+	int height;
+	u32 image;
+} drm_radeon_texture32_t;
+
+static int compat_radeon_cp_texture(struct file *file, unsigned int cmd,
+				    unsigned long arg)
+{
+	drm_radeon_texture32_t req32;
+	drm_radeon_texture_t __user *request;
+	drm_radeon_tex_image32_t img32;
+	drm_radeon_tex_image_t __user *image;
+
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
+		return -EFAULT;
+	if (req32.image == 0)
+		return -EINVAL;
+	if (copy_from_user(&img32, (void __user *)(unsigned long)req32.image,
+			   sizeof(img32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request) + sizeof(*image));
+	if (!access_ok(VERIFY_WRITE, request,
+		       sizeof(*request) + sizeof(*image)))
+		return -EFAULT;
+	image = (drm_radeon_tex_image_t __user *) (request + 1);
+
+	if (__put_user(req32.offset, &request->offset)
+	    || __put_user(req32.pitch, &request->pitch)
+	    || __put_user(req32.format, &request->format)
+	    || __put_user(req32.width, &request->width)
+	    || __put_user(req32.height, &request->height)
+	    || __put_user(image, &request->image)
+	    || __put_user(img32.x, &image->x)
+	    || __put_user(img32.y, &image->y)
+	    || __put_user(img32.width, &image->width)
+	    || __put_user(img32.height, &image->height)
+	    || __put_user((const void __user *)(unsigned long)img32.data,
+			  &image->data))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request);
+}
+
+typedef struct drm_radeon_vertex2_32 {
+	int idx;		/* Index of vertex buffer */
+	int discard;		/* Client finished with buffer? */
+	int nr_states;
+	u32 state;
+	int nr_prims;
+	u32 prim;
+} drm_radeon_vertex2_32_t;
+
+static int compat_radeon_cp_vertex2(struct file *file, unsigned int cmd,
+				    unsigned long arg)
+{
+	drm_radeon_vertex2_32_t req32;
+	drm_radeon_vertex2_t __user *request;
+
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user(req32.idx, &request->idx)
+	    || __put_user(req32.discard, &request->discard)
+	    || __put_user(req32.nr_states, &request->nr_states)
+	    || __put_user((void __user *)(unsigned long)req32.state,
+			  &request->state)
+	    || __put_user(req32.nr_prims, &request->nr_prims)
+	    || __put_user((void __user *)(unsigned long)req32.prim,
+			  &request->prim))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request);
+}
+
+typedef struct drm_radeon_cmd_buffer32 {
+	int bufsz;
+	u32 buf;
+	int nbox;
+	u32 boxes;
+} drm_radeon_cmd_buffer32_t;
+
+static int compat_radeon_cp_cmdbuf(struct file *file, unsigned int cmd,
+				   unsigned long arg)
+{
+	drm_radeon_cmd_buffer32_t req32;
+	drm_radeon_cmd_buffer_t __user *request;
+
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user(req32.bufsz, &request->bufsz)
+	    || __put_user((void __user *)(unsigned long)req32.buf,
+			  &request->buf)
+	    || __put_user(req32.nbox, &request->nbox)
+	    || __put_user((void __user *)(unsigned long)req32.boxes,
+			  &request->boxes))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request);
+}
+
+typedef struct drm_radeon_getparam32 {
+	int param;
+	u32 value;
+} drm_radeon_getparam32_t;
+
+static int compat_radeon_cp_getparam(struct file *file, unsigned int cmd,
+				     unsigned long arg)
+{
+	drm_radeon_getparam32_t req32;
+	drm_radeon_getparam_t __user *request;
+
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user(req32.param, &request->param)
+	    || __put_user((void __user *)(unsigned long)req32.value,
+			  &request->value))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request);
+}
+
+typedef struct drm_radeon_mem_alloc32 {
+	int region;
+	int alignment;
+	int size;
+	u32 region_offset;	/* offset from start of fb or GART */
+} drm_radeon_mem_alloc32_t;
+
+static int compat_radeon_mem_alloc(struct file *file, unsigned int cmd,
+				   unsigned long arg)
+{
+	drm_radeon_mem_alloc32_t req32;
+	drm_radeon_mem_alloc_t __user *request;
+
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user(req32.region, &request->region)
+	    || __put_user(req32.alignment, &request->alignment)
+	    || __put_user(req32.size, &request->size)
+	    || __put_user((int __user *)(unsigned long)req32.region_offset,
+			  &request->region_offset))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_RADEON_ALLOC, (unsigned long)request);
+}
+
+typedef struct drm_radeon_irq_emit32 {
+	u32 irq_seq;
+} drm_radeon_irq_emit32_t;
+
+static int compat_radeon_irq_emit(struct file *file, unsigned int cmd,
+				  unsigned long arg)
+{
+	drm_radeon_irq_emit32_t req32;
+	drm_radeon_irq_emit_t __user *request;
+
+	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user((int __user *)(unsigned long)req32.irq_seq,
+			  &request->irq_seq))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request);
+}
+
+/* The two 64-bit arches where alignof(u64)==4 in 32-bit code */
+#if defined(CONFIG_X86_64) || defined(CONFIG_IA64)
+typedef struct drm_radeon_setparam32 {
+	int param;
+	u64 value;
+} __attribute__((packed)) drm_radeon_setparam32_t;
+
+static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
+				     unsigned long arg)
+{
+	drm_radeon_setparam32_t req32;
+	drm_radeon_setparam_t __user *request;
+
+	if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user(req32.param, &request->param)
+	    || __put_user((void __user *)(unsigned long)req32.value,
+			  &request->value))
+		return -EFAULT;
+
+	return drm_ioctl(file->f_path.dentry->d_inode, file,
+			 DRM_IOCTL_RADEON_SETPARAM, (unsigned long)request);
+}
+#else
+#define compat_radeon_cp_setparam NULL
+#endif /* X86_64 || IA64 */
+
+drm_ioctl_compat_t *radeon_compat_ioctls[] = {
+	[DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
+	[DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
+	[DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
+	[DRM_RADEON_TEXTURE] = compat_radeon_cp_texture,
+	[DRM_RADEON_VERTEX2] = compat_radeon_cp_vertex2,
+	[DRM_RADEON_CMDBUF] = compat_radeon_cp_cmdbuf,
+	[DRM_RADEON_GETPARAM] = compat_radeon_cp_getparam,
+	[DRM_RADEON_SETPARAM] = compat_radeon_cp_setparam,
+	[DRM_RADEON_ALLOC] = compat_radeon_mem_alloc,
+	[DRM_RADEON_IRQ_EMIT] = compat_radeon_irq_emit,
+};
+
+/**
+ * Called whenever a 32-bit process running under a 64-bit kernel
+ * performs an ioctl on /dev/dri/card<n>.
+ *
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ */
+long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	unsigned int nr = DRM_IOCTL_NR(cmd);
+	drm_ioctl_compat_t *fn = NULL;
+	int ret;
+
+	if (nr < DRM_COMMAND_BASE)
+		return drm_compat_ioctl(filp, cmd, arg);
+
+	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
+		fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
+
+	lock_kernel();		/* XXX for now */
+	if (fn != NULL)
+		ret = (*fn) (filp, cmd, arg);
+	else
+		ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
+	unlock_kernel();
+
+	return ret;
+}
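
Every handler above performs the same three-step conversion; for reference, a hedged skeleton of that pattern follows. drm_foo32_t, drm_foo_t, their value/ptr members and DRM_IOCTL_FOO are placeholders for illustration, not part of the real radeon ABI:

/* Skeleton only: convert a 32-bit ioctl argument and forward it natively. */
static int compat_foo(struct file *file, unsigned int cmd, unsigned long arg)
{
	drm_foo32_t req32;			/* 32-bit user-space layout */
	drm_foo_t __user *request;		/* native layout, in user space */

	/* 1. Pull the 32-bit struct into the kernel. */
	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
		return -EFAULT;

	/* 2. Build a native copy on the user stack, widening pointer fields. */
	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user(req32.value, &request->value)
	    || __put_user((void __user *)(unsigned long)req32.ptr,
			  &request->ptr))
		return -EFAULT;

	/* 3. Hand the native struct to the regular 64-bit ioctl path. */
	return drm_ioctl(file->f_path.dentry->d_inode, file,
			 DRM_IOCTL_FOO, (unsigned long)request);
}

The only per-ioctl work is widening the u32 fields that carry 32-bit user pointers with the (void __user *)(unsigned long) cast before handing the request to drm_ioctl().
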
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
new file mode 100644
index 000000000000..ee40d197deb7
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -0,0 +1,320 @@
+/* radeon_irq.c -- IRQ handling for radeon -*- linux-c -*- */
+/*
+ * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Keith Whitwell <keith@tungstengraphics.com>
+ *    Michel Dänzer <michel@daenzer.net>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "radeon_drm.h"
+#include "radeon_drv.h"
+
+static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv,
+					      u32 mask)
+{
+	u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS) & mask;
+	if (irqs)
+		RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs);
+	return irqs;
+}
+
+/* Interrupts - Used for device synchronization and flushing in the
+ * following circumstances:
+ *
+ * - Exclusive FB access with hw idle:
+ *    - Wait for GUI Idle (?) interrupt, then do normal flush.
+ *
+ * - Frame throttling, NV_fence:
+ *    - Drop marker irq's into command stream ahead of time.
+ *    - Wait on irq's with lock *not held*
+ *    - Check each for termination condition
+ *
+ * - Internally in cp_getbuffer, etc:
+ *    - as above, but wait with lock held???
+ *
+ * NOTE: These functions are misleadingly named -- the irq's aren't
+ * tied to dma at all, this is just a hangover from dri prehistory.
+ */
+
+irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	drm_radeon_private_t *dev_priv =
+	    (drm_radeon_private_t *) dev->dev_private;
+	u32 stat;
+
+	/* Only consider the bits we're interested in - others could be used
+	 * outside the DRM
+	 */
+	stat = radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
+						  RADEON_CRTC_VBLANK_STAT |
+						  RADEON_CRTC2_VBLANK_STAT));
+	if (!stat)
+		return IRQ_NONE;
+
+	stat &= dev_priv->irq_enable_reg;
+
+	/* SW interrupt */
+	if (stat & RADEON_SW_INT_TEST) {
+		DRM_WAKEUP(&dev_priv->swi_queue);
+	}
+
+	/* VBLANK interrupt */
+	if (stat & (RADEON_CRTC_VBLANK_STAT|RADEON_CRTC2_VBLANK_STAT)) {
+		int vblank_crtc = dev_priv->vblank_crtc;
+
+		if ((vblank_crtc &
+		     (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) ==
+		    (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
+			if (stat & RADEON_CRTC_VBLANK_STAT)
+				atomic_inc(&dev->vbl_received);
+			if (stat & RADEON_CRTC2_VBLANK_STAT)
+				atomic_inc(&dev->vbl_received2);
+		} else if (((stat & RADEON_CRTC_VBLANK_STAT) &&
+			   (vblank_crtc & DRM_RADEON_VBLANK_CRTC1)) ||
+			   ((stat & RADEON_CRTC2_VBLANK_STAT) &&
+			    (vblank_crtc & DRM_RADEON_VBLANK_CRTC2)))
+			atomic_inc(&dev->vbl_received);
+
+		DRM_WAKEUP(&dev->vbl_queue);
+		drm_vbl_send_signals(dev);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int radeon_emit_irq(struct drm_device * dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	unsigned int ret;
+	RING_LOCALS;
+
+	atomic_inc(&dev_priv->swi_emitted);
+	ret = atomic_read(&dev_priv->swi_emitted);
+
+	BEGIN_RING(4);
+	OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
+	OUT_RING_REG(RADEON_GEN_INT_STATUS, RADEON_SW_INT_FIRE);
+	ADVANCE_RING();
+	COMMIT_RING();
+
+	return ret;
+}
+
+static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
+{
+	drm_radeon_private_t *dev_priv =
+	    (drm_radeon_private_t *) dev->dev_private;
+	int ret = 0;
+
+	if (RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr)
+		return 0;
+
+	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+	DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * DRM_HZ,
+		    RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr);
+
+	return ret;
+}
+
+static int radeon_driver_vblank_do_wait(struct drm_device * dev,
+					unsigned int *sequence, int crtc)
+{
+	drm_radeon_private_t *dev_priv =
+	    (drm_radeon_private_t *) dev->dev_private;
+	unsigned int cur_vblank;
+	int ret = 0;
+	int ack = 0;
+	atomic_t *counter;
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	if (crtc == DRM_RADEON_VBLANK_CRTC1) {
+		counter = &dev->vbl_received;
+		ack |= RADEON_CRTC_VBLANK_STAT;
+	} else if (crtc == DRM_RADEON_VBLANK_CRTC2) {
+		counter = &dev->vbl_received2;
+		ack |= RADEON_CRTC2_VBLANK_STAT;
+	} else
+		return -EINVAL;
+
+	radeon_acknowledge_irqs(dev_priv, ack);
+
+	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+
+	/* Assume that the user has missed the current sequence number
+	 * by about a day, rather than that they want to wait for years'
+	 * worth of vertical blanks...
+	 */
+	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
+		    (((cur_vblank = atomic_read(counter))
+		      - *sequence) <= (1 << 23)));
+
+	*sequence = cur_vblank;
+
+	return ret;
+}
+
+int radeon_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
+{
+	return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC1);
+}
+
+int radeon_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
+{
+	return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC2);
+}
+
+/* Needs the lock as it touches the ring.
+ */
+int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_irq_emit_t *emit = data;
+	int result;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	result = radeon_emit_irq(dev);
+
+	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
+		DRM_ERROR("copy_to_user\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* Doesn't need the hardware lock.
+ */
+int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_irq_wait_t *irqwait = data;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	return radeon_wait_irq(dev, irqwait->irq_seq);
+}
+
+void radeon_enable_interrupt(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
+
+	dev_priv->irq_enable_reg = RADEON_SW_INT_ENABLE;
+	if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC1)
+		dev_priv->irq_enable_reg |= RADEON_CRTC_VBLANK_MASK;
+
+	if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC2)
+		dev_priv->irq_enable_reg |= RADEON_CRTC2_VBLANK_MASK;
+
+	RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
+	dev_priv->irq_enabled = 1;
+}
+
+/* drm_dma.h hooks
+ */
+void radeon_driver_irq_preinstall(struct drm_device * dev)
+{
+	drm_radeon_private_t *dev_priv =
+	    (drm_radeon_private_t *) dev->dev_private;
+
+	/* Disable *all* interrupts */
+	RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
+
+	/* Clear bits if they're already high */
+	radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
+					   RADEON_CRTC_VBLANK_STAT |
+					   RADEON_CRTC2_VBLANK_STAT));
+}
+
+void radeon_driver_irq_postinstall(struct drm_device * dev)
+{
+	drm_radeon_private_t *dev_priv =
+	    (drm_radeon_private_t *) dev->dev_private;
+
+	atomic_set(&dev_priv->swi_emitted, 0);
+	DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
+
+	radeon_enable_interrupt(dev);
+}
+
+void radeon_driver_irq_uninstall(struct drm_device * dev)
+{
+	drm_radeon_private_t *dev_priv =
+	    (drm_radeon_private_t *) dev->dev_private;
+	if (!dev_priv)
+		return;
+
+	dev_priv->irq_enabled = 0;
+
+	/* Disable *all* interrupts */
+	RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
+}
+
+
+int radeon_vblank_crtc_get(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
+	u32 flag;
+	u32 value;
+
+	flag = RADEON_READ(RADEON_GEN_INT_CNTL);
+	value = 0;
+
+	if (flag & RADEON_CRTC_VBLANK_MASK)
+		value |= DRM_RADEON_VBLANK_CRTC1;
+
+	if (flag & RADEON_CRTC2_VBLANK_MASK)
+		value |= DRM_RADEON_VBLANK_CRTC2;
+	return value;
+}
+
+int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
+{
+	drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
+	if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
+		DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value);
+		return -EINVAL;
+	}
+	dev_priv->vblank_crtc = (unsigned int)value;
+	radeon_enable_interrupt(dev);
+	return 0;
+}
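
From user space, the emit/wait pair above behaves like a simple fence: the EMIT ioctl drops a software-interrupt marker into the ring and returns its sequence number, and the WAIT ioctl blocks (with the 3 * DRM_HZ timeout) until RADEON_LAST_SWI_REG has caught up. A hedged user-space sketch follows; it assumes the drm_radeon_irq_emit_t / drm_radeon_irq_wait_t layouts and the DRM_IOCTL_RADEON_IRQ_EMIT / DRM_IOCTL_RADEON_IRQ_WAIT numbers from the libdrm copy of radeon_drm.h, and /dev/dri/card0 as the device node:

/* Sketch only: emit a fence marker, then wait for the CP to retire it. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include "radeon_drm.h"		/* drm_radeon_irq_emit_t, drm_radeon_irq_wait_t */

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	int seq = 0;
	drm_radeon_irq_emit_t emit = { .irq_seq = &seq };
	drm_radeon_irq_wait_t wait;

	if (fd < 0)
		return 1;

	/* The kernel writes the new sequence number back through irq_seq. */
	if (ioctl(fd, DRM_IOCTL_RADEON_IRQ_EMIT, &emit))
		return 1;

	/* Sleeps until RADEON_LAST_SWI_REG >= seq (or the timeout hits). */
	wait.irq_seq = seq;
	if (ioctl(fd, DRM_IOCTL_RADEON_IRQ_WAIT, &wait))
		return 1;

	printf("irq %d retired\n", seq);
	return 0;
}
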
diff --git a/drivers/gpu/drm/radeon/radeon_mem.c b/drivers/gpu/drm/radeon/radeon_mem.c
new file mode 100644
index 000000000000..4af5286a36fb
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_mem.c
@@ -0,0 +1,302 @@
+/* radeon_mem.c -- Simple GART/fb memory manager for radeon -*- linux-c -*- */
+/*
+ * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "radeon_drm.h"
+#include "radeon_drv.h"
+
+/* Very simple allocator for GART memory, working on a static range
+ * already mapped into each client's address space.
+ */
+
+static struct mem_block *split_block(struct mem_block *p, int start, int size,
+				     struct drm_file *file_priv)
+{
+	/* Maybe cut off the start of an existing block */
+	if (start > p->start) {
+		struct mem_block *newblock =
+		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
+		if (!newblock)
+			goto out;
+		newblock->start = start;
+		newblock->size = p->size - (start - p->start);
+		newblock->file_priv = NULL;
+		newblock->next = p->next;
+		newblock->prev = p;
+		p->next->prev = newblock;
+		p->next = newblock;
+		p->size -= newblock->size;
+		p = newblock;
+	}
+
+	/* Maybe cut off the end of an existing block */
+	if (size < p->size) {
+		struct mem_block *newblock =
+		    drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
+		if (!newblock)
+			goto out;
+		newblock->start = start + size;
+		newblock->size = p->size - size;
+		newblock->file_priv = NULL;
+		newblock->next = p->next;
+		newblock->prev = p;
+		p->next->prev = newblock;
+		p->next = newblock;
+		p->size = size;
+	}
+
+      out:
+	/* Our block is in the middle */
+	p->file_priv = file_priv;
+	return p;
+}
+
+static struct mem_block *alloc_block(struct mem_block *heap, int size,
+				     int align2, struct drm_file *file_priv)
+{
+	struct mem_block *p;
+	int mask = (1 << align2) - 1;
+
+	list_for_each(p, heap) {
+		int start = (p->start + mask) & ~mask;
+		if (p->file_priv == NULL && start + size <= p->start + p->size)
+			return split_block(p, start, size, file_priv);
+	}
+
+	return NULL;
+}
+
+static struct mem_block *find_block(struct mem_block *heap, int start)
+{
+	struct mem_block *p;
+
+	list_for_each(p, heap)
+	    if (p->start == start)
+		return p;
+
+	return NULL;
+}
+
+static void free_block(struct mem_block *p)
+{
+	p->file_priv = NULL;
+
+	/* Assumes a single contiguous range.  Needs a special file_priv in
+	 * 'heap' to stop it being subsumed.
+	 */
+	if (p->next->file_priv == NULL) {
+		struct mem_block *q = p->next;
+		p->size += q->size;
+		p->next = q->next;
+		p->next->prev = p;
+		drm_free(q, sizeof(*q), DRM_MEM_BUFS);
+	}
+
+	if (p->prev->file_priv == NULL) {
+		struct mem_block *q = p->prev;
+		q->size += p->size;
+		q->next = p->next;
+		q->next->prev = q;
+		drm_free(p, sizeof(*q), DRM_MEM_BUFS);
+	}
+}
+
+/* Initialize.  How to check for an uninitialized heap?
+ */
+static int init_heap(struct mem_block **heap, int start, int size)
+{
+	struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS);
+
+	if (!blocks)
+		return -ENOMEM;
+
+	*heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS);
+	if (!*heap) {
+		drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS);
+		return -ENOMEM;
+	}
+
+	blocks->start = start;
+	blocks->size = size;
+	blocks->file_priv = NULL;
+	blocks->next = blocks->prev = *heap;
+
+	memset(*heap, 0, sizeof(**heap));
+	(*heap)->file_priv = (struct drm_file *) - 1;
+	(*heap)->next = (*heap)->prev = blocks;
+	return 0;
+}
+
+/* Free all blocks associated with the releasing file.
+ */
+void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap)
+{
+	struct mem_block *p;
+
+	if (!heap || !heap->next)
+		return;
+
+	list_for_each(p, heap) {
+		if (p->file_priv == file_priv)
+			p->file_priv = NULL;
+	}
+
+	/* Assumes a single contiguous range.  Needs a special file_priv in
+	 * 'heap' to stop it being subsumed.
+	 */
+	list_for_each(p, heap) {
+		while (p->file_priv == NULL && p->next->file_priv == NULL) {
+			struct mem_block *q = p->next;
+			p->size += q->size;
+			p->next = q->next;
+			p->next->prev = p;
+			drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
+		}
+	}
+}
+
+/* Shutdown.
+ */
+void radeon_mem_takedown(struct mem_block **heap)
+{
+	struct mem_block *p;
+
+	if (!*heap)
+		return;
+
+	for (p = (*heap)->next; p != *heap;) {
+		struct mem_block *q = p;
+		p = p->next;
+		drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
+	}
+
+	drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER);
+	*heap = NULL;
+}
+
+/* IOCTL HANDLERS */
+
+static struct mem_block **get_heap(drm_radeon_private_t * dev_priv, int region)
+{
+	switch (region) {
+	case RADEON_MEM_REGION_GART:
+		return &dev_priv->gart_heap;
+	case RADEON_MEM_REGION_FB:
+		return &dev_priv->fb_heap;
+	default:
+		return NULL;
+	}
+}
+
+int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_mem_alloc_t *alloc = data;
+	struct mem_block *block, **heap;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	heap = get_heap(dev_priv, alloc->region);
+	if (!heap || !*heap)
+		return -EFAULT;
+
+	/* Make things easier on ourselves: all allocations at least
+	 * 4k aligned.
+	 */
+	if (alloc->alignment < 12)
+		alloc->alignment = 12;
+
+	block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
+
+	if (!block)
+		return -ENOMEM;
+
+	if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
+			     sizeof(int))) {
+		DRM_ERROR("copy_to_user\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_mem_free_t *memfree = data;
+	struct mem_block *block, **heap;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	heap = get_heap(dev_priv, memfree->region);
+	if (!heap || !*heap)
+		return -EFAULT;
+
+	block = find_block(*heap, memfree->region_offset);
+	if (!block)
+		return -EFAULT;
+
+	if (block->file_priv != file_priv)
+		return -EPERM;
+
+	free_block(block);
+	return 0;
+}
+
+int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_mem_init_heap_t *initheap = data;
+	struct mem_block **heap;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	heap = get_heap(dev_priv, initheap->region);
+	if (!heap)
+		return -EFAULT;
+
+	if (*heap) {
+		DRM_ERROR("heap already initialized?");
+		return -EFAULT;
+	}
+
+	return init_heap(heap, initheap->start, initheap->size);
+}
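
The allocator above is easiest to follow as a circular doubly-linked list with a sentinel whose owner is never NULL: allocation splits the head and/or tail off a free block, and freeing merges a block with its free neighbours. Below is a hedged, self-contained user-space model of that scheme (error handling elided); struct blk, blk_alloc() and the other names are illustrative only and mirror split_block()/alloc_block()/free_block() rather than reusing them:

/* Stand-alone model of the radeon_mem.c block list; builds with any C compiler. */
#include <stdio.h>
#include <stdlib.h>

struct blk {
	int start, size;
	void *owner;			/* NULL == free, like file_priv above */
	struct blk *next, *prev;
};

static struct blk *heap_init(int start, int size)
{
	struct blk *sent = calloc(1, sizeof(*sent));
	struct blk *b = calloc(1, sizeof(*b));

	b->start = start;
	b->size = size;
	b->next = b->prev = sent;
	sent->owner = (void *)-1;	/* the sentinel never looks free */
	sent->next = sent->prev = b;
	return sent;
}

static struct blk *blk_alloc(struct blk *heap, int size, int align2, void *owner)
{
	int mask = (1 << align2) - 1;
	struct blk *p;

	for (p = heap->next; p != heap; p = p->next) {
		int start = (p->start + mask) & ~mask;

		if (p->owner || start + size > p->start + p->size)
			continue;
		if (start > p->start) {		/* cut off the start */
			struct blk *n = calloc(1, sizeof(*n));
			n->start = start;
			n->size = p->size - (start - p->start);
			n->next = p->next;
			n->prev = p;
			p->next->prev = n;
			p->next = n;
			p->size -= n->size;
			p = n;
		}
		if (size < p->size) {		/* cut off the end */
			struct blk *n = calloc(1, sizeof(*n));
			n->start = p->start + size;
			n->size = p->size - size;
			n->next = p->next;
			n->prev = p;
			p->next->prev = n;
			p->next = n;
			p->size = size;
		}
		p->owner = owner;
		return p;
	}
	return NULL;
}

static void blk_free(struct blk *p)
{
	p->owner = NULL;
	if (!p->next->owner) {			/* merge with the next block */
		struct blk *q = p->next;
		p->size += q->size;
		p->next = q->next;
		p->next->prev = p;
		free(q);
	}
	if (!p->prev->owner) {			/* merge with the previous block */
		struct blk *q = p->prev;
		q->size += p->size;
		q->next = p->next;
		q->next->prev = q;
		free(p);
	}
}

int main(void)
{
	struct blk *heap = heap_init(0, 1 << 20);	/* 1 MB region */
	struct blk *a = blk_alloc(heap, 4096, 12, (void *)1);

	printf("allocated at 0x%x, size %d\n", a->start, a->size);
	blk_free(a);
	return 0;
}

The sentinel plays the same role as the heap block given file_priv == -1 in init_heap(): because it never looks free, the merge loops in free_block() and radeon_mem_release() cannot walk past either end of the managed range.
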
diff --git a/drivers/gpu/drm/radeon/radeon_microcode.h b/drivers/gpu/drm/radeon/radeon_microcode.h
new file mode 100644
index 000000000000..a348c9e7db1c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_microcode.h
@@ -0,0 +1,1844 @@
+/*
+ * Copyright 2007 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef RADEON_MICROCODE_H
+#define RADEON_MICROCODE_H
+
+/* production radeon ucode r1xx-r6xx */
+static const u32 R100_cp_microcode[][2] = {
+    { 0x21007000, 0000000000 },
+    { 0x20007000, 0000000000 },
+    { 0x000000b4, 0x00000004 },
+    { 0x000000b8, 0x00000004 },
+    { 0x6f5b4d4c, 0000000000 },
+    { 0x4c4c427f, 0000000000 },
+    { 0x5b568a92, 0000000000 },
+    { 0x4ca09c6d, 0000000000 },
+    { 0xad4c4c4c, 0000000000 },
+    { 0x4ce1af3d, 0000000000 },
+    { 0xd8afafaf, 0000000000 },
+    { 0xd64c4cdc, 0000000000 },
+    { 0x4cd10d10, 0000000000 },
+    { 0x000f0000, 0x00000016 },
+    { 0x362f242d, 0000000000 },
+    { 0x00000012, 0x00000004 },
+    { 0x000f0000, 0x00000016 },
+    { 0x362f282d, 0000000000 },
+    { 0x000380e7, 0x00000002 },
+    { 0x04002c97, 0x00000002 },
+    { 0x000f0001, 0x00000016 },
+    { 0x333a3730, 0000000000 },
+    { 0x000077ef, 0x00000002 },
+    { 0x00061000, 0x00000002 },
+    { 0x00000021, 0x0000001a },
+    { 0x00004000, 0x0000001e },
+    { 0x00061000, 0x00000002 },
+    { 0x00000021, 0x0000001a },
+    { 0x00004000, 0x0000001e },
+    { 0x00061000, 0x00000002 },
+    { 0x00000021, 0x0000001a },
+    { 0x00004000, 0x0000001e },
+    { 0x00000017, 0x00000004 },
+    { 0x0003802b, 0x00000002 },
+    { 0x040067e0, 0x00000002 },
+    { 0x00000017, 0x00000004 },
+    { 0x000077e0, 0x00000002 },
+    { 0x00065000, 0x00000002 },
+    { 0x000037e1, 0x00000002 },
+    { 0x040067e1, 0x00000006 },
+    { 0x000077e0, 0x00000002 },
+    { 0x000077e1, 0x00000002 },
+    { 0x000077e1, 0x00000006 },
+    { 0xffffffff, 0000000000 },
+    { 0x10000000, 0000000000 },
+    { 0x0003802b, 0x00000002 },
+    { 0x040067e0, 0x00000006 },
+    { 0x00007675, 0x00000002 },
+    { 0x00007676, 0x00000002 },
+    { 0x00007677, 0x00000002 },
+    { 0x00007678, 0x00000006 },
+    { 0x0003802c, 0x00000002 },
+    { 0x04002676, 0x00000002 },
+    { 0x00007677, 0x00000002 },
+    { 0x00007678, 0x00000006 },
+    { 0x0000002f, 0x00000018 },
+    { 0x0000002f, 0x00000018 },
+    { 0000000000, 0x00000006 },
+    { 0x00000030, 0x00000018 },
+    { 0x00000030, 0x00000018 },
+    { 0000000000, 0x00000006 },
+    { 0x01605000, 0x00000002 },
+    { 0x00065000, 0x00000002 },
+    { 0x00098000, 0x00000002 },
+    { 0x00061000, 0x00000002 },
+    { 0x64c0603e, 0x00000004 },
+    { 0x000380e6, 0x00000002 },
+    { 0x040025c5, 0x00000002 },
+    { 0x00080000, 0x00000016 },
+    { 0000000000, 0000000000 },
+    { 0x0400251d, 0x00000002 },
+    { 0x00007580, 0x00000002 },
+    { 0x00067581, 0x00000002 },
+    { 0x04002580, 0x00000002 },
+    { 0x00067581, 0x00000002 },
+    { 0x00000049, 0x00000004 },
+    { 0x00005000, 0000000000 },
+    { 0x000380e6, 0x00000002 },
+    { 0x040025c5, 0x00000002 },
+    { 0x00061000, 0x00000002 },
+    { 0x0000750e, 0x00000002 },
+    { 0x00019000, 0x00000002 },
+    { 0x00011055, 0x00000014 },
+    { 0x00000055, 0x00000012 },
+    { 0x0400250f, 0x00000002 },
+    { 0x0000504f, 0x00000004 },
+    { 0x000380e6, 0x00000002 },
+    { 0x040025c5, 0x00000002 },
+    { 0x00007565, 0x00000002 },
+    { 0x00007566, 0x00000002 },
+    { 0x00000058, 0x00000004 },
+    { 0x000380e6, 0x00000002 },
+    { 0x040025c5, 0x00000002 },
+    { 0x01e655b4, 0x00000002 },
+    { 0x4401b0e4, 0x00000002 },
+    { 0x01c110e4, 0x00000002 },
+    { 0x26667066, 0x00000018 },
+    { 0x040c2565, 0x00000002 },
+    { 0x00000066, 0x00000018 },
+    { 0x04002564, 0x00000002 },
+    { 0x00007566, 0x00000002 },
+    { 0x0000005d, 0x00000004 },
+    { 0x00401069, 0x00000008 },
+    { 0x00101000, 0x00000002 },
+    { 0x000d80ff, 0x00000002 },
+    { 0x0080006c, 0x00000008 },
+    { 0x000f9000, 0x00000002 },
+    { 0x000e00ff, 0x00000002 },
+    { 0000000000, 0x00000006 },
+    { 0x0000008f, 0x00000018 },
+    { 0x0000005b, 0x00000004 },
+    { 0x000380e6, 0x00000002 },
+    { 0x040025c5, 0x00000002 },
+    { 0x00007576, 0x00000002 },
+    { 0x00065000, 0x00000002 },
+    { 0x00009000, 0x00000002 },
+    { 0x00041000, 0x00000002 },
+    { 0x0c00350e, 0x00000002 },
+    { 0x00049000, 0x00000002 },
+    { 0x00051000, 0x00000002 },
+    { 0x01e785f8, 0x00000002 },
+    { 0x00200000, 0x00000002 },
+    { 0x0060007e, 0x0000000c },
+    { 0x00007563, 0x00000002 },
+    { 0x006075f0, 0x00000021 },
+    { 0x20007073, 0x00000004 },
+    { 0x00005073, 0x00000004 },
+    { 0x000380e6, 0x00000002 },
+    { 0x040025c5, 0x00000002 },
+    { 0x00007576, 0x00000002 },
+    { 0x00007577, 0x00000002 },
+    { 0x0000750e, 0x00000002 },
+    { 0x0000750f, 0x00000002 },
+    { 0x00a05000, 0x00000002 },
+    { 0x00600083, 0x0000000c },
+    { 0x006075f0, 0x00000021 },
+    { 0x000075f8, 0x00000002 },
+    { 0x00000083, 0x00000004 },
+    { 0x000a750e, 0x00000002 },
+    { 0x000380e6, 0x00000002 },
+    { 0x040025c5, 0x00000002 },
+    { 0x0020750f, 0x00000002 },
+    { 0x00600086, 0x00000004 },
+    { 0x00007570, 0x00000002 },
+    { 0x00007571, 0x00000002 },
+    { 0x00007572, 0x00000006 },
+    { 0x000380e6, 0x00000002 },
+    { 0x040025c5, 0x00000002 },
+    { 0x00005000, 0x00000002 },
+    { 0x00a05000, 0x00000002 },
+    { 0x00007568, 0x00000002 },
+    { 0x00061000, 0x00000002 },
+    { 0x00000095, 0x0000000c },
+    { 0x00058000, 0x00000002 },
+    { 0x0c607562, 0x00000002 },
+    { 0x00000097, 0x00000004 },
+    { 0x000380e6, 0x00000002 },
+    { 0x040025c5, 0x00000002 },
+    { 0x00600096, 0x00000004 },
+    { 0x400070e5, 0000000000 },
+    { 0x000380e6, 0x00000002 },
+    { 0x040025c5, 0x00000002 },
+    { 0x000380e5, 0x00000002 },
+    { 0x000000a8, 0x0000001c },
+    { 0x000650aa, 0x00000018 },
+    { 0x040025bb, 0x00000002 },
+    { 0x000610ab, 0x00000018 },
+    { 0x040075bc, 0000000000 },
+    { 0x000075bb, 0x00000002 },
+    { 0x000075bc, 0000000000 },
+    { 0x00090000, 0x00000006 },
+    { 0x00090000, 0x00000002 },
+    { 0x000d8002, 0x00000006 },
+    { 0x00007832, 0x00000002 },
+    { 0x00005000, 0x00000002 },
+    { 0x000380e7, 0x00000002 },
+    { 0x04002c97, 0x00000002 },
+    { 0x00007820, 0x00000002 },
+    { 0x00007821, 0x00000002 },
+    { 0x00007800, 0000000000 },
+    { 0x01200000, 0x00000002 },
+    { 0x20077000, 0x00000002 },
+    { 0x01200000, 0x00000002 },
+    { 0x20007000, 0x00000002 },
+    { 0x00061000, 0x00000002 },
+    { 0x0120751b, 0x00000002 },
+    { 0x8040750a, 0x00000002 },
+    { 0x8040750b, 0x00000002 },
+    { 0x00110000, 0x00000002 },
+    { 0x000380e5, 0x00000002 },
+    { 0x000000c6, 0x0000001c },
+    { 0x000610ab, 0x00000018 },
+    { 0x844075bd, 0x00000002 },
+    { 0x000610aa, 0x00000018 },
+    { 0x840075bb, 0x00000002 },
+    { 0x000610ab, 0x00000018 },
+    { 0x844075bc, 0x00000002 },
+    { 0x000000c9, 0x00000004 },
+    { 0x804075bd, 0x00000002 },
+    { 0x800075bb, 0x00000002 },
+    { 0x804075bc, 0x00000002 },
+    { 0x00108000, 0x00000002 },
+    { 0x01400000, 0x00000002 },
+    { 0x006000cd, 0x0000000c },
+    { 0x20c07000, 0x00000020 },
+    { 0x000000cf, 0x00000012 },
+    { 0x00800000, 0x00000006 },
+    { 0x0080751d, 0x00000006 },
+    { 0000000000, 0000000000 },
+    { 0x0000775c, 0x00000002 },
+    { 0x00a05000, 0x00000002 },
+    { 0x00661000, 0x00000002 },
+    { 0x0460275d, 0x00000020 },
+    { 0x00004000, 0000000000 },
+    { 0x01e00830, 0x00000002 },
+    { 0x21007000, 0000000000 },
+    { 0x6464614d, 0000000000 },
+    { 0x69687420, 0000000000 },
+    { 0x00000073, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0x00005000, 0x00000002 },
+    { 0x000380d0, 0x00000002 },
+    { 0x040025e0, 0x00000002 },
+    { 0x000075e1, 0000000000 },
+    { 0x00000001, 0000000000 },
+    { 0x000380e0, 0x00000002 },
+    { 0x04002394, 0x00000002 },
+    { 0x00005000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0x00000008, 0000000000 },
+    { 0x00000004, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+};
+
+static const u32 R200_cp_microcode[][2] = {
+    { 0x21007000, 0000000000 },
+    { 0x20007000, 0000000000 },
+    { 0x000000bf, 0x00000004 },
+    { 0x000000c3, 0x00000004 },
+    { 0x7a685e5d, 0000000000 },
+    { 0x5d5d5588, 0000000000 },
+    { 0x68659197, 0000000000 },
+    { 0x5da19f78, 0000000000 },
+    { 0x5d5d5d5d, 0000000000 },
+    { 0x5dee5d50, 0000000000 },
+    { 0xf2acacac, 0000000000 },
+    { 0xe75df9e9, 0000000000 },
+    { 0xb1dd0e11, 0000000000 },
+    { 0xe2afafaf, 0000000000 },
+    { 0x000f0000, 0x00000016 },
+    { 0x452f232d, 0000000000 },
+    { 0x00000013, 0x00000004 },
+    { 0x000f0000, 0x00000016 },
+    { 0x452f272d, 0000000000 },
+    { 0x000f0001, 0x00000016 },
+    { 0x3e4d4a37, 0000000000 },
+    { 0x000077ef, 0x00000002 },
+    { 0x00061000, 0x00000002 },
+    { 0x00000020, 0x0000001a },
+    { 0x00004000, 0x0000001e },
+    { 0x00061000, 0x00000002 },
+    { 0x00000020, 0x0000001a },
+    { 0x00004000, 0x0000001e },
+    { 0x00061000, 0x00000002 },
+    { 0x00000020, 0x0000001a },
+    { 0x00004000, 0x0000001e },
+    { 0x00000016, 0x00000004 },
+    { 0x0003802a, 0x00000002 },
+    { 0x040067e0, 0x00000002 },
+    { 0x00000016, 0x00000004 },
+    { 0x000077e0, 0x00000002 },
+    { 0x00065000, 0x00000002 },
+    { 0x000037e1, 0x00000002 },
+    { 0x040067e1, 0x00000006 },
+    { 0x000077e0, 0x00000002 },
+    { 0x000077e1, 0x00000002 },
+    { 0x000077e1, 0x00000006 },
+    { 0xffffffff, 0000000000 },
+    { 0x10000000, 0000000000 },
+    { 0x07f007f0, 0000000000 },
+    { 0x0003802a, 0x00000002 },
+    { 0x040067e0, 0x00000006 },
+    { 0x0003802c, 0x00000002 },
+    { 0x04002741, 0x00000002 },
+    { 0x04002741, 0x00000002 },
+    { 0x04002743, 0x00000002 },
+    { 0x00007675, 0x00000002 },
+    { 0x00007676, 0x00000002 },
+    { 0x00007677, 0x00000002 },
+    { 0x00007678, 0x00000006 },
+    { 0x0003802c, 0x00000002 },
+    { 0x04002741, 0x00000002 },
+    { 0x04002741, 0x00000002 },
+    { 0x04002743, 0x00000002 },
+    { 0x00007676, 0x00000002 },
+    { 0x00007677, 0x00000002 },
+    { 0x00007678, 0x00000006 },
+    { 0x0003802b, 0x00000002 },
+    { 0x04002676, 0x00000002 },
+    { 0x00007677, 0x00000002 },
+    { 0x0003802c, 0x00000002 },
+    { 0x04002741, 0x00000002 },
+    { 0x04002743, 0x00000002 },
+    { 0x00007678, 0x00000006 },
+    { 0x0003802c, 0x00000002 },
+    { 0x04002741, 0x00000002 },
+    { 0x04002741, 0x00000002 },
+    { 0x04002743, 0x00000002 },
+    { 0x00007678, 0x00000006 },
+    { 0x0000002f, 0x00000018 },
+    { 0x0000002f, 0x00000018 },
+    { 0000000000, 0x00000006 },
+    { 0x00000037, 0x00000018 },
+    { 0x00000037, 0x00000018 },
+    { 0000000000, 0x00000006 },
+    { 0x01605000, 0x00000002 },
+    { 0x00065000, 0x00000002 },
+    { 0x00098000, 0x00000002 },
+    { 0x00061000, 0x00000002 },
+    { 0x64c06051, 0x00000004 },
+    { 0x00080000, 0x00000016 },
+    { 0000000000, 0000000000 },
+    { 0x0400251d, 0x00000002 },
+    { 0x00007580, 0x00000002 },
+    { 0x00067581, 0x00000002 },
+    { 0x04002580, 0x00000002 },
+    { 0x00067581, 0x00000002 },
+    { 0x0000005a, 0x00000004 },
+    { 0x00005000, 0000000000 },
+    { 0x00061000, 0x00000002 },
+    { 0x0000750e, 0x00000002 },
+    { 0x00019000, 0x00000002 },
+    { 0x00011064, 0x00000014 },
+    { 0x00000064, 0x00000012 },
+    { 0x0400250f, 0x00000002 },
+    { 0x0000505e, 0x00000004 },
+    { 0x00007565, 0x00000002 },
+    { 0x00007566, 0x00000002 },
+    { 0x00000065, 0x00000004 },
+    { 0x01e655b4, 0x00000002 },
+    { 0x4401b0f0, 0x00000002 },
+    { 0x01c110f0, 0x00000002 },
+    { 0x26667071, 0x00000018 },
+    { 0x040c2565, 0x00000002 },
+    { 0x00000071, 0x00000018 },
+    { 0x04002564, 0x00000002 },
+    { 0x00007566, 0x00000002 },
+    { 0x00000068, 0x00000004 },
+    { 0x00401074, 0x00000008 },
+    { 0x00101000, 0x00000002 },
+    { 0x000d80ff, 0x00000002 },
+    { 0x00800077, 0x00000008 },
+    { 0x000f9000, 0x00000002 },
+    { 0x000e00ff, 0x00000002 },
+    { 0000000000, 0x00000006 },
+    { 0x00000094, 0x00000018 },
+    { 0x00000068, 0x00000004 },
+    { 0x00007576, 0x00000002 },
+    { 0x00065000, 0x00000002 },
+    { 0x00009000, 0x00000002 },
+    { 0x00041000, 0x00000002 },
+    { 0x0c00350e, 0x00000002 },
+    { 0x00049000, 0x00000002 },
+    { 0x00051000, 0x00000002 },
+    { 0x01e785f8, 0x00000002 },
+    { 0x00200000, 0x00000002 },
+    { 0x00600087, 0x0000000c },
+    { 0x00007563, 0x00000002 },
+    { 0x006075f0, 0x00000021 },
+    { 0x2000707c, 0x00000004 },
+    { 0x0000507c, 0x00000004 },
+    { 0x00007576, 0x00000002 },
+    { 0x00007577, 0x00000002 },
+    { 0x0000750e, 0x00000002 },
+    { 0x0000750f, 0x00000002 },
+    { 0x00a05000, 0x00000002 },
+    { 0x0060008a, 0x0000000c },
+    { 0x006075f0, 0x00000021 },
+    { 0x000075f8, 0x00000002 },
+    { 0x0000008a, 0x00000004 },
+    { 0x000a750e, 0x00000002 },
+    { 0x0020750f, 0x00000002 },
+    { 0x0060008d, 0x00000004 },
+    { 0x00007570, 0x00000002 },
+    { 0x00007571, 0x00000002 },
+    { 0x00007572, 0x00000006 },
+    { 0x00005000, 0x00000002 },
+    { 0x00a05000, 0x00000002 },
+    { 0x00007568, 0x00000002 },
+    { 0x00061000, 0x00000002 },
+    { 0x00000098, 0x0000000c },
+    { 0x00058000, 0x00000002 },
+    { 0x0c607562, 0x00000002 },
+    { 0x0000009a, 0x00000004 },
+    { 0x00600099, 0x00000004 },
+    { 0x400070f1, 0000000000 },
+    { 0x000380f1, 0x00000002 },
+    { 0x000000a7, 0x0000001c },
+    { 0x000650a9, 0x00000018 },
+    { 0x040025bb, 0x00000002 },
+    { 0x000610aa, 0x00000018 },
+    { 0x040075bc, 0000000000 },
+    { 0x000075bb, 0x00000002 },
+    { 0x000075bc, 0000000000 },
+    { 0x00090000, 0x00000006 },
+    { 0x00090000, 0x00000002 },
+    { 0x000d8002, 0x00000006 },
+    { 0x00005000, 0x00000002 },
+    { 0x00007821, 0x00000002 },
+    { 0x00007800, 0000000000 },
+    { 0x00007821, 0x00000002 },
+    { 0x00007800, 0000000000 },
+    { 0x01665000, 0x00000002 },
+    { 0x000a0000, 0x00000002 },
+    { 0x000671cc, 0x00000002 },
+    { 0x0286f1cd, 0x00000002 },
+    { 0x000000b7, 0x00000010 },
+    { 0x21007000, 0000000000 },
+    { 0x000000be, 0x0000001c },
+    { 0x00065000, 0x00000002 },
+    { 0x000a0000, 0x00000002 },
+    { 0x00061000, 0x00000002 },
+    { 0x000b0000, 0x00000002 },
+    { 0x38067000, 0x00000002 },
+    { 0x000a00ba, 0x00000004 },
+    { 0x20007000, 0000000000 },
+    { 0x01200000, 0x00000002 },
+    { 0x20077000, 0x00000002 },
+    { 0x01200000, 0x00000002 },
+    { 0x20007000, 0000000000 },
+    { 0x00061000, 0x00000002 },
+    { 0x0120751b, 0x00000002 },
+    { 0x8040750a, 0x00000002 },
+    { 0x8040750b, 0x00000002 },
+    { 0x00110000, 0x00000002 },
+    { 0x000380f1, 0x00000002 },
+    { 0x000000d1, 0x0000001c },
+    { 0x000610aa, 0x00000018 },
+    { 0x844075bd, 0x00000002 },
+    { 0x000610a9, 0x00000018 },
+    { 0x840075bb, 0x00000002 },
+    { 0x000610aa, 0x00000018 },
+    { 0x844075bc, 0x00000002 },
+    { 0x000000d4, 0x00000004 },
+    { 0x804075bd, 0x00000002 },
+    { 0x800075bb, 0x00000002 },
+    { 0x804075bc, 0x00000002 },
+    { 0x00108000, 0x00000002 },
+    { 0x01400000, 0x00000002 },
+    { 0x006000d8, 0x0000000c },
+    { 0x20c07000, 0x00000020 },
+    { 0x000000da, 0x00000012 },
+    { 0x00800000, 0x00000006 },
+    { 0x0080751d, 0x00000006 },
+    { 0x000025bb, 0x00000002 },
+    { 0x000040d4, 0x00000004 },
+    { 0x0000775c, 0x00000002 },
+    { 0x00a05000, 0x00000002 },
+    { 0x00661000, 0x00000002 },
+    { 0x0460275d, 0x00000020 },
+    { 0x00004000, 0000000000 },
+    { 0x00007999, 0x00000002 },
+    { 0x00a05000, 0x00000002 },
+    { 0x00661000, 0x00000002 },
+    { 0x0460299b, 0x00000020 },
+    { 0x00004000, 0000000000 },
+    { 0x01e00830, 0x00000002 },
+    { 0x21007000, 0000000000 },
+    { 0x00005000, 0x00000002 },
+    { 0x00038056, 0x00000002 },
+    { 0x040025e0, 0x00000002 },
+    { 0x000075e1, 0000000000 },
+    { 0x00000001, 0000000000 },
+    { 0x000380ed, 0x00000002 },
+    { 0x04007394, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0x000078c4, 0x00000002 },
+    { 0x000078c5, 0x00000002 },
+    { 0x000078c6, 0x00000002 },
+    { 0x00007924, 0x00000002 },
+    { 0x00007925, 0x00000002 },
+    { 0x00007926, 0x00000002 },
+    { 0x000000f2, 0x00000004 },
+    { 0x00007924, 0x00000002 },
+    { 0x00007925, 0x00000002 },
+    { 0x00007926, 0x00000002 },
+    { 0x000000f9, 0x00000004 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+};
+
+static const u32 R300_cp_microcode[][2] = {
+    { 0x4200e000, 0000000000 },
+    { 0x4000e000, 0000000000 },
+    { 0x000000ae, 0x00000008 },
+    { 0x000000b2, 0x00000008 },
+    { 0x67554b4a, 0000000000 },
+    { 0x4a4a4475, 0000000000 },
+    { 0x55527d83, 0000000000 },
+    { 0x4a8c8b65, 0000000000 },
+    { 0x4aef4af6, 0000000000 },
+    { 0x4ae14a4a, 0000000000 },
+    { 0xe4979797, 0000000000 },
+    { 0xdb4aebdd, 0000000000 },
+    { 0x9ccc4a4a, 0000000000 },
+    { 0xd1989898, 0000000000 },
+    { 0x4a0f9ad6, 0000000000 },
+    { 0x000ca000, 0x00000004 },
+    { 0x000d0012, 0x00000038 },
+    { 0x0000e8b4, 0x00000004 },
+    { 0x000d0014, 0x00000038 },
+    { 0x0000e8b6, 0x00000004 },
+    { 0x000d0016, 0x00000038 },
+    { 0x0000e854, 0x00000004 },
+    { 0x000d0018, 0x00000038 },
+    { 0x0000e855, 0x00000004 },
+    { 0x000d001a, 0x00000038 },
+    { 0x0000e856, 0x00000004 },
+    { 0x000d001c, 0x00000038 },
+    { 0x0000e857, 0x00000004 },
+    { 0x000d001e, 0x00000038 },
+    { 0x0000e824, 0x00000004 },
+    { 0x000d0020, 0x00000038 },
+    { 0x0000e825, 0x00000004 },
+    { 0x000d0022, 0x00000038 },
+    { 0x0000e830, 0x00000004 },
+    { 0x000d0024, 0x00000038 },
+    { 0x0000f0c0, 0x00000004 },
+    { 0x000d0026, 0x00000038 },
+    { 0x0000f0c1, 0x00000004 },
+    { 0x000d0028, 0x00000038 },
+    { 0x0000f041, 0x00000004 },
+    { 0x000d002a, 0x00000038 },
+    { 0x0000f184, 0x00000004 },
+    { 0x000d002c, 0x00000038 },
+    { 0x0000f185, 0x00000004 },
+    { 0x000d002e, 0x00000038 },
+    { 0x0000f186, 0x00000004 },
+    { 0x000d0030, 0x00000038 },
+    { 0x0000f187, 0x00000004 },
+    { 0x000d0032, 0x00000038 },
+    { 0x0000f180, 0x00000004 },
+    { 0x000d0034, 0x00000038 },
+    { 0x0000f393, 0x00000004 },
+    { 0x000d0036, 0x00000038 },
+    { 0x0000f38a, 0x00000004 },
+    { 0x000d0038, 0x00000038 },
+    { 0x0000f38e, 0x00000004 },
+    { 0x0000e821, 0x00000004 },
+    { 0x0140a000, 0x00000004 },
+    { 0x00000043, 0x00000018 },
+    { 0x00cce800, 0x00000004 },
+    { 0x001b0001, 0x00000004 },
+    { 0x08004800, 0x00000004 },
+    { 0x001b0001, 0x00000004 },
+    { 0x08004800, 0x00000004 },
+    { 0x001b0001, 0x00000004 },
+    { 0x08004800, 0x00000004 },
+    { 0x0000003a, 0x00000008 },
+    { 0x0000a000, 0000000000 },
+    { 0x2000451d, 0x00000004 },
+    { 0x0000e580, 0x00000004 },
+    { 0x000ce581, 0x00000004 },
+    { 0x08004580, 0x00000004 },
+    { 0x000ce581, 0x00000004 },
+    { 0x00000047, 0x00000008 },
+    { 0x0000a000, 0000000000 },
+    { 0x000c2000, 0x00000004 },
+    { 0x0000e50e, 0x00000004 },
+    { 0x00032000, 0x00000004 },
+    { 0x00022051, 0x00000028 },
+    { 0x00000051, 0x00000024 },
+    { 0x0800450f, 0x00000004 },
+    { 0x0000a04b, 0x00000008 },
+    { 0x0000e565, 0x00000004 },
+    { 0x0000e566, 0x00000004 },
+    { 0x00000052, 0x00000008 },
+    { 0x03cca5b4, 0x00000004 },
+    { 0x05432000, 0x00000004 },
+    { 0x00022000, 0x00000004 },
+    { 0x4ccce05e, 0x00000030 },
+    { 0x08274565, 0x00000004 },
+    { 0x0000005e, 0x00000030 },
+    { 0x08004564, 0x00000004 },
+    { 0x0000e566, 0x00000004 },
+    { 0x00000055, 0x00000008 },
+    { 0x00802061, 0x00000010 },
+    { 0x00202000, 0x00000004 },
+    { 0x001b00ff, 0x00000004 },
+    { 0x01000064, 0x00000010 },
+    { 0x001f2000, 0x00000004 },
+    { 0x001c00ff, 0x00000004 },
+    { 0000000000, 0x0000000c },
+    { 0x00000080, 0x00000030 },
+    { 0x00000055, 0x00000008 },
+    { 0x0000e576, 0x00000004 },
+    { 0x000ca000, 0x00000004 },
+    { 0x00012000, 0x00000004 },
+    { 0x00082000, 0x00000004 },
+    { 0x1800650e, 0x00000004 },
+    { 0x00092000, 0x00000004 },
+    { 0x000a2000, 0x00000004 },
+    { 0x000f0000, 0x00000004 },
+    { 0x00400000, 0x00000004 },
+    { 0x00000074, 0x00000018 },
+    { 0x0000e563, 0x00000004 },
+    { 0x00c0e5f9, 0x000000c2 },
+    { 0x00000069, 0x00000008 },
+    { 0x0000a069, 0x00000008 },
+    { 0x0000e576, 0x00000004 },
+    { 0x0000e577, 0x00000004 },
+    { 0x0000e50e, 0x00000004 },
+    { 0x0000e50f, 0x00000004 },
+    { 0x0140a000, 0x00000004 },
+    { 0x00000077, 0x00000018 },
+    { 0x00c0e5f9, 0x000000c2 },
+    { 0x00000077, 0x00000008 },
+    { 0x0014e50e, 0x00000004 },
+    { 0x0040e50f, 0x00000004 },
+    { 0x00c0007a, 0x00000008 },
+    { 0x0000e570, 0x00000004 },
+    { 0x0000e571, 0x00000004 },
+    { 0x0000e572, 0x0000000c },
+    { 0x0000a000, 0x00000004 },
+    { 0x0140a000, 0x00000004 },
+    { 0x0000e568, 0x00000004 },
+    { 0x000c2000, 0x00000004 },
+    { 0x00000084, 0x00000018 },
+    { 0x000b0000, 0x00000004 },
+    { 0x18c0e562, 0x00000004 },
+    { 0x00000086, 0x00000008 },
+    { 0x00c00085, 0x00000008 },
+    { 0x000700e3, 0x00000004 },
+    { 0x00000092, 0x00000038 },
+    { 0x000ca094, 0x00000030 },
+    { 0x080045bb, 0x00000004 },
+    { 0x000c2095, 0x00000030 },
+    { 0x0800e5bc, 0000000000 },
+    { 0x0000e5bb, 0x00000004 },
+    { 0x0000e5bc, 0000000000 },
+    { 0x00120000, 0x0000000c },
+    { 0x00120000, 0x00000004 },
+    { 0x001b0002, 0x0000000c },
+    { 0x0000a000, 0x00000004 },
+    { 0x0000e821, 0x00000004 },
+    { 0x0000e800, 0000000000 },
+    { 0x0000e821, 0x00000004 },
+    { 0x0000e82e, 0000000000 },
+    { 0x02cca000, 0x00000004 },
+    { 0x00140000, 0x00000004 },
+    { 0x000ce1cc, 0x00000004 },
+    { 0x050de1cd, 0x00000004 },
+    { 0x00400000, 0x00000004 },
+    { 0x000000a4, 0x00000018 },
+    { 0x00c0a000, 0x00000004 },
+    { 0x000000a1, 0x00000008 },
+    { 0x000000a6, 0x00000020 },
+    { 0x4200e000, 0000000000 },
+    { 0x000000ad, 0x00000038 },
+    { 0x000ca000, 0x00000004 },
+    { 0x00140000, 0x00000004 },
+    { 0x000c2000, 0x00000004 },
+    { 0x00160000, 0x00000004 },
+    { 0x700ce000, 0x00000004 },
+    { 0x001400a9, 0x00000008 },
+    { 0x4000e000, 0000000000 },
+    { 0x02400000, 0x00000004 },
+    { 0x400ee000, 0x00000004 },
+    { 0x02400000, 0x00000004 },
+    { 0x4000e000, 0000000000 },
+    { 0x000c2000, 0x00000004 },
+    { 0x0240e51b, 0x00000004 },
+    { 0x0080e50a, 0x00000005 },
+    { 0x0080e50b, 0x00000005 },
+    { 0x00220000, 0x00000004 },
+    { 0x000700e3, 0x00000004 },
+    { 0x000000c0, 0x00000038 },
+    { 0x000c2095, 0x00000030 },
+    { 0x0880e5bd, 0x00000005 },
+    { 0x000c2094, 0x00000030 },
+    { 0x0800e5bb, 0x00000005 },
+    { 0x000c2095, 0x00000030 },
+    { 0x0880e5bc, 0x00000005 },
+    { 0x000000c3, 0x00000008 },
+    { 0x0080e5bd, 0x00000005 },
+    { 0x0000e5bb, 0x00000005 },
+    { 0x0080e5bc, 0x00000005 },
+    { 0x00210000, 0x00000004 },
+    { 0x02800000, 0x00000004 },
+    { 0x00c000c7, 0x00000018 },
+    { 0x4180e000, 0x00000040 },
+    { 0x000000c9, 0x00000024 },
+    { 0x01000000, 0x0000000c },
+    { 0x0100e51d, 0x0000000c },
+    { 0x000045bb, 0x00000004 },
+    { 0x000080c3, 0x00000008 },
+    { 0x0000f3ce, 0x00000004 },
+    { 0x0140a000, 0x00000004 },
+    { 0x00cc2000, 0x00000004 },
+    { 0x08c053cf, 0x00000040 },
+    { 0x00008000, 0000000000 },
+    { 0x0000f3d2, 0x00000004 },
+    { 0x0140a000, 0x00000004 },
+    { 0x00cc2000, 0x00000004 },
+    { 0x08c053d3, 0x00000040 },
+    { 0x00008000, 0000000000 },
+    { 0x0000f39d, 0x00000004 },
+    { 0x0140a000, 0x00000004 },
+    { 0x00cc2000, 0x00000004 },
+    { 0x08c0539e, 0x00000040 },
+    { 0x00008000, 0000000000 },
+    { 0x03c00830, 0x00000004 },
+    { 0x4200e000, 0000000000 },
+    { 0x0000a000, 0x00000004 },
+    { 0x200045e0, 0x00000004 },
+    { 0x0000e5e1, 0000000000 },
+    { 0x00000001, 0000000000 },
+    { 0x000700e0, 0x00000004 },
+    { 0x0800e394, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0x0000e8c4, 0x00000004 },
+    { 0x0000e8c5, 0x00000004 },
+    { 0x0000e8c6, 0x00000004 },
+    { 0x0000e928, 0x00000004 },
+    { 0x0000e929, 0x00000004 },
+    { 0x0000e92a, 0x00000004 },
+    { 0x000000e4, 0x00000008 },
+    { 0x0000e928, 0x00000004 },
+    { 0x0000e929, 0x00000004 },
+    { 0x0000e92a, 0x00000004 },
+    { 0x000000eb, 0x00000008 },
+    { 0x02c02000, 0x00000004 },
+    { 0x00060000, 0x00000004 },
+    { 0x000000f3, 0x00000034 },
+    { 0x000000f0, 0x00000008 },
+    { 0x00008000, 0x00000004 },
+    { 0xc000e000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0x000c2000, 0x00000004 },
+    { 0x001d0018, 0x00000004 },
+    { 0x001a0001, 0x00000004 },
+    { 0x000000fb, 0x00000034 },
+    { 0x0000004a, 0x00000008 },
+    { 0x0500a04a, 0x00000008 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+};
+
+static const u32 R420_cp_microcode[][2] = {
+    { 0x4200e000, 0000000000 },
+    { 0x4000e000, 0000000000 },
+    { 0x00000099, 0x00000008 },
+    { 0x0000009d, 0x00000008 },
+    { 0x4a554b4a, 0000000000 },
+    { 0x4a4a4467, 0000000000 },
+    { 0x55526f75, 0000000000 },
+    { 0x4a7e7d65, 0000000000 },
+    { 0xd9d3dff6, 0000000000 },
+    { 0x4ac54a4a, 0000000000 },
+    { 0xc8828282, 0000000000 },
+    { 0xbf4acfc1, 0000000000 },
+    { 0x87b04a4a, 0000000000 },
+    { 0xb5838383, 0000000000 },
+    { 0x4a0f85ba, 0000000000 },
+    { 0x000ca000, 0x00000004 },
+    { 0x000d0012, 0x00000038 },
+    { 0x0000e8b4, 0x00000004 },
+    { 0x000d0014, 0x00000038 },
+    { 0x0000e8b6, 0x00000004 },
+    { 0x000d0016, 0x00000038 },
+    { 0x0000e854, 0x00000004 },
+    { 0x000d0018, 0x00000038 },
+    { 0x0000e855, 0x00000004 },
+    { 0x000d001a, 0x00000038 },
+    { 0x0000e856, 0x00000004 },
+    { 0x000d001c, 0x00000038 },
+    { 0x0000e857, 0x00000004 },
+    { 0x000d001e, 0x00000038 },
+    { 0x0000e824, 0x00000004 },
+    { 0x000d0020, 0x00000038 },
+    { 0x0000e825, 0x00000004 },
+    { 0x000d0022, 0x00000038 },
+    { 0x0000e830, 0x00000004 },
+    { 0x000d0024, 0x00000038 },
+    { 0x0000f0c0, 0x00000004 },
+    { 0x000d0026, 0x00000038 },
+    { 0x0000f0c1, 0x00000004 },
+    { 0x000d0028, 0x00000038 },
+    { 0x0000f041, 0x00000004 },
+    { 0x000d002a, 0x00000038 },
+    { 0x0000f184, 0x00000004 },
+    { 0x000d002c, 0x00000038 },
+    { 0x0000f185, 0x00000004 },
+    { 0x000d002e, 0x00000038 },
+    { 0x0000f186, 0x00000004 },
+    { 0x000d0030, 0x00000038 },
+    { 0x0000f187, 0x00000004 },
+    { 0x000d0032, 0x00000038 },
+    { 0x0000f180, 0x00000004 },
+    { 0x000d0034, 0x00000038 },
+    { 0x0000f393, 0x00000004 },
+    { 0x000d0036, 0x00000038 },
+    { 0x0000f38a, 0x00000004 },
+    { 0x000d0038, 0x00000038 },
+    { 0x0000f38e, 0x00000004 },
+    { 0x0000e821, 0x00000004 },
+    { 0x0140a000, 0x00000004 },
+    { 0x00000043, 0x00000018 },
+    { 0x00cce800, 0x00000004 },
+    { 0x001b0001, 0x00000004 },
+    { 0x08004800, 0x00000004 },
+    { 0x001b0001, 0x00000004 },
+    { 0x08004800, 0x00000004 },
+    { 0x001b0001, 0x00000004 },
+    { 0x08004800, 0x00000004 },
+    { 0x0000003a, 0x00000008 },
+    { 0x0000a000, 0000000000 },
+    { 0x2000451d, 0x00000004 },
+    { 0x0000e580, 0x00000004 },
+    { 0x000ce581, 0x00000004 },
+    { 0x08004580, 0x00000004 },
+    { 0x000ce581, 0x00000004 },
+    { 0x00000047, 0x00000008 },
+    { 0x0000a000, 0000000000 },
+    { 0x000c2000, 0x00000004 },
+    { 0x0000e50e, 0x00000004 },
+    { 0x00032000, 0x00000004 },
+    { 0x00022051, 0x00000028 },
+    { 0x00000051, 0x00000024 },
+    { 0x0800450f, 0x00000004 },
+    { 0x0000a04b, 0x00000008 },
+    { 0x0000e565, 0x00000004 },
+    { 0x0000e566, 0x00000004 },
+    { 0x00000052, 0x00000008 },
+    { 0x03cca5b4, 0x00000004 },
+    { 0x05432000, 0x00000004 },
+    { 0x00022000, 0x00000004 },
+    { 0x4ccce05e, 0x00000030 },
+    { 0x08274565, 0x00000004 },
+    { 0x0000005e, 0x00000030 },
+    { 0x08004564, 0x00000004 },
+    { 0x0000e566, 0x00000004 },
+    { 0x00000055, 0x00000008 },
+    { 0x00802061, 0x00000010 },
+    { 0x00202000, 0x00000004 },
+    { 0x001b00ff, 0x00000004 },
+    { 0x01000064, 0x00000010 },
+    { 0x001f2000, 0x00000004 },
+    { 0x001c00ff, 0x00000004 },
+    { 0000000000, 0x0000000c },
+    { 0x00000072, 0x00000030 },
+    { 0x00000055, 0x00000008 },
+    { 0x0000e576, 0x00000004 },
+    { 0x0000e577, 0x00000004 },
+    { 0x0000e50e, 0x00000004 },
+    { 0x0000e50f, 0x00000004 },
+    { 0x0140a000, 0x00000004 },
+    { 0x00000069, 0x00000018 },
+    { 0x00c0e5f9, 0x000000c2 },
+    { 0x00000069, 0x00000008 },
+    { 0x0014e50e, 0x00000004 },
+    { 0x0040e50f, 0x00000004 },
+    { 0x00c0006c, 0x00000008 },
+    { 0x0000e570, 0x00000004 },
+    { 0x0000e571, 0x00000004 },
+    { 0x0000e572, 0x0000000c },
+    { 0x0000a000, 0x00000004 },
+    { 0x0140a000, 0x00000004 },
+    { 0x0000e568, 0x00000004 },
+    { 0x000c2000, 0x00000004 },
+    { 0x00000076, 0x00000018 },
+    { 0x000b0000, 0x00000004 },
+    { 0x18c0e562, 0x00000004 },
+    { 0x00000078, 0x00000008 },
+    { 0x00c00077, 0x00000008 },
+    { 0x000700c7, 0x00000004 },
+    { 0x00000080, 0x00000038 },
+    { 0x0000e5bb, 0x00000004 },
+    { 0x0000e5bc, 0000000000 },
+    { 0x0000a000, 0x00000004 },
+    { 0x0000e821, 0x00000004 },
+    { 0x0000e800, 0000000000 },
+    { 0x0000e821, 0x00000004 },
+    { 0x0000e82e, 0000000000 },
+    { 0x02cca000, 0x00000004 },
+    { 0x00140000, 0x00000004 },
+    { 0x000ce1cc, 0x00000004 },
+    { 0x050de1cd, 0x00000004 },
+    { 0x00400000, 0x00000004 },
+    { 0x0000008f, 0x00000018 },
+    { 0x00c0a000, 0x00000004 },
+    { 0x0000008c, 0x00000008 },
+    { 0x00000091, 0x00000020 },
+    { 0x4200e000, 0000000000 },
+    { 0x00000098, 0x00000038 },
+    { 0x000ca000, 0x00000004 },
+    { 0x00140000, 0x00000004 },
+    { 0x000c2000, 0x00000004 },
+    { 0x00160000, 0x00000004 },
+    { 0x700ce000, 0x00000004 },
+    { 0x00140094, 0x00000008 },
+    { 0x4000e000, 0000000000 },
+    { 0x02400000, 0x00000004 },
+    { 0x400ee000, 0x00000004 },
+    { 0x02400000, 0x00000004 },
+    { 0x4000e000, 0000000000 },
+    { 0x000c2000, 0x00000004 },
+    { 0x0240e51b, 0x00000004 },
+    { 0x0080e50a, 0x00000005 },
+    { 0x0080e50b, 0x00000005 },
+    { 0x00220000, 0x00000004 },
+    { 0x000700c7, 0x00000004 },
+    { 0x000000a4, 0x00000038 },
+    { 0x0080e5bd, 0x00000005 },
+    { 0x0000e5bb, 0x00000005 },
+    { 0x0080e5bc, 0x00000005 },
+    { 0x00210000, 0x00000004 },
+    { 0x02800000, 0x00000004 },
+    { 0x00c000ab, 0x00000018 },
+    { 0x4180e000, 0x00000040 },
+    { 0x000000ad, 0x00000024 },
+    { 0x01000000, 0x0000000c },
+    { 0x0100e51d, 0x0000000c },
+    { 0x000045bb, 0x00000004 },
+    { 0x000080a7, 0x00000008 },
+    { 0x0000f3ce, 0x00000004 },
+    { 0x0140a000, 0x00000004 },
+    { 0x00cc2000, 0x00000004 },
+    { 0x08c053cf, 0x00000040 },
+    { 0x00008000, 0000000000 },
+    { 0x0000f3d2, 0x00000004 },
+    { 0x0140a000, 0x00000004 },
+    { 0x00cc2000, 0x00000004 },
+    { 0x08c053d3, 0x00000040 },
+    { 0x00008000, 0000000000 },
+    { 0x0000f39d, 0x00000004 },
+    { 0x0140a000, 0x00000004 },
+    { 0x00cc2000, 0x00000004 },
+    { 0x08c0539e, 0x00000040 },
+    { 0x00008000, 0000000000 },
+    { 0x03c00830, 0x00000004 },
+    { 0x4200e000, 0000000000 },
+    { 0x0000a000, 0x00000004 },
+    { 0x200045e0, 0x00000004 },
+    { 0x0000e5e1, 0000000000 },
+    { 0x00000001, 0000000000 },
+    { 0x000700c4, 0x00000004 },
+    { 0x0800e394, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0x0000e8c4, 0x00000004 },
+    { 0x0000e8c5, 0x00000004 },
+    { 0x0000e8c6, 0x00000004 },
+    { 0x0000e928, 0x00000004 },
+    { 0x0000e929, 0x00000004 },
+    { 0x0000e92a, 0x00000004 },
+    { 0x000000c8, 0x00000008 },
+    { 0x0000e928, 0x00000004 },
+    { 0x0000e929, 0x00000004 },
+    { 0x0000e92a, 0x00000004 },
+    { 0x000000cf, 0x00000008 },
+    { 0x02c02000, 0x00000004 },
+    { 0x00060000, 0x00000004 },
+    { 0x000000d7, 0x00000034 },
+    { 0x000000d4, 0x00000008 },
+    { 0x00008000, 0x00000004 },
+    { 0xc000e000, 0000000000 },
+    { 0x0000e1cc, 0x00000004 },
+    { 0x0500e1cd, 0x00000004 },
+    { 0x000ca000, 0x00000004 },
+    { 0x000000de, 0x00000034 },
+    { 0x000000da, 0x00000008 },
+    { 0x0000a000, 0000000000 },
+    { 0x0019e1cc, 0x00000004 },
+    { 0x001b0001, 0x00000004 },
+    { 0x0500a000, 0x00000004 },
+    { 0x080041cd, 0x00000004 },
+    { 0x000ca000, 0x00000004 },
+    { 0x000000fb, 0x00000034 },
+    { 0x0000004a, 0x00000008 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0x000c2000, 0x00000004 },
+    { 0x001d0018, 0x00000004 },
+    { 0x001a0001, 0x00000004 },
+    { 0x000000fb, 0x00000034 },
+    { 0x0000004a, 0x00000008 },
+    { 0x0500a04a, 0x00000008 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+};
+
+static const u32 RS600_cp_microcode[][2] = {
+    { 0x4200e000, 0000000000 },
+    { 0x4000e000, 0000000000 },
+    { 0x000000a0, 0x00000008 },
+    { 0x000000a4, 0x00000008 },
+    { 0x4a554b4a, 0000000000 },
+    { 0x4a4a4467, 0000000000 },
+    { 0x55526f75, 0000000000 },
+    { 0x4a7e7d65, 0000000000 },
+    { 0x4ae74af6, 0000000000 },
+    { 0x4ad34a4a, 0000000000 },
+    { 0xd6898989, 0000000000 },
+    { 0xcd4addcf, 0000000000 },
+    { 0x8ebe4ae2, 0000000000 },
+    { 0xc38a8a8a, 0000000000 },
+    { 0x4a0f8cc8, 0000000000 },
+    { 0x000ca000, 0x00000004 },
+    { 0x000d0012, 0x00000038 },
+    { 0x0000e8b4, 0x00000004 },
+    { 0x000d0014, 0x00000038 },
+    { 0x0000e8b6, 0x00000004 },
+    { 0x000d0016, 0x00000038 },
+    { 0x0000e854, 0x00000004 },
+    { 0x000d0018, 0x00000038 },
+    { 0x0000e855, 0x00000004 },
+    { 0x000d001a, 0x00000038 },
+    { 0x0000e856, 0x00000004 },
+    { 0x000d001c, 0x00000038 },
+    { 0x0000e857, 0x00000004 },
+    { 0x000d001e, 0x00000038 },
+    { 0x0000e824, 0x00000004 },
+    { 0x000d0020, 0x00000038 },
+    { 0x0000e825, 0x00000004 },
+    { 0x000d0022, 0x00000038 },
+    { 0x0000e830, 0x00000004 },
+    { 0x000d0024, 0x00000038 },
+    { 0x0000f0c0, 0x00000004 },
+    { 0x000d0026, 0x00000038 },
+    { 0x0000f0c1, 0x00000004 },
+    { 0x000d0028, 0x00000038 },
+    { 0x0000f041, 0x00000004 },
+    { 0x000d002a, 0x00000038 },
+    { 0x0000f184, 0x00000004 },
+    { 0x000d002c, 0x00000038 },
+    { 0x0000f185, 0x00000004 },
+    { 0x000d002e, 0x00000038 },
+    { 0x0000f186, 0x00000004 },
+    { 0x000d0030, 0x00000038 },
+    { 0x0000f187, 0x00000004 },
+    { 0x000d0032, 0x00000038 },
+    { 0x0000f180, 0x00000004 },
+    { 0x000d0034, 0x00000038 },
+    { 0x0000f393, 0x00000004 },
+    { 0x000d0036, 0x00000038 },
+    { 0x0000f38a, 0x00000004 },
+    { 0x000d0038, 0x00000038 },
+    { 0x0000f38e, 0x00000004 },
+    { 0x0000e821, 0x00000004 },
+    { 0x0140a000, 0x00000004 },
+    { 0x00000043, 0x00000018 },
+    { 0x00cce800, 0x00000004 },
+    { 0x001b0001, 0x00000004 },
+    { 0x08004800, 0x00000004 },
+    { 0x001b0001, 0x00000004 },
+    { 0x08004800, 0x00000004 },
+    { 0x001b0001, 0x00000004 },
+    { 0x08004800, 0x00000004 },
+    { 0x0000003a, 0x00000008 },
+    { 0x0000a000, 0000000000 },
+    { 0x2000451d, 0x00000004 },
+    { 0x0000e580, 0x00000004 },
+    { 0x000ce581, 0x00000004 },
+    { 0x08004580, 0x00000004 },
+    { 0x000ce581, 0x00000004 },
+    { 0x00000047, 0x00000008 },
+    { 0x0000a000, 0000000000 },
+    { 0x000c2000, 0x00000004 },
+    { 0x0000e50e, 0x00000004 },
+    { 0x00032000, 0x00000004 },
+    { 0x00022051, 0x00000028 },
+    { 0x00000051, 0x00000024 },
+    { 0x0800450f, 0x00000004 },
+    { 0x0000a04b, 0x00000008 },
+    { 0x0000e565, 0x00000004 },
+    { 0x0000e566, 0x00000004 },
+    { 0x00000052, 0x00000008 },
+    { 0x03cca5b4, 0x00000004 },
+    { 0x05432000, 0x00000004 },
+    { 0x00022000, 0x00000004 },
+    { 0x4ccce05e, 0x00000030 },
+    { 0x08274565, 0x00000004 },
+    { 0x0000005e, 0x00000030 },
+    { 0x08004564, 0x00000004 },
+    { 0x0000e566, 0x00000004 },
+    { 0x00000055, 0x00000008 },
+    { 0x00802061, 0x00000010 },
+    { 0x00202000, 0x00000004 },
+    { 0x001b00ff, 0x00000004 },
+    { 0x01000064, 0x00000010 },
+    { 0x001f2000, 0x00000004 },
+    { 0x001c00ff, 0x00000004 },
+    { 0000000000, 0x0000000c },
+    { 0x00000072, 0x00000030 },
+    { 0x00000055, 0x00000008 },
+    { 0x0000e576, 0x00000004 },
+    { 0x0000e577, 0x00000004 },
+    { 0x0000e50e, 0x00000004 },
+    { 0x0000e50f, 0x00000004 },
+    { 0x0140a000, 0x00000004 },
+    { 0x00000069, 0x00000018 },
+    { 0x00c0e5f9, 0x000000c2 },
+    { 0x00000069, 0x00000008 },
+    { 0x0014e50e, 0x00000004 },
+    { 0x0040e50f, 0x00000004 },
+    { 0x00c0006c, 0x00000008 },
+    { 0x0000e570, 0x00000004 },
+    { 0x0000e571, 0x00000004 },
+    { 0x0000e572, 0x0000000c },
+    { 0x0000a000, 0x00000004 },
+    { 0x0140a000, 0x00000004 },
+    { 0x0000e568, 0x00000004 },
+    { 0x000c2000, 0x00000004 },
+    { 0x00000076, 0x00000018 },
+    { 0x000b0000, 0x00000004 },
+    { 0x18c0e562, 0x00000004 },
+    { 0x00000078, 0x00000008 },
+    { 0x00c00077, 0x00000008 },
+    { 0x000700d5, 0x00000004 },
+    { 0x00000084, 0x00000038 },
+    { 0x000ca086, 0x00000030 },
+    { 0x080045bb, 0x00000004 },
+    { 0x000c2087, 0x00000030 },
+    { 0x0800e5bc, 0000000000 },
+    { 0x0000e5bb, 0x00000004 },
+    { 0x0000e5bc, 0000000000 },
+    { 0x00120000, 0x0000000c },
+    { 0x00120000, 0x00000004 },
+    { 0x001b0002, 0x0000000c },
+    { 0x0000a000, 0x00000004 },
+    { 0x0000e821, 0x00000004 },
+    { 0x0000e800, 0000000000 },
+    { 0x0000e821, 0x00000004 },
+    { 0x0000e82e, 0000000000 },
+    { 0x02cca000, 0x00000004 },
+    { 0x00140000, 0x00000004 },
+    { 0x000ce1cc, 0x00000004 },
+    { 0x050de1cd, 0x00000004 },
+    { 0x00400000, 0x00000004 },
+    { 0x00000096, 0x00000018 },
+    { 0x00c0a000, 0x00000004 },
+    { 0x00000093, 0x00000008 },
+    { 0x00000098, 0x00000020 },
+    { 0x4200e000, 0000000000 },
+    { 0x0000009f, 0x00000038 },
+    { 0x000ca000, 0x00000004 },
+    { 0x00140000, 0x00000004 },
+    { 0x000c2000, 0x00000004 },
+    { 0x00160000, 0x00000004 },
+    { 0x700ce000, 0x00000004 },
+    { 0x0014009b, 0x00000008 },
+    { 0x4000e000, 0000000000 },
+    { 0x02400000, 0x00000004 },
+    { 0x400ee000, 0x00000004 },
+    { 0x02400000, 0x00000004 },
+    { 0x4000e000, 0000000000 },
+    { 0x000c2000, 0x00000004 },
+    { 0x0240e51b, 0x00000004 },
+    { 0x0080e50a, 0x00000005 },
+    { 0x0080e50b, 0x00000005 },
+    { 0x00220000, 0x00000004 },
+    { 0x000700d5, 0x00000004 },
+    { 0x000000b2, 0x00000038 },
+    { 0x000c2087, 0x00000030 },
+    { 0x0880e5bd, 0x00000005 },
+    { 0x000c2086, 0x00000030 },
+    { 0x0800e5bb, 0x00000005 },
+    { 0x000c2087, 0x00000030 },
+    { 0x0880e5bc, 0x00000005 },
+    { 0x000000b5, 0x00000008 },
+    { 0x0080e5bd, 0x00000005 },
+    { 0x0000e5bb, 0x00000005 },
+    { 0x0080e5bc, 0x00000005 },
+    { 0x00210000, 0x00000004 },
+    { 0x02800000, 0x00000004 },
+    { 0x00c000b9, 0x00000018 },
+    { 0x4180e000, 0x00000040 },
+    { 0x000000bb, 0x00000024 },
+    { 0x01000000, 0x0000000c },
+    { 0x0100e51d, 0x0000000c },
+    { 0x000045bb, 0x00000004 },
+    { 0x000080b5, 0x00000008 },
+    { 0x0000f3ce, 0x00000004 },
+    { 0x0140a000, 0x00000004 },
+    { 0x00cc2000, 0x00000004 },
+    { 0x08c053cf, 0x00000040 },
+    { 0x00008000, 0000000000 },
+    { 0x0000f3d2, 0x00000004 },
+    { 0x0140a000, 0x00000004 },
+    { 0x00cc2000, 0x00000004 },
+    { 0x08c053d3, 0x00000040 },
+    { 0x00008000, 0000000000 },
+    { 0x0000f39d, 0x00000004 },
+    { 0x0140a000, 0x00000004 },
+    { 0x00cc2000, 0x00000004 },
+    { 0x08c0539e, 0x00000040 },
+    { 0x00008000, 0000000000 },
+    { 0x03c00830, 0x00000004 },
+    { 0x4200e000, 0000000000 },
+    { 0x0000a000, 0x00000004 },
+    { 0x200045e0, 0x00000004 },
+    { 0x0000e5e1, 0000000000 },
+    { 0x00000001, 0000000000 },
+    { 0x000700d2, 0x00000004 },
+    { 0x0800e394, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0x0000e8c4, 0x00000004 },
+    { 0x0000e8c5, 0x00000004 },
+    { 0x0000e8c6, 0x00000004 },
+    { 0x0000e928, 0x00000004 },
+    { 0x0000e929, 0x00000004 },
+    { 0x0000e92a, 0x00000004 },
+    { 0x000000d6, 0x00000008 },
+    { 0x0000e928, 0x00000004 },
+    { 0x0000e929, 0x00000004 },
+    { 0x0000e92a, 0x00000004 },
+    { 0x000000dd, 0x00000008 },
+    { 0x00e00116, 0000000000 },
+    { 0x000700e1, 0x00000004 },
+    { 0x0800401c, 0x00000004 },
+    { 0x200050e7, 0x00000004 },
+    { 0x0000e01d, 0x00000004 },
+    { 0x000000e4, 0x00000008 },
+    { 0x02c02000, 0x00000004 },
+    { 0x00060000, 0x00000004 },
+    { 0x000000eb, 0x00000034 },
+    { 0x000000e8, 0x00000008 },
+    { 0x00008000, 0x00000004 },
+    { 0xc000e000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0x000c2000, 0x00000004 },
+    { 0x001d0018, 0x00000004 },
+    { 0x001a0001, 0x00000004 },
+    { 0x000000fb, 0x00000034 },
+    { 0x0000004a, 0x00000008 },
+    { 0x0500a04a, 0x00000008 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+};
+
+static const u32 RS690_cp_microcode[][2] = {
+    { 0x000000dd, 0x00000008 },
+    { 0x000000df, 0x00000008 },
+    { 0x000000a0, 0x00000008 },
+    { 0x000000a4, 0x00000008 },
+    { 0x4a554b4a, 0000000000 },
+    { 0x4a4a4467, 0000000000 },
+    { 0x55526f75, 0000000000 },
+    { 0x4a7e7d65, 0000000000 },
+    { 0x4ad74af6, 0000000000 },
+    { 0x4ac94a4a, 0000000000 },
+    { 0xcc898989, 0000000000 },
+    { 0xc34ad3c5, 0000000000 },
+    { 0x8e4a4a4a, 0000000000 },
+    { 0x4a8a8a8a, 0000000000 },
+    { 0x4a0f8c4a, 0000000000 },
+    { 0x000ca000, 0x00000004 },
+    { 0x000d0012, 0x00000038 },
+    { 0x0000e8b4, 0x00000004 },
+    { 0x000d0014, 0x00000038 },
+    { 0x0000e8b6, 0x00000004 },
+    { 0x000d0016, 0x00000038 },
+    { 0x0000e854, 0x00000004 },
+    { 0x000d0018, 0x00000038 },
+    { 0x0000e855, 0x00000004 },
+    { 0x000d001a, 0x00000038 },
+    { 0x0000e856, 0x00000004 },
+    { 0x000d001c, 0x00000038 },
+    { 0x0000e857, 0x00000004 },
+    { 0x000d001e, 0x00000038 },
+    { 0x0000e824, 0x00000004 },
+    { 0x000d0020, 0x00000038 },
+    { 0x0000e825, 0x00000004 },
+    { 0x000d0022, 0x00000038 },
+    { 0x0000e830, 0x00000004 },
+    { 0x000d0024, 0x00000038 },
+    { 0x0000f0c0, 0x00000004 },
+    { 0x000d0026, 0x00000038 },
+    { 0x0000f0c1, 0x00000004 },
+    { 0x000d0028, 0x00000038 },
+    { 0x0000f041, 0x00000004 },
+    { 0x000d002a, 0x00000038 },
+    { 0x0000f184, 0x00000004 },
+    { 0x000d002c, 0x00000038 },
+    { 0x0000f185, 0x00000004 },
+    { 0x000d002e, 0x00000038 },
+    { 0x0000f186, 0x00000004 },
+    { 0x000d0030, 0x00000038 },
+    { 0x0000f187, 0x00000004 },
+    { 0x000d0032, 0x00000038 },
+    { 0x0000f180, 0x00000004 },
+    { 0x000d0034, 0x00000038 },
+    { 0x0000f393, 0x00000004 },
+    { 0x000d0036, 0x00000038 },
+    { 0x0000f38a, 0x00000004 },
+    { 0x000d0038, 0x00000038 },
+    { 0x0000f38e, 0x00000004 },
+    { 0x0000e821, 0x00000004 },
+    { 0x0140a000, 0x00000004 },
+    { 0x00000043, 0x00000018 },
+    { 0x00cce800, 0x00000004 },
+    { 0x001b0001, 0x00000004 },
+    { 0x08004800, 0x00000004 },
+    { 0x001b0001, 0x00000004 },
+    { 0x08004800, 0x00000004 },
+    { 0x001b0001, 0x00000004 },
+    { 0x08004800, 0x00000004 },
+    { 0x0000003a, 0x00000008 },
+    { 0x0000a000, 0000000000 },
+    { 0x2000451d, 0x00000004 },
+    { 0x0000e580, 0x00000004 },
+    { 0x000ce581, 0x00000004 },
+    { 0x08004580, 0x00000004 },
+    { 0x000ce581, 0x00000004 },
+    { 0x00000047, 0x00000008 },
+    { 0x0000a000, 0000000000 },
+    { 0x000c2000, 0x00000004 },
+    { 0x0000e50e, 0x00000004 },
+    { 0x00032000, 0x00000004 },
+    { 0x00022051, 0x00000028 },
+    { 0x00000051, 0x00000024 },
+    { 0x0800450f, 0x00000004 },
+    { 0x0000a04b, 0x00000008 },
+    { 0x0000e565, 0x00000004 },
+    { 0x0000e566, 0x00000004 },
+    { 0x00000052, 0x00000008 },
+    { 0x03cca5b4, 0x00000004 },
+    { 0x05432000, 0x00000004 },
+    { 0x00022000, 0x00000004 },
+    { 0x4ccce05e, 0x00000030 },
+    { 0x08274565, 0x00000004 },
+    { 0x0000005e, 0x00000030 },
+    { 0x08004564, 0x00000004 },
+    { 0x0000e566, 0x00000004 },
+    { 0x00000055, 0x00000008 },
+    { 0x00802061, 0x00000010 },
+    { 0x00202000, 0x00000004 },
+    { 0x001b00ff, 0x00000004 },
+    { 0x01000064, 0x00000010 },
+    { 0x001f2000, 0x00000004 },
+    { 0x001c00ff, 0x00000004 },
+    { 0000000000, 0x0000000c },
+    { 0x00000072, 0x00000030 },
+    { 0x00000055, 0x00000008 },
+    { 0x0000e576, 0x00000004 },
+    { 0x0000e577, 0x00000004 },
+    { 0x0000e50e, 0x00000004 },
+    { 0x0000e50f, 0x00000004 },
+    { 0x0140a000, 0x00000004 },
+    { 0x00000069, 0x00000018 },
+    { 0x00c0e5f9, 0x000000c2 },
+    { 0x00000069, 0x00000008 },
+    { 0x0014e50e, 0x00000004 },
+    { 0x0040e50f, 0x00000004 },
+    { 0x00c0006c, 0x00000008 },
+    { 0x0000e570, 0x00000004 },
+    { 0x0000e571, 0x00000004 },
+    { 0x0000e572, 0x0000000c },
+    { 0x0000a000, 0x00000004 },
+    { 0x0140a000, 0x00000004 },
+    { 0x0000e568, 0x00000004 },
+    { 0x000c2000, 0x00000004 },
+    { 0x00000076, 0x00000018 },
+    { 0x000b0000, 0x00000004 },
+    { 0x18c0e562, 0x00000004 },
+    { 0x00000078, 0x00000008 },
+    { 0x00c00077, 0x00000008 },
+    { 0x000700cb, 0x00000004 },
+    { 0x00000084, 0x00000038 },
+    { 0x000ca086, 0x00000030 },
+    { 0x080045bb, 0x00000004 },
+    { 0x000c2087, 0x00000030 },
+    { 0x0800e5bc, 0000000000 },
+    { 0x0000e5bb, 0x00000004 },
+    { 0x0000e5bc, 0000000000 },
+    { 0x00120000, 0x0000000c },
+    { 0x00120000, 0x00000004 },
+    { 0x001b0002, 0x0000000c },
+    { 0x0000a000, 0x00000004 },
+    { 0x0000e821, 0x00000004 },
+    { 0x0000e800, 0000000000 },
+    { 0x0000e821, 0x00000004 },
+    { 0x0000e82e, 0000000000 },
+    { 0x02cca000, 0x00000004 },
+    { 0x00140000, 0x00000004 },
+    { 0x000ce1cc, 0x00000004 },
+    { 0x050de1cd, 0x00000004 },
+    { 0x00400000, 0x00000004 },
+    { 0x00000096, 0x00000018 },
+    { 0x00c0a000, 0x00000004 },
+    { 0x00000093, 0x00000008 },
+    { 0x00000098, 0x00000020 },
+    { 0x4200e000, 0000000000 },
+    { 0x0000009f, 0x00000038 },
+    { 0x000ca000, 0x00000004 },
+    { 0x00140000, 0x00000004 },
+    { 0x000c2000, 0x00000004 },
+    { 0x00160000, 0x00000004 },
+    { 0x700ce000, 0x00000004 },
+    { 0x0014009b, 0x00000008 },
+    { 0x4000e000, 0000000000 },
+    { 0x02400000, 0x00000004 },
+    { 0x400ee000, 0x00000004 },
+    { 0x02400000, 0x00000004 },
+    { 0x4000e000, 0000000000 },
+    { 0x00100000, 0x0000002c },
+    { 0x00004000, 0000000000 },
+    { 0x080045c8, 0x00000004 },
+    { 0x00240005, 0x00000004 },
+    { 0x08004d0b, 0x00000004 },
+    { 0x000c2000, 0x00000004 },
+    { 0x0240e51b, 0x00000004 },
+    { 0x0080e50a, 0x00000005 },
+    { 0x0080e50b, 0x00000005 },
+    { 0x00220000, 0x00000004 },
+    { 0x000700cb, 0x00000004 },
+    { 0x000000b7, 0x00000038 },
+    { 0x000c2087, 0x00000030 },
+    { 0x0880e5bd, 0x00000005 },
+    { 0x000c2086, 0x00000030 },
+    { 0x0800e5bb, 0x00000005 },
+    { 0x000c2087, 0x00000030 },
+    { 0x0880e5bc, 0x00000005 },
+    { 0x000000ba, 0x00000008 },
+    { 0x0080e5bd, 0x00000005 },
+    { 0x0000e5bb, 0x00000005 },
+    { 0x0080e5bc, 0x00000005 },
+    { 0x00210000, 0x00000004 },
+    { 0x02800000, 0x00000004 },
+    { 0x00c000be, 0x00000018 },
+    { 0x4180e000, 0x00000040 },
+    { 0x000000c0, 0x00000024 },
+    { 0x01000000, 0x0000000c },
+    { 0x0100e51d, 0x0000000c },
+    { 0x000045bb, 0x00000004 },
+    { 0x000080ba, 0x00000008 },
+    { 0x03c00830, 0x00000004 },
+    { 0x4200e000, 0000000000 },
+    { 0x0000a000, 0x00000004 },
+    { 0x200045e0, 0x00000004 },
+    { 0x0000e5e1, 0000000000 },
+    { 0x00000001, 0000000000 },
+    { 0x000700c8, 0x00000004 },
+    { 0x0800e394, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0x0000e8c4, 0x00000004 },
+    { 0x0000e8c5, 0x00000004 },
+    { 0x0000e8c6, 0x00000004 },
+    { 0x0000e928, 0x00000004 },
+    { 0x0000e929, 0x00000004 },
+    { 0x0000e92a, 0x00000004 },
+    { 0x000000cc, 0x00000008 },
+    { 0x0000e928, 0x00000004 },
+    { 0x0000e929, 0x00000004 },
+    { 0x0000e92a, 0x00000004 },
+    { 0x000000d3, 0x00000008 },
+    { 0x02c02000, 0x00000004 },
+    { 0x00060000, 0x00000004 },
+    { 0x000000db, 0x00000034 },
+    { 0x000000d8, 0x00000008 },
+    { 0x00008000, 0x00000004 },
+    { 0xc000e000, 0000000000 },
+    { 0x000000e1, 0x00000030 },
+    { 0x4200e000, 0000000000 },
+    { 0x000000e1, 0x00000030 },
+    { 0x4000e000, 0000000000 },
+    { 0x0025001b, 0x00000004 },
+    { 0x00230000, 0x00000004 },
+    { 0x00250005, 0x00000004 },
+    { 0x000000e6, 0x00000034 },
+    { 0000000000, 0x0000000c },
+    { 0x00244000, 0x00000004 },
+    { 0x080045c8, 0x00000004 },
+    { 0x00240005, 0x00000004 },
+    { 0x08004d0b, 0x0000000c },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0x000c2000, 0x00000004 },
+    { 0x001d0018, 0x00000004 },
+    { 0x001a0001, 0x00000004 },
+    { 0x000000fb, 0x00000034 },
+    { 0x0000004a, 0x00000008 },
+    { 0x0500a04a, 0x00000008 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+};
+
+static const u32 R520_cp_microcode[][2] = {
+    { 0x4200e000, 0000000000 },
+    { 0x4000e000, 0000000000 },
+    { 0x00000099, 0x00000008 },
+    { 0x0000009d, 0x00000008 },
+    { 0x4a554b4a, 0000000000 },
+    { 0x4a4a4467, 0000000000 },
+    { 0x55526f75, 0000000000 },
+    { 0x4a7e7d65, 0000000000 },
+    { 0xe0dae6f6, 0000000000 },
+    { 0x4ac54a4a, 0000000000 },
+    { 0xc8828282, 0000000000 },
+    { 0xbf4acfc1, 0000000000 },
+    { 0x87b04ad5, 0000000000 },
+    { 0xb5838383, 0000000000 },
+    { 0x4a0f85ba, 0000000000 },
+    { 0x000ca000, 0x00000004 },
+    { 0x000d0012, 0x00000038 },
+    { 0x0000e8b4, 0x00000004 },
+    { 0x000d0014, 0x00000038 },
+    { 0x0000e8b6, 0x00000004 },
+    { 0x000d0016, 0x00000038 },
+    { 0x0000e854, 0x00000004 },
+    { 0x000d0018, 0x00000038 },
+    { 0x0000e855, 0x00000004 },
+    { 0x000d001a, 0x00000038 },
+    { 0x0000e856, 0x00000004 },
+    { 0x000d001c, 0x00000038 },
+    { 0x0000e857, 0x00000004 },
+    { 0x000d001e, 0x00000038 },
+    { 0x0000e824, 0x00000004 },
+    { 0x000d0020, 0x00000038 },
+    { 0x0000e825, 0x00000004 },
+    { 0x000d0022, 0x00000038 },
+    { 0x0000e830, 0x00000004 },
+    { 0x000d0024, 0x00000038 },
+    { 0x0000f0c0, 0x00000004 },
+    { 0x000d0026, 0x00000038 },
+    { 0x0000f0c1, 0x00000004 },
+    { 0x000d0028, 0x00000038 },
+    { 0x0000e000, 0x00000004 },
+    { 0x000d002a, 0x00000038 },
+    { 0x0000e000, 0x00000004 },
+    { 0x000d002c, 0x00000038 },
+    { 0x0000e000, 0x00000004 },
+    { 0x000d002e, 0x00000038 },
+    { 0x0000e000, 0x00000004 },
+    { 0x000d0030, 0x00000038 },
+    { 0x0000e000, 0x00000004 },
+    { 0x000d0032, 0x00000038 },
+    { 0x0000f180, 0x00000004 },
+    { 0x000d0034, 0x00000038 },
+    { 0x0000f393, 0x00000004 },
+    { 0x000d0036, 0x00000038 },
+    { 0x0000f38a, 0x00000004 },
+    { 0x000d0038, 0x00000038 },
+    { 0x0000f38e, 0x00000004 },
+    { 0x0000e821, 0x00000004 },
+    { 0x0140a000, 0x00000004 },
+    { 0x00000043, 0x00000018 },
+    { 0x00cce800, 0x00000004 },
+    { 0x001b0001, 0x00000004 },
+    { 0x08004800, 0x00000004 },
+    { 0x001b0001, 0x00000004 },
+    { 0x08004800, 0x00000004 },
+    { 0x001b0001, 0x00000004 },
+    { 0x08004800, 0x00000004 },
+    { 0x0000003a, 0x00000008 },
+    { 0x0000a000, 0000000000 },
+    { 0x2000451d, 0x00000004 },
+    { 0x0000e580, 0x00000004 },
+    { 0x000ce581, 0x00000004 },
+    { 0x08004580, 0x00000004 },
+    { 0x000ce581, 0x00000004 },
+    { 0x00000047, 0x00000008 },
+    { 0x0000a000, 0000000000 },
+    { 0x000c2000, 0x00000004 },
+    { 0x0000e50e, 0x00000004 },
+    { 0x00032000, 0x00000004 },
+    { 0x00022051, 0x00000028 },
+    { 0x00000051, 0x00000024 },
+    { 0x0800450f, 0x00000004 },
+    { 0x0000a04b, 0x00000008 },
+    { 0x0000e565, 0x00000004 },
+    { 0x0000e566, 0x00000004 },
+    { 0x00000052, 0x00000008 },
+    { 0x03cca5b4, 0x00000004 },
+    { 0x05432000, 0x00000004 },
+    { 0x00022000, 0x00000004 },
+    { 0x4ccce05e, 0x00000030 },
+    { 0x08274565, 0x00000004 },
+    { 0x0000005e, 0x00000030 },
+    { 0x08004564, 0x00000004 },
+    { 0x0000e566, 0x00000004 },
+    { 0x00000055, 0x00000008 },
+    { 0x00802061, 0x00000010 },
+    { 0x00202000, 0x00000004 },
+    { 0x001b00ff, 0x00000004 },
+    { 0x01000064, 0x00000010 },
+    { 0x001f2000, 0x00000004 },
+    { 0x001c00ff, 0x00000004 },
+    { 0000000000, 0x0000000c },
+    { 0x00000072, 0x00000030 },
+    { 0x00000055, 0x00000008 },
+    { 0x0000e576, 0x00000004 },
+    { 0x0000e577, 0x00000004 },
+    { 0x0000e50e, 0x00000004 },
+    { 0x0000e50f, 0x00000004 },
+    { 0x0140a000, 0x00000004 },
+    { 0x00000069, 0x00000018 },
+    { 0x00c0e5f9, 0x000000c2 },
+    { 0x00000069, 0x00000008 },
+    { 0x0014e50e, 0x00000004 },
+    { 0x0040e50f, 0x00000004 },
+    { 0x00c0006c, 0x00000008 },
+    { 0x0000e570, 0x00000004 },
+    { 0x0000e571, 0x00000004 },
+    { 0x0000e572, 0x0000000c },
+    { 0x0000a000, 0x00000004 },
+    { 0x0140a000, 0x00000004 },
+    { 0x0000e568, 0x00000004 },
+    { 0x000c2000, 0x00000004 },
+    { 0x00000076, 0x00000018 },
+    { 0x000b0000, 0x00000004 },
+    { 0x18c0e562, 0x00000004 },
+    { 0x00000078, 0x00000008 },
+    { 0x00c00077, 0x00000008 },
+    { 0x000700c7, 0x00000004 },
+    { 0x00000080, 0x00000038 },
+    { 0x0000e5bb, 0x00000004 },
+    { 0x0000e5bc, 0000000000 },
+    { 0x0000a000, 0x00000004 },
+    { 0x0000e821, 0x00000004 },
+    { 0x0000e800, 0000000000 },
+    { 0x0000e821, 0x00000004 },
+    { 0x0000e82e, 0000000000 },
+    { 0x02cca000, 0x00000004 },
+    { 0x00140000, 0x00000004 },
+    { 0x000ce1cc, 0x00000004 },
+    { 0x050de1cd, 0x00000004 },
+    { 0x00400000, 0x00000004 },
+    { 0x0000008f, 0x00000018 },
+    { 0x00c0a000, 0x00000004 },
+    { 0x0000008c, 0x00000008 },
+    { 0x00000091, 0x00000020 },
+    { 0x4200e000, 0000000000 },
+    { 0x00000098, 0x00000038 },
+    { 0x000ca000, 0x00000004 },
+    { 0x00140000, 0x00000004 },
+    { 0x000c2000, 0x00000004 },
+    { 0x00160000, 0x00000004 },
+    { 0x700ce000, 0x00000004 },
+    { 0x00140094, 0x00000008 },
+    { 0x4000e000, 0000000000 },
+    { 0x02400000, 0x00000004 },
+    { 0x400ee000, 0x00000004 },
+    { 0x02400000, 0x00000004 },
+    { 0x4000e000, 0000000000 },
+    { 0x000c2000, 0x00000004 },
+    { 0x0240e51b, 0x00000004 },
+    { 0x0080e50a, 0x00000005 },
+    { 0x0080e50b, 0x00000005 },
+    { 0x00220000, 0x00000004 },
+    { 0x000700c7, 0x00000004 },
+    { 0x000000a4, 0x00000038 },
+    { 0x0080e5bd, 0x00000005 },
+    { 0x0000e5bb, 0x00000005 },
+    { 0x0080e5bc, 0x00000005 },
+    { 0x00210000, 0x00000004 },
+    { 0x02800000, 0x00000004 },
+    { 0x00c000ab, 0x00000018 },
+    { 0x4180e000, 0x00000040 },
+    { 0x000000ad, 0x00000024 },
+    { 0x01000000, 0x0000000c },
+    { 0x0100e51d, 0x0000000c },
+    { 0x000045bb, 0x00000004 },
+    { 0x000080a7, 0x00000008 },
+    { 0x0000f3ce, 0x00000004 },
+    { 0x0140a000, 0x00000004 },
+    { 0x00cc2000, 0x00000004 },
+    { 0x08c053cf, 0x00000040 },
+    { 0x00008000, 0000000000 },
+    { 0x0000f3d2, 0x00000004 },
+    { 0x0140a000, 0x00000004 },
+    { 0x00cc2000, 0x00000004 },
+    { 0x08c053d3, 0x00000040 },
+    { 0x00008000, 0000000000 },
+    { 0x0000f39d, 0x00000004 },
+    { 0x0140a000, 0x00000004 },
+    { 0x00cc2000, 0x00000004 },
+    { 0x08c0539e, 0x00000040 },
+    { 0x00008000, 0000000000 },
+    { 0x03c00830, 0x00000004 },
+    { 0x4200e000, 0000000000 },
+    { 0x0000a000, 0x00000004 },
+    { 0x200045e0, 0x00000004 },
+    { 0x0000e5e1, 0000000000 },
+    { 0x00000001, 0000000000 },
+    { 0x000700c4, 0x00000004 },
+    { 0x0800e394, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0x0000e8c4, 0x00000004 },
+    { 0x0000e8c5, 0x00000004 },
+    { 0x0000e8c6, 0x00000004 },
+    { 0x0000e928, 0x00000004 },
+    { 0x0000e929, 0x00000004 },
+    { 0x0000e92a, 0x00000004 },
+    { 0x000000c8, 0x00000008 },
+    { 0x0000e928, 0x00000004 },
+    { 0x0000e929, 0x00000004 },
+    { 0x0000e92a, 0x00000004 },
+    { 0x000000cf, 0x00000008 },
+    { 0xdeadbeef, 0000000000 },
+    { 0x00000116, 0000000000 },
+    { 0x000700d3, 0x00000004 },
+    { 0x080050e7, 0x00000004 },
+    { 0x000700d4, 0x00000004 },
+    { 0x0800401c, 0x00000004 },
+    { 0x0000e01d, 0000000000 },
+    { 0x02c02000, 0x00000004 },
+    { 0x00060000, 0x00000004 },
+    { 0x000000de, 0x00000034 },
+    { 0x000000db, 0x00000008 },
+    { 0x00008000, 0x00000004 },
+    { 0xc000e000, 0000000000 },
+    { 0x0000e1cc, 0x00000004 },
+    { 0x0500e1cd, 0x00000004 },
+    { 0x000ca000, 0x00000004 },
+    { 0x000000e5, 0x00000034 },
+    { 0x000000e1, 0x00000008 },
+    { 0x0000a000, 0000000000 },
+    { 0x0019e1cc, 0x00000004 },
+    { 0x001b0001, 0x00000004 },
+    { 0x0500a000, 0x00000004 },
+    { 0x080041cd, 0x00000004 },
+    { 0x000ca000, 0x00000004 },
+    { 0x000000fb, 0x00000034 },
+    { 0x0000004a, 0x00000008 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0x000c2000, 0x00000004 },
+    { 0x001d0018, 0x00000004 },
+    { 0x001a0001, 0x00000004 },
+    { 0x000000fb, 0x00000034 },
+    { 0x0000004a, 0x00000008 },
+    { 0x0500a04a, 0x00000008 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+    { 0000000000, 0000000000 },
+};
+
+
+#endif
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
new file mode 100644
index 000000000000..11c146b49211
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -0,0 +1,3203 @@
+/* radeon_state.c -- State support for Radeon -*- linux-c -*- */
+/*
+ * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Gareth Hughes <gareth@valinux.com>
+ *    Kevin E. Martin <martin@valinux.com>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_sarea.h"
+#include "radeon_drm.h"
+#include "radeon_drv.h"
+
+/* ================================================================
+ * Helper functions for client state checking and fixup
+ */
+
+static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
+						    dev_priv,
+						    struct drm_file * file_priv,
+						    u32 *offset)
+{
+	u64 off = *offset;
+	u32 fb_end = dev_priv->fb_location + dev_priv->fb_size - 1;
+	struct drm_radeon_driver_file_fields *radeon_priv;
+
+	/* Hrm ... the story of the offset ... So this function converts
+	 * the various ideas of what userland clients might have for an
+	 * offset in the card address space into an offset into the card
+	 * address space :) So with a sane client, it should just keep
+	 * the value intact and just do some boundary checking. However,
+	 * not all clients are sane. Some older clients pass us zero-based
+	 * offsets relative to the start of the framebuffer, and some may
+	 * assume the AGP aperture is appended to the framebuffer, so we
+	 * try to detect those cases and fix them up (a worked example of
+	 * this fixup follows the function).
+	 *
+	 * Note: It might be a good idea here to make sure the offset lands
+	 * in some "allowed" area to protect things like the PCIE GART...
+	 */
+
+	/* First, the best case, the offset already lands in either the
+	 * framebuffer or the GART mapped space
+	 */
+	if (radeon_check_offset(dev_priv, off))
+		return 0;
+
+	/* Ok, that didn't happen... now check if we have a zero-based
+	 * offset that fits in the framebuffer + gart space; if so, apply
+	 * the magic offset we get from SETPARAM or calculate from
+	 * fb_location.
+	 */
+	if (off < (dev_priv->fb_size + dev_priv->gart_size)) {
+		radeon_priv = file_priv->driver_priv;
+		off += radeon_priv->radeon_fb_delta;
+	}
+
+	/* Finally, assume we aimed at a GART offset if beyond the fb */
+	if (off > fb_end)
+		off = off - fb_end - 1 + dev_priv->gart_vm_start;
+
+	/* Now recheck and fail if out of bounds */
+	if (radeon_check_offset(dev_priv, off)) {
+		DRM_DEBUG("offset fixed up to 0x%x\n", (unsigned int)off);
+		*offset = off;
+		return 0;
+	}
+	return -EINVAL;
+}
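
The fixup above boils down to three cases: the offset already lands inside the framebuffer or GART aperture, it is a zero-based framebuffer offset, or it points past the end of the framebuffer and is really a GART offset. The standalone sketch below walks one zero-based offset through that arithmetic; the struct, field names and aperture addresses are invented for illustration and are not the driver's real layout.

#include <stdint.h>
#include <stdio.h>

/* Invented stand-ins for the driver-private fields used by the fixup;
 * every numeric value here is illustrative only. */
struct fake_dev {
	uint64_t fb_location;   /* card address where the framebuffer starts */
	uint64_t fb_size;       /* framebuffer aperture size */
	uint64_t gart_size;     /* GART aperture size */
	uint64_t gart_vm_start; /* card address where the GART aperture starts */
	uint64_t fb_delta;      /* stand-in for radeon_fb_delta */
};

/* Rough equivalent of radeon_check_offset(): does the offset land in
 * either the framebuffer or the GART aperture? */
static int in_range(const struct fake_dev *d, uint64_t off)
{
	uint64_t fb_end = d->fb_location + d->fb_size - 1;
	uint64_t gart_end = d->gart_vm_start + d->gart_size - 1;

	return (off >= d->fb_location && off <= fb_end) ||
	       (off >= d->gart_vm_start && off <= gart_end);
}

int main(void)
{
	struct fake_dev d = {
		.fb_location   = 0xd0000000, .fb_size   = 0x08000000,
		.gart_vm_start = 0xe0000000, .gart_size = 0x02000000,
		.fb_delta      = 0xd0000000, /* what a zero-based client leaves out */
	};
	uint64_t fb_end = d.fb_location + d.fb_size - 1;
	uint64_t off = 0x00100000;      /* zero-based offset from an old client */

	if (!in_range(&d, off)) {
		if (off < d.fb_size + d.gart_size)
			off += d.fb_delta;                        /* case 2 */
		if (off > fb_end)
			off = off - fb_end - 1 + d.gart_vm_start; /* case 3 */
	}
	printf("fixed-up offset 0x%llx, valid=%d\n",
	       (unsigned long long)off, in_range(&d, off));
	return 0;
}
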
+
+static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
+						     dev_priv,
+						     struct drm_file *file_priv,
+						     int id, u32 *data)
+{
+	switch (id) {
+
+	case RADEON_EMIT_PP_MISC:
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+		    &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) {
+			DRM_ERROR("Invalid depth buffer offset\n");
+			return -EINVAL;
+		}
+		break;
+
+	case RADEON_EMIT_PP_CNTL:
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+		    &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) {
+			DRM_ERROR("Invalid colour buffer offset\n");
+			return -EINVAL;
+		}
+		break;
+
+	case R200_EMIT_PP_TXOFFSET_0:
+	case R200_EMIT_PP_TXOFFSET_1:
+	case R200_EMIT_PP_TXOFFSET_2:
+	case R200_EMIT_PP_TXOFFSET_3:
+	case R200_EMIT_PP_TXOFFSET_4:
+	case R200_EMIT_PP_TXOFFSET_5:
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+						  &data[0])) {
+			DRM_ERROR("Invalid R200 texture offset\n");
+			return -EINVAL;
+		}
+		break;
+
+	case RADEON_EMIT_PP_TXFILTER_0:
+	case RADEON_EMIT_PP_TXFILTER_1:
+	case RADEON_EMIT_PP_TXFILTER_2:
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+		    &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) {
+			DRM_ERROR("Invalid R100 texture offset\n");
+			return -EINVAL;
+		}
+		break;
+
+	case R200_EMIT_PP_CUBIC_OFFSETS_0:
+	case R200_EMIT_PP_CUBIC_OFFSETS_1:
+	case R200_EMIT_PP_CUBIC_OFFSETS_2:
+	case R200_EMIT_PP_CUBIC_OFFSETS_3:
+	case R200_EMIT_PP_CUBIC_OFFSETS_4:
+	case R200_EMIT_PP_CUBIC_OFFSETS_5:{
+			int i;
+			for (i = 0; i < 5; i++) {
+				if (radeon_check_and_fixup_offset(dev_priv,
+								  file_priv,
+								  &data[i])) {
+					DRM_ERROR
+					    ("Invalid R200 cubic texture offset\n");
+					return -EINVAL;
+				}
+			}
+			break;
+		}
+
+	case RADEON_EMIT_PP_CUBIC_OFFSETS_T0:
+	case RADEON_EMIT_PP_CUBIC_OFFSETS_T1:
+	case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:{
+			int i;
+			for (i = 0; i < 5; i++) {
+				if (radeon_check_and_fixup_offset(dev_priv,
+								  file_priv,
+								  &data[i])) {
+					DRM_ERROR
+					    ("Invalid R100 cubic texture offset\n");
+					return -EINVAL;
+				}
+			}
+		}
+		break;
+
+	case R200_EMIT_VAP_CTL:{
+			RING_LOCALS;
+			BEGIN_RING(2);
+			OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
+			ADVANCE_RING();
+		}
+		break;
+
+	case RADEON_EMIT_RB3D_COLORPITCH:
+	case RADEON_EMIT_RE_LINE_PATTERN:
+	case RADEON_EMIT_SE_LINE_WIDTH:
+	case RADEON_EMIT_PP_LUM_MATRIX:
+	case RADEON_EMIT_PP_ROT_MATRIX_0:
+	case RADEON_EMIT_RB3D_STENCILREFMASK:
+	case RADEON_EMIT_SE_VPORT_XSCALE:
+	case RADEON_EMIT_SE_CNTL:
+	case RADEON_EMIT_SE_CNTL_STATUS:
+	case RADEON_EMIT_RE_MISC:
+	case RADEON_EMIT_PP_BORDER_COLOR_0:
+	case RADEON_EMIT_PP_BORDER_COLOR_1:
+	case RADEON_EMIT_PP_BORDER_COLOR_2:
+	case RADEON_EMIT_SE_ZBIAS_FACTOR:
+	case RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT:
+	case RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED:
+	case R200_EMIT_PP_TXCBLEND_0:
+	case R200_EMIT_PP_TXCBLEND_1:
+	case R200_EMIT_PP_TXCBLEND_2:
+	case R200_EMIT_PP_TXCBLEND_3:
+	case R200_EMIT_PP_TXCBLEND_4:
+	case R200_EMIT_PP_TXCBLEND_5:
+	case R200_EMIT_PP_TXCBLEND_6:
+	case R200_EMIT_PP_TXCBLEND_7:
+	case R200_EMIT_TCL_LIGHT_MODEL_CTL_0:
+	case R200_EMIT_TFACTOR_0:
+	case R200_EMIT_VTX_FMT_0:
+	case R200_EMIT_MATRIX_SELECT_0:
+	case R200_EMIT_TEX_PROC_CTL_2:
+	case R200_EMIT_TCL_UCP_VERT_BLEND_CTL:
+	case R200_EMIT_PP_TXFILTER_0:
+	case R200_EMIT_PP_TXFILTER_1:
+	case R200_EMIT_PP_TXFILTER_2:
+	case R200_EMIT_PP_TXFILTER_3:
+	case R200_EMIT_PP_TXFILTER_4:
+	case R200_EMIT_PP_TXFILTER_5:
+	case R200_EMIT_VTE_CNTL:
+	case R200_EMIT_OUTPUT_VTX_COMP_SEL:
+	case R200_EMIT_PP_TAM_DEBUG3:
+	case R200_EMIT_PP_CNTL_X:
+	case R200_EMIT_RB3D_DEPTHXY_OFFSET:
+	case R200_EMIT_RE_AUX_SCISSOR_CNTL:
+	case R200_EMIT_RE_SCISSOR_TL_0:
+	case R200_EMIT_RE_SCISSOR_TL_1:
+	case R200_EMIT_RE_SCISSOR_TL_2:
+	case R200_EMIT_SE_VAP_CNTL_STATUS:
+	case R200_EMIT_SE_VTX_STATE_CNTL:
+	case R200_EMIT_RE_POINTSIZE:
+	case R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0:
+	case R200_EMIT_PP_CUBIC_FACES_0:
+	case R200_EMIT_PP_CUBIC_FACES_1:
+	case R200_EMIT_PP_CUBIC_FACES_2:
+	case R200_EMIT_PP_CUBIC_FACES_3:
+	case R200_EMIT_PP_CUBIC_FACES_4:
+	case R200_EMIT_PP_CUBIC_FACES_5:
+	case RADEON_EMIT_PP_TEX_SIZE_0:
+	case RADEON_EMIT_PP_TEX_SIZE_1:
+	case RADEON_EMIT_PP_TEX_SIZE_2:
+	case R200_EMIT_RB3D_BLENDCOLOR:
+	case R200_EMIT_TCL_POINT_SPRITE_CNTL:
+	case RADEON_EMIT_PP_CUBIC_FACES_0:
+	case RADEON_EMIT_PP_CUBIC_FACES_1:
+	case RADEON_EMIT_PP_CUBIC_FACES_2:
+	case R200_EMIT_PP_TRI_PERF_CNTL:
+	case R200_EMIT_PP_AFS_0:
+	case R200_EMIT_PP_AFS_1:
+	case R200_EMIT_ATF_TFACTOR:
+	case R200_EMIT_PP_TXCTLALL_0:
+	case R200_EMIT_PP_TXCTLALL_1:
+	case R200_EMIT_PP_TXCTLALL_2:
+	case R200_EMIT_PP_TXCTLALL_3:
+	case R200_EMIT_PP_TXCTLALL_4:
+	case R200_EMIT_PP_TXCTLALL_5:
+	case R200_EMIT_VAP_PVS_CNTL:
+		/* These packets don't contain memory offsets */
+		break;
+
+	default:
+		DRM_ERROR("Unknown state packet ID %d\n", id);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
+						     dev_priv,
+						     struct drm_file *file_priv,
+						     drm_radeon_kcmd_buffer_t *
+						     cmdbuf,
+						     unsigned int *cmdsz)
+{
+	u32 *cmd = (u32 *) cmdbuf->buf;
+	u32 offset, narrays;
+	int count, i, k;
+
+	*cmdsz = 2 + ((cmd[0] & RADEON_CP_PACKET_COUNT_MASK) >> 16);
+
+	if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) {
+		DRM_ERROR("Not a type 3 packet\n");
+		return -EINVAL;
+	}
+
+	if (4 * *cmdsz > cmdbuf->bufsz) {
+		DRM_ERROR("Packet size larger than size of data provided\n");
+		return -EINVAL;
+	}
+
+	switch (cmd[0] & 0xff00) {
+	/* XXX Are there old drivers needing other packets? */
+
+	case RADEON_3D_DRAW_IMMD:
+	case RADEON_3D_DRAW_VBUF:
+	case RADEON_3D_DRAW_INDX:
+	case RADEON_WAIT_FOR_IDLE:
+	case RADEON_CP_NOP:
+	case RADEON_3D_CLEAR_ZMASK:
+/*	case RADEON_CP_NEXT_CHAR:
+	case RADEON_CP_PLY_NEXTSCAN:
+	case RADEON_CP_SET_SCISSORS: */ /* probably safe but will never need them? */
+		/* these packets are safe */
+		break;
+
+	case RADEON_CP_3D_DRAW_IMMD_2:
+	case RADEON_CP_3D_DRAW_VBUF_2:
+	case RADEON_CP_3D_DRAW_INDX_2:
+	case RADEON_3D_CLEAR_HIZ:
+		/* safe but r200 only */
+		if (dev_priv->microcode_version != UCODE_R200) {
+			DRM_ERROR("Invalid 3d packet for r100-class chip\n");
+			return -EINVAL;
+		}
+		break;
+
+	case RADEON_3D_LOAD_VBPNTR:
+		count = (cmd[0] >> 16) & 0x3fff;
+
+		if (count > 18) { /* 12 arrays max */
+			DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
+				  count);
+			return -EINVAL;
+		}
+
+		/* carefully check packet contents */
+		narrays = cmd[1] & ~0xc000;
+		k = 0;
+		i = 2;
+		while ((k < narrays) && (i < (count + 2))) {
+			i++;		/* skip attribute field */
+			if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+							  &cmd[i])) {
+				DRM_ERROR
+				    ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
+				     k, i);
+				return -EINVAL;
+			}
+			k++;
+			i++;
+			if (k == narrays)
+				break;
+			/* have one more to process, they come in pairs */
+			if (radeon_check_and_fixup_offset(dev_priv,
+							  file_priv, &cmd[i])) {
+				DRM_ERROR
+				    ("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
+				     k, i);
+				return -EINVAL;
+			}
+			k++;
+			i++;
+		}
+		/* do the counts match what we expect? */
+		if ((k != narrays) || (i != (count + 2))) {
+			DRM_ERROR
+			    ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+2=%d).\n",
+			      k, i, narrays, count + 2);
+			return -EINVAL;
+		}
+		break;
+
+	case RADEON_3D_RNDR_GEN_INDX_PRIM:
+		if (dev_priv->microcode_version != UCODE_R100) {
+			DRM_ERROR("Invalid 3d packet for r200-class chip\n");
+			return -EINVAL;
+		}
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[1])) {
+			DRM_ERROR("Invalid rndr_gen_indx offset\n");
+			return -EINVAL;
+		}
+		break;
+
+	case RADEON_CP_INDX_BUFFER:
+		if (dev_priv->microcode_version != UCODE_R200) {
+			DRM_ERROR("Invalid 3d packet for r100-class chip\n");
+			return -EINVAL;
+		}
+		if ((cmd[1] & 0x8000ffff) != 0x80000810) {
+			DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
+			return -EINVAL;
+		}
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[2])) {
+			DRM_ERROR("Invalid indx_buffer offset %08X\n", cmd[2]);
+			return -EINVAL;
+		}
+		break;
+
+	case RADEON_CNTL_HOSTDATA_BLT:
+	case RADEON_CNTL_PAINT_MULTI:
+	case RADEON_CNTL_BITBLT_MULTI:
+		/* MSB of opcode: next DWORD GUI_CNTL */
+		if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
+			      | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+			offset = cmd[2] << 10;
+			if (radeon_check_and_fixup_offset
+			    (dev_priv, file_priv, &offset)) {
+				DRM_ERROR("Invalid first packet offset\n");
+				return -EINVAL;
+			}
+			cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10;
+		}
+
+		if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
+		    (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+			offset = cmd[3] << 10;
+			if (radeon_check_and_fixup_offset
+			    (dev_priv, file_priv, &offset)) {
+				DRM_ERROR("Invalid second packet offset\n");
+				return -EINVAL;
+			}
+			cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10;
+		}
+		break;
+
+	default:
+		DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00);
+		return -EINVAL;
+	}
+
+	return 0;
+}
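
For reference, the size check at the top of this function is just arithmetic on the type-3 header: as the *cmdsz computation implies, the count field holds the number of data dwords minus one, so the whole packet spans count + 2 dwords including the header. A small standalone sketch follows; the mask and type constants are written out as assumed literal values, and the 0x2200 opcode field is made up purely for illustration.

#include <stdint.h>
#include <stdio.h>

/* Assumed literal values for the CP packet constants used above. */
#define CP_PACKET3_TYPE      0xC0000000u
#define CP_PACKET_COUNT_MASK 0x3fff0000u

int main(void)
{
	/* Hypothetical type-3 header: a made-up opcode field of 0x2200 and a
	 * count field of 5, i.e. six data dwords follow the header. */
	uint32_t header = CP_PACKET3_TYPE | 0x00002200u | (5u << 16);

	unsigned int cmdsz  = 2 + ((header & CP_PACKET_COUNT_MASK) >> 16);
	unsigned int opcode = header & 0xff00;

	if ((header & 0xc0000000u) != CP_PACKET3_TYPE)
		puts("not a type 3 packet");
	else
		printf("opcode=0x%04x, packet spans %u dwords\n", opcode, cmdsz);
	return 0;
}
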
+
+/* ================================================================
+ * CP hardware state programming functions
+ */
+
+static __inline__ void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv,
+					     struct drm_clip_rect * box)
+{
+	RING_LOCALS;
+
+	DRM_DEBUG("   box:  x1=%d y1=%d  x2=%d y2=%d\n",
+		  box->x1, box->y1, box->x2, box->y2);
+
+	BEGIN_RING(4);
+	OUT_RING(CP_PACKET0(RADEON_RE_TOP_LEFT, 0));
+	OUT_RING((box->y1 << 16) | box->x1);
+	OUT_RING(CP_PACKET0(RADEON_RE_WIDTH_HEIGHT, 0));
+	OUT_RING(((box->y2 - 1) << 16) | (box->x2 - 1));
+	ADVANCE_RING();
+}
+
+/* Emit 1.1 state
+ */
+static int radeon_emit_state(drm_radeon_private_t * dev_priv,
+			     struct drm_file *file_priv,
+			     drm_radeon_context_regs_t * ctx,
+			     drm_radeon_texture_regs_t * tex,
+			     unsigned int dirty)
+{
+	RING_LOCALS;
+	DRM_DEBUG("dirty=0x%08x\n", dirty);
+
+	if (dirty & RADEON_UPLOAD_CONTEXT) {
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+						  &ctx->rb3d_depthoffset)) {
+			DRM_ERROR("Invalid depth buffer offset\n");
+			return -EINVAL;
+		}
+
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+						  &ctx->rb3d_coloroffset)) {
+			DRM_ERROR("Invalid colour buffer offset\n");
+			return -EINVAL;
+		}
+
+		BEGIN_RING(14);
+		OUT_RING(CP_PACKET0(RADEON_PP_MISC, 6));
+		OUT_RING(ctx->pp_misc);
+		OUT_RING(ctx->pp_fog_color);
+		OUT_RING(ctx->re_solid_color);
+		OUT_RING(ctx->rb3d_blendcntl);
+		OUT_RING(ctx->rb3d_depthoffset);
+		OUT_RING(ctx->rb3d_depthpitch);
+		OUT_RING(ctx->rb3d_zstencilcntl);
+		OUT_RING(CP_PACKET0(RADEON_PP_CNTL, 2));
+		OUT_RING(ctx->pp_cntl);
+		OUT_RING(ctx->rb3d_cntl);
+		OUT_RING(ctx->rb3d_coloroffset);
+		OUT_RING(CP_PACKET0(RADEON_RB3D_COLORPITCH, 0));
+		OUT_RING(ctx->rb3d_colorpitch);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_VERTFMT) {
+		BEGIN_RING(2);
+		OUT_RING(CP_PACKET0(RADEON_SE_COORD_FMT, 0));
+		OUT_RING(ctx->se_coord_fmt);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_LINE) {
+		BEGIN_RING(5);
+		OUT_RING(CP_PACKET0(RADEON_RE_LINE_PATTERN, 1));
+		OUT_RING(ctx->re_line_pattern);
+		OUT_RING(ctx->re_line_state);
+		OUT_RING(CP_PACKET0(RADEON_SE_LINE_WIDTH, 0));
+		OUT_RING(ctx->se_line_width);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_BUMPMAP) {
+		BEGIN_RING(5);
+		OUT_RING(CP_PACKET0(RADEON_PP_LUM_MATRIX, 0));
+		OUT_RING(ctx->pp_lum_matrix);
+		OUT_RING(CP_PACKET0(RADEON_PP_ROT_MATRIX_0, 1));
+		OUT_RING(ctx->pp_rot_matrix_0);
+		OUT_RING(ctx->pp_rot_matrix_1);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_MASKS) {
+		BEGIN_RING(4);
+		OUT_RING(CP_PACKET0(RADEON_RB3D_STENCILREFMASK, 2));
+		OUT_RING(ctx->rb3d_stencilrefmask);
+		OUT_RING(ctx->rb3d_ropcntl);
+		OUT_RING(ctx->rb3d_planemask);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_VIEWPORT) {
+		BEGIN_RING(7);
+		OUT_RING(CP_PACKET0(RADEON_SE_VPORT_XSCALE, 5));
+		OUT_RING(ctx->se_vport_xscale);
+		OUT_RING(ctx->se_vport_xoffset);
+		OUT_RING(ctx->se_vport_yscale);
+		OUT_RING(ctx->se_vport_yoffset);
+		OUT_RING(ctx->se_vport_zscale);
+		OUT_RING(ctx->se_vport_zoffset);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_SETUP) {
+		BEGIN_RING(4);
+		OUT_RING(CP_PACKET0(RADEON_SE_CNTL, 0));
+		OUT_RING(ctx->se_cntl);
+		OUT_RING(CP_PACKET0(RADEON_SE_CNTL_STATUS, 0));
+		OUT_RING(ctx->se_cntl_status);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_MISC) {
+		BEGIN_RING(2);
+		OUT_RING(CP_PACKET0(RADEON_RE_MISC, 0));
+		OUT_RING(ctx->re_misc);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_TEX0) {
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+						  &tex[0].pp_txoffset)) {
+			DRM_ERROR("Invalid texture offset for unit 0\n");
+			return -EINVAL;
+		}
+
+		BEGIN_RING(9);
+		OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_0, 5));
+		OUT_RING(tex[0].pp_txfilter);
+		OUT_RING(tex[0].pp_txformat);
+		OUT_RING(tex[0].pp_txoffset);
+		OUT_RING(tex[0].pp_txcblend);
+		OUT_RING(tex[0].pp_txablend);
+		OUT_RING(tex[0].pp_tfactor);
+		OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_0, 0));
+		OUT_RING(tex[0].pp_border_color);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_TEX1) {
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+						  &tex[1].pp_txoffset)) {
+			DRM_ERROR("Invalid texture offset for unit 1\n");
+			return -EINVAL;
+		}
+
+		BEGIN_RING(9);
+		OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_1, 5));
+		OUT_RING(tex[1].pp_txfilter);
+		OUT_RING(tex[1].pp_txformat);
+		OUT_RING(tex[1].pp_txoffset);
+		OUT_RING(tex[1].pp_txcblend);
+		OUT_RING(tex[1].pp_txablend);
+		OUT_RING(tex[1].pp_tfactor);
+		OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_1, 0));
+		OUT_RING(tex[1].pp_border_color);
+		ADVANCE_RING();
+	}
+
+	if (dirty & RADEON_UPLOAD_TEX2) {
+		if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+						  &tex[2].pp_txoffset)) {
+			DRM_ERROR("Invalid texture offset for unit 2\n");
+			return -EINVAL;
+		}
+
+		BEGIN_RING(9);
+		OUT_RING(CP_PACKET0(RADEON_PP_TXFILTER_2, 5));
+		OUT_RING(tex[2].pp_txfilter);
+		OUT_RING(tex[2].pp_txformat);
+		OUT_RING(tex[2].pp_txoffset);
+		OUT_RING(tex[2].pp_txcblend);
+		OUT_RING(tex[2].pp_txablend);
+		OUT_RING(tex[2].pp_tfactor);
+		OUT_RING(CP_PACKET0(RADEON_PP_BORDER_COLOR_2, 0));
+		OUT_RING(tex[2].pp_border_color);
+		ADVANCE_RING();
+	}
+
+	return 0;
+}
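
All of the emit paths above follow the same BEGIN_RING / OUT_RING / ADVANCE_RING rhythm: reserve space in the CP ring, write dwords at the software write pointer, then publish the updated pointer. The sketch below is an illustration of that idea only, with a fixed-size array and a wrapping write index; the real macros also track reserved space, wait for ring room, and kick the pointer to a hardware register, all of which is omitted here.

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 16                     /* illustrative, power of two */

static uint32_t ring[RING_SIZE];
static unsigned int ring_wr;             /* software write pointer */

/* Minimal stand-in for OUT_RING(): store one dword and advance/wrap the
 * write pointer. */
static void out_ring(uint32_t dword)
{
	ring[ring_wr] = dword;
	ring_wr = (ring_wr + 1) & (RING_SIZE - 1);
}

int main(void)
{
	out_ring(0x12340000);   /* made-up packet header */
	out_ring(0xdeadbeef);   /* made-up payload dword */
	printf("ring[0]=0x%08x ring[1]=0x%08x write pointer=%u\n",
	       ring[0], ring[1], ring_wr);
	return 0;
}
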
+
+/* Emit 1.2 state
+ */
+static int radeon_emit_state2(drm_radeon_private_t * dev_priv,
+			      struct drm_file *file_priv,
+			      drm_radeon_state_t * state)
+{
+	RING_LOCALS;
+
+	if (state->dirty & RADEON_UPLOAD_ZBIAS) {
+		BEGIN_RING(3);
+		OUT_RING(CP_PACKET0(RADEON_SE_ZBIAS_FACTOR, 1));
+		OUT_RING(state->context2.se_zbias_factor);
+		OUT_RING(state->context2.se_zbias_constant);
+		ADVANCE_RING();
+	}
+
+	return radeon_emit_state(dev_priv, file_priv, &state->context,
+				 state->tex, state->dirty);
+}
+
+/* New (1.3) state mechanism.  3 commands (packet, scalar, vector) in
+ * 1.3 cmdbuffers allow all previous state to be updated as well as
+ * the tcl scalar and vector areas.
+ */
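
Before the table itself: each entry below pairs a starting register with the number of dwords in that register block, which is exactly what a single type-0 packet can write. The sketch below shows how such a (start, len) pair could be folded into one packet-0 style header; the encoding helper and the example register address are assumptions for illustration, not the driver's CP_PACKET0 macro.

#include <stdint.h>
#include <stdio.h>

/* Assumed packet-0 style header layout: (count - 1) in bits 16..29 and the
 * register address divided by four in the low bits.  This mirrors the shape
 * of the headers emitted above but is not the driver's CP_PACKET0 macro. */
static uint32_t packet0_header(uint32_t start_reg, int len)
{
	return ((uint32_t)(len - 1) << 16) | (start_reg >> 2);
}

struct state_packet { uint32_t start; int len; const char *name; };

int main(void)
{
	/* Hypothetical table entry: a block of 7 registers starting at 0x1c14. */
	struct state_packet p = { 0x1c14, 7, "EXAMPLE_STATE_BLOCK" };

	printf("%s: header 0x%08x, followed by %d data dwords\n",
	       p.name, packet0_header(p.start, p.len), p.len);
	return 0;
}
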
+static struct {
+	int start;
+	int len;
+	const char *name;
+} packet[RADEON_MAX_STATE_PACKETS] = {
+	{RADEON_PP_MISC, 7, "RADEON_PP_MISC"},
+	{RADEON_PP_CNTL, 3, "RADEON_PP_CNTL"},
+	{RADEON_RB3D_COLORPITCH, 1, "RADEON_RB3D_COLORPITCH"},
+	{RADEON_RE_LINE_PATTERN, 2, "RADEON_RE_LINE_PATTERN"},
+	{RADEON_SE_LINE_WIDTH, 1, "RADEON_SE_LINE_WIDTH"},
+	{RADEON_PP_LUM_MATRIX, 1, "RADEON_PP_LUM_MATRIX"},
+	{RADEON_PP_ROT_MATRIX_0, 2, "RADEON_PP_ROT_MATRIX_0"},
+	{RADEON_RB3D_STENCILREFMASK, 3, "RADEON_RB3D_STENCILREFMASK"},
+	{RADEON_SE_VPORT_XSCALE, 6, "RADEON_SE_VPORT_XSCALE"},
+	{RADEON_SE_CNTL, 2, "RADEON_SE_CNTL"},
+	{RADEON_SE_CNTL_STATUS, 1, "RADEON_SE_CNTL_STATUS"},
+	{RADEON_RE_MISC, 1, "RADEON_RE_MISC"},
+	{RADEON_PP_TXFILTER_0, 6, "RADEON_PP_TXFILTER_0"},
+	{RADEON_PP_BORDER_COLOR_0, 1, "RADEON_PP_BORDER_COLOR_0"},
+	{RADEON_PP_TXFILTER_1, 6, "RADEON_PP_TXFILTER_1"},
+	{RADEON_PP_BORDER_COLOR_1, 1, "RADEON_PP_BORDER_COLOR_1"},
+	{RADEON_PP_TXFILTER_2, 6, "RADEON_PP_TXFILTER_2"},
+	{RADEON_PP_BORDER_COLOR_2, 1, "RADEON_PP_BORDER_COLOR_2"},
+	{RADEON_SE_ZBIAS_FACTOR, 2, "RADEON_SE_ZBIAS_FACTOR"},
+	{RADEON_SE_TCL_OUTPUT_VTX_FMT, 11, "RADEON_SE_TCL_OUTPUT_VTX_FMT"},
+	{RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED, 17,
+		    "RADEON_SE_TCL_MATERIAL_EMMISSIVE_RED"},
+	{R200_PP_TXCBLEND_0, 4, "R200_PP_TXCBLEND_0"},
+	{R200_PP_TXCBLEND_1, 4, "R200_PP_TXCBLEND_1"},
+	{R200_PP_TXCBLEND_2, 4, "R200_PP_TXCBLEND_2"},
+	{R200_PP_TXCBLEND_3, 4, "R200_PP_TXCBLEND_3"},
+	{R200_PP_TXCBLEND_4, 4, "R200_PP_TXCBLEND_4"},
+	{R200_PP_TXCBLEND_5, 4, "R200_PP_TXCBLEND_5"},
+	{R200_PP_TXCBLEND_6, 4, "R200_PP_TXCBLEND_6"},
+	{R200_PP_TXCBLEND_7, 4, "R200_PP_TXCBLEND_7"},
+	{R200_SE_TCL_LIGHT_MODEL_CTL_0, 6, "R200_SE_TCL_LIGHT_MODEL_CTL_0"},
+	{R200_PP_TFACTOR_0, 6, "R200_PP_TFACTOR_0"},
+	{R200_SE_VTX_FMT_0, 4, "R200_SE_VTX_FMT_0"},
+	{R200_SE_VAP_CNTL, 1, "R200_SE_VAP_CNTL"},
+	{R200_SE_TCL_MATRIX_SEL_0, 5, "R200_SE_TCL_MATRIX_SEL_0"},
+	{R200_SE_TCL_TEX_PROC_CTL_2, 5, "R200_SE_TCL_TEX_PROC_CTL_2"},
+	{R200_SE_TCL_UCP_VERT_BLEND_CTL, 1, "R200_SE_TCL_UCP_VERT_BLEND_CTL"},
+	{R200_PP_TXFILTER_0, 6, "R200_PP_TXFILTER_0"},
+	{R200_PP_TXFILTER_1, 6, "R200_PP_TXFILTER_1"},
+	{R200_PP_TXFILTER_2, 6, "R200_PP_TXFILTER_2"},
+	{R200_PP_TXFILTER_3, 6, "R200_PP_TXFILTER_3"},
+	{R200_PP_TXFILTER_4, 6, "R200_PP_TXFILTER_4"},
+	{R200_PP_TXFILTER_5, 6, "R200_PP_TXFILTER_5"},
+	{R200_PP_TXOFFSET_0, 1, "R200_PP_TXOFFSET_0"},
+	{R200_PP_TXOFFSET_1, 1, "R200_PP_TXOFFSET_1"},
+	{R200_PP_TXOFFSET_2, 1, "R200_PP_TXOFFSET_2"},
+	{R200_PP_TXOFFSET_3, 1, "R200_PP_TXOFFSET_3"},
+	{R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4"},
+	{R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5"},
+	{R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL"},
+	{R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1,
+	 "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},
+	{R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3"},
+	{R200_PP_CNTL_X, 1, "R200_PP_CNTL_X"},
+	{R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET"},
+	{R200_RE_AUX_SCISSOR_CNTL, 1, "R200_RE_AUX_SCISSOR_CNTL"},
+	{R200_RE_SCISSOR_TL_0, 2, "R200_RE_SCISSOR_TL_0"},
+	{R200_RE_SCISSOR_TL_1, 2, "R200_RE_SCISSOR_TL_1"},
+	{R200_RE_SCISSOR_TL_2, 2, "R200_RE_SCISSOR_TL_2"},
+	{R200_SE_VAP_CNTL_STATUS, 1, "R200_SE_VAP_CNTL_STATUS"},
+	{R200_SE_VTX_STATE_CNTL, 1, "R200_SE_VTX_STATE_CNTL"},
+	{R200_RE_POINTSIZE, 1, "R200_RE_POINTSIZE"},
+	{R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4,
+		    "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"},
+	{R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"},	/* 61 */
+	{R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */
+	{R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"},
+	{R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1"},
+	{R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2"},
+	{R200_PP_CUBIC_OFFSET_F1_2, 5, "R200_PP_CUBIC_OFFSET_F1_2"},
+	{R200_PP_CUBIC_FACES_3, 1, "R200_PP_CUBIC_FACES_3"},
+	{R200_PP_CUBIC_OFFSET_F1_3, 5, "R200_PP_CUBIC_OFFSET_F1_3"},
+	{R200_PP_CUBIC_FACES_4, 1, "R200_PP_CUBIC_FACES_4"},
+	{R200_PP_CUBIC_OFFSET_F1_4, 5, "R200_PP_CUBIC_OFFSET_F1_4"},
+	{R200_PP_CUBIC_FACES_5, 1, "R200_PP_CUBIC_FACES_5"},
+	{R200_PP_CUBIC_OFFSET_F1_5, 5, "R200_PP_CUBIC_OFFSET_F1_5"},
+	{RADEON_PP_TEX_SIZE_0, 2, "RADEON_PP_TEX_SIZE_0"},
+	{RADEON_PP_TEX_SIZE_1, 2, "RADEON_PP_TEX_SIZE_1"},
+	{RADEON_PP_TEX_SIZE_2, 2, "RADEON_PP_TEX_SIZE_2"},
+	{R200_RB3D_BLENDCOLOR, 3, "R200_RB3D_BLENDCOLOR"},
+	{R200_SE_TCL_POINT_SPRITE_CNTL, 1, "R200_SE_TCL_POINT_SPRITE_CNTL"},
+	{RADEON_PP_CUBIC_FACES_0, 1, "RADEON_PP_CUBIC_FACES_0"},
+	{RADEON_PP_CUBIC_OFFSET_T0_0, 5, "RADEON_PP_CUBIC_OFFSET_T0_0"},
+	{RADEON_PP_CUBIC_FACES_1, 1, "RADEON_PP_CUBIC_FACES_1"},
+	{RADEON_PP_CUBIC_OFFSET_T1_0, 5, "RADEON_PP_CUBIC_OFFSET_T1_0"},
+	{RADEON_PP_CUBIC_FACES_2, 1, "RADEON_PP_CUBIC_FACES_2"},
+	{RADEON_PP_CUBIC_OFFSET_T2_0, 5, "RADEON_PP_CUBIC_OFFSET_T2_0"},
+	{R200_PP_TRI_PERF, 2, "R200_PP_TRI_PERF"},
+	{R200_PP_AFS_0, 32, "R200_PP_AFS_0"},     /* 85 */
+	{R200_PP_AFS_1, 32, "R200_PP_AFS_1"},
+	{R200_PP_TFACTOR_0, 8, "R200_ATF_TFACTOR"},
+	{R200_PP_TXFILTER_0, 8, "R200_PP_TXCTLALL_0"},
+	{R200_PP_TXFILTER_1, 8, "R200_PP_TXCTLALL_1"},
+	{R200_PP_TXFILTER_2, 8, "R200_PP_TXCTLALL_2"},
+	{R200_PP_TXFILTER_3, 8, "R200_PP_TXCTLALL_3"},
+	{R200_PP_TXFILTER_4, 8, "R200_PP_TXCTLALL_4"},
+	{R200_PP_TXFILTER_5, 8, "R200_PP_TXCTLALL_5"},
+	{R200_VAP_PVS_CNTL_1, 2, "R200_VAP_PVS_CNTL"},
+};
+
+/* ================================================================
+ * Performance monitoring functions
+ */
+
+static void radeon_clear_box(drm_radeon_private_t * dev_priv,
+			     int x, int y, int w, int h, int r, int g, int b)
+{
+	u32 color;
+	RING_LOCALS;
+
+	x += dev_priv->sarea_priv->boxes[0].x1;
+	y += dev_priv->sarea_priv->boxes[0].y1;
+
+	switch (dev_priv->color_fmt) {
+	case RADEON_COLOR_FORMAT_RGB565:
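+		/* truncate 8-bit r/g/b to 5:6:5 and pack */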
+		color = (((r & 0xf8) << 8) |
+			 ((g & 0xfc) << 3) | ((b & 0xf8) >> 3));
+		break;
+	case RADEON_COLOR_FORMAT_ARGB8888:
+	default:
+		color = (((0xff) << 24) | (r << 16) | (g << 8) | b);
+		break;
+	}
+
+	BEGIN_RING(4);
+	RADEON_WAIT_UNTIL_3D_IDLE();
+	OUT_RING(CP_PACKET0(RADEON_DP_WRITE_MASK, 0));
+	OUT_RING(0xffffffff);
+	ADVANCE_RING();
+
+	BEGIN_RING(6);
+
+	OUT_RING(CP_PACKET3(RADEON_CNTL_PAINT_MULTI, 4));
+	OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+		 RADEON_GMC_BRUSH_SOLID_COLOR |
+		 (dev_priv->color_fmt << 8) |
+		 RADEON_GMC_SRC_DATATYPE_COLOR |
+		 RADEON_ROP3_P | RADEON_GMC_CLR_CMP_CNTL_DIS);
+
+	if (dev_priv->sarea_priv->pfCurrentPage == 1) {
+		OUT_RING(dev_priv->front_pitch_offset);
+	} else {
+		OUT_RING(dev_priv->back_pitch_offset);
+	}
+
+	OUT_RING(color);
+
+	OUT_RING((x << 16) | y);
+	OUT_RING((w << 16) | h);
+
+	ADVANCE_RING();
+}
+
+static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv)
+{
+	/* Collapse various things into a wait flag -- trying to
+	 * guess if userspace slept -- better just to have them tell us.
+	 */
+	if (dev_priv->stats.last_frame_reads > 1 ||
+	    dev_priv->stats.last_clear_reads > dev_priv->stats.clears) {
+		dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+	}
+
+	if (dev_priv->stats.freelist_loops) {
+		dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
+	}
+
+	/* Purple box for page flipping
+	 */
+	if (dev_priv->stats.boxes & RADEON_BOX_FLIP)
+		radeon_clear_box(dev_priv, 4, 4, 8, 8, 255, 0, 255);
+
+	/* Red box if we have to wait for idle at any point
+	 */
+	if (dev_priv->stats.boxes & RADEON_BOX_WAIT_IDLE)
+		radeon_clear_box(dev_priv, 16, 4, 8, 8, 255, 0, 0);
+
+	/* Blue box: lost context?
+	 */
+
+	/* Yellow box for texture swaps
+	 */
+	if (dev_priv->stats.boxes & RADEON_BOX_TEXTURE_LOAD)
+		radeon_clear_box(dev_priv, 40, 4, 8, 8, 255, 255, 0);
+
+	/* Green box if hardware never idles (as far as we can tell)
+	 */
+	if (!(dev_priv->stats.boxes & RADEON_BOX_DMA_IDLE))
+		radeon_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
+
+	/* Draw bars indicating number of buffers allocated
+	 * (not a great measure, easily confused)
+	 */
+	if (dev_priv->stats.requested_bufs) {
+		if (dev_priv->stats.requested_bufs > 100)
+			dev_priv->stats.requested_bufs = 100;
+
+		radeon_clear_box(dev_priv, 4, 16,
+				 dev_priv->stats.requested_bufs, 4,
+				 196, 128, 128);
+	}
+
+	memset(&dev_priv->stats, 0, sizeof(dev_priv->stats));
+
+}
+
+/* ================================================================
+ * CP command dispatch functions
+ */
+
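+/* Clear the front and/or back color buffers with 2D solid fills, and the
+ * depth/stencil buffer either through the hyper-z fast clear path (when
+ * RADEON_CLEAR_FASTZ is requested) or by rendering a quad over each clip
+ * rect with only the depth/stencil planes enabled.
+ */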
+static void radeon_cp_dispatch_clear(struct drm_device * dev,
+				     drm_radeon_clear_t * clear,
+				     drm_radeon_clear_rect_t * depth_boxes)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear;
+	int nbox = sarea_priv->nbox;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
+	unsigned int flags = clear->flags;
+	u32 rb3d_cntl = 0, rb3d_stencilrefmask = 0;
+	int i;
+	RING_LOCALS;
+	DRM_DEBUG("flags = 0x%x\n", flags);
+
+	dev_priv->stats.clears++;
+
+	if (dev_priv->sarea_priv->pfCurrentPage == 1) {
+		unsigned int tmp = flags;
+
+		flags &= ~(RADEON_FRONT | RADEON_BACK);
+		if (tmp & RADEON_FRONT)
+			flags |= RADEON_BACK;
+		if (tmp & RADEON_BACK)
+			flags |= RADEON_FRONT;
+	}
+
+	if (flags & (RADEON_FRONT | RADEON_BACK)) {
+
+		BEGIN_RING(4);
+
+		/* Ensure the 3D stream is idle before doing a
+		 * 2D fill to clear the front or back buffer.
+		 */
+		RADEON_WAIT_UNTIL_3D_IDLE();
+
+		OUT_RING(CP_PACKET0(RADEON_DP_WRITE_MASK, 0));
+		OUT_RING(clear->color_mask);
+
+		ADVANCE_RING();
+
+		/* Make sure we restore the 3D state next time.
+		 */
+		dev_priv->sarea_priv->ctx_owner = 0;
+
+		for (i = 0; i < nbox; i++) {
+			int x = pbox[i].x1;
+			int y = pbox[i].y1;
+			int w = pbox[i].x2 - x;
+			int h = pbox[i].y2 - y;
+
+			DRM_DEBUG("%d,%d-%d,%d flags 0x%x\n",
+				  x, y, w, h, flags);
+
+			if (flags & RADEON_FRONT) {
+				BEGIN_RING(6);
+
+				OUT_RING(CP_PACKET3
+					 (RADEON_CNTL_PAINT_MULTI, 4));
+				OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+					 RADEON_GMC_BRUSH_SOLID_COLOR |
+					 (dev_priv->color_fmt << 8) |
+					 RADEON_GMC_SRC_DATATYPE_COLOR |
+					 RADEON_ROP3_P |
+					 RADEON_GMC_CLR_CMP_CNTL_DIS);
+
+				OUT_RING(dev_priv->front_pitch_offset);
+				OUT_RING(clear->clear_color);
+
+				OUT_RING((x << 16) | y);
+				OUT_RING((w << 16) | h);
+
+				ADVANCE_RING();
+			}
+
+			if (flags & RADEON_BACK) {
+				BEGIN_RING(6);
+
+				OUT_RING(CP_PACKET3
+					 (RADEON_CNTL_PAINT_MULTI, 4));
+				OUT_RING(RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+					 RADEON_GMC_BRUSH_SOLID_COLOR |
+					 (dev_priv->color_fmt << 8) |
+					 RADEON_GMC_SRC_DATATYPE_COLOR |
+					 RADEON_ROP3_P |
+					 RADEON_GMC_CLR_CMP_CNTL_DIS);
+
+				OUT_RING(dev_priv->back_pitch_offset);
+				OUT_RING(clear->clear_color);
+
+				OUT_RING((x << 16) | y);
+				OUT_RING((w << 16) | h);
+
+				ADVANCE_RING();
+			}
+		}
+	}
+
+	/* hyper z clear */
+	/* no docs available, based on reverse engineering by Stephane Marchesin */
+	if ((flags & (RADEON_DEPTH | RADEON_STENCIL))
+	    && (flags & RADEON_CLEAR_FASTZ)) {
+
+		int i;
+		int depthpixperline =
+		    (dev_priv->depth_fmt == RADEON_DEPTH_FORMAT_16BIT_INT_Z) ?
+		    (dev_priv->depth_pitch / 2) : (dev_priv->depth_pitch / 4);
+
+		u32 clearmask;
+
+		u32 tempRB3D_DEPTHCLEARVALUE = clear->clear_depth |
+		    ((clear->depth_mask & 0xff) << 24);
+
+		/* Make sure we restore the 3D state next time.
+		 * we haven't touched any "normal" state - still need this?
+		 */
+		dev_priv->sarea_priv->ctx_owner = 0;
+
+		if ((dev_priv->flags & RADEON_HAS_HIERZ)
+		    && (flags & RADEON_USE_HIERZ)) {
+			/* FIXME : reverse engineer that for Rx00 cards */
+			/* FIXME : the mask supposedly contains low-res z values. So can't set
+			   just to the max (0xff? or actually 0x3fff?), need to take z clear
+			   value into account? */
+			/* pattern seems to work for r100, though we get slight
+			   rendering errors with glxgears. If hierz is not enabled for r100,
+			   only the 4 bits which indicate clear (15,16,31,32, all zero) matter;
+			   the other ones are ignored, and the same clear mask can be used.
+			   That's very different behaviour from R200, which needs a different
+			   clear mask and a different number of tiles to clear depending on
+			   whether hierz is enabled or not !?!
+			 */
+			clearmask = (0xff << 22) | (0xff << 6) | 0x003f003f;
+		} else {
+			/* clear mask : chooses the clearing pattern.
+			   rv250: could be used to clear only parts of macrotiles
+			   (but that would get really complicated...)?
+			   bits 0 and 1 (either or both of them ?!?!) are used to
+			   not clear tile 0 (or maybe one of the bits indicates whether the
+			   tile is compressed), bits 2 and 3 to not clear tile 1, and so on.
+			   Pattern is as follows:
+			   | 0,1 | 4,5 | 8,9 |12,13|16,17|20,21|24,25|28,29|
+			   bits -------------------------------------------------
+			   | 2,3 | 6,7 |10,11|14,15|18,19|22,23|26,27|30,31|
+			   rv100: clearmask covers 2x8 4x1 tiles, but one clear still
+			   covers 256 pixels ?!?
+			 */
+			clearmask = 0x0;
+		}
+
+		BEGIN_RING(8);
+		RADEON_WAIT_UNTIL_2D_IDLE();
+		OUT_RING_REG(RADEON_RB3D_DEPTHCLEARVALUE,
+			     tempRB3D_DEPTHCLEARVALUE);
+		/* what offset is this exactly? */
+		OUT_RING_REG(RADEON_RB3D_ZMASKOFFSET, 0);
+		/* need ctlstat, otherwise get some strange black flickering */
+		OUT_RING_REG(RADEON_RB3D_ZCACHE_CTLSTAT,
+			     RADEON_RB3D_ZC_FLUSH_ALL);
+		ADVANCE_RING();
+
+		for (i = 0; i < nbox; i++) {
+			int tileoffset, nrtilesx, nrtilesy, j;
+			/* it looks like r200 needs rv-style clears, at least if hierz is not enabled? */
+			if ((dev_priv->flags & RADEON_HAS_HIERZ)
+			    && !(dev_priv->microcode_version == UCODE_R200)) {
+				/* FIXME : figure this out for r200 (when hierz is enabled). Or
+				   maybe r200 actually doesn't need to put the low-res z value into
+				   the tile cache like r100, but just needs to clear the hi-level z-buffer?
+				   Works for R100, both with hierz and without.
+				   R100 seems to operate on 2x1 8x8 tiles, but...
+				   odd: offset/nrtiles need to be 64 pix (4 block) aligned? Potentially
+				   problematic with resolutions which are not 64 pix aligned? */
+				tileoffset =
+				    ((pbox[i].y1 >> 3) * depthpixperline +
+				     pbox[i].x1) >> 6;
+				nrtilesx =
+				    ((pbox[i].x2 & ~63) -
+				     (pbox[i].x1 & ~63)) >> 4;
+				nrtilesy =
+				    (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
+				for (j = 0; j <= nrtilesy; j++) {
+					BEGIN_RING(4);
+					OUT_RING(CP_PACKET3
+						 (RADEON_3D_CLEAR_ZMASK, 2));
+					/* first tile */
+					OUT_RING(tileoffset * 8);
+					/* the number of tiles to clear */
+					OUT_RING(nrtilesx + 4);
+					/* clear mask : chooses the clearing pattern. */
+					OUT_RING(clearmask);
+					ADVANCE_RING();
+					tileoffset += depthpixperline >> 6;
+				}
+			} else if (dev_priv->microcode_version == UCODE_R200) {
+				/* works for rv250. */
+				/* find first macro tile (8x2 4x4 z-pixels on rv250) */
+				tileoffset =
+				    ((pbox[i].y1 >> 3) * depthpixperline +
+				     pbox[i].x1) >> 5;
+				nrtilesx =
+				    (pbox[i].x2 >> 5) - (pbox[i].x1 >> 5);
+				nrtilesy =
+				    (pbox[i].y2 >> 3) - (pbox[i].y1 >> 3);
+				for (j = 0; j <= nrtilesy; j++) {
+					BEGIN_RING(4);
+					OUT_RING(CP_PACKET3
+						 (RADEON_3D_CLEAR_ZMASK, 2));
+					/* first tile */
+					/* judging by the first tile offset needed, could possibly
+					   directly address/clear 4x4 tiles instead of 8x2 * 4x4
+					   macro tiles, though would still need clear mask for
+					   right/bottom if truly 4x4 granularity is desired? */
+					OUT_RING(tileoffset * 16);
+					/* the number of tiles to clear */
+					OUT_RING(nrtilesx + 1);
+					/* clear mask : chooses the clearing pattern. */
+					OUT_RING(clearmask);
+					ADVANCE_RING();
+					tileoffset += depthpixperline >> 5;
+				}
+			} else {	/* rv 100 */
+				/* rv100 might not need 64 pix alignment, who knows */
+				/* offsets are, hmm, weird */
+				tileoffset =
+				    ((pbox[i].y1 >> 4) * depthpixperline +
+				     pbox[i].x1) >> 6;
+				nrtilesx =
+				    ((pbox[i].x2 & ~63) -
+				     (pbox[i].x1 & ~63)) >> 4;
+				nrtilesy =
+				    (pbox[i].y2 >> 4) - (pbox[i].y1 >> 4);
+				for (j = 0; j <= nrtilesy; j++) {
+					BEGIN_RING(4);
+					OUT_RING(CP_PACKET3
+						 (RADEON_3D_CLEAR_ZMASK, 2));
+					OUT_RING(tileoffset * 128);
+					/* the number of tiles to clear */
+					OUT_RING(nrtilesx + 4);
+					/* clear mask : chooses the clearing pattern. */
+					OUT_RING(clearmask);
+					ADVANCE_RING();
+					tileoffset += depthpixperline >> 6;
+				}
+			}
+		}
+
+		/* TODO don't always clear all hi-level z tiles */
+		if ((dev_priv->flags & RADEON_HAS_HIERZ)
+		    && (dev_priv->microcode_version == UCODE_R200)
+		    && (flags & RADEON_USE_HIERZ))
+			/* r100 and cards without hierarchical z-buffer have no high-level z-buffer */
+			/* FIXME : the mask supposedly contains low-res z values. So can't set
+			   just to the max (0xff? or actually 0x3fff?), need to take z clear
+			   value into account? */
+		{
+			BEGIN_RING(4);
+			OUT_RING(CP_PACKET3(RADEON_3D_CLEAR_HIZ, 2));
+			OUT_RING(0x0);	/* First tile */
+			OUT_RING(0x3cc0);
+			OUT_RING((0xff << 22) | (0xff << 6) | 0x003f003f);
+			ADVANCE_RING();
+		}
+	}
+
+	/* We have to clear the depth and/or stencil buffers by
+	 * rendering a quad into just those buffers.  Thus, we have to
+	 * make sure the 3D engine is configured correctly.
+	 */
+	else if ((dev_priv->microcode_version == UCODE_R200) &&
+		(flags & (RADEON_DEPTH | RADEON_STENCIL))) {
+
+		int tempPP_CNTL;
+		int tempRE_CNTL;
+		int tempRB3D_CNTL;
+		int tempRB3D_ZSTENCILCNTL;
+		int tempRB3D_STENCILREFMASK;
+		int tempRB3D_PLANEMASK;
+		int tempSE_CNTL;
+		int tempSE_VTE_CNTL;
+		int tempSE_VTX_FMT_0;
+		int tempSE_VTX_FMT_1;
+		int tempSE_VAP_CNTL;
+		int tempRE_AUX_SCISSOR_CNTL;
+
+		tempPP_CNTL = 0;
+		tempRE_CNTL = 0;
+
+		tempRB3D_CNTL = depth_clear->rb3d_cntl;
+
+		tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;
+		tempRB3D_STENCILREFMASK = 0x0;
+
+		tempSE_CNTL = depth_clear->se_cntl;
+
+		/* Disable TCL */
+
+		tempSE_VAP_CNTL = (	/* SE_VAP_CNTL__FORCE_W_TO_ONE_MASK |  */
+					  (0x9 <<
+					   SE_VAP_CNTL__VF_MAX_VTX_NUM__SHIFT));
+
+		tempRB3D_PLANEMASK = 0x0;
+
+		tempRE_AUX_SCISSOR_CNTL = 0x0;
+
+		tempSE_VTE_CNTL =
+		    SE_VTE_CNTL__VTX_XY_FMT_MASK | SE_VTE_CNTL__VTX_Z_FMT_MASK;
+
+		/* Vertex format (X, Y, Z, W) */
+		tempSE_VTX_FMT_0 =
+		    SE_VTX_FMT_0__VTX_Z0_PRESENT_MASK |
+		    SE_VTX_FMT_0__VTX_W0_PRESENT_MASK;
+		tempSE_VTX_FMT_1 = 0x0;
+
+		/*
+		 * Depth buffer specific enables
+		 */
+		if (flags & RADEON_DEPTH) {
+			/* Enable depth buffer */
+			tempRB3D_CNTL |= RADEON_Z_ENABLE;
+		} else {
+			/* Disable depth buffer */
+			tempRB3D_CNTL &= ~RADEON_Z_ENABLE;
+		}
+
+		/*
+		 * Stencil buffer specific enables
+		 */
+		if (flags & RADEON_STENCIL) {
+			tempRB3D_CNTL |= RADEON_STENCIL_ENABLE;
+			tempRB3D_STENCILREFMASK = clear->depth_mask;
+		} else {
+			tempRB3D_CNTL &= ~RADEON_STENCIL_ENABLE;
+			tempRB3D_STENCILREFMASK = 0x00000000;
+		}
+
+		if (flags & RADEON_USE_COMP_ZBUF) {
+			tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
+			    RADEON_Z_DECOMPRESSION_ENABLE;
+		}
+		if (flags & RADEON_USE_HIERZ) {
+			tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;
+		}
+
+		BEGIN_RING(26);
+		RADEON_WAIT_UNTIL_2D_IDLE();
+
+		OUT_RING_REG(RADEON_PP_CNTL, tempPP_CNTL);
+		OUT_RING_REG(R200_RE_CNTL, tempRE_CNTL);
+		OUT_RING_REG(RADEON_RB3D_CNTL, tempRB3D_CNTL);
+		OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL);
+		OUT_RING_REG(RADEON_RB3D_STENCILREFMASK,
+			     tempRB3D_STENCILREFMASK);
+		OUT_RING_REG(RADEON_RB3D_PLANEMASK, tempRB3D_PLANEMASK);
+		OUT_RING_REG(RADEON_SE_CNTL, tempSE_CNTL);
+		OUT_RING_REG(R200_SE_VTE_CNTL, tempSE_VTE_CNTL);
+		OUT_RING_REG(R200_SE_VTX_FMT_0, tempSE_VTX_FMT_0);
+		OUT_RING_REG(R200_SE_VTX_FMT_1, tempSE_VTX_FMT_1);
+		OUT_RING_REG(R200_SE_VAP_CNTL, tempSE_VAP_CNTL);
+		OUT_RING_REG(R200_RE_AUX_SCISSOR_CNTL, tempRE_AUX_SCISSOR_CNTL);
+		ADVANCE_RING();
+
+		/* Make sure we restore the 3D state next time.
+		 */
+		dev_priv->sarea_priv->ctx_owner = 0;
+
+		for (i = 0; i < nbox; i++) {
+
+			/* Funny that this should be required --
+			 *  sets top-left?
+			 */
+			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
+
+			BEGIN_RING(14);
+			OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 12));
+			OUT_RING((RADEON_PRIM_TYPE_RECT_LIST |
+				  RADEON_PRIM_WALK_RING |
+				  (3 << RADEON_NUM_VERTICES_SHIFT)));
+			OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_Y1]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+			OUT_RING(0x3f800000);
+			OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+			OUT_RING(0x3f800000);
+			OUT_RING(depth_boxes[i].ui[CLEAR_X2]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+			OUT_RING(0x3f800000);
+			ADVANCE_RING();
+		}
+	} else if ((flags & (RADEON_DEPTH | RADEON_STENCIL))) {
+
+		int tempRB3D_ZSTENCILCNTL = depth_clear->rb3d_zstencilcntl;
+
+		rb3d_cntl = depth_clear->rb3d_cntl;
+
+		if (flags & RADEON_DEPTH) {
+			rb3d_cntl |= RADEON_Z_ENABLE;
+		} else {
+			rb3d_cntl &= ~RADEON_Z_ENABLE;
+		}
+
+		if (flags & RADEON_STENCIL) {
+			rb3d_cntl |= RADEON_STENCIL_ENABLE;
+			rb3d_stencilrefmask = clear->depth_mask;	/* misnamed field */
+		} else {
+			rb3d_cntl &= ~RADEON_STENCIL_ENABLE;
+			rb3d_stencilrefmask = 0x00000000;
+		}
+
+		if (flags & RADEON_USE_COMP_ZBUF) {
+			tempRB3D_ZSTENCILCNTL |= RADEON_Z_COMPRESSION_ENABLE |
+			    RADEON_Z_DECOMPRESSION_ENABLE;
+		}
+		if (flags & RADEON_USE_HIERZ) {
+			tempRB3D_ZSTENCILCNTL |= RADEON_Z_HIERARCHY_ENABLE;
+		}
+
+		BEGIN_RING(13);
+		RADEON_WAIT_UNTIL_2D_IDLE();
+
+		OUT_RING(CP_PACKET0(RADEON_PP_CNTL, 1));
+		OUT_RING(0x00000000);
+		OUT_RING(rb3d_cntl);
+
+		OUT_RING_REG(RADEON_RB3D_ZSTENCILCNTL, tempRB3D_ZSTENCILCNTL);
+		OUT_RING_REG(RADEON_RB3D_STENCILREFMASK, rb3d_stencilrefmask);
+		OUT_RING_REG(RADEON_RB3D_PLANEMASK, 0x00000000);
+		OUT_RING_REG(RADEON_SE_CNTL, depth_clear->se_cntl);
+		ADVANCE_RING();
+
+		/* Make sure we restore the 3D state next time.
+		 */
+		dev_priv->sarea_priv->ctx_owner = 0;
+
+		for (i = 0; i < nbox; i++) {
+
+			/* Funny that this should be required --
+			 *  sets top-left?
+			 */
+			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
+
+			BEGIN_RING(15);
+
+			OUT_RING(CP_PACKET3(RADEON_3D_DRAW_IMMD, 13));
+			OUT_RING(RADEON_VTX_Z_PRESENT |
+				 RADEON_VTX_PKCOLOR_PRESENT);
+			OUT_RING((RADEON_PRIM_TYPE_RECT_LIST |
+				  RADEON_PRIM_WALK_RING |
+				  RADEON_MAOS_ENABLE |
+				  RADEON_VTX_FMT_RADEON_MODE |
+				  (3 << RADEON_NUM_VERTICES_SHIFT)));
+
+			OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_Y1]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+			OUT_RING(0x0);
+
+			OUT_RING(depth_boxes[i].ui[CLEAR_X1]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+			OUT_RING(0x0);
+
+			OUT_RING(depth_boxes[i].ui[CLEAR_X2]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_Y2]);
+			OUT_RING(depth_boxes[i].ui[CLEAR_DEPTH]);
+			OUT_RING(0x0);
+
+			ADVANCE_RING();
+		}
+	}
+
+	/* Increment the clear counter.  The client-side 3D driver must
+	 * wait on this value before performing the clear ioctl.  We
+	 * need this because the card's so damned fast...
+	 */
+	dev_priv->sarea_priv->last_clear++;
+
+	BEGIN_RING(4);
+
+	RADEON_CLEAR_AGE(dev_priv->sarea_priv->last_clear);
+	RADEON_WAIT_UNTIL_IDLE();
+
+	ADVANCE_RING();
+}
+
+static void radeon_cp_dispatch_swap(struct drm_device * dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	int nbox = sarea_priv->nbox;
+	struct drm_clip_rect *pbox = sarea_priv->boxes;
+	int i;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	/* Do some trivial performance monitoring...
+	 */
+	if (dev_priv->do_boxes)
+		radeon_cp_performance_boxes(dev_priv);
+
+	/* Wait for the 3D stream to idle before dispatching the bitblt.
+	 * This will prevent data corruption between the two streams.
+	 */
+	BEGIN_RING(2);
+
+	RADEON_WAIT_UNTIL_3D_IDLE();
+
+	ADVANCE_RING();
+
+	for (i = 0; i < nbox; i++) {
+		int x = pbox[i].x1;
+		int y = pbox[i].y1;
+		int w = pbox[i].x2 - x;
+		int h = pbox[i].y2 - y;
+
+		DRM_DEBUG("%d,%d-%d,%d\n", x, y, w, h);
+
+		BEGIN_RING(9);
+
+		OUT_RING(CP_PACKET0(RADEON_DP_GUI_MASTER_CNTL, 0));
+		OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
+			 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+			 RADEON_GMC_BRUSH_NONE |
+			 (dev_priv->color_fmt << 8) |
+			 RADEON_GMC_SRC_DATATYPE_COLOR |
+			 RADEON_ROP3_S |
+			 RADEON_DP_SRC_SOURCE_MEMORY |
+			 RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
+
+		/* Make this work even if front & back are flipped:
+		 */
+		OUT_RING(CP_PACKET0(RADEON_SRC_PITCH_OFFSET, 1));
+		if (dev_priv->sarea_priv->pfCurrentPage == 0) {
+			OUT_RING(dev_priv->back_pitch_offset);
+			OUT_RING(dev_priv->front_pitch_offset);
+		} else {
+			OUT_RING(dev_priv->front_pitch_offset);
+			OUT_RING(dev_priv->back_pitch_offset);
+		}
+
+		OUT_RING(CP_PACKET0(RADEON_SRC_X_Y, 2));
+		OUT_RING((x << 16) | y);
+		OUT_RING((x << 16) | y);
+		OUT_RING((w << 16) | h);
+
+		ADVANCE_RING();
+	}
+
+	/* Increment the frame counter.  The client-side 3D driver must
+	 * throttle the framerate by waiting for this value before
+	 * performing the swapbuffer ioctl.
+	 */
+	dev_priv->sarea_priv->last_frame++;
+
+	BEGIN_RING(4);
+
+	RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame);
+	RADEON_WAIT_UNTIL_2D_IDLE();
+
+	ADVANCE_RING();
+}
+
+static void radeon_cp_dispatch_flip(struct drm_device * dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_sarea *sarea = (struct drm_sarea *) dev_priv->sarea->handle;
+	int offset = (dev_priv->sarea_priv->pfCurrentPage == 1)
+	    ? dev_priv->front_offset : dev_priv->back_offset;
+	RING_LOCALS;
+	DRM_DEBUG("pfCurrentPage=%d\n",
+		  dev_priv->sarea_priv->pfCurrentPage);
+
+	/* Do some trivial performance monitoring...
+	 */
+	if (dev_priv->do_boxes) {
+		dev_priv->stats.boxes |= RADEON_BOX_FLIP;
+		radeon_cp_performance_boxes(dev_priv);
+	}
+
+	/* Update the frame offsets for both CRTCs
+	 */
+	BEGIN_RING(6);
+
+	RADEON_WAIT_UNTIL_3D_IDLE();
+	OUT_RING_REG(RADEON_CRTC_OFFSET,
+		     ((sarea->frame.y * dev_priv->front_pitch +
+		       sarea->frame.x * (dev_priv->color_fmt - 2)) & ~7)
+		     + offset);
+	OUT_RING_REG(RADEON_CRTC2_OFFSET, dev_priv->sarea_priv->crtc2_base
+		     + offset);
+
+	ADVANCE_RING();
+
+	/* Increment the frame counter.  The client-side 3D driver must
+	 * throttle the framerate by waiting for this value before
+	 * performing the swapbuffer ioctl.
+	 */
+	dev_priv->sarea_priv->last_frame++;
+	dev_priv->sarea_priv->pfCurrentPage =
+		1 - dev_priv->sarea_priv->pfCurrentPage;
+
+	BEGIN_RING(2);
+
+	RADEON_FRAME_AGE(dev_priv->sarea_priv->last_frame);
+
+	ADVANCE_RING();
+}
+
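+/* Returns nonzero if 'nr' is not a valid vertex count for the given
+ * primitive type (e.g. lines need a nonzero even count, triangle lists a
+ * nonzero multiple of three).
+ */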
+static int bad_prim_vertex_nr(int primitive, int nr)
+{
+	switch (primitive & RADEON_PRIM_TYPE_MASK) {
+	case RADEON_PRIM_TYPE_NONE:
+	case RADEON_PRIM_TYPE_POINT:
+		return nr < 1;
+	case RADEON_PRIM_TYPE_LINE:
+		return (nr & 1) || nr == 0;
+	case RADEON_PRIM_TYPE_LINE_STRIP:
+		return nr < 2;
+	case RADEON_PRIM_TYPE_TRI_LIST:
+	case RADEON_PRIM_TYPE_3VRT_POINT_LIST:
+	case RADEON_PRIM_TYPE_3VRT_LINE_LIST:
+	case RADEON_PRIM_TYPE_RECT_LIST:
+		return nr % 3 || nr == 0;
+	case RADEON_PRIM_TYPE_TRI_FAN:
+	case RADEON_PRIM_TYPE_TRI_STRIP:
+		return nr < 3;
+	default:
+		return 1;
+	}
+}
+
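+/* Describes one TCL primitive taken from a client buffer: the byte range
+ * [start, finish) within the buffer, the primitive type, the vertex count,
+ * an offset from the start of the DMA buffers (used for indexed prims) and
+ * the vertex format word.
+ */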
+typedef struct {
+	unsigned int start;
+	unsigned int finish;
+	unsigned int prim;
+	unsigned int numverts;
+	unsigned int offset;
+	unsigned int vc_format;
+} drm_radeon_tcl_prim_t;
+
+static void radeon_cp_dispatch_vertex(struct drm_device * dev,
+				      struct drm_buf * buf,
+				      drm_radeon_tcl_prim_t * prim)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	int offset = dev_priv->gart_buffers_offset + buf->offset + prim->start;
+	int numverts = (int)prim->numverts;
+	int nbox = sarea_priv->nbox;
+	int i = 0;
+	RING_LOCALS;
+
+	DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d %d verts\n",
+		  prim->prim,
+		  prim->vc_format, prim->start, prim->finish, prim->numverts);
+
+	if (bad_prim_vertex_nr(prim->prim, prim->numverts)) {
+		DRM_ERROR("bad prim %x numverts %d\n",
+			  prim->prim, prim->numverts);
+		return;
+	}
+
+	do {
+		/* Emit the next cliprect */
+		if (i < nbox) {
+			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
+		}
+
+		/* Emit the vertex buffer rendering commands */
+		BEGIN_RING(5);
+
+		OUT_RING(CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, 3));
+		OUT_RING(offset);
+		OUT_RING(numverts);
+		OUT_RING(prim->vc_format);
+		OUT_RING(prim->prim | RADEON_PRIM_WALK_LIST |
+			 RADEON_COLOR_ORDER_RGBA |
+			 RADEON_VTX_FMT_RADEON_MODE |
+			 (numverts << RADEON_NUM_VERTICES_SHIFT));
+
+		ADVANCE_RING();
+
+		i++;
+	} while (i < nbox);
+}
+
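+/* Stamp the buffer with a new dispatch age and mark it pending, so the
+ * freelist code can tell when the hardware has finished with it.
+ */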
+static void radeon_cp_discard_buffer(struct drm_device * dev, struct drm_buf * buf)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
+	RING_LOCALS;
+
+	buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;
+
+	/* Emit the vertex buffer age */
+	BEGIN_RING(2);
+	RADEON_DISPATCH_AGE(buf_priv->age);
+	ADVANCE_RING();
+
+	buf->pending = 1;
+	buf->used = 0;
+}
+
+static void radeon_cp_dispatch_indirect(struct drm_device * dev,
+					struct drm_buf * buf, int start, int end)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+	DRM_DEBUG("buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);
+
+	if (start != end) {
+		int offset = (dev_priv->gart_buffers_offset
+			      + buf->offset + start);
+		int dwords = (end - start + 3) / sizeof(u32);
+
+		/* Indirect buffer data must be an even number of
+		 * dwords, so if we've been given an odd number we must
+		 * pad the data with a Type-2 CP packet.
+		 */
+		if (dwords & 1) {
+			u32 *data = (u32 *)
+			    ((char *)dev->agp_buffer_map->handle
+			     + buf->offset + start);
+			data[dwords++] = RADEON_CP_PACKET2;
+		}
+
+		/* Fire off the indirect buffer */
+		BEGIN_RING(3);
+
+		OUT_RING(CP_PACKET0(RADEON_CP_IB_BASE, 1));
+		OUT_RING(offset);
+		OUT_RING(dwords);
+
+		ADVANCE_RING();
+	}
+}
+
+static void radeon_cp_dispatch_indices(struct drm_device * dev,
+				       struct drm_buf * elt_buf,
+				       drm_radeon_tcl_prim_t * prim)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	int offset = dev_priv->gart_buffers_offset + prim->offset;
+	u32 *data;
+	int dwords;
+	int i = 0;
+	int start = prim->start + RADEON_INDEX_PRIM_OFFSET;
+	int count = (prim->finish - start) / sizeof(u16);
+	int nbox = sarea_priv->nbox;
+
+	DRM_DEBUG("hwprim 0x%x vfmt 0x%x %d..%d offset: %x nr %d\n",
+		  prim->prim,
+		  prim->vc_format,
+		  prim->start, prim->finish, prim->offset, prim->numverts);
+
+	if (bad_prim_vertex_nr(prim->prim, count)) {
+		DRM_ERROR("bad prim %x count %d\n", prim->prim, count);
+		return;
+	}
+
+	if (start >= prim->finish || (prim->start & 0x7)) {
+		DRM_ERROR("buffer prim %d\n", prim->prim);
+		return;
+	}
+
+	dwords = (prim->finish - prim->start + 3) / sizeof(u32);
+
+	data = (u32 *) ((char *)dev->agp_buffer_map->handle +
+			elt_buf->offset + prim->start);
+
+	data[0] = CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, dwords - 2);
+	data[1] = offset;
+	data[2] = prim->numverts;
+	data[3] = prim->vc_format;
+	data[4] = (prim->prim |
+		   RADEON_PRIM_WALK_IND |
+		   RADEON_COLOR_ORDER_RGBA |
+		   RADEON_VTX_FMT_RADEON_MODE |
+		   (count << RADEON_NUM_VERTICES_SHIFT));
+
+	do {
+		if (i < nbox)
+			radeon_emit_clip_rect(dev_priv, &sarea_priv->boxes[i]);
+
+		radeon_cp_dispatch_indirect(dev, elt_buf,
+					    prim->start, prim->finish);
+
+		i++;
+	} while (i < nbox);
+
+}
+
+#define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE
+
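+/* Upload a texture image: copy the user data into a free DMA buffer and
+ * blit it to the destination offset.  Images larger than
+ * RADEON_MAX_TEXTURE_SIZE are uploaded in several passes, with image->y,
+ * image->height and image->data updated between passes.
+ */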
+static int radeon_cp_dispatch_texture(struct drm_device * dev,
+				      struct drm_file *file_priv,
+				      drm_radeon_texture_t * tex,
+				      drm_radeon_tex_image_t * image)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_buf *buf;
+	u32 format;
+	u32 *buffer;
+	const u8 __user *data;
+	int size, dwords, tex_width, blit_width, spitch;
+	u32 height;
+	int i;
+	u32 texpitch, microtile;
+	u32 offset, byte_offset;
+	RING_LOCALS;
+
+	if (radeon_check_and_fixup_offset(dev_priv, file_priv, &tex->offset)) {
+		DRM_ERROR("Invalid destination offset\n");
+		return -EINVAL;
+	}
+
+	dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD;
+
+	/* Flush the pixel cache.  This ensures no pixel data gets mixed
+	 * up with the texture data from the host data blit, otherwise
+	 * part of the texture image may be corrupted.
+	 */
+	BEGIN_RING(4);
+	RADEON_FLUSH_CACHE();
+	RADEON_WAIT_UNTIL_IDLE();
+	ADVANCE_RING();
+
+	/* The compiler won't optimize away a division by a variable,
+	 * even if the only legal values are powers of two.  Thus, we'll
+	 * use a shift instead.
+	 */
+	switch (tex->format) {
+	case RADEON_TXFORMAT_ARGB8888:
+	case RADEON_TXFORMAT_RGBA8888:
+		format = RADEON_COLOR_FORMAT_ARGB8888;
+		tex_width = tex->width * 4;
+		blit_width = image->width * 4;
+		break;
+	case RADEON_TXFORMAT_AI88:
+	case RADEON_TXFORMAT_ARGB1555:
+	case RADEON_TXFORMAT_RGB565:
+	case RADEON_TXFORMAT_ARGB4444:
+	case RADEON_TXFORMAT_VYUY422:
+	case RADEON_TXFORMAT_YVYU422:
+		format = RADEON_COLOR_FORMAT_RGB565;
+		tex_width = tex->width * 2;
+		blit_width = image->width * 2;
+		break;
+	case RADEON_TXFORMAT_I8:
+	case RADEON_TXFORMAT_RGB332:
+		format = RADEON_COLOR_FORMAT_CI8;
+		tex_width = tex->width * 1;
+		blit_width = image->width * 1;
+		break;
+	default:
+		DRM_ERROR("invalid texture format %d\n", tex->format);
+		return -EINVAL;
+	}
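+	/* Source pitch for the blit in 64-byte units, matching the
+	 * (spitch << 22) | (offset >> 10) pitch/offset encoding used below.
+	 */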
+	spitch = blit_width >> 6;
+	if (spitch == 0 && image->height > 1)
+		return -EINVAL;
+
+	texpitch = tex->pitch;
+	if ((texpitch << 22) & RADEON_DST_TILE_MICRO) {
+		microtile = 1;
+		if (tex_width < 64) {
+			texpitch &= ~(RADEON_DST_TILE_MICRO >> 22);
+			/* we got tiled coordinates, untile them */
+			image->x *= 2;
+		}
+	} else
+		microtile = 0;
+
+	/* this might fail for zero-sized uploads - are those illegal? */
+	if (!radeon_check_offset(dev_priv, tex->offset + image->height *
+				blit_width - 1)) {
+		DRM_ERROR("Invalid final destination offset\n");
+		return -EINVAL;
+	}
+
+	DRM_DEBUG("tex=%dx%d blit=%d\n", tex_width, tex->height, blit_width);
+
+	do {
+		DRM_DEBUG("tex: ofs=0x%x p=%d f=%d x=%hd y=%hd w=%hd h=%hd\n",
+			  tex->offset >> 10, tex->pitch, tex->format,
+			  image->x, image->y, image->width, image->height);
+
+		/* Make a copy of some parameters in case we have to
+		 * update them for a multi-pass texture blit.
+		 */
+		height = image->height;
+		data = (const u8 __user *)image->data;
+
+		size = height * blit_width;
+
+		if (size > RADEON_MAX_TEXTURE_SIZE) {
+			height = RADEON_MAX_TEXTURE_SIZE / blit_width;
+			size = height * blit_width;
+		} else if (size < 4 && size > 0) {
+			size = 4;
+		} else if (size == 0) {
+			return 0;
+		}
+
+		buf = radeon_freelist_get(dev);
+		if (0 && !buf) {
+			radeon_do_cp_idle(dev_priv);
+			buf = radeon_freelist_get(dev);
+		}
+		if (!buf) {
+			DRM_DEBUG("EAGAIN\n");
+			if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
+				return -EFAULT;
+			return -EAGAIN;
+		}
+
+		/* Dispatch the indirect buffer.
+		 */
+		buffer =
+		    (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
+		dwords = size / 4;
+
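+/* Copy _width bytes of texture data from user space into the indirect
+ * buffer; bails out of the surrounding function with -EFAULT if the copy
+ * faults.
+ */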
+#define RADEON_COPY_MT(_buf, _data, _width) \
+	do { \
+		if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
+			DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
+			return -EFAULT; \
+		} \
+	} while(0)
+
+		if (microtile) {
+			/* Texture micro tiling is in use, so the minimum texture width is
+			   16 bytes.  However, we cannot use the blitter directly for texture
+			   widths < 64 bytes, since the minimum tex pitch is 64 bytes and we
+			   need it to match the texture width, otherwise the blitter will tile
+			   it wrong.  Thus, tile manually in this case.  Additionally, we need
+			   to special-case tex height = 1, since our actual image will have
+			   height 2 and we must not read beyond the texture size from user
+			   space. */
+			if (tex->height == 1) {
+				if (tex_width >= 64 || tex_width <= 16) {
+					RADEON_COPY_MT(buffer, data,
+						(int)(tex_width * sizeof(u32)));
+				} else if (tex_width == 32) {
+					RADEON_COPY_MT(buffer, data, 16);
+					RADEON_COPY_MT(buffer + 8,
+						       data + 16, 16);
+				}
+			} else if (tex_width >= 64 || tex_width == 16) {
+				RADEON_COPY_MT(buffer, data,
+					       (int)(dwords * sizeof(u32)));
+			} else if (tex_width < 16) {
+				for (i = 0; i < tex->height; i++) {
+					RADEON_COPY_MT(buffer, data, tex_width);
+					buffer += 4;
+					data += tex_width;
+				}
+			} else if (tex_width == 32) {
+				/* TODO: make sure this works when not fitting in one buffer
+				   (i.e. 32bytes x 2048...) */
+				for (i = 0; i < tex->height; i += 2) {
+					RADEON_COPY_MT(buffer, data, 16);
+					data += 16;
+					RADEON_COPY_MT(buffer + 8, data, 16);
+					data += 16;
+					RADEON_COPY_MT(buffer + 4, data, 16);
+					data += 16;
+					RADEON_COPY_MT(buffer + 12, data, 16);
+					data += 16;
+					buffer += 16;
+				}
+			}
+		} else {
+			if (tex_width >= 32) {
+				/* Texture image width is larger than the minimum, so we
+				 * can upload it directly.
+				 */
+				RADEON_COPY_MT(buffer, data,
+					       (int)(dwords * sizeof(u32)));
+			} else {
+				/* Texture image width is less than the minimum, so we
+				 * need to pad out each image scanline to the minimum
+				 * width.
+				 */
+				for (i = 0; i < tex->height; i++) {
+					RADEON_COPY_MT(buffer, data, tex_width);
+					buffer += 8;
+					data += tex_width;
+				}
+			}
+		}
+
+#undef RADEON_COPY_MT
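+		/* The destination Y coordinate is taken modulo 2048 below; the
+		 * full lines above that are folded into the destination offset
+		 * via byte_offset, presumably because the blit Y field cannot
+		 * address more.
+		 */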
+		byte_offset = (image->y & ~2047) * blit_width;
+		buf->file_priv = file_priv;
+		buf->used = size;
+		offset = dev_priv->gart_buffers_offset + buf->offset;
+		BEGIN_RING(9);
+		OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5));
+		OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
+			 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+			 RADEON_GMC_BRUSH_NONE |
+			 (format << 8) |
+			 RADEON_GMC_SRC_DATATYPE_COLOR |
+			 RADEON_ROP3_S |
+			 RADEON_DP_SRC_SOURCE_MEMORY |
+			 RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
+		OUT_RING((spitch << 22) | (offset >> 10));
+		OUT_RING((texpitch << 22) | ((tex->offset >> 10) + (byte_offset >> 10)));
+		OUT_RING(0);
+		OUT_RING((image->x << 16) | (image->y % 2048));
+		OUT_RING((image->width << 16) | height);
+		RADEON_WAIT_UNTIL_2D_IDLE();
+		ADVANCE_RING();
+		COMMIT_RING();
+
+		radeon_cp_discard_buffer(dev, buf);
+
+		/* Update the input parameters for next time */
+		image->y += height;
+		image->height -= height;
+		image->data = (const u8 __user *)image->data + size;
+	} while (image->height > 0);
+
+	/* Flush the pixel cache after the blit completes.  This ensures
+	 * the texture data is written out to memory before rendering
+	 * continues.
+	 */
+	BEGIN_RING(4);
+	RADEON_FLUSH_CACHE();
+	RADEON_WAIT_UNTIL_2D_IDLE();
+	ADVANCE_RING();
+	COMMIT_RING();
+
+	return 0;
+}
+
+static void radeon_cp_dispatch_stipple(struct drm_device * dev, u32 * stipple)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	int i;
+	RING_LOCALS;
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(35);
+
+	OUT_RING(CP_PACKET0(RADEON_RE_STIPPLE_ADDR, 0));
+	OUT_RING(0x00000000);
+
+	OUT_RING(CP_PACKET0_TABLE(RADEON_RE_STIPPLE_DATA, 31));
+	for (i = 0; i < 32; i++) {
+		OUT_RING(stipple[i]);
+	}
+
+	ADVANCE_RING();
+}
+
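+/* Write the cached flags and bounds of surface 'surf_index' into its
+ * SURFACE0_* register block (registers are spaced 16 bytes apart per
+ * surface).  The CP is idled first, presumably so the update does not
+ * affect commands in flight.
+ */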
+static void radeon_apply_surface_regs(int surf_index,
+				      drm_radeon_private_t *dev_priv)
+{
+	if (!dev_priv->mmio)
+		return;
+
+	radeon_do_cp_idle(dev_priv);
+
+	RADEON_WRITE(RADEON_SURFACE0_INFO + 16 * surf_index,
+		     dev_priv->surfaces[surf_index].flags);
+	RADEON_WRITE(RADEON_SURFACE0_LOWER_BOUND + 16 * surf_index,
+		     dev_priv->surfaces[surf_index].lower);
+	RADEON_WRITE(RADEON_SURFACE0_UPPER_BOUND + 16 * surf_index,
+		     dev_priv->surfaces[surf_index].upper);
+}
+
+/* Allocates a virtual surface
+ * doesn't always allocate a real surface, will stretch an existing
+ * surface when possible.
+ *
+ * Note that refcount can be at most 2, since during a free refcount=3
+ * might mean we have to allocate a new surface which might not always
+ * be available.
+ * For example: we allocate three contiguous surfaces ABC. If B is
+ * freed, we suddenly need two surfaces to store A and C, which might
+ * not always be available.
+ */
+static int alloc_surface(drm_radeon_surface_alloc_t *new,
+			 drm_radeon_private_t *dev_priv,
+			 struct drm_file *file_priv)
+{
+	struct radeon_virt_surface *s;
+	int i;
+	int virt_surface_index;
+	uint32_t new_upper, new_lower;
+
+	new_lower = new->address;
+	new_upper = new_lower + new->size - 1;
+
+	/* sanity check */
+	if ((new_lower >= new_upper) || (new->flags == 0) || (new->size == 0) ||
+	    ((new_upper & RADEON_SURF_ADDRESS_FIXED_MASK) !=
+	     RADEON_SURF_ADDRESS_FIXED_MASK)
+	    || ((new_lower & RADEON_SURF_ADDRESS_FIXED_MASK) != 0))
+		return -1;
+
+	/* make sure there is no overlap with existing surfaces */
+	for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+		if ((dev_priv->surfaces[i].refcount != 0) &&
+		    (((new_lower >= dev_priv->surfaces[i].lower) &&
+		      (new_lower < dev_priv->surfaces[i].upper)) ||
+		     ((new_lower < dev_priv->surfaces[i].lower) &&
+		      (new_upper > dev_priv->surfaces[i].lower)))) {
+			return -1;
+		}
+	}
+
+	/* find a virtual surface */
+	for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++)
+		if (dev_priv->virt_surfaces[i].file_priv == 0)
+			break;
+	if (i == 2 * RADEON_MAX_SURFACES) {
+		return -1;
+	}
+	virt_surface_index = i;
+
+	/* try to reuse an existing surface */
+	for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+		/* extend before */
+		if ((dev_priv->surfaces[i].refcount == 1) &&
+		    (new->flags == dev_priv->surfaces[i].flags) &&
+		    (new_upper + 1 == dev_priv->surfaces[i].lower)) {
+			s = &(dev_priv->virt_surfaces[virt_surface_index]);
+			s->surface_index = i;
+			s->lower = new_lower;
+			s->upper = new_upper;
+			s->flags = new->flags;
+			s->file_priv = file_priv;
+			dev_priv->surfaces[i].refcount++;
+			dev_priv->surfaces[i].lower = s->lower;
+			radeon_apply_surface_regs(s->surface_index, dev_priv);
+			return virt_surface_index;
+		}
+
+		/* extend after */
+		if ((dev_priv->surfaces[i].refcount == 1) &&
+		    (new->flags == dev_priv->surfaces[i].flags) &&
+		    (new_lower == dev_priv->surfaces[i].upper + 1)) {
+			s = &(dev_priv->virt_surfaces[virt_surface_index]);
+			s->surface_index = i;
+			s->lower = new_lower;
+			s->upper = new_upper;
+			s->flags = new->flags;
+			s->file_priv = file_priv;
+			dev_priv->surfaces[i].refcount++;
+			dev_priv->surfaces[i].upper = s->upper;
+			radeon_apply_surface_regs(s->surface_index, dev_priv);
+			return virt_surface_index;
+		}
+	}
+
+	/* okay, we need a new one */
+	for (i = 0; i < RADEON_MAX_SURFACES; i++) {
+		if (dev_priv->surfaces[i].refcount == 0) {
+			s = &(dev_priv->virt_surfaces[virt_surface_index]);
+			s->surface_index = i;
+			s->lower = new_lower;
+			s->upper = new_upper;
+			s->flags = new->flags;
+			s->file_priv = file_priv;
+			dev_priv->surfaces[i].refcount = 1;
+			dev_priv->surfaces[i].lower = s->lower;
+			dev_priv->surfaces[i].upper = s->upper;
+			dev_priv->surfaces[i].flags = s->flags;
+			radeon_apply_surface_regs(s->surface_index, dev_priv);
+			return virt_surface_index;
+		}
+	}
+
+	/* we didn't find anything */
+	return -1;
+}
+
+static int free_surface(struct drm_file *file_priv,
+			drm_radeon_private_t * dev_priv,
+			int lower)
+{
+	struct radeon_virt_surface *s;
+	int i;
+	/* find the virtual surface */
+	for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
+		s = &(dev_priv->virt_surfaces[i]);
+		if (s->file_priv) {
+			if ((lower == s->lower) && (file_priv == s->file_priv))
+			{
+				if (dev_priv->surfaces[s->surface_index].
+				    lower == s->lower)
+					dev_priv->surfaces[s->surface_index].
+					    lower = s->upper;
+
+				if (dev_priv->surfaces[s->surface_index].
+				    upper == s->upper)
+					dev_priv->surfaces[s->surface_index].
+					    upper = s->lower;
+
+				dev_priv->surfaces[s->surface_index].refcount--;
+				if (dev_priv->surfaces[s->surface_index].
+				    refcount == 0)
+					dev_priv->surfaces[s->surface_index].
+					    flags = 0;
+				s->file_priv = NULL;
+				radeon_apply_surface_regs(s->surface_index,
+							  dev_priv);
+				return 0;
+			}
+		}
+	}
+	return 1;
+}
+
+static void radeon_surfaces_release(struct drm_file *file_priv,
+				    drm_radeon_private_t * dev_priv)
+{
+	int i;
+	for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
+		if (dev_priv->virt_surfaces[i].file_priv == file_priv)
+			free_surface(file_priv, dev_priv,
+				     dev_priv->virt_surfaces[i].lower);
+	}
+}
+
+/* ================================================================
+ * IOCTL functions
+ */
+static int radeon_surface_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_surface_alloc_t *alloc = data;
+
+	if (alloc_surface(alloc, dev_priv, file_priv) == -1)
+		return -EINVAL;
+	else
+		return 0;
+}
+
+static int radeon_surface_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_surface_free_t *memfree = data;
+
+	if (free_surface(file_priv, dev_priv, memfree->address))
+		return -EINVAL;
+	else
+		return 0;
+}
+
+static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	drm_radeon_clear_t *clear = data;
+	drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
+		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
+
+	if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
+			       sarea_priv->nbox * sizeof(depth_boxes[0])))
+		return -EFAULT;
+
+	radeon_cp_dispatch_clear(dev, clear, depth_boxes);
+
+	COMMIT_RING();
+	return 0;
+}
+
+/* Not sure why this isn't set all the time:
+ */
+static int radeon_do_init_pageflip(struct drm_device * dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+
+	DRM_DEBUG("\n");
+
+	BEGIN_RING(6);
+	RADEON_WAIT_UNTIL_3D_IDLE();
+	OUT_RING(CP_PACKET0(RADEON_CRTC_OFFSET_CNTL, 0));
+	OUT_RING(RADEON_READ(RADEON_CRTC_OFFSET_CNTL) |
+		 RADEON_CRTC_OFFSET_FLIP_CNTL);
+	OUT_RING(CP_PACKET0(RADEON_CRTC2_OFFSET_CNTL, 0));
+	OUT_RING(RADEON_READ(RADEON_CRTC2_OFFSET_CNTL) |
+		 RADEON_CRTC_OFFSET_FLIP_CNTL);
+	ADVANCE_RING();
+
+	dev_priv->page_flipping = 1;
+
+	if (dev_priv->sarea_priv->pfCurrentPage != 1)
+		dev_priv->sarea_priv->pfCurrentPage = 0;
+
+	return 0;
+}
+
+/* Swapping and flipping are different operations, need different ioctls.
+ * They can & should be intermixed to support multiple 3d windows.
+ */
+static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+	if (!dev_priv->page_flipping)
+		radeon_do_init_pageflip(dev);
+
+	radeon_cp_dispatch_flip(dev);
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
+		sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
+
+	radeon_cp_dispatch_swap(dev);
+	dev_priv->sarea_priv->ctx_owner = 0;
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_radeon_vertex_t *vertex = data;
+	drm_radeon_tcl_prim_t prim;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
+		  DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);
+
+	if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
+		DRM_ERROR("buffer index %d (of %d max)\n",
+			  vertex->idx, dma->buf_count - 1);
+		return -EINVAL;
+	}
+	if (vertex->prim < 0 || vertex->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
+		DRM_ERROR("buffer prim %d\n", vertex->prim);
+		return -EINVAL;
+	}
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+	VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+	buf = dma->buflist[vertex->idx];
+
+	if (buf->file_priv != file_priv) {
+		DRM_ERROR("process %d using buffer owned by %p\n",
+			  DRM_CURRENTPID, buf->file_priv);
+		return -EINVAL;
+	}
+	if (buf->pending) {
+		DRM_ERROR("sending pending buffer %d\n", vertex->idx);
+		return -EINVAL;
+	}
+
+	/* Build up a prim_t record:
+	 */
+	if (vertex->count) {
+		buf->used = vertex->count;	/* not used? */
+
+		if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
+			if (radeon_emit_state(dev_priv, file_priv,
+					      &sarea_priv->context_state,
+					      sarea_priv->tex_state,
+					      sarea_priv->dirty)) {
+				DRM_ERROR("radeon_emit_state failed\n");
+				return -EINVAL;
+			}
+
+			sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
+					       RADEON_UPLOAD_TEX1IMAGES |
+					       RADEON_UPLOAD_TEX2IMAGES |
+					       RADEON_REQUIRE_QUIESCENCE);
+		}
+
+		prim.start = 0;
+		prim.finish = vertex->count;	/* unused */
+		prim.prim = vertex->prim;
+		prim.numverts = vertex->count;
+		prim.vc_format = dev_priv->sarea_priv->vc_format;
+
+		radeon_cp_dispatch_vertex(dev, buf, &prim);
+	}
+
+	if (vertex->discard) {
+		radeon_cp_discard_buffer(dev, buf);
+	}
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_radeon_indices_t *elts = data;
+	drm_radeon_tcl_prim_t prim;
+	int count;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n",
+		  DRM_CURRENTPID, elts->idx, elts->start, elts->end,
+		  elts->discard);
+
+	if (elts->idx < 0 || elts->idx >= dma->buf_count) {
+		DRM_ERROR("buffer index %d (of %d max)\n",
+			  elts->idx, dma->buf_count - 1);
+		return -EINVAL;
+	}
+	if (elts->prim < 0 || elts->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
+		DRM_ERROR("buffer prim %d\n", elts->prim);
+		return -EINVAL;
+	}
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+	VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+	buf = dma->buflist[elts->idx];
+
+	if (buf->file_priv != file_priv) {
+		DRM_ERROR("process %d using buffer owned by %p\n",
+			  DRM_CURRENTPID, buf->file_priv);
+		return -EINVAL;
+	}
+	if (buf->pending) {
+		DRM_ERROR("sending pending buffer %d\n", elts->idx);
+		return -EINVAL;
+	}
+
+	count = (elts->end - elts->start) / sizeof(u16);
+	elts->start -= RADEON_INDEX_PRIM_OFFSET;
+
+	if (elts->start & 0x7) {
+		DRM_ERROR("misaligned buffer 0x%x\n", elts->start);
+		return -EINVAL;
+	}
+	if (elts->start < buf->used) {
+		DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);
+		return -EINVAL;
+	}
+
+	buf->used = elts->end;
+
+	if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
+		if (radeon_emit_state(dev_priv, file_priv,
+				      &sarea_priv->context_state,
+				      sarea_priv->tex_state,
+				      sarea_priv->dirty)) {
+			DRM_ERROR("radeon_emit_state failed\n");
+			return -EINVAL;
+		}
+
+		sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
+				       RADEON_UPLOAD_TEX1IMAGES |
+				       RADEON_UPLOAD_TEX2IMAGES |
+				       RADEON_REQUIRE_QUIESCENCE);
+	}
+
+	/* Build up a prim_t record:
+	 */
+	prim.start = elts->start;
+	prim.finish = elts->end;
+	prim.prim = elts->prim;
+	prim.offset = 0;	/* offset from start of dma buffers */
+	prim.numverts = RADEON_MAX_VB_VERTS;	/* duh */
+	prim.vc_format = dev_priv->sarea_priv->vc_format;
+
+	radeon_cp_dispatch_indices(dev, buf, &prim);
+	if (elts->discard) {
+		radeon_cp_discard_buffer(dev, buf);
+	}
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int radeon_cp_texture(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_texture_t *tex = data;
+	drm_radeon_tex_image_t image;
+	int ret;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (tex->image == NULL) {
+		DRM_ERROR("null texture image!\n");
+		return -EINVAL;
+	}
+
+	if (DRM_COPY_FROM_USER(&image,
+			       (drm_radeon_tex_image_t __user *) tex->image,
+			       sizeof(image)))
+		return -EFAULT;
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+	VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+	ret = radeon_cp_dispatch_texture(dev, file_priv, tex, &image);
+
+	return ret;
+}
+
+static int radeon_cp_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_stipple_t *stipple = data;
+	u32 mask[32];
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
+		return -EFAULT;
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+
+	radeon_cp_dispatch_stipple(dev, mask);
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_radeon_indirect_t *indirect = data;
+	RING_LOCALS;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",
+		  indirect->idx, indirect->start, indirect->end,
+		  indirect->discard);
+
+	if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
+		DRM_ERROR("buffer index %d (of %d max)\n",
+			  indirect->idx, dma->buf_count - 1);
+		return -EINVAL;
+	}
+
+	buf = dma->buflist[indirect->idx];
+
+	if (buf->file_priv != file_priv) {
+		DRM_ERROR("process %d using buffer owned by %p\n",
+			  DRM_CURRENTPID, buf->file_priv);
+		return -EINVAL;
+	}
+	if (buf->pending) {
+		DRM_ERROR("sending pending buffer %d\n", indirect->idx);
+		return -EINVAL;
+	}
+
+	if (indirect->start < buf->used) {
+		DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
+			  indirect->start, buf->used);
+		return -EINVAL;
+	}
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+	VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+	buf->used = indirect->end;
+
+	/* Wait for the 3D stream to idle before the indirect buffer
+	 * containing 2D acceleration commands is processed.
+	 */
+	BEGIN_RING(2);
+
+	RADEON_WAIT_UNTIL_3D_IDLE();
+
+	ADVANCE_RING();
+
+	/* Dispatch the indirect buffer full of commands from the
+	 * X server.  This is insecure and is thus only available to
+	 * privileged clients.
+	 */
+	radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end);
+	if (indirect->discard) {
+		radeon_cp_discard_buffer(dev, buf);
+	}
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_radeon_vertex2_t *vertex = data;
+	int i;
+	unsigned char laststate;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DRM_DEBUG("pid=%d index=%d discard=%d\n",
+		  DRM_CURRENTPID, vertex->idx, vertex->discard);
+
+	if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
+		DRM_ERROR("buffer index %d (of %d max)\n",
+			  vertex->idx, dma->buf_count - 1);
+		return -EINVAL;
+	}
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+	VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+	buf = dma->buflist[vertex->idx];
+
+	if (buf->file_priv != file_priv) {
+		DRM_ERROR("process %d using buffer owned by %p\n",
+			  DRM_CURRENTPID, buf->file_priv);
+		return -EINVAL;
+	}
+
+	if (buf->pending) {
+		DRM_ERROR("sending pending buffer %d\n", vertex->idx);
+		return -EINVAL;
+	}
+
+	if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
+		return -EINVAL;
+
+	for (laststate = 0xff, i = 0; i < vertex->nr_prims; i++) {
+		drm_radeon_prim_t prim;
+		drm_radeon_tcl_prim_t tclprim;
+
+		if (DRM_COPY_FROM_USER(&prim, &vertex->prim[i], sizeof(prim)))
+			return -EFAULT;
+
+		if (prim.stateidx != laststate) {
+			drm_radeon_state_t state;
+
+			if (DRM_COPY_FROM_USER(&state,
+					       &vertex->state[prim.stateidx],
+					       sizeof(state)))
+				return -EFAULT;
+
+			if (radeon_emit_state2(dev_priv, file_priv, &state)) {
+				DRM_ERROR("radeon_emit_state2 failed\n");
+				return -EINVAL;
+			}
+
+			laststate = prim.stateidx;
+		}
+
+		tclprim.start = prim.start;
+		tclprim.finish = prim.finish;
+		tclprim.prim = prim.prim;
+		tclprim.vc_format = prim.vc_format;
+
+		if (prim.prim & RADEON_PRIM_WALK_IND) {
+			tclprim.offset = prim.numverts * 64;
+			tclprim.numverts = RADEON_MAX_VB_VERTS;	/* duh */
+
+			radeon_cp_dispatch_indices(dev, buf, &tclprim);
+		} else {
+			tclprim.numverts = prim.numverts;
+			tclprim.offset = 0;	/* not used */
+
+			radeon_cp_dispatch_vertex(dev, buf, &tclprim);
+		}
+
+		if (sarea_priv->nbox == 1)
+			sarea_priv->nbox = 0;
+	}
+
+	if (vertex->discard) {
+		radeon_cp_discard_buffer(dev, buf);
+	}
+
+	COMMIT_RING();
+	return 0;
+}
+
+static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
+			       struct drm_file *file_priv,
+			       drm_radeon_cmd_header_t header,
+			       drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	int id = (int)header.packet.packet_id;
+	int sz, reg;
+	int *data = (int *)cmdbuf->buf;
+	RING_LOCALS;
+
+	if (id >= RADEON_MAX_STATE_PACKETS)
+		return -EINVAL;
+
+	sz = packet[id].len;
+	reg = packet[id].start;
+
+	if (sz * sizeof(int) > cmdbuf->bufsz) {
+		DRM_ERROR("Packet size provided larger than data provided\n");
+		return -EINVAL;
+	}
+
+	if (radeon_check_and_fixup_packets(dev_priv, file_priv, id, data)) {
+		DRM_ERROR("Packet verification failed\n");
+		return -EINVAL;
+	}
+
+	BEGIN_RING(sz + 1);
+	OUT_RING(CP_PACKET0(reg, (sz - 1)));
+	OUT_RING_TABLE(data, sz);
+	ADVANCE_RING();
+
+	cmdbuf->buf += sz * sizeof(int);
+	cmdbuf->bufsz -= sz * sizeof(int);
+	return 0;
+}
+
+static __inline__ int radeon_emit_scalars(drm_radeon_private_t *dev_priv,
+					  drm_radeon_cmd_header_t header,
+					  drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	int sz = header.scalars.count;
+	int start = header.scalars.offset;
+	int stride = header.scalars.stride;
+	RING_LOCALS;
+
+	BEGIN_RING(3 + sz);
+	OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
+	OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
+	OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
+	OUT_RING_TABLE(cmdbuf->buf, sz);
+	ADVANCE_RING();
+	cmdbuf->buf += sz * sizeof(int);
+	cmdbuf->bufsz -= sz * sizeof(int);
+	return 0;
+}
+
+/* Identical to radeon_emit_scalars() except that the scalar index is
+ * offset by 0x100.
+ */
+static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv,
+					   drm_radeon_cmd_header_t header,
+					   drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	int sz = header.scalars.count;
+	int start = ((unsigned int)header.scalars.offset) + 0x100;
+	int stride = header.scalars.stride;
+	RING_LOCALS;
+
+	BEGIN_RING(3 + sz);
+	OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
+	OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
+	OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
+	OUT_RING_TABLE(cmdbuf->buf, sz);
+	ADVANCE_RING();
+	cmdbuf->buf += sz * sizeof(int);
+	cmdbuf->bufsz -= sz * sizeof(int);
+	return 0;
+}
+
+static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv,
+					  drm_radeon_cmd_header_t header,
+					  drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	int sz = header.vectors.count;
+	int start = header.vectors.offset;
+	int stride = header.vectors.stride;
+	RING_LOCALS;
+
+	BEGIN_RING(5 + sz);
+	OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
+	OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
+	OUT_RING(start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
+	OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
+	OUT_RING_TABLE(cmdbuf->buf, sz);
+	ADVANCE_RING();
+
+	cmdbuf->buf += sz * sizeof(int);
+	cmdbuf->bufsz -= sz * sizeof(int);
+	return 0;
+}
+
+static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
+					  drm_radeon_cmd_header_t header,
+					  drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	int sz = header.veclinear.count * 4;
+	int start = header.veclinear.addr_lo | (header.veclinear.addr_hi << 8);
+	RING_LOCALS;
+
+	if (!sz)
+		return 0;
+	if (sz * 4 > cmdbuf->bufsz)
+		return -EINVAL;
+
+	BEGIN_RING(5 + sz);
+	OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
+	OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
+	OUT_RING(start | (1 << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
+	OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
+	OUT_RING_TABLE(cmdbuf->buf, sz);
+	ADVANCE_RING();
+
+	cmdbuf->buf += sz * sizeof(int);
+	cmdbuf->bufsz -= sz * sizeof(int);
+	return 0;
+}
+
+static int radeon_emit_packet3(struct drm_device * dev,
+			       struct drm_file *file_priv,
+			       drm_radeon_kcmd_buffer_t *cmdbuf)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	unsigned int cmdsz;
+	int ret;
+	RING_LOCALS;
+
+	DRM_DEBUG("\n");
+
+	if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv,
+						  cmdbuf, &cmdsz))) {
+		DRM_ERROR("Packet verification failed\n");
+		return ret;
+	}
+
+	BEGIN_RING(cmdsz);
+	OUT_RING_TABLE(cmdbuf->buf, cmdsz);
+	ADVANCE_RING();
+
+	cmdbuf->buf += cmdsz * 4;
+	cmdbuf->bufsz -= cmdsz * 4;
+	return 0;
+}
+
+static int radeon_emit_packet3_cliprect(struct drm_device *dev,
+					struct drm_file *file_priv,
+					drm_radeon_kcmd_buffer_t *cmdbuf,
+					int orig_nbox)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_clip_rect box;
+	unsigned int cmdsz;
+	int ret;
+	struct drm_clip_rect __user *boxes = cmdbuf->boxes;
+	int i = 0;
+	RING_LOCALS;
+
+	DRM_DEBUG("\n");
+
+	if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv,
+						  cmdbuf, &cmdsz))) {
+		DRM_ERROR("Packet verification failed\n");
+		return ret;
+	}
+
+	if (!orig_nbox)
+		goto out;
+
+	do {
+		if (i < cmdbuf->nbox) {
+			if (DRM_COPY_FROM_USER(&box, &boxes[i], sizeof(box)))
+				return -EFAULT;
+			/* FIXME The second and subsequent times round
+			 * this loop, send a WAIT_UNTIL_3D_IDLE before
+			 * calling emit_clip_rect(). This fixes a
+			 * lockup on fast machines when sending
+			 * several cliprects with a cmdbuf, as when
+			 * waving a 2D window over a 3D
+			 * window. Something in the commands from user
+			 * space seems to hang the card when they're
+			 * sent several times in a row. That would be
+			 * the correct place to fix it but this works
+			 * around it until I can figure that out - Tim
+			 * Smith */
+			if (i) {
+				BEGIN_RING(2);
+				RADEON_WAIT_UNTIL_3D_IDLE();
+				ADVANCE_RING();
+			}
+			radeon_emit_clip_rect(dev_priv, &box);
+		}
+
+		BEGIN_RING(cmdsz);
+		OUT_RING_TABLE(cmdbuf->buf, cmdsz);
+		ADVANCE_RING();
+
+	} while (++i < cmdbuf->nbox);
+	if (cmdbuf->nbox == 1)
+		cmdbuf->nbox = 0;
+
+      out:
+	cmdbuf->buf += cmdsz * 4;
+	cmdbuf->bufsz -= cmdsz * 4;
+	return 0;
+}
+
+static int radeon_emit_wait(struct drm_device * dev, int flags)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	RING_LOCALS;
+
+	DRM_DEBUG("%x\n", flags);
+	switch (flags) {
+	case RADEON_WAIT_2D:
+		BEGIN_RING(2);
+		RADEON_WAIT_UNTIL_2D_IDLE();
+		ADVANCE_RING();
+		break;
+	case RADEON_WAIT_3D:
+		BEGIN_RING(2);
+		RADEON_WAIT_UNTIL_3D_IDLE();
+		ADVANCE_RING();
+		break;
+	case RADEON_WAIT_2D | RADEON_WAIT_3D:
+		BEGIN_RING(2);
+		RADEON_WAIT_UNTIL_IDLE();
+		ADVANCE_RING();
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf = NULL;
+	int idx;
+	drm_radeon_kcmd_buffer_t *cmdbuf = data;
+	drm_radeon_cmd_header_t header;
+	int orig_nbox, orig_bufsz;
+	char *kbuf = NULL;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	RING_SPACE_TEST_WITH_RETURN(dev_priv);
+	VB_AGE_TEST_WITH_RETURN(dev_priv);
+
+	if (cmdbuf->bufsz > 64 * 1024 || cmdbuf->bufsz < 0) {
+		return -EINVAL;
+	}
+
+	/* Allocate an in-kernel area and copy in the cmdbuf.  Do this to avoid
+	 * races between checking values and using those values in other code,
+	 * and simply to avoid a lot of function calls to copy in data.
+	 */
+	orig_bufsz = cmdbuf->bufsz;
+	if (orig_bufsz != 0) {
+		kbuf = drm_alloc(cmdbuf->bufsz, DRM_MEM_DRIVER);
+		if (kbuf == NULL)
+			return -ENOMEM;
+		if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf->buf,
+				       cmdbuf->bufsz)) {
+			drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
+			return -EFAULT;
+		}
+		cmdbuf->buf = kbuf;
+	}
+
+	orig_nbox = cmdbuf->nbox;
+
+	if (dev_priv->microcode_version == UCODE_R300) {
+		int temp;
+		temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);
+
+		if (orig_bufsz != 0)
+			drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
+
+		return temp;
+	}
+
+	/* microcode_version != r300 */
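+	/* The non-r300 command stream is a sequence of one-dword headers,
+	 * each optionally followed by payload dwords; the emit helpers
+	 * called below consume that payload and advance cmdbuf->buf and
+	 * cmdbuf->bufsz past it.
+	 */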
+	while (cmdbuf->bufsz >= sizeof(header)) {
+
+		header.i = *(int *)cmdbuf->buf;
+		cmdbuf->buf += sizeof(header);
+		cmdbuf->bufsz -= sizeof(header);
+
+		switch (header.header.cmd_type) {
+		case RADEON_CMD_PACKET:
+			DRM_DEBUG("RADEON_CMD_PACKET\n");
+			if (radeon_emit_packets
+			    (dev_priv, file_priv, header, cmdbuf)) {
+				DRM_ERROR("radeon_emit_packets failed\n");
+				goto err;
+			}
+			break;
+
+		case RADEON_CMD_SCALARS:
+			DRM_DEBUG("RADEON_CMD_SCALARS\n");
+			if (radeon_emit_scalars(dev_priv, header, cmdbuf)) {
+				DRM_ERROR("radeon_emit_scalars failed\n");
+				goto err;
+			}
+			break;
+
+		case RADEON_CMD_VECTORS:
+			DRM_DEBUG("RADEON_CMD_VECTORS\n");
+			if (radeon_emit_vectors(dev_priv, header, cmdbuf)) {
+				DRM_ERROR("radeon_emit_vectors failed\n");
+				goto err;
+			}
+			break;
+
+		case RADEON_CMD_DMA_DISCARD:
+			DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
+			idx = header.dma.buf_idx;
+			if (idx < 0 || idx >= dma->buf_count) {
+				DRM_ERROR("buffer index %d (of %d max)\n",
+					  idx, dma->buf_count - 1);
+				goto err;
+			}
+
+			buf = dma->buflist[idx];
+			if (buf->file_priv != file_priv || buf->pending) {
+				DRM_ERROR("bad buffer %p %p %d\n",
+					  buf->file_priv, file_priv,
+					  buf->pending);
+				goto err;
+			}
+
+			radeon_cp_discard_buffer(dev, buf);
+			break;
+
+		case RADEON_CMD_PACKET3:
+			DRM_DEBUG("RADEON_CMD_PACKET3\n");
+			if (radeon_emit_packet3(dev, file_priv, cmdbuf)) {
+				DRM_ERROR("radeon_emit_packet3 failed\n");
+				goto err;
+			}
+			break;
+
+		case RADEON_CMD_PACKET3_CLIP:
+			DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n");
+			if (radeon_emit_packet3_cliprect
+			    (dev, file_priv, cmdbuf, orig_nbox)) {
+				DRM_ERROR("radeon_emit_packet3_clip failed\n");
+				goto err;
+			}
+			break;
+
+		case RADEON_CMD_SCALARS2:
+			DRM_DEBUG("RADEON_CMD_SCALARS2\n");
+			if (radeon_emit_scalars2(dev_priv, header, cmdbuf)) {
+				DRM_ERROR("radeon_emit_scalars2 failed\n");
+				goto err;
+			}
+			break;
+
+		case RADEON_CMD_WAIT:
+			DRM_DEBUG("RADEON_CMD_WAIT\n");
+			if (radeon_emit_wait(dev, header.wait.flags)) {
+				DRM_ERROR("radeon_emit_wait failed\n");
+				goto err;
+			}
+			break;
+		case RADEON_CMD_VECLINEAR:
+			DRM_DEBUG("RADEON_CMD_VECLINEAR\n");
+			if (radeon_emit_veclinear(dev_priv, header, cmdbuf)) {
+				DRM_ERROR("radeon_emit_veclinear failed\n");
+				goto err;
+			}
+			break;
+
+		default:
+			DRM_ERROR("bad cmd_type %d at %p\n",
+				  header.header.cmd_type,
+				  cmdbuf->buf - sizeof(header));
+			goto err;
+		}
+	}
+
+	if (orig_bufsz != 0)
+		drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
+
+	DRM_DEBUG("DONE\n");
+	COMMIT_RING();
+	return 0;
+
+      err:
+	if (orig_bufsz != 0)
+		drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
+	return -EINVAL;
+}
+
+static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_getparam_t *param = data;
+	int value;
+
+	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
+
+	switch (param->param) {
+	case RADEON_PARAM_GART_BUFFER_OFFSET:
+		value = dev_priv->gart_buffers_offset;
+		break;
+	case RADEON_PARAM_LAST_FRAME:
+		dev_priv->stats.last_frame_reads++;
+		value = GET_SCRATCH(0);
+		break;
+	case RADEON_PARAM_LAST_DISPATCH:
+		value = GET_SCRATCH(1);
+		break;
+	case RADEON_PARAM_LAST_CLEAR:
+		dev_priv->stats.last_clear_reads++;
+		value = GET_SCRATCH(2);
+		break;
+	case RADEON_PARAM_IRQ_NR:
+		value = dev->irq;
+		break;
+	case RADEON_PARAM_GART_BASE:
+		value = dev_priv->gart_vm_start;
+		break;
+	case RADEON_PARAM_REGISTER_HANDLE:
+		value = dev_priv->mmio->offset;
+		break;
+	case RADEON_PARAM_STATUS_HANDLE:
+		value = dev_priv->ring_rptr_offset;
+		break;
+#if BITS_PER_LONG == 32
+		/*
+		 * This ioctl() doesn't work on 64-bit platforms because hw_lock is a
+		 * pointer which can't fit into an int-sized variable.  According to
+		 * Michel Dänzer, the ioctl() is only used on embedded platforms, so
+		 * not supporting it shouldn't be a problem.  If the same functionality
+		 * is needed on 64-bit platforms, a new ioctl() would have to be added,
+		 * so backwards-compatibility for the embedded platforms can be
+		 * maintained.  --davidm 4-Feb-2004.
+		 */
+	case RADEON_PARAM_SAREA_HANDLE:
+		/* The lock is the first dword in the sarea. */
+		value = (long)dev->lock.hw_lock;
+		break;
+#endif
+	case RADEON_PARAM_GART_TEX_HANDLE:
+		value = dev_priv->gart_textures_offset;
+		break;
+	case RADEON_PARAM_SCRATCH_OFFSET:
+		if (!dev_priv->writeback_works)
+			return -EINVAL;
+		value = RADEON_SCRATCH_REG_OFFSET;
+		break;
+	case RADEON_PARAM_CARD_TYPE:
+		if (dev_priv->flags & RADEON_IS_PCIE)
+			value = RADEON_CARD_PCIE;
+		else if (dev_priv->flags & RADEON_IS_AGP)
+			value = RADEON_CARD_AGP;
+		else
+			value = RADEON_CARD_PCI;
+		break;
+	case RADEON_PARAM_VBLANK_CRTC:
+		value = radeon_vblank_crtc_get(dev);
+		break;
+	case RADEON_PARAM_FB_LOCATION:
+		value = radeon_read_fb_location(dev_priv);
+		break;
+	case RADEON_PARAM_NUM_GB_PIPES:
+		value = dev_priv->num_gb_pipes;
+		break;
+	default:
+		DRM_DEBUG("Invalid parameter %d\n", param->param);
+		return -EINVAL;
+	}
+
+	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
+		DRM_ERROR("copy_to_user\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_setparam_t *sp = data;
+	struct drm_radeon_driver_file_fields *radeon_priv;
+
+	switch (sp->param) {
+	case RADEON_SETPARAM_FB_LOCATION:
+		radeon_priv = file_priv->driver_priv;
+		radeon_priv->radeon_fb_delta = dev_priv->fb_location -
+		    sp->value;
+		break;
+	case RADEON_SETPARAM_SWITCH_TILING:
+		if (sp->value == 0) {
+			DRM_DEBUG("color tiling disabled\n");
+			dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO;
+			dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO;
+			dev_priv->sarea_priv->tiling_enabled = 0;
+		} else if (sp->value == 1) {
+			DRM_DEBUG("color tiling enabled\n");
+			dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO;
+			dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO;
+			dev_priv->sarea_priv->tiling_enabled = 1;
+		}
+		break;
+	case RADEON_SETPARAM_PCIGART_LOCATION:
+		dev_priv->pcigart_offset = sp->value;
+		dev_priv->pcigart_offset_set = 1;
+		break;
+	case RADEON_SETPARAM_NEW_MEMMAP:
+		dev_priv->new_memmap = sp->value;
+		break;
+	case RADEON_SETPARAM_PCIGART_TABLE_SIZE:
+		dev_priv->gart_info.table_size = sp->value;
+		if (dev_priv->gart_info.table_size < RADEON_PCIGART_TABLE_SIZE)
+			dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
+		break;
+	case RADEON_SETPARAM_VBLANK_CRTC:
+		return radeon_vblank_crtc_set(dev, sp->value);
+	default:
+		DRM_DEBUG("Invalid parameter %d\n", sp->param);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* When a client dies:
+ *    - Check for and clean up flipped page state
+ *    - Free any allocated GART memory.
+ *    - Free any allocated radeon surfaces.
+ *
+ * DRM infrastructure takes care of reclaiming dma buffers.
+ */
+void radeon_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+	if (dev->dev_private) {
+		drm_radeon_private_t *dev_priv = dev->dev_private;
+		dev_priv->page_flipping = 0;
+		radeon_mem_release(file_priv, dev_priv->gart_heap);
+		radeon_mem_release(file_priv, dev_priv->fb_heap);
+		radeon_surfaces_release(file_priv, dev_priv);
+	}
+}
+
+void radeon_driver_lastclose(struct drm_device *dev)
+{
+	if (dev->dev_private) {
+		drm_radeon_private_t *dev_priv = dev->dev_private;
+
+		if (dev_priv->sarea_priv &&
+		    dev_priv->sarea_priv->pfCurrentPage != 0)
+			radeon_cp_dispatch_flip(dev);
+	}
+
+	radeon_do_release(dev);
+}
+
+int radeon_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	struct drm_radeon_driver_file_fields *radeon_priv;
+
+	DRM_DEBUG("\n");
+	radeon_priv =
+	    (struct drm_radeon_driver_file_fields *)
+	    drm_alloc(sizeof(*radeon_priv), DRM_MEM_FILES);
+
+	if (!radeon_priv)
+		return -ENOMEM;
+
+	file_priv->driver_priv = radeon_priv;
+
+	if (dev_priv)
+		radeon_priv->radeon_fb_delta = dev_priv->fb_location;
+	else
+		radeon_priv->radeon_fb_delta = 0;
+	return 0;
+}
+
+void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+	struct drm_radeon_driver_file_fields *radeon_priv =
+	    file_priv->driver_priv;
+
+	drm_free(radeon_priv, sizeof(*radeon_priv), DRM_MEM_FILES);
+}
+
+struct drm_ioctl_desc radeon_ioctls[] = {
+	DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH)
+};
+
+int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
diff --git a/drivers/gpu/drm/savage/Makefile b/drivers/gpu/drm/savage/Makefile
new file mode 100644
index 000000000000..d8f84ac7bb26
--- /dev/null
+++ b/drivers/gpu/drm/savage/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y = -Iinclude/drm
+savage-y := savage_drv.o savage_bci.o savage_state.o
+
+obj-$(CONFIG_DRM_SAVAGE) += savage.o
+
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
new file mode 100644
index 000000000000..d465b2f9c1cd
--- /dev/null
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -0,0 +1,1095 @@
+/* savage_bci.c -- BCI support for Savage
+ *
+ * Copyright 2004  Felix Kuehling
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "drmP.h"
+#include "savage_drm.h"
+#include "savage_drv.h"
+
+/* Need a long timeout: shadow status updates can take a while,
+ * and so can waiting for events when the queue is full. */
+#define SAVAGE_DEFAULT_USEC_TIMEOUT	1000000	/* 1s */
+#define SAVAGE_EVENT_USEC_TIMEOUT	5000000	/* 5s */
+#define SAVAGE_FREELIST_DEBUG		0
+
+static int savage_do_cleanup_bci(struct drm_device *dev);
+
+static int
+savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n)
+{
+	uint32_t mask = dev_priv->status_used_mask;
+	uint32_t threshold = dev_priv->bci_threshold_hi;
+	uint32_t status;
+	int i;
+
+#if SAVAGE_BCI_DEBUG
+	if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold)
+		DRM_ERROR("Trying to emit %d words "
+			  "(more than guaranteed space in COB)\n", n);
+#endif
+
+	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
+		DRM_MEMORYBARRIER();
+		status = dev_priv->status_ptr[0];
+		if ((status & mask) < threshold)
+			return 0;
+		DRM_UDELAY(1);
+	}
+
+#if SAVAGE_BCI_DEBUG
+	DRM_ERROR("failed!\n");
+	DRM_INFO("   status=0x%08x, threshold=0x%08x\n", status, threshold);
+#endif
+	return -EBUSY;
+}
+
+static int
+savage_bci_wait_fifo_s3d(drm_savage_private_t * dev_priv, unsigned int n)
+{
+	uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
+	uint32_t status;
+	int i;
+
+	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
+		status = SAVAGE_READ(SAVAGE_STATUS_WORD0);
+		if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed)
+			return 0;
+		DRM_UDELAY(1);
+	}
+
+#if SAVAGE_BCI_DEBUG
+	DRM_ERROR("failed!\n");
+	DRM_INFO("   status=0x%08x\n", status);
+#endif
+	return -EBUSY;
+}
+
+static int
+savage_bci_wait_fifo_s4(drm_savage_private_t * dev_priv, unsigned int n)
+{
+	uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
+	uint32_t status;
+	int i;
+
+	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
+		status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0);
+		if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed)
+			return 0;
+		DRM_UDELAY(1);
+	}
+
+#if SAVAGE_BCI_DEBUG
+	DRM_ERROR("failed!\n");
+	DRM_INFO("   status=0x%08x\n", status);
+#endif
+	return -EBUSY;
+}
+
+/*
+ * Waiting for events.
+ *
+ * The BIOS resets the event tag to 0 on mode changes. Therefore we
+ * never emit 0 to the event tag. If we find a 0 event tag we know the
+ * BIOS stomped on it and return success assuming that the BIOS waited
+ * for engine idle.
+ *
+ * Note: if the Xserver uses the event tag it has to follow the same
+ * rule. Otherwise there may be glitches every 2^16 events.
+ */
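+/*
+ * Worked example of the wrap-safe comparison used by the wait functions
+ * below: with e == 0xfff0 and a hardware tag of 0x0005,
+ * ((0x0005 - 0xfff0) & 0xffff) == 0x0015 <= 0x7fff, so the event counts as
+ * completed even though the tag wrapped; a tag of 0xffef yields 0xffff,
+ * which is > 0x7fff, so we keep waiting.
+ */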
+static int
+savage_bci_wait_event_shadow(drm_savage_private_t * dev_priv, uint16_t e)
+{
+	uint32_t status;
+	int i;
+
+	for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
+		DRM_MEMORYBARRIER();
+		status = dev_priv->status_ptr[1];
+		if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
+		    (status & 0xffff) == 0)
+			return 0;
+		DRM_UDELAY(1);
+	}
+
+#if SAVAGE_BCI_DEBUG
+	DRM_ERROR("failed!\n");
+	DRM_INFO("   status=0x%08x, e=0x%04x\n", status, e);
+#endif
+
+	return -EBUSY;
+}
+
+static int
+savage_bci_wait_event_reg(drm_savage_private_t * dev_priv, uint16_t e)
+{
+	uint32_t status;
+	int i;
+
+	for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
+		status = SAVAGE_READ(SAVAGE_STATUS_WORD1);
+		if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
+		    (status & 0xffff) == 0)
+			return 0;
+		DRM_UDELAY(1);
+	}
+
+#if SAVAGE_BCI_DEBUG
+	DRM_ERROR("failed!\n");
+	DRM_INFO("   status=0x%08x, e=0x%04x\n", status, e);
+#endif
+
+	return -EBUSY;
+}
+
+uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv,
+			       unsigned int flags)
+{
+	uint16_t count;
+	BCI_LOCALS;
+
+	if (dev_priv->status_ptr) {
+		/* coordinate with Xserver */
+		count = dev_priv->status_ptr[1023];
+		if (count < dev_priv->event_counter)
+			dev_priv->event_wrap++;
+	} else {
+		count = dev_priv->event_counter;
+	}
+	count = (count + 1) & 0xffff;
+	if (count == 0) {
+		count++;	/* See the comment above savage_bci_wait_event_*. */
+		dev_priv->event_wrap++;
+	}
+	dev_priv->event_counter = count;
+	if (dev_priv->status_ptr)
+		dev_priv->status_ptr[1023] = (uint32_t) count;
+
+	if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) {
+		unsigned int wait_cmd = BCI_CMD_WAIT;
+		if ((flags & SAVAGE_WAIT_2D))
+			wait_cmd |= BCI_CMD_WAIT_2D;
+		if ((flags & SAVAGE_WAIT_3D))
+			wait_cmd |= BCI_CMD_WAIT_3D;
+		BEGIN_BCI(2);
+		BCI_WRITE(wait_cmd);
+	} else {
+		BEGIN_BCI(1);
+	}
+	BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t) count);
+
+	return count;
+}
+
+/*
+ * Freelist management
+ */
+static int savage_freelist_init(struct drm_device * dev)
+{
+	drm_savage_private_t *dev_priv = dev->dev_private;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *buf;
+	drm_savage_buf_priv_t *entry;
+	int i;
+	DRM_DEBUG("count=%d\n", dma->buf_count);
+
+	dev_priv->head.next = &dev_priv->tail;
+	dev_priv->head.prev = NULL;
+	dev_priv->head.buf = NULL;
+
+	dev_priv->tail.next = NULL;
+	dev_priv->tail.prev = &dev_priv->head;
+	dev_priv->tail.buf = NULL;
+
+	for (i = 0; i < dma->buf_count; i++) {
+		buf = dma->buflist[i];
+		entry = buf->dev_private;
+
+		SET_AGE(&entry->age, 0, 0);
+		entry->buf = buf;
+
+		entry->next = dev_priv->head.next;
+		entry->prev = &dev_priv->head;
+		dev_priv->head.next->prev = entry;
+		dev_priv->head.next = entry;
+	}
+
+	return 0;
+}
+
+static struct drm_buf *savage_freelist_get(struct drm_device * dev)
+{
+	drm_savage_private_t *dev_priv = dev->dev_private;
+	drm_savage_buf_priv_t *tail = dev_priv->tail.prev;
+	uint16_t event;
+	unsigned int wrap;
+	DRM_DEBUG("\n");
+
+	UPDATE_EVENT_COUNTER();
+	if (dev_priv->status_ptr)
+		event = dev_priv->status_ptr[1] & 0xffff;
+	else
+		event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
+	wrap = dev_priv->event_wrap;
+	if (event > dev_priv->event_counter)
+		wrap--;		/* hardware hasn't passed the last wrap yet */
+
+	DRM_DEBUG("   tail=0x%04x %d\n", tail->age.event, tail->age.wrap);
+	DRM_DEBUG("   head=0x%04x %d\n", event, wrap);
+
+	if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) {
+		drm_savage_buf_priv_t *next = tail->next;
+		drm_savage_buf_priv_t *prev = tail->prev;
+		prev->next = next;
+		next->prev = prev;
+		tail->next = tail->prev = NULL;
+		return tail->buf;
+	}
+
+	DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf);
+	return NULL;
+}
+
+void savage_freelist_put(struct drm_device * dev, struct drm_buf * buf)
+{
+	drm_savage_private_t *dev_priv = dev->dev_private;
+	drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next;
+
+	DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap);
+
+	if (entry->next != NULL || entry->prev != NULL) {
+		DRM_ERROR("entry already on freelist.\n");
+		return;
+	}
+
+	prev = &dev_priv->head;
+	next = prev->next;
+	prev->next = entry;
+	next->prev = entry;
+	entry->prev = prev;
+	entry->next = next;
+}
+
+/*
+ * Command DMA
+ */
+static int savage_dma_init(drm_savage_private_t * dev_priv)
+{
+	unsigned int i;
+
+	dev_priv->nr_dma_pages = dev_priv->cmd_dma->size /
+	    (SAVAGE_DMA_PAGE_SIZE * 4);
+	dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) *
+					dev_priv->nr_dma_pages, DRM_MEM_DRIVER);
+	if (dev_priv->dma_pages == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
+		SET_AGE(&dev_priv->dma_pages[i].age, 0, 0);
+		dev_priv->dma_pages[i].used = 0;
+		dev_priv->dma_pages[i].flushed = 0;
+	}
+	SET_AGE(&dev_priv->last_dma_age, 0, 0);
+
+	dev_priv->first_dma_page = 0;
+	dev_priv->current_dma_page = 0;
+
+	return 0;
+}
+
+void savage_dma_reset(drm_savage_private_t * dev_priv)
+{
+	uint16_t event;
+	unsigned int wrap, i;
+	event = savage_bci_emit_event(dev_priv, 0);
+	wrap = dev_priv->event_wrap;
+	for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
+		SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
+		dev_priv->dma_pages[i].used = 0;
+		dev_priv->dma_pages[i].flushed = 0;
+	}
+	SET_AGE(&dev_priv->last_dma_age, event, wrap);
+	dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
+}
+
+void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page)
+{
+	uint16_t event;
+	unsigned int wrap;
+
+	/* Faked DMA buffer pages don't age. */
+	if (dev_priv->cmd_dma == &dev_priv->fake_dma)
+		return;
+
+	UPDATE_EVENT_COUNTER();
+	if (dev_priv->status_ptr)
+		event = dev_priv->status_ptr[1] & 0xffff;
+	else
+		event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
+	wrap = dev_priv->event_wrap;
+	if (event > dev_priv->event_counter)
+		wrap--;		/* hardware hasn't passed the last wrap yet */
+
+	if (dev_priv->dma_pages[page].age.wrap > wrap ||
+	    (dev_priv->dma_pages[page].age.wrap == wrap &&
+	     dev_priv->dma_pages[page].age.event > event)) {
+		if (dev_priv->wait_evnt(dev_priv,
+					dev_priv->dma_pages[page].age.event)
+		    < 0)
+			DRM_ERROR("wait_evnt failed!\n");
+	}
+}
+
+uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv, unsigned int n)
+{
+	unsigned int cur = dev_priv->current_dma_page;
+	unsigned int rest = SAVAGE_DMA_PAGE_SIZE -
+	    dev_priv->dma_pages[cur].used;
+	unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) /
+	    SAVAGE_DMA_PAGE_SIZE;
+	uint32_t *dma_ptr;
+	unsigned int i;
+
+	DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n",
+		  cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages);
+
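+	/* When n exceeds the space left in the current page (rest), nr_pages
+	 * is the number of additional pages needed, e.g. with a 1024-dword
+	 * page, rest == 100 and n == 1500 give nr_pages == 2.
+	 */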
+	if (cur + nr_pages < dev_priv->nr_dma_pages) {
+		dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
+		    cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
+		if (n < rest)
+			rest = n;
+		dev_priv->dma_pages[cur].used += rest;
+		n -= rest;
+		cur++;
+	} else {
+		dev_priv->dma_flush(dev_priv);
+		nr_pages =
+		    (n + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE;
+		for (i = cur; i < dev_priv->nr_dma_pages; ++i) {
+			dev_priv->dma_pages[i].age = dev_priv->last_dma_age;
+			dev_priv->dma_pages[i].used = 0;
+			dev_priv->dma_pages[i].flushed = 0;
+		}
+		dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle;
+		dev_priv->first_dma_page = cur = 0;
+	}
+	for (i = cur; nr_pages > 0; ++i, --nr_pages) {
+#if SAVAGE_DMA_DEBUG
+		if (dev_priv->dma_pages[i].used) {
+			DRM_ERROR("unflushed page %u: used=%u\n",
+				  i, dev_priv->dma_pages[i].used);
+		}
+#endif
+		if (n > SAVAGE_DMA_PAGE_SIZE)
+			dev_priv->dma_pages[i].used = SAVAGE_DMA_PAGE_SIZE;
+		else
+			dev_priv->dma_pages[i].used = n;
+		n -= SAVAGE_DMA_PAGE_SIZE;
+	}
+	dev_priv->current_dma_page = --i;
+
+	DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n",
+		  i, dev_priv->dma_pages[i].used, n);
+
+	savage_dma_wait(dev_priv, dev_priv->current_dma_page);
+
+	return dma_ptr;
+}
+
+static void savage_dma_flush(drm_savage_private_t * dev_priv)
+{
+	unsigned int first = dev_priv->first_dma_page;
+	unsigned int cur = dev_priv->current_dma_page;
+	uint16_t event;
+	unsigned int wrap, pad, align, len, i;
+	unsigned long phys_addr;
+	BCI_LOCALS;
+
+	if (first == cur &&
+	    dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed)
+		return;
+
+	/* Pad the length to a multiple of 2 entries and align the start of
+	 * the next DMA block to a multiple of 8 entries. */
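+	/* e.g. used == 13: pad == 1 (rounds up to 14 entries) and align == 2,
+	 * so the next block starts at entry 16. */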
+	pad = -dev_priv->dma_pages[cur].used & 1;
+	align = -(dev_priv->dma_pages[cur].used + pad) & 7;
+
+	DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, "
+		  "pad=%u, align=%u\n",
+		  first, cur, dev_priv->dma_pages[first].flushed,
+		  dev_priv->dma_pages[cur].used, pad, align);
+
+	/* pad with noops */
+	if (pad) {
+		uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
+		    cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
+		dev_priv->dma_pages[cur].used += pad;
+		while (pad != 0) {
+			*dma_ptr++ = BCI_CMD_WAIT;
+			pad--;
+		}
+	}
+
+	DRM_MEMORYBARRIER();
+
+	/* do flush ... */
+	phys_addr = dev_priv->cmd_dma->offset +
+	    (first * SAVAGE_DMA_PAGE_SIZE +
+	     dev_priv->dma_pages[first].flushed) * 4;
+	len = (cur - first) * SAVAGE_DMA_PAGE_SIZE +
+	    dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed;
+
+	DRM_DEBUG("phys_addr=%lx, len=%u\n",
+		  phys_addr | dev_priv->dma_type, len);
+
+	BEGIN_BCI(3);
+	BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1);
+	BCI_WRITE(phys_addr | dev_priv->dma_type);
+	BCI_DMA(len);
+
+	/* fix alignment of the start of the next block */
+	dev_priv->dma_pages[cur].used += align;
+
+	/* age DMA pages */
+	event = savage_bci_emit_event(dev_priv, 0);
+	wrap = dev_priv->event_wrap;
+	for (i = first; i < cur; ++i) {
+		SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
+		dev_priv->dma_pages[i].used = 0;
+		dev_priv->dma_pages[i].flushed = 0;
+	}
+	/* age the current page only when it's full */
+	if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) {
+		SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap);
+		dev_priv->dma_pages[cur].used = 0;
+		dev_priv->dma_pages[cur].flushed = 0;
+		/* advance to next page */
+		cur++;
+		if (cur == dev_priv->nr_dma_pages)
+			cur = 0;
+		dev_priv->first_dma_page = dev_priv->current_dma_page = cur;
+	} else {
+		dev_priv->first_dma_page = cur;
+		dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used;
+	}
+	SET_AGE(&dev_priv->last_dma_age, event, wrap);
+
+	DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur,
+		  dev_priv->dma_pages[cur].used,
+		  dev_priv->dma_pages[cur].flushed);
+}
+
+static void savage_fake_dma_flush(drm_savage_private_t * dev_priv)
+{
+	unsigned int i, j;
+	BCI_LOCALS;
+
+	if (dev_priv->first_dma_page == dev_priv->current_dma_page &&
+	    dev_priv->dma_pages[dev_priv->current_dma_page].used == 0)
+		return;
+
+	DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n",
+		  dev_priv->first_dma_page, dev_priv->current_dma_page,
+		  dev_priv->dma_pages[dev_priv->current_dma_page].used);
+
+	for (i = dev_priv->first_dma_page;
+	     i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used;
+	     ++i) {
+		uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
+		    i * SAVAGE_DMA_PAGE_SIZE;
+#if SAVAGE_DMA_DEBUG
+		/* Sanity check: all pages except the last one must be full. */
+		if (i < dev_priv->current_dma_page &&
+		    dev_priv->dma_pages[i].used != SAVAGE_DMA_PAGE_SIZE) {
+			DRM_ERROR("partial DMA page %u: used=%u",
+				  i, dev_priv->dma_pages[i].used);
+		}
+#endif
+		BEGIN_BCI(dev_priv->dma_pages[i].used);
+		for (j = 0; j < dev_priv->dma_pages[i].used; ++j) {
+			BCI_WRITE(dma_ptr[j]);
+		}
+		dev_priv->dma_pages[i].used = 0;
+	}
+
+	/* reset to first page */
+	dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
+}
+
+int savage_driver_load(struct drm_device *dev, unsigned long chipset)
+{
+	drm_savage_private_t *dev_priv;
+
+	dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
+	if (dev_priv == NULL)
+		return -ENOMEM;
+
+	memset(dev_priv, 0, sizeof(drm_savage_private_t));
+	dev->dev_private = (void *)dev_priv;
+
+	dev_priv->chipset = (enum savage_family)chipset;
+
+	return 0;
+}
+
+
+/*
+ * Initialize mappings. On Savage4 and SavageIX the alignment
+ * and size of the aperture is not suitable for automatic MTRR setup
+ * in drm_addmap. Therefore we add them manually before the maps are
+ * initialized, and tear them down on last close.
+ */
+int savage_driver_firstopen(struct drm_device *dev)
+{
+	drm_savage_private_t *dev_priv = dev->dev_private;
+	unsigned long mmio_base, fb_base, fb_size, aperture_base;
+	/* fb_rsrc and aper_rsrc aren't really used currently, but still exist
+	 * in case we decide we need information on the BAR for BSD in the
+	 * future.
+	 */
+	unsigned int fb_rsrc, aper_rsrc;
+	int ret = 0;
+
+	dev_priv->mtrr[0].handle = -1;
+	dev_priv->mtrr[1].handle = -1;
+	dev_priv->mtrr[2].handle = -1;
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+		fb_rsrc = 0;
+		fb_base = drm_get_resource_start(dev, 0);
+		fb_size = SAVAGE_FB_SIZE_S3;
+		mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
+		aper_rsrc = 0;
+		aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
+		/* this should always be true */
+		if (drm_get_resource_len(dev, 0) == 0x08000000) {
+			/* Don't make MMIO write-combining! We need 3
+			 * MTRRs. */
+			dev_priv->mtrr[0].base = fb_base;
+			dev_priv->mtrr[0].size = 0x01000000;
+			dev_priv->mtrr[0].handle =
+			    drm_mtrr_add(dev_priv->mtrr[0].base,
+				         dev_priv->mtrr[0].size, DRM_MTRR_WC);
+			dev_priv->mtrr[1].base = fb_base + 0x02000000;
+			dev_priv->mtrr[1].size = 0x02000000;
+			dev_priv->mtrr[1].handle =
+			    drm_mtrr_add(dev_priv->mtrr[1].base,
+					 dev_priv->mtrr[1].size, DRM_MTRR_WC);
+			dev_priv->mtrr[2].base = fb_base + 0x04000000;
+			dev_priv->mtrr[2].size = 0x04000000;
+			dev_priv->mtrr[2].handle =
+			    drm_mtrr_add(dev_priv->mtrr[2].base,
+					 dev_priv->mtrr[2].size, DRM_MTRR_WC);
+		} else {
+			DRM_ERROR("strange pci_resource_len %08lx\n",
+				  drm_get_resource_len(dev, 0));
+		}
+	} else if (dev_priv->chipset != S3_SUPERSAVAGE &&
+		   dev_priv->chipset != S3_SAVAGE2000) {
+		mmio_base = drm_get_resource_start(dev, 0);
+		fb_rsrc = 1;
+		fb_base = drm_get_resource_start(dev, 1);
+		fb_size = SAVAGE_FB_SIZE_S4;
+		aper_rsrc = 1;
+		aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
+		/* this should always be true */
+		if (drm_get_resource_len(dev, 1) == 0x08000000) {
+			/* Can use one MTRR to cover both fb and
+			 * aperture. */
+			dev_priv->mtrr[0].base = fb_base;
+			dev_priv->mtrr[0].size = 0x08000000;
+			dev_priv->mtrr[0].handle =
+			    drm_mtrr_add(dev_priv->mtrr[0].base,
+					 dev_priv->mtrr[0].size, DRM_MTRR_WC);
+		} else {
+			DRM_ERROR("strange pci_resource_len %08lx\n",
+				  drm_get_resource_len(dev, 1));
+		}
+	} else {
+		mmio_base = drm_get_resource_start(dev, 0);
+		fb_rsrc = 1;
+		fb_base = drm_get_resource_start(dev, 1);
+		fb_size = drm_get_resource_len(dev, 1);
+		aper_rsrc = 2;
+		aperture_base = drm_get_resource_start(dev, 2);
+		/* Automatic MTRR setup will do the right thing. */
+	}
+
+	ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS,
+			 _DRM_READ_ONLY, &dev_priv->mmio);
+	if (ret)
+		return ret;
+
+	ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
+			 _DRM_WRITE_COMBINING, &dev_priv->fb);
+	if (ret)
+		return ret;
+
+	ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
+			 _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
+			 &dev_priv->aperture);
+	if (ret)
+		return ret;
+
+	return ret;
+}
+
+/*
+ * Delete MTRRs and free device-private data.
+ */
+void savage_driver_lastclose(struct drm_device *dev)
+{
+	drm_savage_private_t *dev_priv = dev->dev_private;
+	int i;
+
+	for (i = 0; i < 3; ++i)
+		if (dev_priv->mtrr[i].handle >= 0)
+			drm_mtrr_del(dev_priv->mtrr[i].handle,
+				 dev_priv->mtrr[i].base,
+				 dev_priv->mtrr[i].size, DRM_MTRR_WC);
+}
+
+int savage_driver_unload(struct drm_device *dev)
+{
+	drm_savage_private_t *dev_priv = dev->dev_private;
+
+	drm_free(dev_priv, sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
+
+	return 0;
+}
+
+static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
+{
+	drm_savage_private_t *dev_priv = dev->dev_private;
+
+	if (init->fb_bpp != 16 && init->fb_bpp != 32) {
+		DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp);
+		return -EINVAL;
+	}
+	if (init->depth_bpp != 16 && init->depth_bpp != 32) {
+		DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp);
+		return -EINVAL;
+	}
+	if (init->dma_type != SAVAGE_DMA_AGP &&
+	    init->dma_type != SAVAGE_DMA_PCI) {
+		DRM_ERROR("invalid dma memory type %d!\n", init->dma_type);
+		return -EINVAL;
+	}
+
+	dev_priv->cob_size = init->cob_size;
+	dev_priv->bci_threshold_lo = init->bci_threshold_lo;
+	dev_priv->bci_threshold_hi = init->bci_threshold_hi;
+	dev_priv->dma_type = init->dma_type;
+
+	dev_priv->fb_bpp = init->fb_bpp;
+	dev_priv->front_offset = init->front_offset;
+	dev_priv->front_pitch = init->front_pitch;
+	dev_priv->back_offset = init->back_offset;
+	dev_priv->back_pitch = init->back_pitch;
+	dev_priv->depth_bpp = init->depth_bpp;
+	dev_priv->depth_offset = init->depth_offset;
+	dev_priv->depth_pitch = init->depth_pitch;
+
+	dev_priv->texture_offset = init->texture_offset;
+	dev_priv->texture_size = init->texture_size;
+
+	dev_priv->sarea = drm_getsarea(dev);
+	if (!dev_priv->sarea) {
+		DRM_ERROR("could not find sarea!\n");
+		savage_do_cleanup_bci(dev);
+		return -EINVAL;
+	}
+	if (init->status_offset != 0) {
+		dev_priv->status = drm_core_findmap(dev, init->status_offset);
+		if (!dev_priv->status) {
+			DRM_ERROR("could not find shadow status region!\n");
+			savage_do_cleanup_bci(dev);
+			return -EINVAL;
+		}
+	} else {
+		dev_priv->status = NULL;
+	}
+	if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) {
+		dev->agp_buffer_token = init->buffers_offset;
+		dev->agp_buffer_map = drm_core_findmap(dev,
+						       init->buffers_offset);
+		if (!dev->agp_buffer_map) {
+			DRM_ERROR("could not find DMA buffer region!\n");
+			savage_do_cleanup_bci(dev);
+			return -EINVAL;
+		}
+		drm_core_ioremap(dev->agp_buffer_map, dev);
+		if (!dev->agp_buffer_map->handle) {
+			DRM_ERROR("failed to ioremap DMA buffer region!\n");
+			savage_do_cleanup_bci(dev);
+			return -ENOMEM;
+		}
+	}
+	if (init->agp_textures_offset) {
+		dev_priv->agp_textures =
+		    drm_core_findmap(dev, init->agp_textures_offset);
+		if (!dev_priv->agp_textures) {
+			DRM_ERROR("could not find agp texture region!\n");
+			savage_do_cleanup_bci(dev);
+			return -EINVAL;
+		}
+	} else {
+		dev_priv->agp_textures = NULL;
+	}
+
+	if (init->cmd_dma_offset) {
+		if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+			DRM_ERROR("command DMA not supported on "
+				  "Savage3D/MX/IX.\n");
+			savage_do_cleanup_bci(dev);
+			return -EINVAL;
+		}
+		if (dev->dma && dev->dma->buflist) {
+			DRM_ERROR("command and vertex DMA not supported "
+				  "at the same time.\n");
+			savage_do_cleanup_bci(dev);
+			return -EINVAL;
+		}
+		dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset);
+		if (!dev_priv->cmd_dma) {
+			DRM_ERROR("could not find command DMA region!\n");
+			savage_do_cleanup_bci(dev);
+			return -EINVAL;
+		}
+		if (dev_priv->dma_type == SAVAGE_DMA_AGP) {
+			if (dev_priv->cmd_dma->type != _DRM_AGP) {
+				DRM_ERROR("AGP command DMA region is not a "
+					  "_DRM_AGP map!\n");
+				savage_do_cleanup_bci(dev);
+				return -EINVAL;
+			}
+			drm_core_ioremap(dev_priv->cmd_dma, dev);
+			if (!dev_priv->cmd_dma->handle) {
+				DRM_ERROR("failed to ioremap command "
+					  "DMA region!\n");
+				savage_do_cleanup_bci(dev);
+				return -ENOMEM;
+			}
+		} else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) {
+			DRM_ERROR("PCI command DMA region is not a "
+				  "_DRM_CONSISTENT map!\n");
+			savage_do_cleanup_bci(dev);
+			return -EINVAL;
+		}
+	} else {
+		dev_priv->cmd_dma = NULL;
+	}
+
+	dev_priv->dma_flush = savage_dma_flush;
+	if (!dev_priv->cmd_dma) {
+		DRM_DEBUG("falling back to faked command DMA.\n");
+		dev_priv->fake_dma.offset = 0;
+		dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE;
+		dev_priv->fake_dma.type = _DRM_SHM;
+		dev_priv->fake_dma.handle = drm_alloc(SAVAGE_FAKE_DMA_SIZE,
+						      DRM_MEM_DRIVER);
+		if (!dev_priv->fake_dma.handle) {
+			DRM_ERROR("could not allocate faked DMA buffer!\n");
+			savage_do_cleanup_bci(dev);
+			return -ENOMEM;
+		}
+		dev_priv->cmd_dma = &dev_priv->fake_dma;
+		dev_priv->dma_flush = savage_fake_dma_flush;
+	}
+
+	dev_priv->sarea_priv =
+	    (drm_savage_sarea_t *) ((uint8_t *) dev_priv->sarea->handle +
+				    init->sarea_priv_offset);
+
+	/* setup bitmap descriptors */
+	{
+		unsigned int color_tile_format;
+		unsigned int depth_tile_format;
+		unsigned int front_stride, back_stride, depth_stride;
+		if (dev_priv->chipset <= S3_SAVAGE4) {
+			color_tile_format = dev_priv->fb_bpp == 16 ?
+			    SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
+			depth_tile_format = dev_priv->depth_bpp == 16 ?
+			    SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
+		} else {
+			color_tile_format = SAVAGE_BD_TILE_DEST;
+			depth_tile_format = SAVAGE_BD_TILE_DEST;
+		}
+		front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8);
+		back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8);
+		depth_stride =
+		    dev_priv->depth_pitch / (dev_priv->depth_bpp / 8);
+
+		dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE |
+		    (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
+		    (color_tile_format << SAVAGE_BD_TILE_SHIFT);
+
+		dev_priv->back_bd = back_stride | SAVAGE_BD_BW_DISABLE |
+		    (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
+		    (color_tile_format << SAVAGE_BD_TILE_SHIFT);
+
+		dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE |
+		    (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) |
+		    (depth_tile_format << SAVAGE_BD_TILE_SHIFT);
+	}
+
+	/* setup status and bci ptr */
+	dev_priv->event_counter = 0;
+	dev_priv->event_wrap = 0;
+	dev_priv->bci_ptr = (volatile uint32_t *)
+	    ((uint8_t *) dev_priv->mmio->handle + SAVAGE_BCI_OFFSET);
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+		dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D;
+	} else {
+		dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4;
+	}
+	if (dev_priv->status != NULL) {
+		dev_priv->status_ptr =
+		    (volatile uint32_t *)dev_priv->status->handle;
+		dev_priv->wait_fifo = savage_bci_wait_fifo_shadow;
+		dev_priv->wait_evnt = savage_bci_wait_event_shadow;
+		dev_priv->status_ptr[1023] = dev_priv->event_counter;
+	} else {
+		dev_priv->status_ptr = NULL;
+		if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+			dev_priv->wait_fifo = savage_bci_wait_fifo_s3d;
+		} else {
+			dev_priv->wait_fifo = savage_bci_wait_fifo_s4;
+		}
+		dev_priv->wait_evnt = savage_bci_wait_event_reg;
+	}
+
+	/* cliprect functions */
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset))
+		dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d;
+	else
+		dev_priv->emit_clip_rect = savage_emit_clip_rect_s4;
+
+	if (savage_freelist_init(dev) < 0) {
+		DRM_ERROR("could not initialize freelist\n");
+		savage_do_cleanup_bci(dev);
+		return -ENOMEM;
+	}
+
+	if (savage_dma_init(dev_priv) < 0) {
+		DRM_ERROR("could not initialize command DMA\n");
+		savage_do_cleanup_bci(dev);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int savage_do_cleanup_bci(struct drm_device * dev)
+{
+	drm_savage_private_t *dev_priv = dev->dev_private;
+
+	if (dev_priv->cmd_dma == &dev_priv->fake_dma) {
+		if (dev_priv->fake_dma.handle)
+			drm_free(dev_priv->fake_dma.handle,
+				 SAVAGE_FAKE_DMA_SIZE, DRM_MEM_DRIVER);
+	} else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
+		   dev_priv->cmd_dma->type == _DRM_AGP &&
+		   dev_priv->dma_type == SAVAGE_DMA_AGP)
+		drm_core_ioremapfree(dev_priv->cmd_dma, dev);
+
+	if (dev_priv->dma_type == SAVAGE_DMA_AGP &&
+	    dev->agp_buffer_map && dev->agp_buffer_map->handle) {
+		drm_core_ioremapfree(dev->agp_buffer_map, dev);
+		/* make sure the next instance (which may be running
+		 * in PCI mode) doesn't try to use an old
+		 * agp_buffer_map. */
+		dev->agp_buffer_map = NULL;
+	}
+
+	if (dev_priv->dma_pages)
+		drm_free(dev_priv->dma_pages,
+			 sizeof(drm_savage_dma_page_t) * dev_priv->nr_dma_pages,
+			 DRM_MEM_DRIVER);
+
+	return 0;
+}
+
+static int savage_bci_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_savage_init_t *init = data;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	switch (init->func) {
+	case SAVAGE_INIT_BCI:
+		return savage_do_init_bci(dev, init);
+	case SAVAGE_CLEANUP_BCI:
+		return savage_do_cleanup_bci(dev);
+	}
+
+	return -EINVAL;
+}
+
+static int savage_bci_event_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_savage_private_t *dev_priv = dev->dev_private;
+	drm_savage_event_emit_t *event = data;
+
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	event->count = savage_bci_emit_event(dev_priv, event->flags);
+	event->count |= dev_priv->event_wrap << 16;
+
+	return 0;
+}
+
+static int savage_bci_event_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_savage_private_t *dev_priv = dev->dev_private;
+	drm_savage_event_wait_t *event = data;
+	unsigned int event_e, hw_e;
+	unsigned int event_w, hw_w;
+
+	DRM_DEBUG("\n");
+
+	UPDATE_EVENT_COUNTER();
+	if (dev_priv->status_ptr)
+		hw_e = dev_priv->status_ptr[1] & 0xffff;
+	else
+		hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
+	hw_w = dev_priv->event_wrap;
+	if (hw_e > dev_priv->event_counter)
+		hw_w--;		/* hardware hasn't passed the last wrap yet */
+
+	event_e = event->count & 0xffff;
+	event_w = event->count >> 16;
+
+	/* Don't need to wait if
+	 * - event counter wrapped since the event was emitted or
+	 * - the hardware has advanced up to or over the event to wait for.
+	 */
+	if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e))
+		return 0;
+	else
+		return dev_priv->wait_evnt(dev_priv, event_e);
+}
+
+/*
+ * DMA buffer management
+ */
+
+static int savage_bci_get_buffers(struct drm_device *dev,
+				  struct drm_file *file_priv,
+				  struct drm_dma *d)
+{
+	struct drm_buf *buf;
+	int i;
+
+	for (i = d->granted_count; i < d->request_count; i++) {
+		buf = savage_freelist_get(dev);
+		if (!buf)
+			return -EAGAIN;
+
+		buf->file_priv = file_priv;
+
+		if (DRM_COPY_TO_USER(&d->request_indices[i],
+				     &buf->idx, sizeof(buf->idx)))
+			return -EFAULT;
+		if (DRM_COPY_TO_USER(&d->request_sizes[i],
+				     &buf->total, sizeof(buf->total)))
+			return -EFAULT;
+
+		d->granted_count++;
+	}
+	return 0;
+}
+
+int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_dma *d = data;
+	int ret = 0;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	/* Please don't send us buffers.
+	 */
+	if (d->send_count != 0) {
+		DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
+			  DRM_CURRENTPID, d->send_count);
+		return -EINVAL;
+	}
+
+	/* We'll send you buffers.
+	 */
+	if (d->request_count < 0 || d->request_count > dma->buf_count) {
+		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
+			  DRM_CURRENTPID, d->request_count, dma->buf_count);
+		return -EINVAL;
+	}
+
+	d->granted_count = 0;
+
+	if (d->request_count) {
+		ret = savage_bci_get_buffers(dev, file_priv, d);
+	}
+
+	return ret;
+}
+
+void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
+{
+	struct drm_device_dma *dma = dev->dma;
+	drm_savage_private_t *dev_priv = dev->dev_private;
+	int i;
+
+	if (!dma)
+		return;
+	if (!dev_priv)
+		return;
+	if (!dma->buflist)
+		return;
+
+	/*i830_flush_queue(dev); */
+
+	for (i = 0; i < dma->buf_count; i++) {
+		struct drm_buf *buf = dma->buflist[i];
+		drm_savage_buf_priv_t *buf_priv = buf->dev_private;
+
+		if (buf->file_priv == file_priv && buf_priv &&
+		    buf_priv->next == NULL && buf_priv->prev == NULL) {
+			uint16_t event;
+			DRM_DEBUG("reclaimed from client\n");
+			event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
+			SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
+			savage_freelist_put(dev, buf);
+		}
+	}
+
+	drm_core_reclaim_buffers(dev, file_priv);
+}
+
+struct drm_ioctl_desc savage_ioctls[] = {
+	DRM_IOCTL_DEF(DRM_SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
+};
+
+int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
new file mode 100644
index 000000000000..eee52aa92a7c
--- /dev/null
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -0,0 +1,88 @@
+/* savage_drv.c -- Savage driver for Linux
+ *
+ * Copyright 2004  Felix Kuehling
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "savage_drm.h"
+#include "savage_drv.h"
+
+#include "drm_pciids.h"
+
+static struct pci_device_id pciidlist[] = {
+	savage_PCI_IDS
+};
+
+static struct drm_driver driver = {
+	.driver_features =
+	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
+	.dev_priv_size = sizeof(drm_savage_buf_priv_t),
+	.load = savage_driver_load,
+	.firstopen = savage_driver_firstopen,
+	.lastclose = savage_driver_lastclose,
+	.unload = savage_driver_unload,
+	.reclaim_buffers = savage_reclaim_buffers,
+	.get_map_ofs = drm_core_get_map_ofs,
+	.get_reg_ofs = drm_core_get_reg_ofs,
+	.ioctls = savage_ioctls,
+	.dma_ioctl = savage_bci_buffers,
+	.fops = {
+		 .owner = THIS_MODULE,
+		 .open = drm_open,
+		 .release = drm_release,
+		 .ioctl = drm_ioctl,
+		 .mmap = drm_mmap,
+		 .poll = drm_poll,
+		 .fasync = drm_fasync,
+	},
+
+	.pci_driver = {
+		 .name = DRIVER_NAME,
+		 .id_table = pciidlist,
+	},
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static int __init savage_init(void)
+{
+	driver.num_ioctls = savage_max_ioctl;
+	return drm_init(&driver);
+}
+
+static void __exit savage_exit(void)
+{
+	drm_exit(&driver);
+}
+
+module_init(savage_init);
+module_exit(savage_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/savage/savage_drv.h b/drivers/gpu/drm/savage/savage_drv.h
new file mode 100644
index 000000000000..df2aac6636f7
--- /dev/null
+++ b/drivers/gpu/drm/savage/savage_drv.h
@@ -0,0 +1,575 @@
+/* savage_drv.h -- Private header for the savage driver */
+/*
+ * Copyright 2004  Felix Kuehling
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __SAVAGE_DRV_H__
+#define __SAVAGE_DRV_H__
+
+#define DRIVER_AUTHOR	"Felix Kuehling"
+
+#define DRIVER_NAME	"savage"
+#define DRIVER_DESC	"Savage3D/MX/IX, Savage4, SuperSavage, Twister, ProSavage[DDR]"
+#define DRIVER_DATE	"20050313"
+
+#define DRIVER_MAJOR		2
+#define DRIVER_MINOR		4
+#define DRIVER_PATCHLEVEL	1
+/* Interface history:
+ *
+ * 1.x   The DRM driver from the VIA/S3 code drop, basically a dummy
+ * 2.0   The first real DRM
+ * 2.1   Scissors registers managed by the DRM, 3D operations clipped by
+ *       cliprects of the cmdbuf ioctl
+ * 2.2   Implemented SAVAGE_CMD_DMA_IDX and SAVAGE_CMD_VB_IDX
+ * 2.3   Event counters used by BCI_EVENT_EMIT/WAIT ioctls are now 32 bits
+ *       wide and thus very long lived (unlikely to ever wrap). The size
+ *       in the struct was 32 bits before, but only 16 bits were used
+ * 2.4   Implemented command DMA. Now drm_savage_init_t.cmd_dma_offset is
+ *       actually used
+ */
+
+typedef struct drm_savage_age {
+	uint16_t event;
+	unsigned int wrap;
+} drm_savage_age_t;
+
+typedef struct drm_savage_buf_priv {
+	struct drm_savage_buf_priv *next;
+	struct drm_savage_buf_priv *prev;
+	drm_savage_age_t age;
+	struct drm_buf *buf;
+} drm_savage_buf_priv_t;
+
+typedef struct drm_savage_dma_page {
+	drm_savage_age_t age;
+	unsigned int used, flushed;
+} drm_savage_dma_page_t;
+#define SAVAGE_DMA_PAGE_SIZE 1024	/* in dwords */
+/* Fake DMA buffer size in bytes. 4 pages. Allows a maximum command
+ * size of 16kbytes or 4k entries. Minimum requirement would be
+ * 10kbytes for 255 40-byte vertices in one drawing command. */
+#define SAVAGE_FAKE_DMA_SIZE (SAVAGE_DMA_PAGE_SIZE*4*4)
+
+/* interesting bits of hardware state that are saved in dev_priv */
+typedef union {
+	struct drm_savage_common_state {
+		uint32_t vbaddr;
+	} common;
+	struct {
+		unsigned char pad[sizeof(struct drm_savage_common_state)];
+		uint32_t texctrl, texaddr;
+		uint32_t scstart, new_scstart;
+		uint32_t scend, new_scend;
+	} s3d;
+	struct {
+		unsigned char pad[sizeof(struct drm_savage_common_state)];
+		uint32_t texdescr, texaddr0, texaddr1;
+		uint32_t drawctrl0, new_drawctrl0;
+		uint32_t drawctrl1, new_drawctrl1;
+	} s4;
+} drm_savage_state_t;
+
+/* these chip tags should match the ones in the 2D driver in savage_regs.h. */
+enum savage_family {
+	S3_UNKNOWN = 0,
+	S3_SAVAGE3D,
+	S3_SAVAGE_MX,
+	S3_SAVAGE4,
+	S3_PROSAVAGE,
+	S3_TWISTER,
+	S3_PROSAVAGEDDR,
+	S3_SUPERSAVAGE,
+	S3_SAVAGE2000,
+	S3_LAST
+};
+
+extern struct drm_ioctl_desc savage_ioctls[];
+extern int savage_max_ioctl;
+
+#define S3_SAVAGE3D_SERIES(chip)  (((chip) >= S3_SAVAGE3D) && ((chip) <= S3_SAVAGE_MX))
+
+#define S3_SAVAGE4_SERIES(chip)  (((chip) == S3_SAVAGE4)            \
+                                  || ((chip) == S3_PROSAVAGE)       \
+                                  || ((chip) == S3_TWISTER)         \
+                                  || ((chip) == S3_PROSAVAGEDDR))
+
+#define S3_SAVAGE_MOBILE_SERIES(chip)	(((chip) == S3_SAVAGE_MX) || ((chip) == S3_SUPERSAVAGE))
+
+#define S3_SAVAGE_SERIES(chip)    (((chip) >= S3_SAVAGE3D) && ((chip) <= S3_SAVAGE2000))
+
+#define S3_MOBILE_TWISTER_SERIES(chip)   (((chip) == S3_TWISTER)    \
+                                          || ((chip) == S3_PROSAVAGEDDR))
+
+/* flags */
+#define SAVAGE_IS_AGP 1
+
+typedef struct drm_savage_private {
+	drm_savage_sarea_t *sarea_priv;
+
+	drm_savage_buf_priv_t head, tail;
+
+	/* who am I? */
+	enum savage_family chipset;
+
+	unsigned int cob_size;
+	unsigned int bci_threshold_lo, bci_threshold_hi;
+	unsigned int dma_type;
+
+	/* frame buffer layout */
+	unsigned int fb_bpp;
+	unsigned int front_offset, front_pitch;
+	unsigned int back_offset, back_pitch;
+	unsigned int depth_bpp;
+	unsigned int depth_offset, depth_pitch;
+
+	/* bitmap descriptors for swap and clear */
+	unsigned int front_bd, back_bd, depth_bd;
+
+	/* local textures */
+	unsigned int texture_offset;
+	unsigned int texture_size;
+
+	/* memory regions in physical memory */
+	drm_local_map_t *sarea;
+	drm_local_map_t *mmio;
+	drm_local_map_t *fb;
+	drm_local_map_t *aperture;
+	drm_local_map_t *status;
+	drm_local_map_t *agp_textures;
+	drm_local_map_t *cmd_dma;
+	drm_local_map_t fake_dma;
+
+	struct {
+		int handle;
+		unsigned long base, size;
+	} mtrr[3];
+
+	/* BCI and status-related stuff */
+	volatile uint32_t *status_ptr, *bci_ptr;
+	uint32_t status_used_mask;
+	uint16_t event_counter;
+	unsigned int event_wrap;
+
+	/* Savage4 command DMA */
+	drm_savage_dma_page_t *dma_pages;
+	unsigned int nr_dma_pages, first_dma_page, current_dma_page;
+	drm_savage_age_t last_dma_age;
+
+	/* saved hw state for global/local check on S3D */
+	uint32_t hw_draw_ctrl, hw_zbuf_ctrl;
+	/* and for scissors (global, so don't emit if not changed) */
+	uint32_t hw_scissors_start, hw_scissors_end;
+
+	drm_savage_state_t state;
+
+	/* after emitting a wait cmd Savage3D needs 63 nops before next DMA */
+	unsigned int waiting;
+
+	/* config/hardware-dependent function pointers */
+	int (*wait_fifo) (struct drm_savage_private * dev_priv, unsigned int n);
+	int (*wait_evnt) (struct drm_savage_private * dev_priv, uint16_t e);
+	/* Note: this is named wait_evnt rather than wait_event because
+	 * include/linux/wait.h defines a wait_event macro; this avoids
+	 * unwanted macro expansion. */
+	void (*emit_clip_rect) (struct drm_savage_private * dev_priv,
+				const struct drm_clip_rect * pbox);
+	void (*dma_flush) (struct drm_savage_private * dev_priv);
+} drm_savage_private_t;
+
+/* ioctls */
+extern int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
+
+/* BCI functions */
+extern uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv,
+				      unsigned int flags);
+extern void savage_freelist_put(struct drm_device * dev, struct drm_buf * buf);
+extern void savage_dma_reset(drm_savage_private_t * dev_priv);
+extern void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page);
+extern uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv,
+				  unsigned int n);
+extern int savage_driver_load(struct drm_device *dev, unsigned long chipset);
+extern int savage_driver_firstopen(struct drm_device *dev);
+extern void savage_driver_lastclose(struct drm_device *dev);
+extern int savage_driver_unload(struct drm_device *dev);
+extern void savage_reclaim_buffers(struct drm_device *dev,
+				   struct drm_file *file_priv);
+
+/* state functions */
+extern void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
+				      const struct drm_clip_rect * pbox);
+extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
+				     const struct drm_clip_rect * pbox);
+
+#define SAVAGE_FB_SIZE_S3	0x01000000	/*  16MB */
+#define SAVAGE_FB_SIZE_S4	0x02000000	/*  32MB */
+#define SAVAGE_MMIO_SIZE        0x00080000	/* 512kB */
+#define SAVAGE_APERTURE_OFFSET  0x02000000	/*  32MB */
+#define SAVAGE_APERTURE_SIZE    0x05000000	/* 5 tiled surfaces, 16MB each */
+
+#define SAVAGE_BCI_OFFSET       0x00010000	/* offset of the BCI region
+						 * inside the MMIO region */
+#define SAVAGE_BCI_FIFO_SIZE	32	/* number of entries in on-chip
+					 * BCI FIFO */
+
+/*
+ * MMIO registers
+ */
+#define SAVAGE_STATUS_WORD0		0x48C00
+#define SAVAGE_STATUS_WORD1		0x48C04
+#define SAVAGE_ALT_STATUS_WORD0 	0x48C60
+
+#define SAVAGE_FIFO_USED_MASK_S3D	0x0001ffff
+#define SAVAGE_FIFO_USED_MASK_S4	0x001fffff
+
+/* Copied from savage_bci.h in the 2D driver with some renaming. */
+
+/* Bitmap descriptors */
+#define SAVAGE_BD_STRIDE_SHIFT 0
+#define SAVAGE_BD_BPP_SHIFT   16
+#define SAVAGE_BD_TILE_SHIFT  24
+#define SAVAGE_BD_BW_DISABLE  (1<<28)
+/* common: */
+#define	SAVAGE_BD_TILE_LINEAR		0
+/* savage4, MX, IX, 3D */
+#define	SAVAGE_BD_TILE_16BPP		2
+#define	SAVAGE_BD_TILE_32BPP		3
+/* twister, prosavage, DDR, supersavage, 2000 */
+#define	SAVAGE_BD_TILE_DEST		1
+#define	SAVAGE_BD_TILE_TEXTURE		2
+/* GBD - BCI enable */
+/* savage4, MX, IX, 3D */
+#define SAVAGE_GBD_BCI_ENABLE                    8
+/* twister, prosavage, DDR, supersavage, 2000 */
+#define SAVAGE_GBD_BCI_ENABLE_TWISTER            0
+
+#define SAVAGE_GBD_BIG_ENDIAN                    4
+#define SAVAGE_GBD_LITTLE_ENDIAN                 0
+#define SAVAGE_GBD_64                            1
+
+/*  Global Bitmap Descriptor */
+#define SAVAGE_BCI_GLB_BD_LOW             0x8168
+#define SAVAGE_BCI_GLB_BD_HIGH            0x816C
+
+/*
+ * BCI registers
+ */
+/* Savage4/Twister/ProSavage 3D registers */
+#define SAVAGE_DRAWLOCALCTRL_S4		0x1e
+#define SAVAGE_TEXPALADDR_S4		0x1f
+#define SAVAGE_TEXCTRL0_S4		0x20
+#define SAVAGE_TEXCTRL1_S4		0x21
+#define SAVAGE_TEXADDR0_S4		0x22
+#define SAVAGE_TEXADDR1_S4		0x23
+#define SAVAGE_TEXBLEND0_S4		0x24
+#define SAVAGE_TEXBLEND1_S4		0x25
+#define SAVAGE_TEXXPRCLR_S4		0x26	/* never used */
+#define SAVAGE_TEXDESCR_S4		0x27
+#define SAVAGE_FOGTABLE_S4		0x28
+#define SAVAGE_FOGCTRL_S4		0x30
+#define SAVAGE_STENCILCTRL_S4		0x31
+#define SAVAGE_ZBUFCTRL_S4		0x32
+#define SAVAGE_ZBUFOFF_S4		0x33
+#define SAVAGE_DESTCTRL_S4		0x34
+#define SAVAGE_DRAWCTRL0_S4		0x35
+#define SAVAGE_DRAWCTRL1_S4		0x36
+#define SAVAGE_ZWATERMARK_S4		0x37
+#define SAVAGE_DESTTEXRWWATERMARK_S4	0x38
+#define SAVAGE_TEXBLENDCOLOR_S4		0x39
+/* Savage3D/MX/IX 3D registers */
+#define SAVAGE_TEXPALADDR_S3D		0x18
+#define SAVAGE_TEXXPRCLR_S3D		0x19	/* never used */
+#define SAVAGE_TEXADDR_S3D		0x1A
+#define SAVAGE_TEXDESCR_S3D		0x1B
+#define SAVAGE_TEXCTRL_S3D		0x1C
+#define SAVAGE_FOGTABLE_S3D		0x20
+#define SAVAGE_FOGCTRL_S3D		0x30
+#define SAVAGE_DRAWCTRL_S3D		0x31
+#define SAVAGE_ZBUFCTRL_S3D		0x32
+#define SAVAGE_ZBUFOFF_S3D		0x33
+#define SAVAGE_DESTCTRL_S3D		0x34
+#define SAVAGE_SCSTART_S3D		0x35
+#define SAVAGE_SCEND_S3D		0x36
+#define SAVAGE_ZWATERMARK_S3D		0x37
+#define SAVAGE_DESTTEXRWWATERMARK_S3D	0x38
+/* common stuff */
+#define SAVAGE_VERTBUFADDR		0x3e
+#define SAVAGE_BITPLANEWTMASK		0xd7
+#define SAVAGE_DMABUFADDR		0x51
+
+/* texture enable bits (needed for tex addr checking) */
+#define SAVAGE_TEXCTRL_TEXEN_MASK	0x00010000	/* S3D */
+#define SAVAGE_TEXDESCR_TEX0EN_MASK	0x02000000	/* S4 */
+#define SAVAGE_TEXDESCR_TEX1EN_MASK	0x04000000	/* S4 */
+
+/* Global fields in Savage4/Twister/ProSavage 3D registers:
+ *
+ * All texture registers and DrawLocalCtrl are local. All other
+ * registers are global. */
+
+/* Global fields in Savage3D/MX/IX 3D registers:
+ *
+ * All texture registers are local. DrawCtrl and ZBufCtrl are
+ * partially local. All other registers are global.
+ *
+ * DrawCtrl global fields: cullMode, alphaTestCmpFunc, alphaTestEn, alphaRefVal
+ * ZBufCtrl global fields: zCmpFunc, zBufEn
+ */
+#define SAVAGE_DRAWCTRL_S3D_GLOBAL	0x03f3c00c
+#define SAVAGE_ZBUFCTRL_S3D_GLOBAL	0x00000027
+
+/* Masks for scissor bits (drawCtrl[01] on s4, scissorStart/End on s3d)
+ */
+#define SAVAGE_SCISSOR_MASK_S4		0x00fff7ff
+#define SAVAGE_SCISSOR_MASK_S3D		0x07ff07ff
+
+/*
+ * BCI commands
+ */
+#define BCI_CMD_NOP                  0x40000000
+#define BCI_CMD_RECT                 0x48000000
+#define BCI_CMD_RECT_XP              0x01000000
+#define BCI_CMD_RECT_YP              0x02000000
+#define BCI_CMD_SCANLINE             0x50000000
+#define BCI_CMD_LINE                 0x5C000000
+#define BCI_CMD_LINE_LAST_PIXEL      0x58000000
+#define BCI_CMD_BYTE_TEXT            0x63000000
+#define BCI_CMD_NT_BYTE_TEXT         0x67000000
+#define BCI_CMD_BIT_TEXT             0x6C000000
+#define BCI_CMD_GET_ROP(cmd)         (((cmd) >> 16) & 0xFF)
+#define BCI_CMD_SET_ROP(cmd, rop)    ((cmd) |= ((rop & 0xFF) << 16))
+#define BCI_CMD_SEND_COLOR           0x00008000
+
+#define BCI_CMD_CLIP_NONE            0x00000000
+#define BCI_CMD_CLIP_CURRENT         0x00002000
+#define BCI_CMD_CLIP_LR              0x00004000
+#define BCI_CMD_CLIP_NEW             0x00006000
+
+#define BCI_CMD_DEST_GBD             0x00000000
+#define BCI_CMD_DEST_PBD             0x00000800
+#define BCI_CMD_DEST_PBD_NEW         0x00000C00
+#define BCI_CMD_DEST_SBD             0x00001000
+#define BCI_CMD_DEST_SBD_NEW         0x00001400
+
+#define BCI_CMD_SRC_TRANSPARENT      0x00000200
+#define BCI_CMD_SRC_SOLID            0x00000000
+#define BCI_CMD_SRC_GBD              0x00000020
+#define BCI_CMD_SRC_COLOR            0x00000040
+#define BCI_CMD_SRC_MONO             0x00000060
+#define BCI_CMD_SRC_PBD_COLOR        0x00000080
+#define BCI_CMD_SRC_PBD_MONO         0x000000A0
+#define BCI_CMD_SRC_PBD_COLOR_NEW    0x000000C0
+#define BCI_CMD_SRC_PBD_MONO_NEW     0x000000E0
+#define BCI_CMD_SRC_SBD_COLOR        0x00000100
+#define BCI_CMD_SRC_SBD_MONO         0x00000120
+#define BCI_CMD_SRC_SBD_COLOR_NEW    0x00000140
+#define BCI_CMD_SRC_SBD_MONO_NEW     0x00000160
+
+#define BCI_CMD_PAT_TRANSPARENT      0x00000010
+#define BCI_CMD_PAT_NONE             0x00000000
+#define BCI_CMD_PAT_COLOR            0x00000002
+#define BCI_CMD_PAT_MONO             0x00000003
+#define BCI_CMD_PAT_PBD_COLOR        0x00000004
+#define BCI_CMD_PAT_PBD_MONO         0x00000005
+#define BCI_CMD_PAT_PBD_COLOR_NEW    0x00000006
+#define BCI_CMD_PAT_PBD_MONO_NEW     0x00000007
+#define BCI_CMD_PAT_SBD_COLOR        0x00000008
+#define BCI_CMD_PAT_SBD_MONO         0x00000009
+#define BCI_CMD_PAT_SBD_COLOR_NEW    0x0000000A
+#define BCI_CMD_PAT_SBD_MONO_NEW     0x0000000B
+
+#define BCI_BD_BW_DISABLE            0x10000000
+#define BCI_BD_TILE_MASK             0x03000000
+#define BCI_BD_TILE_NONE             0x00000000
+#define BCI_BD_TILE_16               0x02000000
+#define BCI_BD_TILE_32               0x03000000
+#define BCI_BD_GET_BPP(bd)           (((bd) >> 16) & 0xFF)
+#define BCI_BD_SET_BPP(bd, bpp)      ((bd) |= (((bpp) & 0xFF) << 16))
+#define BCI_BD_GET_STRIDE(bd)        ((bd) & 0xFFFF)
+#define BCI_BD_SET_STRIDE(bd, st)    ((bd) |= ((st) & 0xFFFF))
+
+#define BCI_CMD_SET_REGISTER            0x96000000
+
+#define BCI_CMD_WAIT                    0xC0000000
+#define BCI_CMD_WAIT_3D                 0x00010000
+#define BCI_CMD_WAIT_2D                 0x00020000
+
+#define BCI_CMD_UPDATE_EVENT_TAG        0x98000000
+
+#define BCI_CMD_DRAW_PRIM               0x80000000
+#define BCI_CMD_DRAW_INDEXED_PRIM       0x88000000
+#define BCI_CMD_DRAW_CONT               0x01000000
+#define BCI_CMD_DRAW_TRILIST            0x00000000
+#define BCI_CMD_DRAW_TRISTRIP           0x02000000
+#define BCI_CMD_DRAW_TRIFAN             0x04000000
+#define BCI_CMD_DRAW_SKIPFLAGS          0x000000ff
+#define BCI_CMD_DRAW_NO_Z		0x00000001
+#define BCI_CMD_DRAW_NO_W		0x00000002
+#define BCI_CMD_DRAW_NO_CD		0x00000004
+#define BCI_CMD_DRAW_NO_CS		0x00000008
+#define BCI_CMD_DRAW_NO_U0		0x00000010
+#define BCI_CMD_DRAW_NO_V0		0x00000020
+#define BCI_CMD_DRAW_NO_UV0		0x00000030
+#define BCI_CMD_DRAW_NO_U1		0x00000040
+#define BCI_CMD_DRAW_NO_V1		0x00000080
+#define BCI_CMD_DRAW_NO_UV1		0x000000c0
+
+#define BCI_CMD_DMA			0xa8000000
+
+#define BCI_W_H(w, h)                ((((h) << 16) | (w)) & 0x0FFF0FFF)
+#define BCI_X_Y(x, y)                ((((y) << 16) | (x)) & 0x0FFF0FFF)
+#define BCI_X_W(x, w)                ((((w) << 16) | (x)) & 0x0FFF0FFF)
+#define BCI_CLIP_LR(l, r)            ((((r) << 16) | (l)) & 0x0FFF0FFF)
+#define BCI_CLIP_TL(t, l)            ((((t) << 16) | (l)) & 0x0FFF0FFF)
+#define BCI_CLIP_BR(b, r)            ((((b) << 16) | (r)) & 0x0FFF0FFF)
+
+#define BCI_LINE_X_Y(x, y)           (((y) << 16) | ((x) & 0xFFFF))
+#define BCI_LINE_STEPS(diag, axi)    (((axi) << 16) | ((diag) & 0xFFFF))
+#define BCI_LINE_MISC(maj, ym, xp, yp, err) \
+	(((maj) & 0x1FFF) | \
+	((ym) ? 1<<13 : 0) | \
+	((xp) ? 1<<14 : 0) | \
+	((yp) ? 1<<15 : 0) | \
+	((err) << 16))
+
+/*
+ * common commands
+ */
+#define BCI_SET_REGISTERS( first, n )			\
+	BCI_WRITE(BCI_CMD_SET_REGISTER |		\
+		  ((uint32_t)(n) & 0xff) << 16 |	\
+		  ((uint32_t)(first) & 0xffff))
+#define DMA_SET_REGISTERS( first, n )			\
+	DMA_WRITE(BCI_CMD_SET_REGISTER |		\
+		  ((uint32_t)(n) & 0xff) << 16 |	\
+		  ((uint32_t)(first) & 0xffff))
+
+#define BCI_DRAW_PRIMITIVE(n, type, skip)         \
+        BCI_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \
+		  ((n) << 16))
+#define DMA_DRAW_PRIMITIVE(n, type, skip)         \
+        DMA_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \
+		  ((n) << 16))
+
+#define BCI_DRAW_INDICES_S3D(n, type, i0)         \
+        BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) |  \
+		  ((n) << 16) | (i0))
+
+#define BCI_DRAW_INDICES_S4(n, type, skip)        \
+        BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) |  \
+                  (skip) | ((n) << 16))
+
+#define BCI_DMA(n)	\
+	BCI_WRITE(BCI_CMD_DMA | (((n) >> 1) - 1))
+
+/*
+ * access to MMIO
+ */
+#define SAVAGE_READ(reg)	DRM_READ32(dev_priv->mmio, (reg))
+#define SAVAGE_WRITE(reg, val)	DRM_WRITE32(dev_priv->mmio, (reg), (val))
+
+/*
+ * access to the burst command interface (BCI)
+ */
+#define SAVAGE_BCI_DEBUG 1
+
+#define BCI_LOCALS    volatile uint32_t *bci_ptr;
+
+#define BEGIN_BCI( n ) do {			\
+	dev_priv->wait_fifo(dev_priv, (n));	\
+	bci_ptr = dev_priv->bci_ptr;		\
+} while(0)
+
+#define BCI_WRITE( val ) *bci_ptr++ = (uint32_t)(val)
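+
+/* Typical usage, mirroring savage_state.c: declare BCI_LOCALS, reserve FIFO
+ * space with BEGIN_BCI(n), then emit exactly n dwords, e.g.
+ *
+ *	BEGIN_BCI(2);
+ *	BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
+ *	BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
+ */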
+
+/*
+ * command DMA support
+ */
+#define SAVAGE_DMA_DEBUG 1
+
+#define DMA_LOCALS   uint32_t *dma_ptr;
+
+#define BEGIN_DMA( n ) do {						\
+	unsigned int cur = dev_priv->current_dma_page;			\
+	unsigned int rest = SAVAGE_DMA_PAGE_SIZE -			\
+		dev_priv->dma_pages[cur].used;				\
+	if ((n) > rest) {						\
+		dma_ptr = savage_dma_alloc(dev_priv, (n));		\
+	} else { /* fast path for small allocations */			\
+		dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +	\
+			cur * SAVAGE_DMA_PAGE_SIZE +			\
+			dev_priv->dma_pages[cur].used;			\
+		if (dev_priv->dma_pages[cur].used == 0)			\
+			savage_dma_wait(dev_priv, cur);			\
+		dev_priv->dma_pages[cur].used += (n);			\
+	}								\
+} while(0)
+
+#define DMA_WRITE( val ) *dma_ptr++ = (uint32_t)(val)
+
+#define DMA_COPY(src, n) do {					\
+	memcpy(dma_ptr, (src), (n) * 4);			\
+	dma_ptr += (n);						\
+} while(0)
+
+#if SAVAGE_DMA_DEBUG
+#define DMA_COMMIT() do {						\
+	unsigned int cur = dev_priv->current_dma_page;			\
+	uint32_t *expected = (uint32_t *)dev_priv->cmd_dma->handle +	\
+			cur * SAVAGE_DMA_PAGE_SIZE +			\
+			dev_priv->dma_pages[cur].used;			\
+	if (dma_ptr != expected) {					\
+		DRM_ERROR("DMA allocation and use don't match: "	\
+			  "%p != %p\n", expected, dma_ptr);		\
+		savage_dma_reset(dev_priv);				\
+	}								\
+} while(0)
+#else
+#define DMA_COMMIT() do {/* nothing */} while(0)
+#endif
+
+#define DMA_FLUSH() dev_priv->dma_flush(dev_priv)
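+
+/* Typical usage, mirroring savage_emit_clip_rect_s3d(): declare DMA_LOCALS,
+ * reserve space with BEGIN_DMA(n), emit exactly n dwords with DMA_WRITE()/
+ * DMA_SET_REGISTERS()/DMA_COPY() and finish with DMA_COMMIT().  DMA_FLUSH()
+ * submits the buffered commands, e.g. before falling back to direct BCI
+ * submission for vertex DMA. */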
+
+/* Buffer aging via event tag
+ */
+
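+/* The event tag read from the status page is only 16 bits wide; a value
+ * smaller than the cached event_counter means the counter wrapped, which
+ * is tracked in event_wrap. */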
+#define UPDATE_EVENT_COUNTER( ) do {			\
+	if (dev_priv->status_ptr) {			\
+		uint16_t count;				\
+		/* coordinate with Xserver */		\
+		count = dev_priv->status_ptr[1023];	\
+		if (count < dev_priv->event_counter)	\
+			dev_priv->event_wrap++;		\
+		dev_priv->event_counter = count;	\
+	}						\
+} while(0)
+
+#define SET_AGE( age, e, w ) do {	\
+	(age)->event = e;		\
+	(age)->wrap = w;		\
+} while(0)
+
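+/* TEST_AGE is true if the recorded age is not newer than event e in wrap
+ * period w, i.e. the aged buffer is idle once that event has completed. */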
+#define TEST_AGE( age, e, w )				\
+	( (age)->wrap < (w) || ( (age)->wrap == (w) && (age)->event <= (e) ) )
+
+#endif				/* __SAVAGE_DRV_H__ */
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
new file mode 100644
index 000000000000..5f6238fdf1fa
--- /dev/null
+++ b/drivers/gpu/drm/savage/savage_state.c
@@ -0,0 +1,1163 @@
+/* savage_state.c -- State and drawing support for Savage
+ *
+ * Copyright 2004  Felix Kuehling
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "drmP.h"
+#include "savage_drm.h"
+#include "savage_drv.h"
+
+void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
+			       const struct drm_clip_rect * pbox)
+{
+	uint32_t scstart = dev_priv->state.s3d.new_scstart;
+	uint32_t scend = dev_priv->state.s3d.new_scend;
+	scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) |
+	    ((uint32_t) pbox->x1 & 0x000007ff) |
+	    (((uint32_t) pbox->y1 << 16) & 0x07ff0000);
+	scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) |
+	    (((uint32_t) pbox->x2 - 1) & 0x000007ff) |
+	    ((((uint32_t) pbox->y2 - 1) << 16) & 0x07ff0000);
+	if (scstart != dev_priv->state.s3d.scstart ||
+	    scend != dev_priv->state.s3d.scend) {
+		DMA_LOCALS;
+		BEGIN_DMA(4);
+		DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
+		DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2);
+		DMA_WRITE(scstart);
+		DMA_WRITE(scend);
+		dev_priv->state.s3d.scstart = scstart;
+		dev_priv->state.s3d.scend = scend;
+		dev_priv->waiting = 1;
+		DMA_COMMIT();
+	}
+}
+
+void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
+			      const struct drm_clip_rect * pbox)
+{
+	uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0;
+	uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1;
+	drawctrl0 = (drawctrl0 & ~SAVAGE_SCISSOR_MASK_S4) |
+	    ((uint32_t) pbox->x1 & 0x000007ff) |
+	    (((uint32_t) pbox->y1 << 12) & 0x00fff000);
+	drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) |
+	    (((uint32_t) pbox->x2 - 1) & 0x000007ff) |
+	    ((((uint32_t) pbox->y2 - 1) << 12) & 0x00fff000);
+	if (drawctrl0 != dev_priv->state.s4.drawctrl0 ||
+	    drawctrl1 != dev_priv->state.s4.drawctrl1) {
+		DMA_LOCALS;
+		BEGIN_DMA(4);
+		DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
+		DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2);
+		DMA_WRITE(drawctrl0);
+		DMA_WRITE(drawctrl1);
+		dev_priv->state.s4.drawctrl0 = drawctrl0;
+		dev_priv->state.s4.drawctrl1 = drawctrl1;
+		dev_priv->waiting = 1;
+		DMA_COMMIT();
+	}
+}
+
+static int savage_verify_texaddr(drm_savage_private_t * dev_priv, int unit,
+				 uint32_t addr)
+{
+	if ((addr & 6) != 2) {	/* reserved bits */
+		DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr);
+		return -EINVAL;
+	}
+	if (!(addr & 1)) {	/* local */
+		addr &= ~7;
+		if (addr < dev_priv->texture_offset ||
+		    addr >= dev_priv->texture_offset + dev_priv->texture_size) {
+			DRM_ERROR
+			    ("bad texAddr%d %08x (local addr out of range)\n",
+			     unit, addr);
+			return -EINVAL;
+		}
+	} else {		/* AGP */
+		if (!dev_priv->agp_textures) {
+			DRM_ERROR("bad texAddr%d %08x (AGP not available)\n",
+				  unit, addr);
+			return -EINVAL;
+		}
+		addr &= ~7;
+		if (addr < dev_priv->agp_textures->offset ||
+		    addr >= (dev_priv->agp_textures->offset +
+			     dev_priv->agp_textures->size)) {
+			DRM_ERROR
+			    ("bad texAddr%d %08x (AGP addr out of range)\n",
+			     unit, addr);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+#define SAVE_STATE(reg, where)			\
+	if (start <= reg && start + count > reg)	\
+		dev_priv->state.where = regs[reg - start]
+#define SAVE_STATE_MASK(reg, where, mask) do {			\
+	if (start <= reg && start + count > reg) {		\
+		uint32_t tmp;					\
+		tmp = regs[reg - start];			\
+		dev_priv->state.where = (tmp & (mask)) |	\
+			(dev_priv->state.where & ~(mask));	\
+	}							\
+} while (0)
+
+static int savage_verify_state_s3d(drm_savage_private_t * dev_priv,
+				   unsigned int start, unsigned int count,
+				   const uint32_t *regs)
+{
+	if (start < SAVAGE_TEXPALADDR_S3D ||
+	    start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
+		DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
+			  start, start + count - 1);
+		return -EINVAL;
+	}
+
+	SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart,
+			~SAVAGE_SCISSOR_MASK_S3D);
+	SAVE_STATE_MASK(SAVAGE_SCEND_S3D, s3d.new_scend,
+			~SAVAGE_SCISSOR_MASK_S3D);
+
+	/* if any texture regs were changed ... */
+	if (start <= SAVAGE_TEXCTRL_S3D &&
+	    start + count > SAVAGE_TEXPALADDR_S3D) {
+		/* ... check texture state */
+		SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl);
+		SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr);
+		if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK)
+			return savage_verify_texaddr(dev_priv, 0,
+						dev_priv->state.s3d.texaddr);
+	}
+
+	return 0;
+}
+
+static int savage_verify_state_s4(drm_savage_private_t * dev_priv,
+				  unsigned int start, unsigned int count,
+				  const uint32_t *regs)
+{
+	int ret = 0;
+
+	if (start < SAVAGE_DRAWLOCALCTRL_S4 ||
+	    start + count - 1 > SAVAGE_TEXBLENDCOLOR_S4) {
+		DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
+			  start, start + count - 1);
+		return -EINVAL;
+	}
+
+	SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0,
+			~SAVAGE_SCISSOR_MASK_S4);
+	SAVE_STATE_MASK(SAVAGE_DRAWCTRL1_S4, s4.new_drawctrl1,
+			~SAVAGE_SCISSOR_MASK_S4);
+
+	/* if any texture regs were changed ... */
+	if (start <= SAVAGE_TEXDESCR_S4 &&
+	    start + count > SAVAGE_TEXPALADDR_S4) {
+		/* ... check texture state */
+		SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr);
+		SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0);
+		SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1);
+		if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK)
+			ret |= savage_verify_texaddr(dev_priv, 0,
+						dev_priv->state.s4.texaddr0);
+		if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK)
+			ret |= savage_verify_texaddr(dev_priv, 1,
+						dev_priv->state.s4.texaddr1);
+	}
+
+	return ret;
+}
+
+#undef SAVE_STATE
+#undef SAVE_STATE_MASK
+
+static int savage_dispatch_state(drm_savage_private_t * dev_priv,
+				 const drm_savage_cmd_header_t * cmd_header,
+				 const uint32_t *regs)
+{
+	unsigned int count = cmd_header->state.count;
+	unsigned int start = cmd_header->state.start;
+	unsigned int count2 = 0;
+	unsigned int bci_size;
+	int ret;
+	DMA_LOCALS;
+
+	if (!count)
+		return 0;
+
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+		ret = savage_verify_state_s3d(dev_priv, start, count, regs);
+		if (ret != 0)
+			return ret;
+		/* scissor regs are emitted in savage_dispatch_draw */
+		if (start < SAVAGE_SCSTART_S3D) {
+			if (start + count > SAVAGE_SCEND_S3D + 1)
+				count2 = count - (SAVAGE_SCEND_S3D + 1 - start);
+			if (start + count > SAVAGE_SCSTART_S3D)
+				count = SAVAGE_SCSTART_S3D - start;
+		} else if (start <= SAVAGE_SCEND_S3D) {
+			if (start + count > SAVAGE_SCEND_S3D + 1) {
+				count -= SAVAGE_SCEND_S3D + 1 - start;
+				start = SAVAGE_SCEND_S3D + 1;
+			} else
+				return 0;
+		}
+	} else {
+		ret = savage_verify_state_s4(dev_priv, start, count, regs);
+		if (ret != 0)
+			return ret;
+		/* scissor regs are emitted in savage_dispatch_draw */
+		if (start < SAVAGE_DRAWCTRL0_S4) {
+			if (start + count > SAVAGE_DRAWCTRL1_S4 + 1)
+				count2 = count -
+					 (SAVAGE_DRAWCTRL1_S4 + 1 - start);
+			if (start + count > SAVAGE_DRAWCTRL0_S4)
+				count = SAVAGE_DRAWCTRL0_S4 - start;
+		} else if (start <= SAVAGE_DRAWCTRL1_S4) {
+			if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) {
+				count -= SAVAGE_DRAWCTRL1_S4 + 1 - start;
+				start = SAVAGE_DRAWCTRL1_S4 + 1;
+			} else
+				return 0;
+		}
+	}
+
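+	/* Each SET_REGISTER command can address at most 255 registers, so one
+	 * extra command dword is needed per run of up to 255 registers, hence
+	 * the (count + 254) / 255 terms. */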
+	bci_size = count + (count + 254) / 255 + count2 + (count2 + 254) / 255;
+
+	if (cmd_header->state.global) {
+		BEGIN_DMA(bci_size + 1);
+		DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
+		dev_priv->waiting = 1;
+	} else {
+		BEGIN_DMA(bci_size);
+	}
+
+	do {
+		while (count > 0) {
+			unsigned int n = count < 255 ? count : 255;
+			DMA_SET_REGISTERS(start, n);
+			DMA_COPY(regs, n);
+			count -= n;
+			start += n;
+			regs += n;
+		}
+		start += 2;
+		regs += 2;
+		count = count2;
+		count2 = 0;
+	} while (count);
+
+	DMA_COMMIT();
+
+	return 0;
+}
+
+static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv,
+				    const drm_savage_cmd_header_t * cmd_header,
+				    const struct drm_buf * dmabuf)
+{
+	unsigned char reorder = 0;
+	unsigned int prim = cmd_header->prim.prim;
+	unsigned int skip = cmd_header->prim.skip;
+	unsigned int n = cmd_header->prim.count;
+	unsigned int start = cmd_header->prim.start;
+	unsigned int i;
+	BCI_LOCALS;
+
+	if (!dmabuf) {
+		DRM_ERROR("called without dma buffers!\n");
+		return -EINVAL;
+	}
+
+	if (!n)
+		return 0;
+
+	switch (prim) {
+	case SAVAGE_PRIM_TRILIST_201:
+		reorder = 1;
+		prim = SAVAGE_PRIM_TRILIST;
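+		/* fall through */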
+	case SAVAGE_PRIM_TRILIST:
+		if (n % 3 != 0) {
+			DRM_ERROR("wrong number of vertices %u in TRILIST\n",
+				  n);
+			return -EINVAL;
+		}
+		break;
+	case SAVAGE_PRIM_TRISTRIP:
+	case SAVAGE_PRIM_TRIFAN:
+		if (n < 3) {
+			DRM_ERROR
+			    ("wrong number of vertices %u in TRIFAN/STRIP\n",
+			     n);
+			return -EINVAL;
+		}
+		break;
+	default:
+		DRM_ERROR("invalid primitive type %u\n", prim);
+		return -EINVAL;
+	}
+
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+		if (skip != 0) {
+			DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
+			return -EINVAL;
+		}
+	} else {
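+		/* A full Savage4 vertex is 10 dwords; each set skip flag drops
+		 * one dword.  Vertex DMA requires the resulting vertex size to
+		 * be exactly 8 dwords. */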
+		unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
+		    (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
+		    (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
+		if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
+			DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
+			return -EINVAL;
+		}
+		if (reorder) {
+			DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
+			return -EINVAL;
+		}
+	}
+
+	if (start + n > dmabuf->total / 32) {
+		DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
+			  start, start + n - 1, dmabuf->total / 32);
+		return -EINVAL;
+	}
+
+	/* Vertex DMA doesn't work with command DMA at the same time,
+	 * so we use BCI_... to submit commands here. Flush buffered
+	 * faked DMA first. */
+	DMA_FLUSH();
+
+	if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
+		BEGIN_BCI(2);
+		BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
+		BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
+		dev_priv->state.common.vbaddr = dmabuf->bus_address;
+	}
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
+		/* Workaround for what looks like a hardware bug. If a
+		 * WAIT_3D_IDLE was emitted some time before the
+		 * indexed drawing command then the engine will lock
+		 * up. There are two known workarounds:
+		 * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
+		BEGIN_BCI(63);
+		for (i = 0; i < 63; ++i)
+			BCI_WRITE(BCI_CMD_WAIT);
+		dev_priv->waiting = 0;
+	}
+
+	prim <<= 25;
+	while (n != 0) {
+		/* Can emit up to 255 indices (85 triangles) at once. */
+		unsigned int count = n > 255 ? 255 : n;
+		if (reorder) {
+			/* Need to reorder indices for correct flat
+			 * shading while preserving the clock sense
+			 * for correct culling. Only on Savage3D. */
+			int reorder[3] = { -1, -1, -1 };
+			reorder[start % 3] = 2;
+
+			BEGIN_BCI((count + 1 + 1) / 2);
+			BCI_DRAW_INDICES_S3D(count, prim, start + 2);
+
+			for (i = start + 1; i + 1 < start + count; i += 2)
+				BCI_WRITE((i + reorder[i % 3]) |
+					  ((i + 1 +
+					    reorder[(i + 1) % 3]) << 16));
+			if (i < start + count)
+				BCI_WRITE(i + reorder[i % 3]);
+		} else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+			BEGIN_BCI((count + 1 + 1) / 2);
+			BCI_DRAW_INDICES_S3D(count, prim, start);
+
+			for (i = start + 1; i + 1 < start + count; i += 2)
+				BCI_WRITE(i | ((i + 1) << 16));
+			if (i < start + count)
+				BCI_WRITE(i);
+		} else {
+			BEGIN_BCI((count + 2 + 1) / 2);
+			BCI_DRAW_INDICES_S4(count, prim, skip);
+
+			for (i = start; i + 1 < start + count; i += 2)
+				BCI_WRITE(i | ((i + 1) << 16));
+			if (i < start + count)
+				BCI_WRITE(i);
+		}
+
+		start += count;
+		n -= count;
+
+		prim |= BCI_CMD_DRAW_CONT;
+	}
+
+	return 0;
+}
+
+static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
+				   const drm_savage_cmd_header_t * cmd_header,
+				   const uint32_t *vtxbuf, unsigned int vb_size,
+				   unsigned int vb_stride)
+{
+	unsigned char reorder = 0;
+	unsigned int prim = cmd_header->prim.prim;
+	unsigned int skip = cmd_header->prim.skip;
+	unsigned int n = cmd_header->prim.count;
+	unsigned int start = cmd_header->prim.start;
+	unsigned int vtx_size;
+	unsigned int i;
+	DMA_LOCALS;
+
+	if (!n)
+		return 0;
+
+	switch (prim) {
+	case SAVAGE_PRIM_TRILIST_201:
+		reorder = 1;
+		prim = SAVAGE_PRIM_TRILIST;
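+		/* fall through */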
+	case SAVAGE_PRIM_TRILIST:
+		if (n % 3 != 0) {
+			DRM_ERROR("wrong number of vertices %u in TRILIST\n",
+				  n);
+			return -EINVAL;
+		}
+		break;
+	case SAVAGE_PRIM_TRISTRIP:
+	case SAVAGE_PRIM_TRIFAN:
+		if (n < 3) {
+			DRM_ERROR
+			    ("wrong number of vertices %u in TRIFAN/STRIP\n",
+			     n);
+			return -EINVAL;
+		}
+		break;
+	default:
+		DRM_ERROR("invalid primitive type %u\n", prim);
+		return -EINVAL;
+	}
+
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+		if (skip > SAVAGE_SKIP_ALL_S3D) {
+			DRM_ERROR("invalid skip flags 0x%04x\n", skip);
+			return -EINVAL;
+		}
+		vtx_size = 8;	/* full vertex */
+	} else {
+		if (skip > SAVAGE_SKIP_ALL_S4) {
+			DRM_ERROR("invalid skip flags 0x%04x\n", skip);
+			return -EINVAL;
+		}
+		vtx_size = 10;	/* full vertex */
+	}
+
+	vtx_size -= (skip & 1) + (skip >> 1 & 1) +
+	    (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
+	    (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
+
+	if (vtx_size > vb_stride) {
+		DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
+			  vtx_size, vb_stride);
+		return -EINVAL;
+	}
+
+	if (start + n > vb_size / (vb_stride * 4)) {
+		DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
+			  start, start + n - 1, vb_size / (vb_stride * 4));
+		return -EINVAL;
+	}
+
+	prim <<= 25;
+	while (n != 0) {
+		/* Can emit up to 255 vertices (85 triangles) at once. */
+		unsigned int count = n > 255 ? 255 : n;
+		if (reorder) {
+			/* Need to reorder vertices for correct flat
+			 * shading while preserving the clock sense
+			 * for correct culling. Only on Savage3D. */
+			int reorder[3] = { -1, -1, -1 };
+			reorder[start % 3] = 2;
+
+			BEGIN_DMA(count * vtx_size + 1);
+			DMA_DRAW_PRIMITIVE(count, prim, skip);
+
+			for (i = start; i < start + count; ++i) {
+				unsigned int j = i + reorder[i % 3];
+				DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
+			}
+
+			DMA_COMMIT();
+		} else {
+			BEGIN_DMA(count * vtx_size + 1);
+			DMA_DRAW_PRIMITIVE(count, prim, skip);
+
+			if (vb_stride == vtx_size) {
+				DMA_COPY(&vtxbuf[vb_stride * start],
+					 vtx_size * count);
+			} else {
+				for (i = start; i < start + count; ++i) {
+					DMA_COPY(&vtxbuf[vb_stride * i],
+						 vtx_size);
+				}
+			}
+
+			DMA_COMMIT();
+		}
+
+		start += count;
+		n -= count;
+
+		prim |= BCI_CMD_DRAW_CONT;
+	}
+
+	return 0;
+}
+
+static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
+				   const drm_savage_cmd_header_t * cmd_header,
+				   const uint16_t *idx,
+				   const struct drm_buf * dmabuf)
+{
+	unsigned char reorder = 0;
+	unsigned int prim = cmd_header->idx.prim;
+	unsigned int skip = cmd_header->idx.skip;
+	unsigned int n = cmd_header->idx.count;
+	unsigned int i;
+	BCI_LOCALS;
+
+	if (!dmabuf) {
+		DRM_ERROR("called without dma buffers!\n");
+		return -EINVAL;
+	}
+
+	if (!n)
+		return 0;
+
+	switch (prim) {
+	case SAVAGE_PRIM_TRILIST_201:
+		reorder = 1;
+		prim = SAVAGE_PRIM_TRILIST;
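+		/* fall through */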
+	case SAVAGE_PRIM_TRILIST:
+		if (n % 3 != 0) {
+			DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
+			return -EINVAL;
+		}
+		break;
+	case SAVAGE_PRIM_TRISTRIP:
+	case SAVAGE_PRIM_TRIFAN:
+		if (n < 3) {
+			DRM_ERROR
+			    ("wrong number of indices %u in TRIFAN/STRIP\n", n);
+			return -EINVAL;
+		}
+		break;
+	default:
+		DRM_ERROR("invalid primitive type %u\n", prim);
+		return -EINVAL;
+	}
+
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+		if (skip != 0) {
+			DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
+			return -EINVAL;
+		}
+	} else {
+		unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
+		    (skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
+		    (skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
+		if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
+			DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
+			return -EINVAL;
+		}
+		if (reorder) {
+			DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
+			return -EINVAL;
+		}
+	}
+
+	/* Vertex DMA doesn't work with command DMA at the same time,
+	 * so we use BCI_... to submit commands here. Flush buffered
+	 * faked DMA first. */
+	DMA_FLUSH();
+
+	if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
+		BEGIN_BCI(2);
+		BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
+		BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
+		dev_priv->state.common.vbaddr = dmabuf->bus_address;
+	}
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
+		/* Workaround for what looks like a hardware bug. If a
+		 * WAIT_3D_IDLE was emitted some time before the
+		 * indexed drawing command then the engine will lock
+		 * up. There are two known workarounds:
+		 * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
+		BEGIN_BCI(63);
+		for (i = 0; i < 63; ++i)
+			BCI_WRITE(BCI_CMD_WAIT);
+		dev_priv->waiting = 0;
+	}
+
+	prim <<= 25;
+	while (n != 0) {
+		/* Can emit up to 255 indices (85 triangles) at once. */
+		unsigned int count = n > 255 ? 255 : n;
+
+		/* check indices */
+		for (i = 0; i < count; ++i) {
+			if (idx[i] > dmabuf->total / 32) {
+				DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
+					  i, idx[i], dmabuf->total / 32);
+				return -EINVAL;
+			}
+		}
+
+		if (reorder) {
+			/* Need to reorder indices for correct flat
+			 * shading while preserving the clock sense
+			 * for correct culling. Only on Savage3D. */
+			int reorder[3] = { 2, -1, -1 };
+
+			BEGIN_BCI((count + 1 + 1) / 2);
+			BCI_DRAW_INDICES_S3D(count, prim, idx[2]);
+
+			for (i = 1; i + 1 < count; i += 2)
+				BCI_WRITE(idx[i + reorder[i % 3]] |
+					  (idx[i + 1 +
+					   reorder[(i + 1) % 3]] << 16));
+			if (i < count)
+				BCI_WRITE(idx[i + reorder[i % 3]]);
+		} else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+			BEGIN_BCI((count + 1 + 1) / 2);
+			BCI_DRAW_INDICES_S3D(count, prim, idx[0]);
+
+			for (i = 1; i + 1 < count; i += 2)
+				BCI_WRITE(idx[i] | (idx[i + 1] << 16));
+			if (i < count)
+				BCI_WRITE(idx[i]);
+		} else {
+			BEGIN_BCI((count + 2 + 1) / 2);
+			BCI_DRAW_INDICES_S4(count, prim, skip);
+
+			for (i = 0; i + 1 < count; i += 2)
+				BCI_WRITE(idx[i] | (idx[i + 1] << 16));
+			if (i < count)
+				BCI_WRITE(idx[i]);
+		}
+
+		idx += count;
+		n -= count;
+
+		prim |= BCI_CMD_DRAW_CONT;
+	}
+
+	return 0;
+}
+
+static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
+				  const drm_savage_cmd_header_t * cmd_header,
+				  const uint16_t *idx,
+				  const uint32_t *vtxbuf,
+				  unsigned int vb_size, unsigned int vb_stride)
+{
+	unsigned char reorder = 0;
+	unsigned int prim = cmd_header->idx.prim;
+	unsigned int skip = cmd_header->idx.skip;
+	unsigned int n = cmd_header->idx.count;
+	unsigned int vtx_size;
+	unsigned int i;
+	DMA_LOCALS;
+
+	if (!n)
+		return 0;
+
+	switch (prim) {
+	case SAVAGE_PRIM_TRILIST_201:
+		reorder = 1;
+		prim = SAVAGE_PRIM_TRILIST;
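+		/* fall through */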
+	case SAVAGE_PRIM_TRILIST:
+		if (n % 3 != 0) {
+			DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
+			return -EINVAL;
+		}
+		break;
+	case SAVAGE_PRIM_TRISTRIP:
+	case SAVAGE_PRIM_TRIFAN:
+		if (n < 3) {
+			DRM_ERROR
+			    ("wrong number of indices %u in TRIFAN/STRIP\n", n);
+			return -EINVAL;
+		}
+		break;
+	default:
+		DRM_ERROR("invalid primitive type %u\n", prim);
+		return -EINVAL;
+	}
+
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+		if (skip > SAVAGE_SKIP_ALL_S3D) {
+			DRM_ERROR("invalid skip flags 0x%04x\n", skip);
+			return -EINVAL;
+		}
+		vtx_size = 8;	/* full vertex */
+	} else {
+		if (skip > SAVAGE_SKIP_ALL_S4) {
+			DRM_ERROR("invalid skip flags 0x%04x\n", skip);
+			return -EINVAL;
+		}
+		vtx_size = 10;	/* full vertex */
+	}
+
+	vtx_size -= (skip & 1) + (skip >> 1 & 1) +
+	    (skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
+	    (skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
+
+	if (vtx_size > vb_stride) {
+		DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
+			  vtx_size, vb_stride);
+		return -EINVAL;
+	}
+
+	prim <<= 25;
+	while (n != 0) {
+		/* Can emit up to 255 vertices (85 triangles) at once. */
+		unsigned int count = n > 255 ? 255 : n;
+
+		/* Check indices */
+		for (i = 0; i < count; ++i) {
+			if (idx[i] > vb_size / (vb_stride * 4)) {
+				DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
+					  i, idx[i], vb_size / (vb_stride * 4));
+				return -EINVAL;
+			}
+		}
+
+		if (reorder) {
+			/* Need to reorder vertices for correct flat
+			 * shading while preserving the clock sense
+			 * for correct culling. Only on Savage3D. */
+			int reorder[3] = { 2, -1, -1 };
+
+			BEGIN_DMA(count * vtx_size + 1);
+			DMA_DRAW_PRIMITIVE(count, prim, skip);
+
+			for (i = 0; i < count; ++i) {
+				unsigned int j = idx[i + reorder[i % 3]];
+				DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
+			}
+
+			DMA_COMMIT();
+		} else {
+			BEGIN_DMA(count * vtx_size + 1);
+			DMA_DRAW_PRIMITIVE(count, prim, skip);
+
+			for (i = 0; i < count; ++i) {
+				unsigned int j = idx[i];
+				DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
+			}
+
+			DMA_COMMIT();
+		}
+
+		idx += count;
+		n -= count;
+
+		prim |= BCI_CMD_DRAW_CONT;
+	}
+
+	return 0;
+}
+
+static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
+				 const drm_savage_cmd_header_t * cmd_header,
+				 const drm_savage_cmd_header_t *data,
+				 unsigned int nbox,
+				 const struct drm_clip_rect *boxes)
+{
+	unsigned int flags = cmd_header->clear0.flags;
+	unsigned int clear_cmd;
+	unsigned int i, nbufs;
+	DMA_LOCALS;
+
+	if (nbox == 0)
+		return 0;
+
+	clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
+	    BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW;
+	BCI_CMD_SET_ROP(clear_cmd, 0xCC);
+
+	nbufs = ((flags & SAVAGE_FRONT) ? 1 : 0) +
+	    ((flags & SAVAGE_BACK) ? 1 : 0) + ((flags & SAVAGE_DEPTH) ? 1 : 0);
+	if (nbufs == 0)
+		return 0;
+
+	if (data->clear1.mask != 0xffffffff) {
+		/* set mask */
+		BEGIN_DMA(2);
+		DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
+		DMA_WRITE(data->clear1.mask);
+		DMA_COMMIT();
+	}
+	for (i = 0; i < nbox; ++i) {
+		unsigned int x, y, w, h;
+		unsigned int buf;
+		x = boxes[i].x1, y = boxes[i].y1;
+		w = boxes[i].x2 - boxes[i].x1;
+		h = boxes[i].y2 - boxes[i].y1;
+		BEGIN_DMA(nbufs * 6);
+		for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) {
+			if (!(flags & buf))
+				continue;
+			DMA_WRITE(clear_cmd);
+			switch (buf) {
+			case SAVAGE_FRONT:
+				DMA_WRITE(dev_priv->front_offset);
+				DMA_WRITE(dev_priv->front_bd);
+				break;
+			case SAVAGE_BACK:
+				DMA_WRITE(dev_priv->back_offset);
+				DMA_WRITE(dev_priv->back_bd);
+				break;
+			case SAVAGE_DEPTH:
+				DMA_WRITE(dev_priv->depth_offset);
+				DMA_WRITE(dev_priv->depth_bd);
+				break;
+			}
+			DMA_WRITE(data->clear1.value);
+			DMA_WRITE(BCI_X_Y(x, y));
+			DMA_WRITE(BCI_W_H(w, h));
+		}
+		DMA_COMMIT();
+	}
+	if (data->clear1.mask != 0xffffffff) {
+		/* reset mask */
+		BEGIN_DMA(2);
+		DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
+		DMA_WRITE(0xffffffff);
+		DMA_COMMIT();
+	}
+
+	return 0;
+}
+
+static int savage_dispatch_swap(drm_savage_private_t * dev_priv,
+				unsigned int nbox, const struct drm_clip_rect *boxes)
+{
+	unsigned int swap_cmd;
+	unsigned int i;
+	DMA_LOCALS;
+
+	if (nbox == 0)
+		return 0;
+
+	swap_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
+	    BCI_CMD_SRC_PBD_COLOR_NEW | BCI_CMD_DEST_GBD;
+	BCI_CMD_SET_ROP(swap_cmd, 0xCC);
+
+	for (i = 0; i < nbox; ++i) {
+		BEGIN_DMA(6);
+		DMA_WRITE(swap_cmd);
+		DMA_WRITE(dev_priv->back_offset);
+		DMA_WRITE(dev_priv->back_bd);
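+		/* source (back buffer) and destination coordinates are
+		 * identical */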
+		DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
+		DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
+		DMA_WRITE(BCI_W_H(boxes[i].x2 - boxes[i].x1,
+				  boxes[i].y2 - boxes[i].y1));
+		DMA_COMMIT();
+	}
+
+	return 0;
+}
+
+static int savage_dispatch_draw(drm_savage_private_t * dev_priv,
+				const drm_savage_cmd_header_t *start,
+				const drm_savage_cmd_header_t *end,
+				const struct drm_buf * dmabuf,
+				const unsigned int *vtxbuf,
+				unsigned int vb_size, unsigned int vb_stride,
+				unsigned int nbox,
+				const struct drm_clip_rect *boxes)
+{
+	unsigned int i, j;
+	int ret;
+
+	for (i = 0; i < nbox; ++i) {
+		const drm_savage_cmd_header_t *cmdbuf;
+		dev_priv->emit_clip_rect(dev_priv, &boxes[i]);
+
+		cmdbuf = start;
+		while (cmdbuf < end) {
+			drm_savage_cmd_header_t cmd_header;
+			cmd_header = *cmdbuf;
+			cmdbuf++;
+			switch (cmd_header.cmd.cmd) {
+			case SAVAGE_CMD_DMA_PRIM:
+				ret = savage_dispatch_dma_prim(
+					dev_priv, &cmd_header, dmabuf);
+				break;
+			case SAVAGE_CMD_VB_PRIM:
+				ret = savage_dispatch_vb_prim(
+					dev_priv, &cmd_header,
+					vtxbuf, vb_size, vb_stride);
+				break;
+			case SAVAGE_CMD_DMA_IDX:
+				j = (cmd_header.idx.count + 3) / 4;
+				/* j was checked in savage_bci_cmdbuf */
+				ret = savage_dispatch_dma_idx(dev_priv,
+					&cmd_header, (const uint16_t *)cmdbuf,
+					dmabuf);
+				cmdbuf += j;
+				break;
+			case SAVAGE_CMD_VB_IDX:
+				j = (cmd_header.idx.count + 3) / 4;
+				/* j was checked in savage_bci_cmdbuf */
+				ret = savage_dispatch_vb_idx(dev_priv,
+					&cmd_header, (const uint16_t *)cmdbuf,
+					(const uint32_t *)vtxbuf, vb_size,
+					vb_stride);
+				cmdbuf += j;
+				break;
+			default:
+				/* What's the best return code? EFAULT? */
+				DRM_ERROR("IMPLEMENTATION ERROR: "
+					  "non-drawing-command %d\n",
+					  cmd_header.cmd.cmd);
+				return -EINVAL;
+			}
+
+			if (ret != 0)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
+int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_savage_private_t *dev_priv = dev->dev_private;
+	struct drm_device_dma *dma = dev->dma;
+	struct drm_buf *dmabuf;
+	drm_savage_cmdbuf_t *cmdbuf = data;
+	drm_savage_cmd_header_t *kcmd_addr = NULL;
+	drm_savage_cmd_header_t *first_draw_cmd;
+	unsigned int *kvb_addr = NULL;
+	struct drm_clip_rect *kbox_addr = NULL;
+	unsigned int i, j;
+	int ret = 0;
+
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	if (dma && dma->buflist) {
+		if (cmdbuf->dma_idx >= dma->buf_count) {
+			DRM_ERROR
+			    ("vertex buffer index %u out of range (0-%u)\n",
+			     cmdbuf->dma_idx, dma->buf_count - 1);
+			return -EINVAL;
+		}
+		dmabuf = dma->buflist[cmdbuf->dma_idx];
+	} else {
+		dmabuf = NULL;
+	}
+
+	/* Copy the user buffers into kernel temporary areas.  In other drivers
+	 * this has not been a measurable performance loss compared to
+	 * VERIFYAREA_READ/COPY_FROM_USER_UNCHECKED, and it is correct with
+	 * respect to locking on FreeBSD.
+	 */
+	if (cmdbuf->size) {
+		kcmd_addr = drm_alloc(cmdbuf->size * 8, DRM_MEM_DRIVER);
+		if (kcmd_addr == NULL)
+			return -ENOMEM;
+
+		if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf->cmd_addr,
+				       cmdbuf->size * 8)) {
+			drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER);
+			return -EFAULT;
+		}
+		cmdbuf->cmd_addr = kcmd_addr;
+	}
+	if (cmdbuf->vb_size) {
+		kvb_addr = drm_alloc(cmdbuf->vb_size, DRM_MEM_DRIVER);
+		if (kvb_addr == NULL) {
+			ret = -ENOMEM;
+			goto done;
+		}
+
+		if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf->vb_addr,
+				       cmdbuf->vb_size)) {
+			ret = -EFAULT;
+			goto done;
+		}
+		cmdbuf->vb_addr = kvb_addr;
+	}
+	if (cmdbuf->nbox) {
+		kbox_addr = drm_alloc(cmdbuf->nbox * sizeof(struct drm_clip_rect),
+				       DRM_MEM_DRIVER);
+		if (kbox_addr == NULL) {
+			ret = -ENOMEM;
+			goto done;
+		}
+
+		if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf->box_addr,
+				       cmdbuf->nbox * sizeof(struct drm_clip_rect))) {
+			ret = -EFAULT;
+			goto done;
+		}
+		cmdbuf->box_addr = kbox_addr;
+	}
+
+	/* Make sure writes to DMA buffers are finished before sending
+	 * DMA commands to the graphics hardware. */
+	DRM_MEMORYBARRIER();
+
+	/* Coming from user space. Don't know if the Xserver has
+	 * emitted wait commands. Assuming the worst. */
+	dev_priv->waiting = 1;
+
+	i = 0;
+	first_draw_cmd = NULL;
+	while (i < cmdbuf->size) {
+		drm_savage_cmd_header_t cmd_header;
+		cmd_header = *(drm_savage_cmd_header_t *)cmdbuf->cmd_addr;
+		cmdbuf->cmd_addr++;
+		i++;
+
+		/* Group drawing commands with same state to minimize
+		 * iterations over clip rects. */
+		j = 0;
+		switch (cmd_header.cmd.cmd) {
+		case SAVAGE_CMD_DMA_IDX:
+		case SAVAGE_CMD_VB_IDX:
+			j = (cmd_header.idx.count + 3) / 4;
+			if (i + j > cmdbuf->size) {
+				DRM_ERROR("indexed drawing command extends "
+					  "beyond end of command buffer\n");
+				DMA_FLUSH();
+				ret = -EINVAL;
+				goto done;
+			}
+			/* fall through */
+		case SAVAGE_CMD_DMA_PRIM:
+		case SAVAGE_CMD_VB_PRIM:
+			if (!first_draw_cmd)
+				first_draw_cmd = cmdbuf->cmd_addr - 1;
+			cmdbuf->cmd_addr += j;
+			i += j;
+			break;
+		default:
+			if (first_draw_cmd) {
+				ret = savage_dispatch_draw(
+				      dev_priv, first_draw_cmd,
+				      cmdbuf->cmd_addr - 1,
+				      dmabuf, cmdbuf->vb_addr, cmdbuf->vb_size,
+				      cmdbuf->vb_stride,
+				      cmdbuf->nbox, cmdbuf->box_addr);
+				if (ret != 0) {
+					DMA_FLUSH();
+					goto done;
+				}
+				first_draw_cmd = NULL;
+			}
+		}
+		if (first_draw_cmd)
+			continue;
+
+		switch (cmd_header.cmd.cmd) {
+		case SAVAGE_CMD_STATE:
+			j = (cmd_header.state.count + 1) / 2;
+			if (i + j > cmdbuf->size) {
+				DRM_ERROR("command SAVAGE_CMD_STATE extends "
+					  "beyond end of command buffer\n");
+				DMA_FLUSH();
+				ret = -EINVAL;
+				goto done;
+			}
+			ret = savage_dispatch_state(dev_priv, &cmd_header,
+				(const uint32_t *)cmdbuf->cmd_addr);
+			cmdbuf->cmd_addr += j;
+			i += j;
+			break;
+		case SAVAGE_CMD_CLEAR:
+			if (i + 1 > cmdbuf->size) {
+				DRM_ERROR("command SAVAGE_CMD_CLEAR extends "
+					  "beyond end of command buffer\n");
+				DMA_FLUSH();
+				ret = -EINVAL;
+				goto done;
+			}
+			ret = savage_dispatch_clear(dev_priv, &cmd_header,
+						    cmdbuf->cmd_addr,
+						    cmdbuf->nbox,
+						    cmdbuf->box_addr);
+			cmdbuf->cmd_addr++;
+			i++;
+			break;
+		case SAVAGE_CMD_SWAP:
+			ret = savage_dispatch_swap(dev_priv, cmdbuf->nbox,
+						   cmdbuf->box_addr);
+			break;
+		default:
+			DRM_ERROR("invalid command 0x%x\n",
+				  cmd_header.cmd.cmd);
+			DMA_FLUSH();
+			ret = -EINVAL;
+			goto done;
+		}
+
+		if (ret != 0) {
+			DMA_FLUSH();
+			goto done;
+		}
+	}
+
+	if (first_draw_cmd) {
+		ret = savage_dispatch_draw(
+			dev_priv, first_draw_cmd, cmdbuf->cmd_addr, dmabuf,
+			cmdbuf->vb_addr, cmdbuf->vb_size, cmdbuf->vb_stride,
+			cmdbuf->nbox, cmdbuf->box_addr);
+		if (ret != 0) {
+			DMA_FLUSH();
+			goto done;
+		}
+	}
+
+	DMA_FLUSH();
+
+	if (dmabuf && cmdbuf->discard) {
+		drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private;
+		uint16_t event;
+		event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
+		SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
+		savage_freelist_put(dev, dmabuf);
+	}
+
+done:
+	/* If we didn't need to allocate them, these'll be NULL */
+	drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER);
+	drm_free(kvb_addr, cmdbuf->vb_size, DRM_MEM_DRIVER);
+	drm_free(kbox_addr, cmdbuf->nbox * sizeof(struct drm_clip_rect),
+		 DRM_MEM_DRIVER);
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/sis/Makefile b/drivers/gpu/drm/sis/Makefile
new file mode 100644
index 000000000000..441c061c3ad0
--- /dev/null
+++ b/drivers/gpu/drm/sis/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y = -Iinclude/drm
+sis-y := sis_drv.o sis_mm.o
+
+obj-$(CONFIG_DRM_SIS)   += sis.o
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
new file mode 100644
index 000000000000..7dacc64e9b56
--- /dev/null
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -0,0 +1,117 @@
+/* sis.c -- sis driver -*- linux-c -*-
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "sis_drm.h"
+#include "sis_drv.h"
+
+#include "drm_pciids.h"
+
+static struct pci_device_id pciidlist[] = {
+	sisdrv_PCI_IDS
+};
+
+static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
+{
+	drm_sis_private_t *dev_priv;
+	int ret;
+
+	dev_priv = drm_calloc(1, sizeof(drm_sis_private_t), DRM_MEM_DRIVER);
+	if (dev_priv == NULL)
+		return -ENOMEM;
+
+	dev->dev_private = (void *)dev_priv;
+	dev_priv->chipset = chipset;
+	ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
+	if (ret) {
+		drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
+	}
+
+	return ret;
+}
+
+static int sis_driver_unload(struct drm_device *dev)
+{
+	drm_sis_private_t *dev_priv = dev->dev_private;
+
+	drm_sman_takedown(&dev_priv->sman);
+	drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
+
+	return 0;
+}
+
+static struct drm_driver driver = {
+	.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR,
+	.load = sis_driver_load,
+	.unload = sis_driver_unload,
+	.context_dtor = NULL,
+	.dma_quiescent = sis_idle,
+	.reclaim_buffers = NULL,
+	.reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
+	.lastclose = sis_lastclose,
+	.get_map_ofs = drm_core_get_map_ofs,
+	.get_reg_ofs = drm_core_get_reg_ofs,
+	.ioctls = sis_ioctls,
+	.fops = {
+		 .owner = THIS_MODULE,
+		 .open = drm_open,
+		 .release = drm_release,
+		 .ioctl = drm_ioctl,
+		 .mmap = drm_mmap,
+		 .poll = drm_poll,
+		 .fasync = drm_fasync,
+	},
+	.pci_driver = {
+		 .name = DRIVER_NAME,
+		 .id_table = pciidlist,
+	},
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static int __init sis_init(void)
+{
+	driver.num_ioctls = sis_max_ioctl;
+	return drm_init(&driver);
+}
+
+static void __exit sis_exit(void)
+{
+	drm_exit(&driver);
+}
+
+module_init(sis_init);
+module_exit(sis_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/sis/sis_drv.h b/drivers/gpu/drm/sis/sis_drv.h
new file mode 100644
index 000000000000..ef940bad63f7
--- /dev/null
+++ b/drivers/gpu/drm/sis/sis_drv.h
@@ -0,0 +1,73 @@
+/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _SIS_DRV_H_
+#define _SIS_DRV_H_
+
+/* General customization:
+ */
+
+#define DRIVER_AUTHOR		"SIS, Tungsten Graphics"
+#define DRIVER_NAME		"sis"
+#define DRIVER_DESC		"SIS 300/630/540 and XGI V3XE/V5/V8"
+#define DRIVER_DATE		"20070626"
+#define DRIVER_MAJOR		1
+#define DRIVER_MINOR		3
+#define DRIVER_PATCHLEVEL	0
+
+enum sis_family {
+	SIS_OTHER = 0,
+	SIS_CHIP_315 = 1,
+};
+
+#include "drm_sman.h"
+
+
+#define SIS_BASE (dev_priv->mmio)
+#define SIS_READ(reg)         DRM_READ32(SIS_BASE, reg)
+#define SIS_WRITE(reg, val)   DRM_WRITE32(SIS_BASE, reg, val)
+
+typedef struct drm_sis_private {
+	drm_local_map_t *mmio;
+	unsigned int idle_fault;
+	struct drm_sman sman;
+	unsigned int chipset;
+	int vram_initialized;
+	int agp_initialized;
+	unsigned long vram_offset;
+	unsigned long agp_offset;
+} drm_sis_private_t;
+
+extern int sis_idle(struct drm_device *dev);
+extern void sis_reclaim_buffers_locked(struct drm_device *dev,
+				       struct drm_file *file_priv);
+extern void sis_lastclose(struct drm_device *dev);
+
+extern struct drm_ioctl_desc sis_ioctls[];
+extern int sis_max_ioctl;
+
+#endif
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
new file mode 100644
index 000000000000..b3878770fce1
--- /dev/null
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -0,0 +1,333 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+
+/*
+ * Authors:
+ *    Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+#include "sis_drm.h"
+#include "sis_drv.h"
+
+#include <video/sisfb.h>
+
+#define VIDEO_TYPE 0
+#define AGP_TYPE 1
+
+
+#if defined(CONFIG_FB_SIS)
+/* fb management via fb device */
+
+#define SIS_MM_ALIGN_SHIFT 0
+#define SIS_MM_ALIGN_MASK 0
+
+static void *sis_sman_mm_allocate(void *private, unsigned long size,
+				  unsigned alignment)
+{
+	struct sis_memreq req;
+
+	req.size = size;
+	sis_malloc(&req);
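+	/*
+	 * A zero request size coming back from sis_malloc() signals failure.
+	 * A valid offset of 0 would be indistinguishable from the NULL
+	 * failure return below, so the offset is stored one's-complemented;
+	 * sis_sman_mm_free() and sis_sman_mm_offset() undo the complement.
+	 */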
+	if (req.size == 0)
+		return NULL;
+	else
+		return (void *)~req.offset;
+}
+
+static void sis_sman_mm_free(void *private, void *ref)
+{
+	sis_free(~((unsigned long)ref));
+}
+
+static void sis_sman_mm_destroy(void *private)
+{
+	;
+}
+
+static unsigned long sis_sman_mm_offset(void *private, void *ref)
+{
+	return ~((unsigned long)ref);
+}
+
+#else /* CONFIG_FB_SIS */
+
+#define SIS_MM_ALIGN_SHIFT 4
+#define SIS_MM_ALIGN_MASK ((1 << SIS_MM_ALIGN_SHIFT) - 1)
+
+#endif /* CONFIG_FB_SIS */
+
+static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_sis_private_t *dev_priv = dev->dev_private;
+	drm_sis_fb_t *fb = data;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
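+	/*
+	 * With CONFIG_FB_SIS the sisfb driver owns video RAM, so allocations
+	 * are delegated to it through the sis_sman_mm_* callbacks; otherwise
+	 * drm_sman manages the fb->size range directly.
+	 */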
+#if defined(CONFIG_FB_SIS)
+	{
+		struct drm_sman_mm sman_mm;
+		sman_mm.private = (void *)0xFFFFFFFF;
+		sman_mm.allocate = sis_sman_mm_allocate;
+		sman_mm.free = sis_sman_mm_free;
+		sman_mm.destroy = sis_sman_mm_destroy;
+		sman_mm.offset = sis_sman_mm_offset;
+		ret = drm_sman_set_manager(&dev_priv->sman, VIDEO_TYPE,
+					   &sman_mm);
+	}
+#else
+	ret = drm_sman_set_range(&dev_priv->sman, VIDEO_TYPE, 0,
+				 fb->size >> SIS_MM_ALIGN_SHIFT);
+#endif
+
+	if (ret) {
+		DRM_ERROR("VRAM memory manager initialisation error\n");
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	dev_priv->vram_initialized = 1;
+	dev_priv->vram_offset = fb->offset;
+
+	mutex_unlock(&dev->struct_mutex);
+	DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
+
+	return 0;
+}
+
+static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
+			 void *data, int pool)
+{
+	drm_sis_private_t *dev_priv = dev->dev_private;
+	drm_sis_mem_t *mem = data;
+	int retval = 0;
+	struct drm_memblock_item *item;
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (0 == ((pool == 0) ? dev_priv->vram_initialized :
+		  dev_priv->agp_initialized)) {
+		DRM_ERROR("Attempt to allocate from uninitialized memory manager.\n");
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
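+	/*
+	 * Round the request up to the allocation granularity and convert it
+	 * to units of (1 << SIS_MM_ALIGN_SHIFT) bytes; it is converted back
+	 * before being handed to user space below.
+	 */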
+	mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
+	item = drm_sman_alloc(&dev_priv->sman, pool, mem->size, 0,
+			      (unsigned long)file_priv);
+
+	mutex_unlock(&dev->struct_mutex);
+	if (item) {
+		mem->offset = ((pool == 0) ?
+			       dev_priv->vram_offset : dev_priv->agp_offset) +
+			      (item->mm->offset(item->mm, item->mm_info) <<
+			       SIS_MM_ALIGN_SHIFT);
+		mem->free = item->user_hash.key;
+		mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
+	} else {
+		mem->offset = 0;
+		mem->size = 0;
+		mem->free = 0;
+		retval = -ENOMEM;
+	}
+
+	DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem->size,
+		  mem->offset);
+
+	return retval;
+}
+
+static int sis_drm_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_sis_private_t *dev_priv = dev->dev_private;
+	drm_sis_mem_t *mem = data;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = drm_sman_free_key(&dev_priv->sman, mem->free);
+	mutex_unlock(&dev->struct_mutex);
+	DRM_DEBUG("free = 0x%lx\n", mem->free);
+
+	return ret;
+}
+
+static int sis_fb_alloc(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	return sis_drm_alloc(dev, file_priv, data, VIDEO_TYPE);
+}
+
+static int sis_ioctl_agp_init(struct drm_device *dev, void *data,
+			      struct drm_file *file_priv)
+{
+	drm_sis_private_t *dev_priv = dev->dev_private;
+	drm_sis_agp_t *agp = data;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = drm_sman_set_range(&dev_priv->sman, AGP_TYPE, 0,
+				 agp->size >> SIS_MM_ALIGN_SHIFT);
+
+	if (ret) {
+		DRM_ERROR("AGP memory manager initialisation error\n");
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	dev_priv->agp_initialized = 1;
+	dev_priv->agp_offset = agp->offset;
+	mutex_unlock(&dev->struct_mutex);
+
+	DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
+	return 0;
+}
+
+static int sis_ioctl_agp_alloc(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv)
+{
+	return sis_drm_alloc(dev, file_priv, data, AGP_TYPE);
+}
+
+static drm_local_map_t *sis_reg_init(struct drm_device *dev)
+{
+	struct drm_map_list *entry;
+	drm_local_map_t *map;
+
+	list_for_each_entry(entry, &dev->maplist, head) {
+		map = entry->map;
+		if (!map)
+			continue;
+		if (map->type == _DRM_REGISTERS)
+			return map;
+	}
+	return NULL;
+}
+
+int sis_idle(struct drm_device *dev)
+{
+	drm_sis_private_t *dev_priv = dev->dev_private;
+	uint32_t idle_reg;
+	unsigned long end;
+	int i;
+
+	if (dev_priv->idle_fault)
+		return 0;
+
+	if (dev_priv->mmio == NULL) {
+		dev_priv->mmio = sis_reg_init(dev);
+		if (dev_priv->mmio == NULL) {
+			DRM_ERROR("Could not find register map.\n");
+			return 0;
+		}
+	}
+
+	/*
+	 * Implement a device switch here if needed
+	 */
+
+	if (dev_priv->chipset != SIS_CHIP_315)
+		return 0;
+
+	/*
+	 * Timeout after 3 seconds. We cannot use DRM_WAIT_ON here
+	 * because its polling frequency is too low.
+	 */
+
+	end = jiffies + (DRM_HZ * 3);
+
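+	/*
+	 * Poll the engine status register until bit 31 reports idle or the
+	 * deadline passes; the register is sampled over four passes.
+	 */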
+	for (i = 0; i < 4; ++i) {
+		do {
+			idle_reg = SIS_READ(0x85cc);
+		} while (!time_after_eq(jiffies, end) &&
+			 ((idle_reg & 0x80000000) != 0x80000000));
+	}
+
+	if (time_after_eq(jiffies, end)) {
+		DRM_ERROR("Graphics engine idle timeout. "
+			  "Disabling idle check\n");
+		dev_priv->idle_fault = 1;
+	}
+
+	/*
+	 * The caller never sees an error code. It gets trapped
+	 * in libdrm.
+	 */
+
+	return 0;
+}
+
+void sis_lastclose(struct drm_device *dev)
+{
+	drm_sis_private_t *dev_priv = dev->dev_private;
+
+	if (!dev_priv)
+		return;
+
+	mutex_lock(&dev->struct_mutex);
+	drm_sman_cleanup(&dev_priv->sman);
+	dev_priv->vram_initialized = 0;
+	dev_priv->agp_initialized = 0;
+	dev_priv->mmio = NULL;
+	mutex_unlock(&dev->struct_mutex);
+}
+
+void sis_reclaim_buffers_locked(struct drm_device * dev,
+				struct drm_file *file_priv)
+{
+	drm_sis_private_t *dev_priv = dev->dev_private;
+
+	mutex_lock(&dev->struct_mutex);
+	if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
+		mutex_unlock(&dev->struct_mutex);
+		return;
+	}
+
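+	/* Make sure the engine is idle before the blocks are handed back. */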
+	if (dev->driver->dma_quiescent)
+		dev->driver->dma_quiescent(dev);
+
+	drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
+	mutex_unlock(&dev->struct_mutex);
+}
+
+struct drm_ioctl_desc sis_ioctls[] = {
+	DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_drm_free, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_drm_free, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
+};
+
+int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
diff --git a/drivers/gpu/drm/tdfx/Makefile b/drivers/gpu/drm/tdfx/Makefile
new file mode 100644
index 000000000000..0379f294b32a
--- /dev/null
+++ b/drivers/gpu/drm/tdfx/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+tdfx-y := tdfx_drv.o
+
+obj-$(CONFIG_DRM_TDFX)	+= tdfx.o
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
new file mode 100644
index 000000000000..012ff2e356b2
--- /dev/null
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -0,0 +1,84 @@
+/* tdfx_drv.c -- tdfx driver -*- linux-c -*-
+ * Created: Thu Oct  7 10:38:32 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Rickard E. (Rik) Faith <faith@valinux.com>
+ *    Daryll Strauss <daryll@valinux.com>
+ *    Gareth Hughes <gareth@valinux.com>
+ */
+
+#include "drmP.h"
+#include "tdfx_drv.h"
+
+#include "drm_pciids.h"
+
+static struct pci_device_id pciidlist[] = {
+	tdfx_PCI_IDS
+};
+
+static struct drm_driver driver = {
+	.driver_features = DRIVER_USE_MTRR,
+	.reclaim_buffers = drm_core_reclaim_buffers,
+	.get_map_ofs = drm_core_get_map_ofs,
+	.get_reg_ofs = drm_core_get_reg_ofs,
+	.fops = {
+		 .owner = THIS_MODULE,
+		 .open = drm_open,
+		 .release = drm_release,
+		 .ioctl = drm_ioctl,
+		 .mmap = drm_mmap,
+		 .poll = drm_poll,
+		 .fasync = drm_fasync,
+	},
+	.pci_driver = {
+		 .name = DRIVER_NAME,
+		 .id_table = pciidlist,
+	},
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static int __init tdfx_init(void)
+{
+	return drm_init(&driver);
+}
+
+static void __exit tdfx_exit(void)
+{
+	drm_exit(&driver);
+}
+
+module_init(tdfx_init);
+module_exit(tdfx_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.h b/drivers/gpu/drm/tdfx/tdfx_drv.h
new file mode 100644
index 000000000000..84204ec1b046
--- /dev/null
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.h
@@ -0,0 +1,47 @@
+/* tdfx_drv.h -- 3dfx DRM template customization -*- linux-c -*-
+ * Created: Wed Feb 14 12:32:32 2001 by gareth@valinux.com
+ */
+/*
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Gareth Hughes <gareth@valinux.com>
+ */
+
+#ifndef __TDFX_H__
+#define __TDFX_H__
+
+/* General customization:
+ */
+
+#define DRIVER_AUTHOR		"VA Linux Systems Inc."
+
+#define DRIVER_NAME		"tdfx"
+#define DRIVER_DESC		"3dfx Banshee/Voodoo3+"
+#define DRIVER_DATE		"20010216"
+
+#define DRIVER_MAJOR		1
+#define DRIVER_MINOR		0
+#define DRIVER_PATCHLEVEL	0
+
+#endif
diff --git a/drivers/gpu/drm/via/Makefile b/drivers/gpu/drm/via/Makefile
new file mode 100644
index 000000000000..d59e258e2c13
--- /dev/null
+++ b/drivers/gpu/drm/via/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+via-y    := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o via_dmablit.o
+
+obj-$(CONFIG_DRM_VIA)	+= via.o
diff --git a/drivers/gpu/drm/via/via_3d_reg.h b/drivers/gpu/drm/via/via_3d_reg.h
new file mode 100644
index 000000000000..462375d543b9
--- /dev/null
+++ b/drivers/gpu/drm/via/via_3d_reg.h
@@ -0,0 +1,1650 @@
+/*
+ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef VIA_3D_REG_H
+#define VIA_3D_REG_H
+#define HC_REG_BASE             0x0400
+
+#define HC_REG_TRANS_SPACE      0x0040
+
+#define HC_ParaN_MASK           0xffffffff
+#define HC_Para_MASK            0x00ffffff
+#define HC_SubA_MASK            0xff000000
+#define HC_SubA_SHIFT           24
+/* Transmission Setting
+ */
+#define HC_REG_TRANS_SET        0x003c
+#define HC_ParaSubType_MASK     0xff000000
+#define HC_ParaType_MASK        0x00ff0000
+#define HC_ParaOS_MASK          0x0000ff00
+#define HC_ParaAdr_MASK         0x000000ff
+#define HC_ParaSubType_SHIFT    24
+#define HC_ParaType_SHIFT       16
+#define HC_ParaOS_SHIFT         8
+#define HC_ParaAdr_SHIFT        0
+
+#define HC_ParaType_CmdVdata    0x0000
+#define HC_ParaType_NotTex      0x0001
+#define HC_ParaType_Tex         0x0002
+#define HC_ParaType_Palette     0x0003
+#define HC_ParaType_PreCR       0x0010
+#define HC_ParaType_Auto        0x00fe
+
+/* Transmission Space
+ */
+#define HC_REG_Hpara0           0x0040
+#define HC_REG_HpataAF          0x02fc
+
+/* Read
+ */
+#define HC_REG_HREngSt          0x0000
+#define HC_REG_HRFIFOempty      0x0004
+#define HC_REG_HRFIFOfull       0x0008
+#define HC_REG_HRErr            0x000c
+#define HC_REG_FIFOstatus       0x0010
+/* HC_REG_HREngSt          0x0000
+ */
+#define HC_HDASZC_MASK          0x00010000
+#define HC_HSGEMI_MASK          0x0000f000
+#define HC_HLGEMISt_MASK        0x00000f00
+#define HC_HCRSt_MASK           0x00000080
+#define HC_HSE0St_MASK          0x00000040
+#define HC_HSE1St_MASK          0x00000020
+#define HC_HPESt_MASK           0x00000010
+#define HC_HXESt_MASK           0x00000008
+#define HC_HBESt_MASK           0x00000004
+#define HC_HE2St_MASK           0x00000002
+#define HC_HE3St_MASK           0x00000001
+/* HC_REG_HRFIFOempty      0x0004
+ */
+#define HC_HRZDempty_MASK       0x00000010
+#define HC_HRTXAempty_MASK      0x00000008
+#define HC_HRTXDempty_MASK      0x00000004
+#define HC_HWZDempty_MASK       0x00000002
+#define HC_HWCDempty_MASK       0x00000001
+/* HC_REG_HRFIFOfull       0x0008
+ */
+#define HC_HRZDfull_MASK        0x00000010
+#define HC_HRTXAfull_MASK       0x00000008
+#define HC_HRTXDfull_MASK       0x00000004
+#define HC_HWZDfull_MASK        0x00000002
+#define HC_HWCDfull_MASK        0x00000001
+/* HC_REG_HRErr            0x000c
+ */
+#define HC_HAGPCMErr_MASK       0x80000000
+#define HC_HAGPCMErrC_MASK      0x70000000
+/* HC_REG_FIFOstatus       0x0010
+ */
+#define HC_HRFIFOATall_MASK     0x80000000
+#define HC_HRFIFOATbusy_MASK    0x40000000
+#define HC_HRATFGMDo_MASK       0x00000100
+#define HC_HRATFGMDi_MASK       0x00000080
+#define HC_HRATFRZD_MASK        0x00000040
+#define HC_HRATFRTXA_MASK       0x00000020
+#define HC_HRATFRTXD_MASK       0x00000010
+#define HC_HRATFWZD_MASK        0x00000008
+#define HC_HRATFWCD_MASK        0x00000004
+#define HC_HRATTXTAG_MASK       0x00000002
+#define HC_HRATTXCH_MASK        0x00000001
+
+/* AGP Command Setting
+ */
+#define HC_SubA_HAGPBstL        0x0060
+#define HC_SubA_HAGPBendL       0x0061
+#define HC_SubA_HAGPCMNT        0x0062
+#define HC_SubA_HAGPBpL         0x0063
+#define HC_SubA_HAGPBpH         0x0064
+/* HC_SubA_HAGPCMNT        0x0062
+ */
+#define HC_HAGPCMNT_MASK        0x00800000
+#define HC_HCmdErrClr_MASK      0x00400000
+#define HC_HAGPBendH_MASK       0x0000ff00
+#define HC_HAGPBstH_MASK        0x000000ff
+#define HC_HAGPBendH_SHIFT      8
+#define HC_HAGPBstH_SHIFT       0
+/* HC_SubA_HAGPBpL         0x0063
+ */
+#define HC_HAGPBpL_MASK         0x00fffffc
+#define HC_HAGPBpID_MASK        0x00000003
+#define HC_HAGPBpID_PAUSE       0x00000000
+#define HC_HAGPBpID_JUMP        0x00000001
+#define HC_HAGPBpID_STOP        0x00000002
+/* HC_SubA_HAGPBpH         0x0064
+ */
+#define HC_HAGPBpH_MASK         0x00ffffff
+
+/* Miscellaneous Settings
+ */
+#define HC_SubA_HClipTB         0x0070
+#define HC_SubA_HClipLR         0x0071
+#define HC_SubA_HFPClipTL       0x0072
+#define HC_SubA_HFPClipBL       0x0073
+#define HC_SubA_HFPClipLL       0x0074
+#define HC_SubA_HFPClipRL       0x0075
+#define HC_SubA_HFPClipTBH      0x0076
+#define HC_SubA_HFPClipLRH      0x0077
+#define HC_SubA_HLP             0x0078
+#define HC_SubA_HLPRF           0x0079
+#define HC_SubA_HSolidCL        0x007a
+#define HC_SubA_HPixGC          0x007b
+#define HC_SubA_HSPXYOS         0x007c
+#define HC_SubA_HVertexCNT      0x007d
+
+#define HC_HClipT_MASK          0x00fff000
+#define HC_HClipT_SHIFT         12
+#define HC_HClipB_MASK          0x00000fff
+#define HC_HClipB_SHIFT         0
+#define HC_HClipL_MASK          0x00fff000
+#define HC_HClipL_SHIFT         12
+#define HC_HClipR_MASK          0x00000fff
+#define HC_HClipR_SHIFT         0
+#define HC_HFPClipBH_MASK       0x0000ff00
+#define HC_HFPClipBH_SHIFT      8
+#define HC_HFPClipTH_MASK       0x000000ff
+#define HC_HFPClipTH_SHIFT      0
+#define HC_HFPClipRH_MASK       0x0000ff00
+#define HC_HFPClipRH_SHIFT      8
+#define HC_HFPClipLH_MASK       0x000000ff
+#define HC_HFPClipLH_SHIFT      0
+#define HC_HSolidCH_MASK        0x000000ff
+#define HC_HPixGC_MASK          0x00800000
+#define HC_HSPXOS_MASK          0x00fff000
+#define HC_HSPXOS_SHIFT         12
+#define HC_HSPYOS_MASK          0x00000fff
+
+/* Command
+ * Command A
+ */
+#define HC_HCmdHeader_MASK      0xfe000000	/*0xffe00000 */
+#define HC_HE3Fire_MASK         0x00100000
+#define HC_HPMType_MASK         0x000f0000
+#define HC_HEFlag_MASK          0x0000e000
+#define HC_HShading_MASK        0x00001c00
+#define HC_HPMValidN_MASK       0x00000200
+#define HC_HPLEND_MASK          0x00000100
+#define HC_HVCycle_MASK         0x000000ff
+#define HC_HVCycle_Style_MASK   0x000000c0
+#define HC_HVCycle_ChgA_MASK    0x00000030
+#define HC_HVCycle_ChgB_MASK    0x0000000c
+#define HC_HVCycle_ChgC_MASK    0x00000003
+#define HC_HPMType_Point        0x00000000
+#define HC_HPMType_Line         0x00010000
+#define HC_HPMType_Tri          0x00020000
+#define HC_HPMType_TriWF        0x00040000
+#define HC_HEFlag_NoAA          0x00000000
+#define HC_HEFlag_ab            0x00008000
+#define HC_HEFlag_bc            0x00004000
+#define HC_HEFlag_ca            0x00002000
+#define HC_HShading_Solid       0x00000000
+#define HC_HShading_FlatA       0x00000400
+#define HC_HShading_FlatB       0x00000800
+#define HC_HShading_FlatC       0x00000c00
+#define HC_HShading_Gouraud     0x00001000
+#define HC_HVCycle_Full         0x00000000
+#define HC_HVCycle_AFP          0x00000040
+#define HC_HVCycle_One          0x000000c0
+#define HC_HVCycle_NewA         0x00000000
+#define HC_HVCycle_AA           0x00000010
+#define HC_HVCycle_AB           0x00000020
+#define HC_HVCycle_AC           0x00000030
+#define HC_HVCycle_NewB         0x00000000
+#define HC_HVCycle_BA           0x00000004
+#define HC_HVCycle_BB           0x00000008
+#define HC_HVCycle_BC           0x0000000c
+#define HC_HVCycle_NewC         0x00000000
+#define HC_HVCycle_CA           0x00000001
+#define HC_HVCycle_CB           0x00000002
+#define HC_HVCycle_CC           0x00000003
+
+/* Command B
+ */
+#define HC_HLPrst_MASK          0x00010000
+#define HC_HLLastP_MASK         0x00008000
+#define HC_HVPMSK_MASK          0x00007f80
+#define HC_HBFace_MASK          0x00000040
+#define HC_H2nd1VT_MASK         0x0000003f
+#define HC_HVPMSK_X             0x00004000
+#define HC_HVPMSK_Y             0x00002000
+#define HC_HVPMSK_Z             0x00001000
+#define HC_HVPMSK_W             0x00000800
+#define HC_HVPMSK_Cd            0x00000400
+#define HC_HVPMSK_Cs            0x00000200
+#define HC_HVPMSK_S             0x00000100
+#define HC_HVPMSK_T             0x00000080
+
+/* Enable Setting
+ */
+#define HC_SubA_HEnable         0x0000
+#define HC_HenTXEnvMap_MASK     0x00200000
+#define HC_HenVertexCNT_MASK    0x00100000
+#define HC_HenCPUDAZ_MASK       0x00080000
+#define HC_HenDASZWC_MASK       0x00040000
+#define HC_HenFBCull_MASK       0x00020000
+#define HC_HenCW_MASK           0x00010000
+#define HC_HenAA_MASK           0x00008000
+#define HC_HenST_MASK           0x00004000
+#define HC_HenZT_MASK           0x00002000
+#define HC_HenZW_MASK           0x00001000
+#define HC_HenAT_MASK           0x00000800
+#define HC_HenAW_MASK           0x00000400
+#define HC_HenSP_MASK           0x00000200
+#define HC_HenLP_MASK           0x00000100
+#define HC_HenTXCH_MASK         0x00000080
+#define HC_HenTXMP_MASK         0x00000040
+#define HC_HenTXPP_MASK         0x00000020
+#define HC_HenTXTR_MASK         0x00000010
+#define HC_HenCS_MASK           0x00000008
+#define HC_HenFOG_MASK          0x00000004
+#define HC_HenABL_MASK          0x00000002
+#define HC_HenDT_MASK           0x00000001
+
+/* Z Setting
+ */
+#define HC_SubA_HZWBBasL        0x0010
+#define HC_SubA_HZWBBasH        0x0011
+#define HC_SubA_HZWBType        0x0012
+#define HC_SubA_HZBiasL         0x0013
+#define HC_SubA_HZWBend         0x0014
+#define HC_SubA_HZWTMD          0x0015
+#define HC_SubA_HZWCDL          0x0016
+#define HC_SubA_HZWCTAGnum      0x0017
+#define HC_SubA_HZCYNum         0x0018
+#define HC_SubA_HZWCFire        0x0019
+/* HC_SubA_HZWBType
+ */
+#define HC_HZWBType_MASK        0x00800000
+#define HC_HZBiasedWB_MASK      0x00400000
+#define HC_HZONEasFF_MASK       0x00200000
+#define HC_HZOONEasFF_MASK      0x00100000
+#define HC_HZWBFM_MASK          0x00030000
+#define HC_HZWBLoc_MASK         0x0000c000
+#define HC_HZWBPit_MASK         0x00003fff
+#define HC_HZWBFM_16            0x00000000
+#define HC_HZWBFM_32            0x00020000
+#define HC_HZWBFM_24            0x00030000
+#define HC_HZWBLoc_Local        0x00000000
+#define HC_HZWBLoc_SyS          0x00004000
+/* HC_SubA_HZWBend
+ */
+#define HC_HZWBend_MASK         0x00ffe000
+#define HC_HZBiasH_MASK         0x000000ff
+#define HC_HZWBend_SHIFT        10
+/* HC_SubA_HZWTMD
+ */
+#define HC_HZWTMD_MASK          0x00070000
+#define HC_HEBEBias_MASK        0x00007f00
+#define HC_HZNF_MASK            0x000000ff
+#define HC_HZWTMD_NeverPass     0x00000000
+#define HC_HZWTMD_LT            0x00010000
+#define HC_HZWTMD_EQ            0x00020000
+#define HC_HZWTMD_LE            0x00030000
+#define HC_HZWTMD_GT            0x00040000
+#define HC_HZWTMD_NE            0x00050000
+#define HC_HZWTMD_GE            0x00060000
+#define HC_HZWTMD_AllPass       0x00070000
+#define HC_HEBEBias_SHIFT       8
+/* HC_SubA_HZWCDL          0x0016
+ */
+#define HC_HZWCDL_MASK          0x00ffffff
+/* HC_SubA_HZWCTAGnum      0x0017
+ */
+#define HC_HZWCTAGnum_MASK      0x00ff0000
+#define HC_HZWCTAGnum_SHIFT     16
+#define HC_HZWCDH_MASK          0x000000ff
+#define HC_HZWCDH_SHIFT         0
+/* HC_SubA_HZCYNum         0x0018
+ */
+#define HC_HZCYNum_MASK         0x00030000
+#define HC_HZCYNum_SHIFT        16
+#define HC_HZWCQWnum_MASK       0x00003fff
+#define HC_HZWCQWnum_SHIFT      0
+/* HC_SubA_HZWCFire        0x0019
+ */
+#define HC_ZWCFire_MASK         0x00010000
+#define HC_HZWCQWnumLast_MASK   0x00003fff
+#define HC_HZWCQWnumLast_SHIFT  0
+
+/* Stencil Setting
+ */
+#define HC_SubA_HSTREF          0x0023
+#define HC_SubA_HSTMD           0x0024
+/* HC_SubA_HSBFM
+ */
+#define HC_HSBFM_MASK           0x00030000
+#define HC_HSBLoc_MASK          0x0000c000
+#define HC_HSBPit_MASK          0x00003fff
+/* HC_SubA_HSTREF
+ */
+#define HC_HSTREF_MASK          0x00ff0000
+#define HC_HSTOPMSK_MASK        0x0000ff00
+#define HC_HSTBMSK_MASK         0x000000ff
+#define HC_HSTREF_SHIFT         16
+#define HC_HSTOPMSK_SHIFT       8
+/* HC_SubA_HSTMD
+ */
+#define HC_HSTMD_MASK           0x00070000
+#define HC_HSTOPSF_MASK         0x000001c0
+#define HC_HSTOPSPZF_MASK       0x00000038
+#define HC_HSTOPSPZP_MASK       0x00000007
+#define HC_HSTMD_NeverPass      0x00000000
+#define HC_HSTMD_LT             0x00010000
+#define HC_HSTMD_EQ             0x00020000
+#define HC_HSTMD_LE             0x00030000
+#define HC_HSTMD_GT             0x00040000
+#define HC_HSTMD_NE             0x00050000
+#define HC_HSTMD_GE             0x00060000
+#define HC_HSTMD_AllPass        0x00070000
+#define HC_HSTOPSF_KEEP         0x00000000
+#define HC_HSTOPSF_ZERO         0x00000040
+#define HC_HSTOPSF_REPLACE      0x00000080
+#define HC_HSTOPSF_INCRSAT      0x000000c0
+#define HC_HSTOPSF_DECRSAT      0x00000100
+#define HC_HSTOPSF_INVERT       0x00000140
+#define HC_HSTOPSF_INCR         0x00000180
+#define HC_HSTOPSF_DECR         0x000001c0
+#define HC_HSTOPSPZF_KEEP       0x00000000
+#define HC_HSTOPSPZF_ZERO       0x00000008
+#define HC_HSTOPSPZF_REPLACE    0x00000010
+#define HC_HSTOPSPZF_INCRSAT    0x00000018
+#define HC_HSTOPSPZF_DECRSAT    0x00000020
+#define HC_HSTOPSPZF_INVERT     0x00000028
+#define HC_HSTOPSPZF_INCR       0x00000030
+#define HC_HSTOPSPZF_DECR       0x00000038
+#define HC_HSTOPSPZP_KEEP       0x00000000
+#define HC_HSTOPSPZP_ZERO       0x00000001
+#define HC_HSTOPSPZP_REPLACE    0x00000002
+#define HC_HSTOPSPZP_INCRSAT    0x00000003
+#define HC_HSTOPSPZP_DECRSAT    0x00000004
+#define HC_HSTOPSPZP_INVERT     0x00000005
+#define HC_HSTOPSPZP_INCR       0x00000006
+#define HC_HSTOPSPZP_DECR       0x00000007
+
+/* Alpha Setting
+ */
+#define HC_SubA_HABBasL         0x0030
+#define HC_SubA_HABBasH         0x0031
+#define HC_SubA_HABFM           0x0032
+#define HC_SubA_HATMD           0x0033
+#define HC_SubA_HABLCsat        0x0034
+#define HC_SubA_HABLCop         0x0035
+#define HC_SubA_HABLAsat        0x0036
+#define HC_SubA_HABLAop         0x0037
+#define HC_SubA_HABLRCa         0x0038
+#define HC_SubA_HABLRFCa        0x0039
+#define HC_SubA_HABLRCbias      0x003a
+#define HC_SubA_HABLRCb         0x003b
+#define HC_SubA_HABLRFCb        0x003c
+#define HC_SubA_HABLRAa         0x003d
+#define HC_SubA_HABLRAb         0x003e
+/* HC_SubA_HABFM
+ */
+#define HC_HABFM_MASK           0x00030000
+#define HC_HABLoc_MASK          0x0000c000
+#define HC_HABPit_MASK          0x000007ff
+/* HC_SubA_HATMD
+ */
+#define HC_HATMD_MASK           0x00000700
+#define HC_HATREF_MASK          0x000000ff
+#define HC_HATMD_NeverPass      0x00000000
+#define HC_HATMD_LT             0x00000100
+#define HC_HATMD_EQ             0x00000200
+#define HC_HATMD_LE             0x00000300
+#define HC_HATMD_GT             0x00000400
+#define HC_HATMD_NE             0x00000500
+#define HC_HATMD_GE             0x00000600
+#define HC_HATMD_AllPass        0x00000700
+/* HC_SubA_HABLCsat
+ */
+#define HC_HABLCsat_MASK        0x00010000
+#define HC_HABLCa_MASK          0x0000fc00
+#define HC_HABLCa_C_MASK        0x0000c000
+#define HC_HABLCa_OPC_MASK      0x00003c00
+#define HC_HABLFCa_MASK         0x000003f0
+#define HC_HABLFCa_C_MASK       0x00000300
+#define HC_HABLFCa_OPC_MASK     0x000000f0
+#define HC_HABLCbias_MASK       0x0000000f
+#define HC_HABLCbias_C_MASK     0x00000008
+#define HC_HABLCbias_OPC_MASK   0x00000007
+/*-- Define the input color.
+ */
+#define HC_XC_Csrc              0x00000000
+#define HC_XC_Cdst              0x00000001
+#define HC_XC_Asrc              0x00000002
+#define HC_XC_Adst              0x00000003
+#define HC_XC_Fog               0x00000004
+#define HC_XC_HABLRC            0x00000005
+#define HC_XC_minSrcDst         0x00000006
+#define HC_XC_maxSrcDst         0x00000007
+#define HC_XC_mimAsrcInvAdst    0x00000008
+#define HC_XC_OPC               0x00000000
+#define HC_XC_InvOPC            0x00000010
+#define HC_XC_OPCp5             0x00000020
+/*-- Define the input Alpha
+ */
+#define HC_XA_OPA               0x00000000
+#define HC_XA_InvOPA            0x00000010
+#define HC_XA_OPAp5             0x00000020
+#define HC_XA_0                 0x00000000
+#define HC_XA_Asrc              0x00000001
+#define HC_XA_Adst              0x00000002
+#define HC_XA_Fog               0x00000003
+#define HC_XA_minAsrcFog        0x00000004
+#define HC_XA_minAsrcAdst       0x00000005
+#define HC_XA_maxAsrcFog        0x00000006
+#define HC_XA_maxAsrcAdst       0x00000007
+#define HC_XA_HABLRA            0x00000008
+#define HC_XA_minAsrcInvAdst    0x00000008
+#define HC_XA_HABLFRA           0x00000009
+/*--
+ */
+#define HC_HABLCa_OPC           (HC_XC_OPC << 10)
+#define HC_HABLCa_InvOPC        (HC_XC_InvOPC << 10)
+#define HC_HABLCa_OPCp5         (HC_XC_OPCp5 << 10)
+#define HC_HABLCa_Csrc          (HC_XC_Csrc << 10)
+#define HC_HABLCa_Cdst          (HC_XC_Cdst << 10)
+#define HC_HABLCa_Asrc          (HC_XC_Asrc << 10)
+#define HC_HABLCa_Adst          (HC_XC_Adst << 10)
+#define HC_HABLCa_Fog           (HC_XC_Fog << 10)
+#define HC_HABLCa_HABLRCa       (HC_XC_HABLRC << 10)
+#define HC_HABLCa_minSrcDst     (HC_XC_minSrcDst << 10)
+#define HC_HABLCa_maxSrcDst     (HC_XC_maxSrcDst << 10)
+#define HC_HABLFCa_OPC              (HC_XC_OPC << 4)
+#define HC_HABLFCa_InvOPC           (HC_XC_InvOPC << 4)
+#define HC_HABLFCa_OPCp5            (HC_XC_OPCp5 << 4)
+#define HC_HABLFCa_Csrc             (HC_XC_Csrc << 4)
+#define HC_HABLFCa_Cdst             (HC_XC_Cdst << 4)
+#define HC_HABLFCa_Asrc             (HC_XC_Asrc << 4)
+#define HC_HABLFCa_Adst             (HC_XC_Adst << 4)
+#define HC_HABLFCa_Fog              (HC_XC_Fog << 4)
+#define HC_HABLFCa_HABLRCa          (HC_XC_HABLRC << 4)
+#define HC_HABLFCa_minSrcDst        (HC_XC_minSrcDst << 4)
+#define HC_HABLFCa_maxSrcDst        (HC_XC_maxSrcDst << 4)
+#define HC_HABLFCa_mimAsrcInvAdst   (HC_XC_mimAsrcInvAdst << 4)
+#define HC_HABLCbias_HABLRCbias 0x00000000
+#define HC_HABLCbias_Asrc       0x00000001
+#define HC_HABLCbias_Adst       0x00000002
+#define HC_HABLCbias_Fog        0x00000003
+#define HC_HABLCbias_Cin        0x00000004
+/* HC_SubA_HABLCop         0x0035
+ */
+#define HC_HABLdot_MASK         0x00010000
+#define HC_HABLCop_MASK         0x00004000
+#define HC_HABLCb_MASK          0x00003f00
+#define HC_HABLCb_C_MASK        0x00003000
+#define HC_HABLCb_OPC_MASK      0x00000f00
+#define HC_HABLFCb_MASK         0x000000fc
+#define HC_HABLFCb_C_MASK       0x000000c0
+#define HC_HABLFCb_OPC_MASK     0x0000003c
+#define HC_HABLCshift_MASK      0x00000003
+#define HC_HABLCb_OPC           (HC_XC_OPC << 8)
+#define HC_HABLCb_InvOPC        (HC_XC_InvOPC << 8)
+#define HC_HABLCb_OPCp5         (HC_XC_OPCp5 << 8)
+#define HC_HABLCb_Csrc          (HC_XC_Csrc << 8)
+#define HC_HABLCb_Cdst          (HC_XC_Cdst << 8)
+#define HC_HABLCb_Asrc          (HC_XC_Asrc << 8)
+#define HC_HABLCb_Adst          (HC_XC_Adst << 8)
+#define HC_HABLCb_Fog           (HC_XC_Fog << 8)
+#define HC_HABLCb_HABLRCa       (HC_XC_HABLRC << 8)
+#define HC_HABLCb_minSrcDst     (HC_XC_minSrcDst << 8)
+#define HC_HABLCb_maxSrcDst     (HC_XC_maxSrcDst << 8)
+#define HC_HABLFCb_OPC              (HC_XC_OPC << 2)
+#define HC_HABLFCb_InvOPC           (HC_XC_InvOPC << 2)
+#define HC_HABLFCb_OPCp5            (HC_XC_OPCp5 << 2)
+#define HC_HABLFCb_Csrc             (HC_XC_Csrc << 2)
+#define HC_HABLFCb_Cdst             (HC_XC_Cdst << 2)
+#define HC_HABLFCb_Asrc             (HC_XC_Asrc << 2)
+#define HC_HABLFCb_Adst             (HC_XC_Adst << 2)
+#define HC_HABLFCb_Fog              (HC_XC_Fog << 2)
+#define HC_HABLFCb_HABLRCb          (HC_XC_HABLRC << 2)
+#define HC_HABLFCb_minSrcDst        (HC_XC_minSrcDst << 2)
+#define HC_HABLFCb_maxSrcDst        (HC_XC_maxSrcDst << 2)
+#define HC_HABLFCb_mimAsrcInvAdst   (HC_XC_mimAsrcInvAdst << 2)
+/* HC_SubA_HABLAsat        0x0036
+ */
+#define HC_HABLAsat_MASK        0x00010000
+#define HC_HABLAa_MASK          0x0000fc00
+#define HC_HABLAa_A_MASK        0x0000c000
+#define HC_HABLAa_OPA_MASK      0x00003c00
+#define HC_HABLFAa_MASK         0x000003f0
+#define HC_HABLFAa_A_MASK       0x00000300
+#define HC_HABLFAa_OPA_MASK     0x000000f0
+#define HC_HABLAbias_MASK       0x0000000f
+#define HC_HABLAbias_A_MASK     0x00000008
+#define HC_HABLAbias_OPA_MASK   0x00000007
+#define HC_HABLAa_OPA           (HC_XA_OPA << 10)
+#define HC_HABLAa_InvOPA        (HC_XA_InvOPA << 10)
+#define HC_HABLAa_OPAp5         (HC_XA_OPAp5 << 10)
+#define HC_HABLAa_0             (HC_XA_0 << 10)
+#define HC_HABLAa_Asrc          (HC_XA_Asrc << 10)
+#define HC_HABLAa_Adst          (HC_XA_Adst << 10)
+#define HC_HABLAa_Fog           (HC_XA_Fog << 10)
+#define HC_HABLAa_minAsrcFog    (HC_XA_minAsrcFog << 10)
+#define HC_HABLAa_minAsrcAdst   (HC_XA_minAsrcAdst << 10)
+#define HC_HABLAa_maxAsrcFog    (HC_XA_maxAsrcFog << 10)
+#define HC_HABLAa_maxAsrcAdst   (HC_XA_maxAsrcAdst << 10)
+#define HC_HABLAa_HABLRA        (HC_XA_HABLRA << 10)
+#define HC_HABLFAa_OPA          (HC_XA_OPA << 4)
+#define HC_HABLFAa_InvOPA       (HC_XA_InvOPA << 4)
+#define HC_HABLFAa_OPAp5        (HC_XA_OPAp5 << 4)
+#define HC_HABLFAa_0            (HC_XA_0 << 4)
+#define HC_HABLFAa_Asrc         (HC_XA_Asrc << 4)
+#define HC_HABLFAa_Adst         (HC_XA_Adst << 4)
+#define HC_HABLFAa_Fog          (HC_XA_Fog << 4)
+#define HC_HABLFAa_minAsrcFog   (HC_XA_minAsrcFog << 4)
+#define HC_HABLFAa_minAsrcAdst  (HC_XA_minAsrcAdst << 4)
+#define HC_HABLFAa_maxAsrcFog   (HC_XA_maxAsrcFog << 4)
+#define HC_HABLFAa_maxAsrcAdst  (HC_XA_maxAsrcAdst << 4)
+#define HC_HABLFAa_minAsrcInvAdst   (HC_XA_minAsrcInvAdst << 4)
+#define HC_HABLFAa_HABLFRA          (HC_XA_HABLFRA << 4)
+#define HC_HABLAbias_HABLRAbias 0x00000000
+#define HC_HABLAbias_Asrc       0x00000001
+#define HC_HABLAbias_Adst       0x00000002
+#define HC_HABLAbias_Fog        0x00000003
+#define HC_HABLAbias_Aaa        0x00000004
+/* HC_SubA_HABLAop         0x0037
+ */
+#define HC_HABLAop_MASK         0x00004000
+#define HC_HABLAb_MASK          0x00003f00
+#define HC_HABLAb_OPA_MASK      0x00000f00
+#define HC_HABLFAb_MASK         0x000000fc
+#define HC_HABLFAb_OPA_MASK     0x0000003c
+#define HC_HABLAshift_MASK      0x00000003
+#define HC_HABLAb_OPA           (HC_XA_OPA << 8)
+#define HC_HABLAb_InvOPA        (HC_XA_InvOPA << 8)
+#define HC_HABLAb_OPAp5         (HC_XA_OPAp5 << 8)
+#define HC_HABLAb_0             (HC_XA_0 << 8)
+#define HC_HABLAb_Asrc          (HC_XA_Asrc << 8)
+#define HC_HABLAb_Adst          (HC_XA_Adst << 8)
+#define HC_HABLAb_Fog           (HC_XA_Fog << 8)
+#define HC_HABLAb_minAsrcFog    (HC_XA_minAsrcFog << 8)
+#define HC_HABLAb_minAsrcAdst   (HC_XA_minAsrcAdst << 8)
+#define HC_HABLAb_maxAsrcFog    (HC_XA_maxAsrcFog << 8)
+#define HC_HABLAb_maxAsrcAdst   (HC_XA_maxAsrcAdst << 8)
+#define HC_HABLAb_HABLRA        (HC_XA_HABLRA << 8)
+#define HC_HABLFAb_OPA          (HC_XA_OPA << 2)
+#define HC_HABLFAb_InvOPA       (HC_XA_InvOPA << 2)
+#define HC_HABLFAb_OPAp5        (HC_XA_OPAp5 << 2)
+#define HC_HABLFAb_0            (HC_XA_0 << 2)
+#define HC_HABLFAb_Asrc         (HC_XA_Asrc << 2)
+#define HC_HABLFAb_Adst         (HC_XA_Adst << 2)
+#define HC_HABLFAb_Fog          (HC_XA_Fog << 2)
+#define HC_HABLFAb_minAsrcFog   (HC_XA_minAsrcFog << 2)
+#define HC_HABLFAb_minAsrcAdst  (HC_XA_minAsrcAdst << 2)
+#define HC_HABLFAb_maxAsrcFog   (HC_XA_maxAsrcFog << 2)
+#define HC_HABLFAb_maxAsrcAdst  (HC_XA_maxAsrcAdst << 2)
+#define HC_HABLFAb_minAsrcInvAdst   (HC_XA_minAsrcInvAdst << 2)
+#define HC_HABLFAb_HABLFRA          (HC_XA_HABLFRA << 2)
+/* HC_SubA_HABLRAa         0x003d
+ */
+#define HC_HABLRAa_MASK         0x00ff0000
+#define HC_HABLRFAa_MASK        0x0000ff00
+#define HC_HABLRAbias_MASK      0x000000ff
+#define HC_HABLRAa_SHIFT        16
+#define HC_HABLRFAa_SHIFT       8
+/* HC_SubA_HABLRAb         0x003e
+ */
+#define HC_HABLRAb_MASK         0x0000ff00
+#define HC_HABLRFAb_MASK        0x000000ff
+#define HC_HABLRAb_SHIFT        8
+
+/* Destination Setting
+ */
+#define HC_SubA_HDBBasL         0x0040
+#define HC_SubA_HDBBasH         0x0041
+#define HC_SubA_HDBFM           0x0042
+#define HC_SubA_HFBBMSKL        0x0043
+#define HC_SubA_HROP            0x0044
+/* HC_SubA_HDBFM           0x0042
+ */
+#define HC_HDBFM_MASK           0x001f0000
+#define HC_HDBLoc_MASK          0x0000c000
+#define HC_HDBPit_MASK          0x00003fff
+#define HC_HDBFM_RGB555         0x00000000
+#define HC_HDBFM_RGB565         0x00010000
+#define HC_HDBFM_ARGB4444       0x00020000
+#define HC_HDBFM_ARGB1555       0x00030000
+#define HC_HDBFM_BGR555         0x00040000
+#define HC_HDBFM_BGR565         0x00050000
+#define HC_HDBFM_ABGR4444       0x00060000
+#define HC_HDBFM_ABGR1555       0x00070000
+#define HC_HDBFM_ARGB0888       0x00080000
+#define HC_HDBFM_ARGB8888       0x00090000
+#define HC_HDBFM_ABGR0888       0x000a0000
+#define HC_HDBFM_ABGR8888       0x000b0000
+#define HC_HDBLoc_Local         0x00000000
+#define HC_HDBLoc_Sys           0x00004000
+/* HC_SubA_HROP            0x0044
+ */
+#define HC_HROP_MASK            0x00000f00
+#define HC_HFBBMSKH_MASK        0x000000ff
+#define HC_HROP_BLACK           0x00000000
+#define HC_HROP_DPon            0x00000100
+#define HC_HROP_DPna            0x00000200
+#define HC_HROP_Pn              0x00000300
+#define HC_HROP_PDna            0x00000400
+#define HC_HROP_Dn              0x00000500
+#define HC_HROP_DPx             0x00000600
+#define HC_HROP_DPan            0x00000700
+#define HC_HROP_DPa             0x00000800
+#define HC_HROP_DPxn            0x00000900
+#define HC_HROP_D               0x00000a00
+#define HC_HROP_DPno            0x00000b00
+#define HC_HROP_P               0x00000c00
+#define HC_HROP_PDno            0x00000d00
+#define HC_HROP_DPo             0x00000e00
+#define HC_HROP_WHITE           0x00000f00
+
+/* Fog Setting
+ */
+#define HC_SubA_HFogLF          0x0050
+#define HC_SubA_HFogCL          0x0051
+#define HC_SubA_HFogCH          0x0052
+#define HC_SubA_HFogStL         0x0053
+#define HC_SubA_HFogStH         0x0054
+#define HC_SubA_HFogOOdMF       0x0055
+#define HC_SubA_HFogOOdEF       0x0056
+#define HC_SubA_HFogEndL        0x0057
+#define HC_SubA_HFogDenst       0x0058
+/* HC_SubA_FogLF           0x0050
+ */
+#define HC_FogLF_MASK           0x00000010
+#define HC_FogEq_MASK           0x00000008
+#define HC_FogMD_MASK           0x00000007
+#define HC_FogMD_LocalFog        0x00000000
+#define HC_FogMD_LinearFog       0x00000002
+#define HC_FogMD_ExponentialFog  0x00000004
+#define HC_FogMD_Exponential2Fog 0x00000005
+/* #define HC_FogMD_FogTable       0x00000003 */
+
+/* HC_SubA_HFogDenst        0x0058
+ */
+#define HC_FogDenst_MASK        0x001fff00
+#define HC_FogEndL_MASK         0x000000ff
+
+/* Texture subtype definitions
+ */
+#define HC_SubType_Tex0         0x00000000
+#define HC_SubType_Tex1         0x00000001
+#define HC_SubType_TexGeneral   0x000000fe
+
+/* Attribute of texture n
+ */
+#define HC_SubA_HTXnL0BasL      0x0000
+#define HC_SubA_HTXnL1BasL      0x0001
+#define HC_SubA_HTXnL2BasL      0x0002
+#define HC_SubA_HTXnL3BasL      0x0003
+#define HC_SubA_HTXnL4BasL      0x0004
+#define HC_SubA_HTXnL5BasL      0x0005
+#define HC_SubA_HTXnL6BasL      0x0006
+#define HC_SubA_HTXnL7BasL      0x0007
+#define HC_SubA_HTXnL8BasL      0x0008
+#define HC_SubA_HTXnL9BasL      0x0009
+#define HC_SubA_HTXnLaBasL      0x000a
+#define HC_SubA_HTXnLbBasL      0x000b
+#define HC_SubA_HTXnLcBasL      0x000c
+#define HC_SubA_HTXnLdBasL      0x000d
+#define HC_SubA_HTXnLeBasL      0x000e
+#define HC_SubA_HTXnLfBasL      0x000f
+#define HC_SubA_HTXnL10BasL     0x0010
+#define HC_SubA_HTXnL11BasL     0x0011
+#define HC_SubA_HTXnL012BasH    0x0020
+#define HC_SubA_HTXnL345BasH    0x0021
+#define HC_SubA_HTXnL678BasH    0x0022
+#define HC_SubA_HTXnL9abBasH    0x0023
+#define HC_SubA_HTXnLcdeBasH    0x0024
+#define HC_SubA_HTXnLf1011BasH  0x0025
+#define HC_SubA_HTXnL0Pit       0x002b
+#define HC_SubA_HTXnL1Pit       0x002c
+#define HC_SubA_HTXnL2Pit       0x002d
+#define HC_SubA_HTXnL3Pit       0x002e
+#define HC_SubA_HTXnL4Pit       0x002f
+#define HC_SubA_HTXnL5Pit       0x0030
+#define HC_SubA_HTXnL6Pit       0x0031
+#define HC_SubA_HTXnL7Pit       0x0032
+#define HC_SubA_HTXnL8Pit       0x0033
+#define HC_SubA_HTXnL9Pit       0x0034
+#define HC_SubA_HTXnLaPit       0x0035
+#define HC_SubA_HTXnLbPit       0x0036
+#define HC_SubA_HTXnLcPit       0x0037
+#define HC_SubA_HTXnLdPit       0x0038
+#define HC_SubA_HTXnLePit       0x0039
+#define HC_SubA_HTXnLfPit       0x003a
+#define HC_SubA_HTXnL10Pit      0x003b
+#define HC_SubA_HTXnL11Pit      0x003c
+#define HC_SubA_HTXnL0_5WE      0x004b
+#define HC_SubA_HTXnL6_bWE      0x004c
+#define HC_SubA_HTXnLc_11WE     0x004d
+#define HC_SubA_HTXnL0_5HE      0x0051
+#define HC_SubA_HTXnL6_bHE      0x0052
+#define HC_SubA_HTXnLc_11HE     0x0053
+#define HC_SubA_HTXnL0OS        0x0077
+#define HC_SubA_HTXnTB          0x0078
+#define HC_SubA_HTXnMPMD        0x0079
+#define HC_SubA_HTXnCLODu       0x007a
+#define HC_SubA_HTXnFM          0x007b
+#define HC_SubA_HTXnTRCH        0x007c
+#define HC_SubA_HTXnTRCL        0x007d
+#define HC_SubA_HTXnTBC         0x007e
+#define HC_SubA_HTXnTRAH        0x007f
+#define HC_SubA_HTXnTBLCsat     0x0080
+#define HC_SubA_HTXnTBLCop      0x0081
+#define HC_SubA_HTXnTBLMPfog    0x0082
+#define HC_SubA_HTXnTBLAsat     0x0083
+#define HC_SubA_HTXnTBLRCa      0x0085
+#define HC_SubA_HTXnTBLRCb      0x0086
+#define HC_SubA_HTXnTBLRCc      0x0087
+#define HC_SubA_HTXnTBLRCbias   0x0088
+#define HC_SubA_HTXnTBLRAa      0x0089
+#define HC_SubA_HTXnTBLRFog     0x008a
+#define HC_SubA_HTXnBumpM00     0x0090
+#define HC_SubA_HTXnBumpM01     0x0091
+#define HC_SubA_HTXnBumpM10     0x0092
+#define HC_SubA_HTXnBumpM11     0x0093
+#define HC_SubA_HTXnLScale      0x0094
+#define HC_SubA_HTXSMD          0x0000
+/* HC_SubA_HTXnL012BasH    0x0020
+ */
+#define HC_HTXnL0BasH_MASK      0x000000ff
+#define HC_HTXnL1BasH_MASK      0x0000ff00
+#define HC_HTXnL2BasH_MASK      0x00ff0000
+#define HC_HTXnL1BasH_SHIFT     8
+#define HC_HTXnL2BasH_SHIFT     16
+/* HC_SubA_HTXnL345BasH    0x0021
+ */
+#define HC_HTXnL3BasH_MASK      0x000000ff
+#define HC_HTXnL4BasH_MASK      0x0000ff00
+#define HC_HTXnL5BasH_MASK      0x00ff0000
+#define HC_HTXnL4BasH_SHIFT     8
+#define HC_HTXnL5BasH_SHIFT     16
+/* HC_SubA_HTXnL678BasH    0x0022
+ */
+#define HC_HTXnL6BasH_MASK      0x000000ff
+#define HC_HTXnL7BasH_MASK      0x0000ff00
+#define HC_HTXnL8BasH_MASK      0x00ff0000
+#define HC_HTXnL7BasH_SHIFT     8
+#define HC_HTXnL8BasH_SHIFT     16
+/* HC_SubA_HTXnL9abBasH    0x0023
+ */
+#define HC_HTXnL9BasH_MASK      0x000000ff
+#define HC_HTXnLaBasH_MASK      0x0000ff00
+#define HC_HTXnLbBasH_MASK      0x00ff0000
+#define HC_HTXnLaBasH_SHIFT     8
+#define HC_HTXnLbBasH_SHIFT     16
+/* HC_SubA_HTXnLcdeBasH    0x0024
+ */
+#define HC_HTXnLcBasH_MASK      0x000000ff
+#define HC_HTXnLdBasH_MASK      0x0000ff00
+#define HC_HTXnLeBasH_MASK      0x00ff0000
+#define HC_HTXnLdBasH_SHIFT     8
+#define HC_HTXnLeBasH_SHIFT     16
+/* HC_SubA_HTXnLf1011BasH  0x0025
+ */
+#define HC_HTXnLfBasH_MASK      0x000000ff
+#define HC_HTXnL10BasH_MASK      0x0000ff00
+#define HC_HTXnL11BasH_MASK      0x00ff0000
+#define HC_HTXnL10BasH_SHIFT     8
+#define HC_HTXnL11BasH_SHIFT     16
+/* HC_SubA_HTXnL0Pit       0x002b
+ */
+#define HC_HTXnLnPit_MASK       0x00003fff
+#define HC_HTXnEnPit_MASK       0x00080000
+#define HC_HTXnLnPitE_MASK      0x00f00000
+#define HC_HTXnLnPitE_SHIFT     20
+/* HC_SubA_HTXnL0_5WE      0x004b
+ */
+#define HC_HTXnL0WE_MASK        0x0000000f
+#define HC_HTXnL1WE_MASK        0x000000f0
+#define HC_HTXnL2WE_MASK        0x00000f00
+#define HC_HTXnL3WE_MASK        0x0000f000
+#define HC_HTXnL4WE_MASK        0x000f0000
+#define HC_HTXnL5WE_MASK        0x00f00000
+#define HC_HTXnL1WE_SHIFT       4
+#define HC_HTXnL2WE_SHIFT       8
+#define HC_HTXnL3WE_SHIFT       12
+#define HC_HTXnL4WE_SHIFT       16
+#define HC_HTXnL5WE_SHIFT       20
+/* HC_SubA_HTXnL6_bWE      0x004c
+ */
+#define HC_HTXnL6WE_MASK        0x0000000f
+#define HC_HTXnL7WE_MASK        0x000000f0
+#define HC_HTXnL8WE_MASK        0x00000f00
+#define HC_HTXnL9WE_MASK        0x0000f000
+#define HC_HTXnLaWE_MASK        0x000f0000
+#define HC_HTXnLbWE_MASK        0x00f00000
+#define HC_HTXnL7WE_SHIFT       4
+#define HC_HTXnL8WE_SHIFT       8
+#define HC_HTXnL9WE_SHIFT       12
+#define HC_HTXnLaWE_SHIFT       16
+#define HC_HTXnLbWE_SHIFT       20
+/* HC_SubA_HTXnLc_11WE      0x004d
+ */
+#define HC_HTXnLcWE_MASK        0x0000000f
+#define HC_HTXnLdWE_MASK        0x000000f0
+#define HC_HTXnLeWE_MASK        0x00000f00
+#define HC_HTXnLfWE_MASK        0x0000f000
+#define HC_HTXnL10WE_MASK       0x000f0000
+#define HC_HTXnL11WE_MASK       0x00f00000
+#define HC_HTXnLdWE_SHIFT       4
+#define HC_HTXnLeWE_SHIFT       8
+#define HC_HTXnLfWE_SHIFT       12
+#define HC_HTXnL10WE_SHIFT      16
+#define HC_HTXnL11WE_SHIFT      20
+/* HC_SubA_HTXnL0_5HE      0x0051
+ */
+#define HC_HTXnL0HE_MASK        0x0000000f
+#define HC_HTXnL1HE_MASK        0x000000f0
+#define HC_HTXnL2HE_MASK        0x00000f00
+#define HC_HTXnL3HE_MASK        0x0000f000
+#define HC_HTXnL4HE_MASK        0x000f0000
+#define HC_HTXnL5HE_MASK        0x00f00000
+#define HC_HTXnL1HE_SHIFT       4
+#define HC_HTXnL2HE_SHIFT       8
+#define HC_HTXnL3HE_SHIFT       12
+#define HC_HTXnL4HE_SHIFT       16
+#define HC_HTXnL5HE_SHIFT       20
+/* HC_SubA_HTXnL6_bHE      0x0052
+ */
+#define HC_HTXnL6HE_MASK        0x0000000f
+#define HC_HTXnL7HE_MASK        0x000000f0
+#define HC_HTXnL8HE_MASK        0x00000f00
+#define HC_HTXnL9HE_MASK        0x0000f000
+#define HC_HTXnLaHE_MASK        0x000f0000
+#define HC_HTXnLbHE_MASK        0x00f00000
+#define HC_HTXnL7HE_SHIFT       4
+#define HC_HTXnL8HE_SHIFT       8
+#define HC_HTXnL9HE_SHIFT       12
+#define HC_HTXnLaHE_SHIFT       16
+#define HC_HTXnLbHE_SHIFT       20
+/* HC_SubA_HTXnLc_11HE      0x0053
+ */
+#define HC_HTXnLcHE_MASK        0x0000000f
+#define HC_HTXnLdHE_MASK        0x000000f0
+#define HC_HTXnLeHE_MASK        0x00000f00
+#define HC_HTXnLfHE_MASK        0x0000f000
+#define HC_HTXnL10HE_MASK       0x000f0000
+#define HC_HTXnL11HE_MASK       0x00f00000
+#define HC_HTXnLdHE_SHIFT       4
+#define HC_HTXnLeHE_SHIFT       8
+#define HC_HTXnLfHE_SHIFT       12
+#define HC_HTXnL10HE_SHIFT      16
+#define HC_HTXnL11HE_SHIFT      20
+/* HC_SubA_HTXnL0OS        0x0077
+ */
+#define HC_HTXnL0OS_MASK        0x003ff000
+#define HC_HTXnLVmax_MASK       0x00000fc0
+#define HC_HTXnLVmin_MASK       0x0000003f
+#define HC_HTXnL0OS_SHIFT       12
+#define HC_HTXnLVmax_SHIFT      6
+/* HC_SubA_HTXnTB          0x0078
+ */
+#define HC_HTXnTB_MASK          0x00f00000
+#define HC_HTXnFLSe_MASK        0x0000e000
+#define HC_HTXnFLSs_MASK        0x00001c00
+#define HC_HTXnFLTe_MASK        0x00000380
+#define HC_HTXnFLTs_MASK        0x00000070
+#define HC_HTXnFLDs_MASK        0x0000000f
+#define HC_HTXnTB_NoTB          0x00000000
+#define HC_HTXnTB_TBC_S         0x00100000
+#define HC_HTXnTB_TBC_T         0x00200000
+#define HC_HTXnTB_TB_S          0x00400000
+#define HC_HTXnTB_TB_T          0x00800000
+#define HC_HTXnFLSe_Nearest     0x00000000
+#define HC_HTXnFLSe_Linear      0x00002000
+#define HC_HTXnFLSe_NonLinear   0x00004000
+#define HC_HTXnFLSe_Sharp       0x00008000
+#define HC_HTXnFLSe_Flat_Gaussian_Cubic 0x0000c000
+#define HC_HTXnFLSs_Nearest     0x00000000
+#define HC_HTXnFLSs_Linear      0x00000400
+#define HC_HTXnFLSs_NonLinear   0x00000800
+#define HC_HTXnFLSs_Flat_Gaussian_Cubic 0x00001800
+#define HC_HTXnFLTe_Nearest     0x00000000
+#define HC_HTXnFLTe_Linear      0x00000080
+#define HC_HTXnFLTe_NonLinear   0x00000100
+#define HC_HTXnFLTe_Sharp       0x00000180
+#define HC_HTXnFLTe_Flat_Gaussian_Cubic 0x00000300
+#define HC_HTXnFLTs_Nearest     0x00000000
+#define HC_HTXnFLTs_Linear      0x00000010
+#define HC_HTXnFLTs_NonLinear   0x00000020
+#define HC_HTXnFLTs_Flat_Gaussian_Cubic 0x00000060
+#define HC_HTXnFLDs_Tex0        0x00000000
+#define HC_HTXnFLDs_Nearest     0x00000001
+#define HC_HTXnFLDs_Linear      0x00000002
+#define HC_HTXnFLDs_NonLinear   0x00000003
+#define HC_HTXnFLDs_Dither      0x00000004
+#define HC_HTXnFLDs_ConstLOD    0x00000005
+#define HC_HTXnFLDs_Ani         0x00000006
+#define HC_HTXnFLDs_AniDither   0x00000007
+/* HC_SubA_HTXnMPMD        0x0079
+ */
+#define HC_HTXnMPMD_SMASK       0x00070000
+#define HC_HTXnMPMD_TMASK       0x00380000
+#define HC_HTXnLODDTf_MASK      0x00000007
+#define HC_HTXnXY2ST_MASK       0x00000008
+#define HC_HTXnMPMD_Tsingle     0x00000000
+#define HC_HTXnMPMD_Tclamp      0x00080000
+#define HC_HTXnMPMD_Trepeat     0x00100000
+#define HC_HTXnMPMD_Tmirror     0x00180000
+#define HC_HTXnMPMD_Twrap       0x00200000
+#define HC_HTXnMPMD_Ssingle     0x00000000
+#define HC_HTXnMPMD_Sclamp      0x00010000
+#define HC_HTXnMPMD_Srepeat     0x00020000
+#define HC_HTXnMPMD_Smirror     0x00030000
+#define HC_HTXnMPMD_Swrap       0x00040000
+/* HC_SubA_HTXnCLODu       0x007a
+ */
+#define HC_HTXnCLODu_MASK       0x000ffc00
+#define HC_HTXnCLODd_MASK       0x000003ff
+#define HC_HTXnCLODu_SHIFT      10
+/* HC_SubA_HTXnFM          0x007b
+ */
+#define HC_HTXnFM_MASK          0x00ff0000
+#define HC_HTXnLoc_MASK         0x00000003
+#define HC_HTXnFM_INDEX         0x00000000
+#define HC_HTXnFM_Intensity     0x00080000
+#define HC_HTXnFM_Lum           0x00100000
+#define HC_HTXnFM_Alpha         0x00180000
+#define HC_HTXnFM_DX            0x00280000
+#define HC_HTXnFM_ARGB16        0x00880000
+#define HC_HTXnFM_ARGB32        0x00980000
+#define HC_HTXnFM_ABGR16        0x00a80000
+#define HC_HTXnFM_ABGR32        0x00b80000
+#define HC_HTXnFM_RGBA16        0x00c80000
+#define HC_HTXnFM_RGBA32        0x00d80000
+#define HC_HTXnFM_BGRA16        0x00e80000
+#define HC_HTXnFM_BGRA32        0x00f80000
+#define HC_HTXnFM_BUMPMAP       0x00380000
+#define HC_HTXnFM_Index1        (HC_HTXnFM_INDEX     | 0x00000000)
+#define HC_HTXnFM_Index2        (HC_HTXnFM_INDEX     | 0x00010000)
+#define HC_HTXnFM_Index4        (HC_HTXnFM_INDEX     | 0x00020000)
+#define HC_HTXnFM_Index8        (HC_HTXnFM_INDEX     | 0x00030000)
+#define HC_HTXnFM_T1            (HC_HTXnFM_Intensity | 0x00000000)
+#define HC_HTXnFM_T2            (HC_HTXnFM_Intensity | 0x00010000)
+#define HC_HTXnFM_T4            (HC_HTXnFM_Intensity | 0x00020000)
+#define HC_HTXnFM_T8            (HC_HTXnFM_Intensity | 0x00030000)
+#define HC_HTXnFM_L1            (HC_HTXnFM_Lum       | 0x00000000)
+#define HC_HTXnFM_L2            (HC_HTXnFM_Lum       | 0x00010000)
+#define HC_HTXnFM_L4            (HC_HTXnFM_Lum       | 0x00020000)
+#define HC_HTXnFM_L8            (HC_HTXnFM_Lum       | 0x00030000)
+#define HC_HTXnFM_AL44          (HC_HTXnFM_Lum       | 0x00040000)
+#define HC_HTXnFM_AL88          (HC_HTXnFM_Lum       | 0x00050000)
+#define HC_HTXnFM_A1            (HC_HTXnFM_Alpha     | 0x00000000)
+#define HC_HTXnFM_A2            (HC_HTXnFM_Alpha     | 0x00010000)
+#define HC_HTXnFM_A4            (HC_HTXnFM_Alpha     | 0x00020000)
+#define HC_HTXnFM_A8            (HC_HTXnFM_Alpha     | 0x00030000)
+#define HC_HTXnFM_DX1           (HC_HTXnFM_DX        | 0x00010000)
+#define HC_HTXnFM_DX23          (HC_HTXnFM_DX        | 0x00020000)
+#define HC_HTXnFM_DX45          (HC_HTXnFM_DX        | 0x00030000)
+#define HC_HTXnFM_RGB555        (HC_HTXnFM_ARGB16    | 0x00000000)
+#define HC_HTXnFM_RGB565        (HC_HTXnFM_ARGB16    | 0x00010000)
+#define HC_HTXnFM_ARGB1555      (HC_HTXnFM_ARGB16    | 0x00020000)
+#define HC_HTXnFM_ARGB4444      (HC_HTXnFM_ARGB16    | 0x00030000)
+#define HC_HTXnFM_ARGB0888      (HC_HTXnFM_ARGB32    | 0x00000000)
+#define HC_HTXnFM_ARGB8888      (HC_HTXnFM_ARGB32    | 0x00010000)
+#define HC_HTXnFM_BGR555        (HC_HTXnFM_ABGR16    | 0x00000000)
+#define HC_HTXnFM_BGR565        (HC_HTXnFM_ABGR16    | 0x00010000)
+#define HC_HTXnFM_ABGR1555      (HC_HTXnFM_ABGR16    | 0x00020000)
+#define HC_HTXnFM_ABGR4444      (HC_HTXnFM_ABGR16    | 0x00030000)
+#define HC_HTXnFM_ABGR0888      (HC_HTXnFM_ABGR32    | 0x00000000)
+#define HC_HTXnFM_ABGR8888      (HC_HTXnFM_ABGR32    | 0x00010000)
+#define HC_HTXnFM_RGBA5550      (HC_HTXnFM_RGBA16    | 0x00000000)
+#define HC_HTXnFM_RGBA5551      (HC_HTXnFM_RGBA16    | 0x00020000)
+#define HC_HTXnFM_RGBA4444      (HC_HTXnFM_RGBA16    | 0x00030000)
+#define HC_HTXnFM_RGBA8880      (HC_HTXnFM_RGBA32    | 0x00000000)
+#define HC_HTXnFM_RGBA8888      (HC_HTXnFM_RGBA32    | 0x00010000)
+#define HC_HTXnFM_BGRA5550      (HC_HTXnFM_BGRA16    | 0x00000000)
+#define HC_HTXnFM_BGRA5551      (HC_HTXnFM_BGRA16    | 0x00020000)
+#define HC_HTXnFM_BGRA4444      (HC_HTXnFM_BGRA16    | 0x00030000)
+#define HC_HTXnFM_BGRA8880      (HC_HTXnFM_BGRA32    | 0x00000000)
+#define HC_HTXnFM_BGRA8888      (HC_HTXnFM_BGRA32    | 0x00010000)
+#define HC_HTXnFM_VU88          (HC_HTXnFM_BUMPMAP   | 0x00000000)
+#define HC_HTXnFM_LVU655        (HC_HTXnFM_BUMPMAP   | 0x00010000)
+#define HC_HTXnFM_LVU888        (HC_HTXnFM_BUMPMAP   | 0x00020000)
+#define HC_HTXnLoc_Local        0x00000000
+#define HC_HTXnLoc_Sys          0x00000002
+#define HC_HTXnLoc_AGP          0x00000003
+/* HC_SubA_HTXnTRAH        0x007f
+ */
+#define HC_HTXnTRAH_MASK        0x00ff0000
+#define HC_HTXnTRAL_MASK        0x0000ff00
+#define HC_HTXnTBA_MASK         0x000000ff
+#define HC_HTXnTRAH_SHIFT       16
+#define HC_HTXnTRAL_SHIFT       8
+/* HC_SubA_HTXnTBLCsat     0x0080
+ *-- Define the input texture.
+ */
+#define HC_XTC_TOPC             0x00000000
+#define HC_XTC_InvTOPC          0x00000010
+#define HC_XTC_TOPCp5           0x00000020
+#define HC_XTC_Cbias            0x00000000
+#define HC_XTC_InvCbias         0x00000010
+#define HC_XTC_0                0x00000000
+#define HC_XTC_Dif              0x00000001
+#define HC_XTC_Spec             0x00000002
+#define HC_XTC_Tex              0x00000003
+#define HC_XTC_Cur              0x00000004
+#define HC_XTC_Adif             0x00000005
+#define HC_XTC_Fog              0x00000006
+#define HC_XTC_Atex             0x00000007
+#define HC_XTC_Acur             0x00000008
+#define HC_XTC_HTXnTBLRC        0x00000009
+#define HC_XTC_Ctexnext         0x0000000a
+/*--
+ */
+#define HC_HTXnTBLCsat_MASK     0x00800000
+#define HC_HTXnTBLCa_MASK       0x000fc000
+#define HC_HTXnTBLCb_MASK       0x00001f80
+#define HC_HTXnTBLCc_MASK       0x0000003f
+#define HC_HTXnTBLCa_TOPC       (HC_XTC_TOPC << 14)
+#define HC_HTXnTBLCa_InvTOPC    (HC_XTC_InvTOPC << 14)
+#define HC_HTXnTBLCa_TOPCp5     (HC_XTC_TOPCp5 << 14)
+#define HC_HTXnTBLCa_0          (HC_XTC_0 << 14)
+#define HC_HTXnTBLCa_Dif        (HC_XTC_Dif << 14)
+#define HC_HTXnTBLCa_Spec       (HC_XTC_Spec << 14)
+#define HC_HTXnTBLCa_Tex        (HC_XTC_Tex << 14)
+#define HC_HTXnTBLCa_Cur        (HC_XTC_Cur << 14)
+#define HC_HTXnTBLCa_Adif       (HC_XTC_Adif << 14)
+#define HC_HTXnTBLCa_Fog        (HC_XTC_Fog << 14)
+#define HC_HTXnTBLCa_Atex       (HC_XTC_Atex << 14)
+#define HC_HTXnTBLCa_Acur       (HC_XTC_Acur << 14)
+#define HC_HTXnTBLCa_HTXnTBLRC  (HC_XTC_HTXnTBLRC << 14)
+#define HC_HTXnTBLCa_Ctexnext   (HC_XTC_Ctexnext << 14)
+#define HC_HTXnTBLCb_TOPC       (HC_XTC_TOPC << 7)
+#define HC_HTXnTBLCb_InvTOPC    (HC_XTC_InvTOPC << 7)
+#define HC_HTXnTBLCb_TOPCp5     (HC_XTC_TOPCp5 << 7)
+#define HC_HTXnTBLCb_0          (HC_XTC_0 << 7)
+#define HC_HTXnTBLCb_Dif        (HC_XTC_Dif << 7)
+#define HC_HTXnTBLCb_Spec       (HC_XTC_Spec << 7)
+#define HC_HTXnTBLCb_Tex        (HC_XTC_Tex << 7)
+#define HC_HTXnTBLCb_Cur        (HC_XTC_Cur << 7)
+#define HC_HTXnTBLCb_Adif       (HC_XTC_Adif << 7)
+#define HC_HTXnTBLCb_Fog        (HC_XTC_Fog << 7)
+#define HC_HTXnTBLCb_Atex       (HC_XTC_Atex << 7)
+#define HC_HTXnTBLCb_Acur       (HC_XTC_Acur << 7)
+#define HC_HTXnTBLCb_HTXnTBLRC  (HC_XTC_HTXnTBLRC << 7)
+#define HC_HTXnTBLCb_Ctexnext   (HC_XTC_Ctexnext << 7)
+#define HC_HTXnTBLCc_TOPC       (HC_XTC_TOPC << 0)
+#define HC_HTXnTBLCc_InvTOPC    (HC_XTC_InvTOPC << 0)
+#define HC_HTXnTBLCc_TOPCp5     (HC_XTC_TOPCp5 << 0)
+#define HC_HTXnTBLCc_0          (HC_XTC_0 << 0)
+#define HC_HTXnTBLCc_Dif        (HC_XTC_Dif << 0)
+#define HC_HTXnTBLCc_Spec       (HC_XTC_Spec << 0)
+#define HC_HTXnTBLCc_Tex        (HC_XTC_Tex << 0)
+#define HC_HTXnTBLCc_Cur        (HC_XTC_Cur << 0)
+#define HC_HTXnTBLCc_Adif       (HC_XTC_Adif << 0)
+#define HC_HTXnTBLCc_Fog        (HC_XTC_Fog << 0)
+#define HC_HTXnTBLCc_Atex       (HC_XTC_Atex << 0)
+#define HC_HTXnTBLCc_Acur       (HC_XTC_Acur << 0)
+#define HC_HTXnTBLCc_HTXnTBLRC  (HC_XTC_HTXnTBLRC << 0)
+#define HC_HTXnTBLCc_Ctexnext   (HC_XTC_Ctexnext << 0)
+/* HC_SubA_HTXnTBLCop      0x0081
+ */
+#define HC_HTXnTBLdot_MASK      0x00c00000
+#define HC_HTXnTBLCop_MASK      0x00380000
+#define HC_HTXnTBLCbias_MASK    0x0007c000
+#define HC_HTXnTBLCshift_MASK   0x00001800
+#define HC_HTXnTBLAop_MASK      0x00000380
+#define HC_HTXnTBLAbias_MASK    0x00000078
+#define HC_HTXnTBLAshift_MASK   0x00000003
+#define HC_HTXnTBLCop_Add       0x00000000
+#define HC_HTXnTBLCop_Sub       0x00080000
+#define HC_HTXnTBLCop_Min       0x00100000
+#define HC_HTXnTBLCop_Max       0x00180000
+#define HC_HTXnTBLCop_Mask      0x00200000
+#define HC_HTXnTBLCbias_Cbias           (HC_XTC_Cbias << 14)
+#define HC_HTXnTBLCbias_InvCbias        (HC_XTC_InvCbias << 14)
+#define HC_HTXnTBLCbias_0               (HC_XTC_0 << 14)
+#define HC_HTXnTBLCbias_Dif             (HC_XTC_Dif << 14)
+#define HC_HTXnTBLCbias_Spec            (HC_XTC_Spec << 14)
+#define HC_HTXnTBLCbias_Tex             (HC_XTC_Tex << 14)
+#define HC_HTXnTBLCbias_Cur             (HC_XTC_Cur << 14)
+#define HC_HTXnTBLCbias_Adif            (HC_XTC_Adif << 14)
+#define HC_HTXnTBLCbias_Fog             (HC_XTC_Fog << 14)
+#define HC_HTXnTBLCbias_Atex            (HC_XTC_Atex << 14)
+#define HC_HTXnTBLCbias_Acur            (HC_XTC_Acur << 14)
+#define HC_HTXnTBLCbias_HTXnTBLRC       (HC_XTC_HTXnTBLRC << 14)
+#define HC_HTXnTBLCshift_1      0x00000000
+#define HC_HTXnTBLCshift_2      0x00000800
+#define HC_HTXnTBLCshift_No     0x00001000
+#define HC_HTXnTBLCshift_DotP   0x00001800
+/*=* John Sheng [2003.7.18] texture combine *=*/
+#define HC_HTXnTBLDOT3   0x00080000
+#define HC_HTXnTBLDOT4   0x000C0000
+
+#define HC_HTXnTBLAop_Add       0x00000000
+#define HC_HTXnTBLAop_Sub       0x00000080
+#define HC_HTXnTBLAop_Min       0x00000100
+#define HC_HTXnTBLAop_Max       0x00000180
+#define HC_HTXnTBLAop_Mask      0x00000200
+#define HC_HTXnTBLAbias_Inv             0x00000040
+#define HC_HTXnTBLAbias_Adif            0x00000000
+#define HC_HTXnTBLAbias_Fog             0x00000008
+#define HC_HTXnTBLAbias_Acur            0x00000010
+#define HC_HTXnTBLAbias_HTXnTBLRAbias   0x00000018
+#define HC_HTXnTBLAbias_Atex            0x00000020
+#define HC_HTXnTBLAshift_1      0x00000000
+#define HC_HTXnTBLAshift_2      0x00000001
+#define HC_HTXnTBLAshift_No     0x00000002
+/* #define HC_HTXnTBLAshift_DotP   0x00000003 */
+/* HC_SubA_HTXnTBLMPFog    0x0082
+ */
+#define HC_HTXnTBLMPfog_MASK    0x00e00000
+#define HC_HTXnTBLMPfog_0       0x00000000
+#define HC_HTXnTBLMPfog_Adif    0x00200000
+#define HC_HTXnTBLMPfog_Fog     0x00400000
+#define HC_HTXnTBLMPfog_Atex    0x00600000
+#define HC_HTXnTBLMPfog_Acur    0x00800000
+#define HC_HTXnTBLMPfog_GHTXnTBLRFog    0x00a00000
+/* HC_SubA_HTXnTBLAsat     0x0083
+ *-- Define the texture alpha input.
+ */
+#define HC_XTA_TOPA             0x00000000
+#define HC_XTA_InvTOPA          0x00000008
+#define HC_XTA_TOPAp5           0x00000010
+#define HC_XTA_Adif             0x00000000
+#define HC_XTA_Fog              0x00000001
+#define HC_XTA_Acur             0x00000002
+#define HC_XTA_HTXnTBLRA        0x00000003
+#define HC_XTA_Atex             0x00000004
+#define HC_XTA_Atexnext         0x00000005
+/*--
+ */
+#define HC_HTXnTBLAsat_MASK     0x00800000
+#define HC_HTXnTBLAMB_MASK      0x00700000
+#define HC_HTXnTBLAa_MASK       0x0007c000
+#define HC_HTXnTBLAb_MASK       0x00000f80
+#define HC_HTXnTBLAc_MASK       0x0000001f
+#define HC_HTXnTBLAMB_SHIFT     20
+#define HC_HTXnTBLAa_TOPA       (HC_XTA_TOPA << 14)
+#define HC_HTXnTBLAa_InvTOPA    (HC_XTA_InvTOPA << 14)
+#define HC_HTXnTBLAa_TOPAp5     (HC_XTA_TOPAp5 << 14)
+#define HC_HTXnTBLAa_Adif       (HC_XTA_Adif << 14)
+#define HC_HTXnTBLAa_Fog        (HC_XTA_Fog << 14)
+#define HC_HTXnTBLAa_Acur       (HC_XTA_Acur << 14)
+#define HC_HTXnTBLAa_HTXnTBLRA  (HC_XTA_HTXnTBLRA << 14)
+#define HC_HTXnTBLAa_Atex       (HC_XTA_Atex << 14)
+#define HC_HTXnTBLAa_Atexnext   (HC_XTA_Atexnext << 14)
+#define HC_HTXnTBLAb_TOPA       (HC_XTA_TOPA << 7)
+#define HC_HTXnTBLAb_InvTOPA    (HC_XTA_InvTOPA << 7)
+#define HC_HTXnTBLAb_TOPAp5     (HC_XTA_TOPAp5 << 7)
+#define HC_HTXnTBLAb_Adif       (HC_XTA_Adif << 7)
+#define HC_HTXnTBLAb_Fog        (HC_XTA_Fog << 7)
+#define HC_HTXnTBLAb_Acur       (HC_XTA_Acur << 7)
+#define HC_HTXnTBLAb_HTXnTBLRA  (HC_XTA_HTXnTBLRA << 7)
+#define HC_HTXnTBLAb_Atex       (HC_XTA_Atex << 7)
+#define HC_HTXnTBLAb_Atexnext   (HC_XTA_Atexnext << 7)
+#define HC_HTXnTBLAc_TOPA       (HC_XTA_TOPA << 0)
+#define HC_HTXnTBLAc_InvTOPA    (HC_XTA_InvTOPA << 0)
+#define HC_HTXnTBLAc_TOPAp5     (HC_XTA_TOPAp5 << 0)
+#define HC_HTXnTBLAc_Adif       (HC_XTA_Adif << 0)
+#define HC_HTXnTBLAc_Fog        (HC_XTA_Fog << 0)
+#define HC_HTXnTBLAc_Acur       (HC_XTA_Acur << 0)
+#define HC_HTXnTBLAc_HTXnTBLRA  (HC_XTA_HTXnTBLRA << 0)
+#define HC_HTXnTBLAc_Atex       (HC_XTA_Atex << 0)
+#define HC_HTXnTBLAc_Atexnext   (HC_XTA_Atexnext << 0)
+/* HC_SubA_HTXnTBLRAa      0x0089
+ */
+#define HC_HTXnTBLRAa_MASK      0x00ff0000
+#define HC_HTXnTBLRAb_MASK      0x0000ff00
+#define HC_HTXnTBLRAc_MASK      0x000000ff
+#define HC_HTXnTBLRAa_SHIFT     16
+#define HC_HTXnTBLRAb_SHIFT     8
+#define HC_HTXnTBLRAc_SHIFT     0
+/* HC_SubA_HTXnTBLRFog     0x008a
+ */
+#define HC_HTXnTBLRFog_MASK     0x0000ff00
+#define HC_HTXnTBLRAbias_MASK   0x000000ff
+#define HC_HTXnTBLRFog_SHIFT    8
+#define HC_HTXnTBLRAbias_SHIFT  0
+/* HC_SubA_HTXnLScale      0x0094
+ */
+#define HC_HTXnLScale_MASK      0x0007fc00
+#define HC_HTXnLOff_MASK        0x000001ff
+#define HC_HTXnLScale_SHIFT     10
+/* HC_SubA_HTXSMD          0x0000
+ */
+#define HC_HTXSMD_MASK          0x00000080
+#define HC_HTXTMD_MASK          0x00000040
+#define HC_HTXNum_MASK          0x00000038
+#define HC_HTXTRMD_MASK         0x00000006
+#define HC_HTXCHCLR_MASK        0x00000001
+#define HC_HTXNum_SHIFT         3
+
+/* Texture Palette n
+ */
+#define HC_SubType_TexPalette0  0x00000000
+#define HC_SubType_TexPalette1  0x00000001
+#define HC_SubType_FogTable     0x00000010
+#define HC_SubType_Stipple      0x00000014
+/* HC_SubA_TexPalette0     0x0000
+ */
+#define HC_HTPnA_MASK           0xff000000
+#define HC_HTPnR_MASK           0x00ff0000
+#define HC_HTPnG_MASK           0x0000ff00
+#define HC_HTPnB_MASK           0x000000ff
+/* HC_SubA_FogTable        0x0010
+ */
+#define HC_HFPn3_MASK           0xff000000
+#define HC_HFPn2_MASK           0x00ff0000
+#define HC_HFPn1_MASK           0x0000ff00
+#define HC_HFPn_MASK            0x000000ff
+#define HC_HFPn3_SHIFT          24
+#define HC_HFPn2_SHIFT          16
+#define HC_HFPn1_SHIFT          8
+
+/* Auto Testing & Security
+ */
+#define HC_SubA_HenFIFOAT       0x0000
+#define HC_SubA_HFBDrawFirst    0x0004
+#define HC_SubA_HFBBasL         0x0005
+#define HC_SubA_HFBDst          0x0006
+/* HC_SubA_HenFIFOAT       0x0000
+ */
+#define HC_HenFIFOAT_MASK       0x00000020
+#define HC_HenGEMILock_MASK     0x00000010
+#define HC_HenFBASwap_MASK      0x00000008
+#define HC_HenOT_MASK           0x00000004
+#define HC_HenCMDQ_MASK         0x00000002
+#define HC_HenTXCTSU_MASK       0x00000001
+/* HC_SubA_HFBDrawFirst    0x0004
+ */
+#define HC_HFBDrawFirst_MASK    0x00000800
+#define HC_HFBQueue_MASK        0x00000400
+#define HC_HFBLock_MASK         0x00000200
+#define HC_HEOF_MASK            0x00000100
+#define HC_HFBBasH_MASK         0x000000ff
+
+/* GEMI Setting
+ */
+#define HC_SubA_HTArbRCM        0x0008
+#define HC_SubA_HTArbRZ         0x000a
+#define HC_SubA_HTArbWZ         0x000b
+#define HC_SubA_HTArbRTX        0x000c
+#define HC_SubA_HTArbRCW        0x000d
+#define HC_SubA_HTArbE2         0x000e
+#define HC_SubA_HArbRQCM        0x0010
+#define HC_SubA_HArbWQCM        0x0011
+#define HC_SubA_HGEMITout       0x0020
+#define HC_SubA_HFthRTXD        0x0040
+#define HC_SubA_HFthRTXA        0x0044
+#define HC_SubA_HCMDQstL        0x0050
+#define HC_SubA_HCMDQendL       0x0051
+#define HC_SubA_HCMDQLen        0x0052
+/* HC_SubA_HTArbRCM        0x0008
+ */
+#define HC_HTArbRCM_MASK        0x0000ffff
+/* HC_SubA_HTArbRZ         0x000a
+ */
+#define HC_HTArbRZ_MASK         0x0000ffff
+/* HC_SubA_HTArbWZ         0x000b
+ */
+#define HC_HTArbWZ_MASK         0x0000ffff
+/* HC_SubA_HTArbRTX        0x000c
+ */
+#define HC_HTArbRTX_MASK        0x0000ffff
+/* HC_SubA_HTArbRCW        0x000d
+ */
+#define HC_HTArbRCW_MASK        0x0000ffff
+/* HC_SubA_HTArbE2         0x000e
+ */
+#define HC_HTArbE2_MASK         0x0000ffff
+/* HC_SubA_HArbRQCM        0x0010
+ */
+#define HC_HTArbRQCM_MASK       0x0000ffff
+/* HC_SubA_HArbWQCM        0x0011
+ */
+#define HC_HArbWQCM_MASK        0x0000ffff
+/* HC_SubA_HGEMITout       0x0020
+ */
+#define HC_HGEMITout_MASK       0x000f0000
+#define HC_HNPArbZC_MASK        0x0000ffff
+#define HC_HGEMITout_SHIFT      16
+/* HC_SubA_HFthRTXD        0x0040
+ */
+#define HC_HFthRTXD_MASK        0x00ff0000
+#define HC_HFthRZD_MASK         0x0000ff00
+#define HC_HFthWZD_MASK         0x000000ff
+#define HC_HFthRTXD_SHIFT       16
+#define HC_HFthRZD_SHIFT        8
+/* HC_SubA_HFthRTXA        0x0044
+ */
+#define HC_HFthRTXA_MASK        0x000000ff
+
+/******************************************************************************
+** Define the Halcyon Internal register access constants. For simulator only.
+******************************************************************************/
+#define HC_SIMA_HAGPBstL        0x0000
+#define HC_SIMA_HAGPBendL       0x0001
+#define HC_SIMA_HAGPCMNT        0x0002
+#define HC_SIMA_HAGPBpL         0x0003
+#define HC_SIMA_HAGPBpH         0x0004
+#define HC_SIMA_HClipTB         0x0005
+#define HC_SIMA_HClipLR         0x0006
+#define HC_SIMA_HFPClipTL       0x0007
+#define HC_SIMA_HFPClipBL       0x0008
+#define HC_SIMA_HFPClipLL       0x0009
+#define HC_SIMA_HFPClipRL       0x000a
+#define HC_SIMA_HFPClipTBH      0x000b
+#define HC_SIMA_HFPClipLRH      0x000c
+#define HC_SIMA_HLP             0x000d
+#define HC_SIMA_HLPRF           0x000e
+#define HC_SIMA_HSolidCL        0x000f
+#define HC_SIMA_HPixGC          0x0010
+#define HC_SIMA_HSPXYOS         0x0011
+#define HC_SIMA_HCmdA           0x0012
+#define HC_SIMA_HCmdB           0x0013
+#define HC_SIMA_HEnable         0x0014
+#define HC_SIMA_HZWBBasL        0x0015
+#define HC_SIMA_HZWBBasH        0x0016
+#define HC_SIMA_HZWBType        0x0017
+#define HC_SIMA_HZBiasL         0x0018
+#define HC_SIMA_HZWBend         0x0019
+#define HC_SIMA_HZWTMD          0x001a
+#define HC_SIMA_HZWCDL          0x001b
+#define HC_SIMA_HZWCTAGnum      0x001c
+#define HC_SIMA_HZCYNum         0x001d
+#define HC_SIMA_HZWCFire        0x001e
+/* #define HC_SIMA_HSBBasL         0x001d */
+/* #define HC_SIMA_HSBBasH         0x001e */
+/* #define HC_SIMA_HSBFM           0x001f */
+#define HC_SIMA_HSTREF          0x0020
+#define HC_SIMA_HSTMD           0x0021
+#define HC_SIMA_HABBasL         0x0022
+#define HC_SIMA_HABBasH         0x0023
+#define HC_SIMA_HABFM           0x0024
+#define HC_SIMA_HATMD           0x0025
+#define HC_SIMA_HABLCsat        0x0026
+#define HC_SIMA_HABLCop         0x0027
+#define HC_SIMA_HABLAsat        0x0028
+#define HC_SIMA_HABLAop         0x0029
+#define HC_SIMA_HABLRCa         0x002a
+#define HC_SIMA_HABLRFCa        0x002b
+#define HC_SIMA_HABLRCbias      0x002c
+#define HC_SIMA_HABLRCb         0x002d
+#define HC_SIMA_HABLRFCb        0x002e
+#define HC_SIMA_HABLRAa         0x002f
+#define HC_SIMA_HABLRAb         0x0030
+#define HC_SIMA_HDBBasL         0x0031
+#define HC_SIMA_HDBBasH         0x0032
+#define HC_SIMA_HDBFM           0x0033
+#define HC_SIMA_HFBBMSKL        0x0034
+#define HC_SIMA_HROP            0x0035
+#define HC_SIMA_HFogLF          0x0036
+#define HC_SIMA_HFogCL          0x0037
+#define HC_SIMA_HFogCH          0x0038
+#define HC_SIMA_HFogStL         0x0039
+#define HC_SIMA_HFogStH         0x003a
+#define HC_SIMA_HFogOOdMF       0x003b
+#define HC_SIMA_HFogOOdEF       0x003c
+#define HC_SIMA_HFogEndL        0x003d
+#define HC_SIMA_HFogDenst       0x003e
+/*---- start of texture 0 setting ----
+ */
+#define HC_SIMA_HTX0L0BasL      0x0040
+#define HC_SIMA_HTX0L1BasL      0x0041
+#define HC_SIMA_HTX0L2BasL      0x0042
+#define HC_SIMA_HTX0L3BasL      0x0043
+#define HC_SIMA_HTX0L4BasL      0x0044
+#define HC_SIMA_HTX0L5BasL      0x0045
+#define HC_SIMA_HTX0L6BasL      0x0046
+#define HC_SIMA_HTX0L7BasL      0x0047
+#define HC_SIMA_HTX0L8BasL      0x0048
+#define HC_SIMA_HTX0L9BasL      0x0049
+#define HC_SIMA_HTX0LaBasL      0x004a
+#define HC_SIMA_HTX0LbBasL      0x004b
+#define HC_SIMA_HTX0LcBasL      0x004c
+#define HC_SIMA_HTX0LdBasL      0x004d
+#define HC_SIMA_HTX0LeBasL      0x004e
+#define HC_SIMA_HTX0LfBasL      0x004f
+#define HC_SIMA_HTX0L10BasL     0x0050
+#define HC_SIMA_HTX0L11BasL     0x0051
+#define HC_SIMA_HTX0L012BasH    0x0052
+#define HC_SIMA_HTX0L345BasH    0x0053
+#define HC_SIMA_HTX0L678BasH    0x0054
+#define HC_SIMA_HTX0L9abBasH    0x0055
+#define HC_SIMA_HTX0LcdeBasH    0x0056
+#define HC_SIMA_HTX0Lf1011BasH  0x0057
+#define HC_SIMA_HTX0L0Pit       0x0058
+#define HC_SIMA_HTX0L1Pit       0x0059
+#define HC_SIMA_HTX0L2Pit       0x005a
+#define HC_SIMA_HTX0L3Pit       0x005b
+#define HC_SIMA_HTX0L4Pit       0x005c
+#define HC_SIMA_HTX0L5Pit       0x005d
+#define HC_SIMA_HTX0L6Pit       0x005e
+#define HC_SIMA_HTX0L7Pit       0x005f
+#define HC_SIMA_HTX0L8Pit       0x0060
+#define HC_SIMA_HTX0L9Pit       0x0061
+#define HC_SIMA_HTX0LaPit       0x0062
+#define HC_SIMA_HTX0LbPit       0x0063
+#define HC_SIMA_HTX0LcPit       0x0064
+#define HC_SIMA_HTX0LdPit       0x0065
+#define HC_SIMA_HTX0LePit       0x0066
+#define HC_SIMA_HTX0LfPit       0x0067
+#define HC_SIMA_HTX0L10Pit      0x0068
+#define HC_SIMA_HTX0L11Pit      0x0069
+#define HC_SIMA_HTX0L0_5WE      0x006a
+#define HC_SIMA_HTX0L6_bWE      0x006b
+#define HC_SIMA_HTX0Lc_11WE     0x006c
+#define HC_SIMA_HTX0L0_5HE      0x006d
+#define HC_SIMA_HTX0L6_bHE      0x006e
+#define HC_SIMA_HTX0Lc_11HE     0x006f
+#define HC_SIMA_HTX0L0OS        0x0070
+#define HC_SIMA_HTX0TB          0x0071
+#define HC_SIMA_HTX0MPMD        0x0072
+#define HC_SIMA_HTX0CLODu       0x0073
+#define HC_SIMA_HTX0FM          0x0074
+#define HC_SIMA_HTX0TRCH        0x0075
+#define HC_SIMA_HTX0TRCL        0x0076
+#define HC_SIMA_HTX0TBC         0x0077
+#define HC_SIMA_HTX0TRAH        0x0078
+#define HC_SIMA_HTX0TBLCsat     0x0079
+#define HC_SIMA_HTX0TBLCop      0x007a
+#define HC_SIMA_HTX0TBLMPfog    0x007b
+#define HC_SIMA_HTX0TBLAsat     0x007c
+#define HC_SIMA_HTX0TBLRCa      0x007d
+#define HC_SIMA_HTX0TBLRCb      0x007e
+#define HC_SIMA_HTX0TBLRCc      0x007f
+#define HC_SIMA_HTX0TBLRCbias   0x0080
+#define HC_SIMA_HTX0TBLRAa      0x0081
+#define HC_SIMA_HTX0TBLRFog     0x0082
+#define HC_SIMA_HTX0BumpM00     0x0083
+#define HC_SIMA_HTX0BumpM01     0x0084
+#define HC_SIMA_HTX0BumpM10     0x0085
+#define HC_SIMA_HTX0BumpM11     0x0086
+#define HC_SIMA_HTX0LScale      0x0087
+/*---- end of texture 0 setting ----      0x008f
+ */
+#define HC_SIMA_TX0TX1_OFF      0x0050
+/*---- start of texture 1 setting ----
+ */
+#define HC_SIMA_HTX1L0BasL      (HC_SIMA_HTX0L0BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L1BasL      (HC_SIMA_HTX0L1BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L2BasL      (HC_SIMA_HTX0L2BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L3BasL      (HC_SIMA_HTX0L3BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L4BasL      (HC_SIMA_HTX0L4BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L5BasL      (HC_SIMA_HTX0L5BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L6BasL      (HC_SIMA_HTX0L6BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L7BasL      (HC_SIMA_HTX0L7BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L8BasL      (HC_SIMA_HTX0L8BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L9BasL      (HC_SIMA_HTX0L9BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LaBasL      (HC_SIMA_HTX0LaBasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LbBasL      (HC_SIMA_HTX0LbBasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LcBasL      (HC_SIMA_HTX0LcBasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LdBasL      (HC_SIMA_HTX0LdBasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LeBasL      (HC_SIMA_HTX0LeBasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LfBasL      (HC_SIMA_HTX0LfBasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L10BasL     (HC_SIMA_HTX0L10BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L11BasL     (HC_SIMA_HTX0L11BasL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L012BasH    (HC_SIMA_HTX0L012BasH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L345BasH    (HC_SIMA_HTX0L345BasH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L678BasH    (HC_SIMA_HTX0L678BasH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L9abBasH    (HC_SIMA_HTX0L9abBasH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LcdeBasH    (HC_SIMA_HTX0LcdeBasH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1Lf1011BasH  (HC_SIMA_HTX0Lf1011BasH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L0Pit       (HC_SIMA_HTX0L0Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L1Pit       (HC_SIMA_HTX0L1Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L2Pit       (HC_SIMA_HTX0L2Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L3Pit       (HC_SIMA_HTX0L3Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L4Pit       (HC_SIMA_HTX0L4Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L5Pit       (HC_SIMA_HTX0L5Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L6Pit       (HC_SIMA_HTX0L6Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L7Pit       (HC_SIMA_HTX0L7Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L8Pit       (HC_SIMA_HTX0L8Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L9Pit       (HC_SIMA_HTX0L9Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LaPit       (HC_SIMA_HTX0LaPit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LbPit       (HC_SIMA_HTX0LbPit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LcPit       (HC_SIMA_HTX0LcPit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LdPit       (HC_SIMA_HTX0LdPit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LePit       (HC_SIMA_HTX0LePit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LfPit       (HC_SIMA_HTX0LfPit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L10Pit      (HC_SIMA_HTX0L10Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L11Pit      (HC_SIMA_HTX0L11Pit + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L0_5WE      (HC_SIMA_HTX0L0_5WE + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L6_bWE      (HC_SIMA_HTX0L6_bWE + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1Lc_11WE     (HC_SIMA_HTX0Lc_11WE + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L0_5HE      (HC_SIMA_HTX0L0_5HE + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L6_bHE      (HC_SIMA_HTX0L6_bHE + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1Lc_11HE     (HC_SIMA_HTX0Lc_11HE + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1L0OS        (HC_SIMA_HTX0L0OS + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TB          (HC_SIMA_HTX0TB + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1MPMD        (HC_SIMA_HTX0MPMD + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1CLODu       (HC_SIMA_HTX0CLODu + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1FM          (HC_SIMA_HTX0FM + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TRCH        (HC_SIMA_HTX0TRCH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TRCL        (HC_SIMA_HTX0TRCL + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBC         (HC_SIMA_HTX0TBC + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TRAH        (HC_SIMA_HTX0TRAH + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LTC         (HC_SIMA_HTX0LTC + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LTA         (HC_SIMA_HTX0LTA + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLCsat     (HC_SIMA_HTX0TBLCsat + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLCop      (HC_SIMA_HTX0TBLCop + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLMPfog    (HC_SIMA_HTX0TBLMPfog + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLAsat     (HC_SIMA_HTX0TBLAsat + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLRCa      (HC_SIMA_HTX0TBLRCa + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLRCb      (HC_SIMA_HTX0TBLRCb + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLRCc      (HC_SIMA_HTX0TBLRCc + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLRCbias   (HC_SIMA_HTX0TBLRCbias + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLRAa      (HC_SIMA_HTX0TBLRAa + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1TBLRFog     (HC_SIMA_HTX0TBLRFog + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1BumpM00     (HC_SIMA_HTX0BumpM00 + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1BumpM01     (HC_SIMA_HTX0BumpM01 + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1BumpM10     (HC_SIMA_HTX0BumpM10 + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1BumpM11     (HC_SIMA_HTX0BumpM11 + HC_SIMA_TX0TX1_OFF)
+#define HC_SIMA_HTX1LScale      (HC_SIMA_HTX0LScale + HC_SIMA_TX0TX1_OFF)
+/*---- end of texture 1 setting ---- 0xaf
+ */
+#define HC_SIMA_HTXSMD          0x00b0
+#define HC_SIMA_HenFIFOAT       0x00b1
+#define HC_SIMA_HFBDrawFirst    0x00b2
+#define HC_SIMA_HFBBasL         0x00b3
+#define HC_SIMA_HTArbRCM        0x00b4
+#define HC_SIMA_HTArbRZ         0x00b5
+#define HC_SIMA_HTArbWZ         0x00b6
+#define HC_SIMA_HTArbRTX        0x00b7
+#define HC_SIMA_HTArbRCW        0x00b8
+#define HC_SIMA_HTArbE2         0x00b9
+#define HC_SIMA_HGEMITout       0x00ba
+#define HC_SIMA_HFthRTXD        0x00bb
+#define HC_SIMA_HFthRTXA        0x00bc
+/* Define the texture palette 0
+ */
+#define HC_SIMA_HTP0            0x0100
+#define HC_SIMA_HTP1            0x0200
+#define HC_SIMA_FOGTABLE        0x0300
+#define HC_SIMA_STIPPLE         0x0400
+#define HC_SIMA_HE3Fire         0x0440
+#define HC_SIMA_TRANS_SET       0x0441
+#define HC_SIMA_HREngSt         0x0442
+#define HC_SIMA_HRFIFOempty     0x0443
+#define HC_SIMA_HRFIFOfull      0x0444
+#define HC_SIMA_HRErr           0x0445
+#define HC_SIMA_FIFOstatus      0x0446
+
+/******************************************************************************
+** Define the AGP command header.
+******************************************************************************/
+#define HC_ACMD_MASK            0xfe000000
+#define HC_ACMD_SUB_MASK        0x0c000000
+#define HC_ACMD_HCmdA           0xee000000
+#define HC_ACMD_HCmdB           0xec000000
+#define HC_ACMD_HCmdC           0xea000000
+#define HC_ACMD_H1              0xf0000000
+#define HC_ACMD_H2              0xf2000000
+#define HC_ACMD_H3              0xf4000000
+#define HC_ACMD_H4              0xf6000000
+
+#define HC_ACMD_H1IO_MASK       0x000001ff
+#define HC_ACMD_H2IO1_MASK      0x001ff000
+#define HC_ACMD_H2IO2_MASK      0x000001ff
+#define HC_ACMD_H2IO1_SHIFT     12
+#define HC_ACMD_H2IO2_SHIFT     0
+#define HC_ACMD_H3IO_MASK       0x000001ff
+#define HC_ACMD_H3COUNT_MASK    0x01fff000
+#define HC_ACMD_H3COUNT_SHIFT   12
+#define HC_ACMD_H4ID_MASK       0x000001ff
+#define HC_ACMD_H4COUNT_MASK    0x01fffe00
+#define HC_ACMD_H4COUNT_SHIFT   9
+
+/********************************************************************************
+** Define Header
+********************************************************************************/
+#define HC_HEADER2		0xF210F110
+
+/********************************************************************************
+** Define Dummy Value
+********************************************************************************/
+#define HC_DUMMY		0xCCCCCCCC
+/********************************************************************************
+** Define for DMA use
+********************************************************************************/
+#define HALCYON_HEADER2     0XF210F110
+#define HALCYON_FIRECMD     0XEE100000
+#define HALCYON_FIREMASK    0XFFF00000
+#define HALCYON_CMDB        0XEC000000
+#define HALCYON_CMDBMASK    0XFFFE0000
+#define HALCYON_SUB_ADDR0   0X00000000
+#define HALCYON_HEADER1MASK 0XFFFFFC00
+#define HALCYON_HEADER1     0XF0000000
+#define HC_SubA_HAGPBstL        0x0060
+#define HC_SubA_HAGPBendL       0x0061
+#define HC_SubA_HAGPCMNT        0x0062
+#define HC_SubA_HAGPBpL         0x0063
+#define HC_SubA_HAGPBpH         0x0064
+#define HC_HAGPCMNT_MASK        0x00800000
+#define HC_HCmdErrClr_MASK      0x00400000
+#define HC_HAGPBendH_MASK       0x0000ff00
+#define HC_HAGPBstH_MASK        0x000000ff
+#define HC_HAGPBendH_SHIFT      8
+#define HC_HAGPBstH_SHIFT       0
+#define HC_HAGPBpL_MASK         0x00fffffc
+#define HC_HAGPBpID_MASK        0x00000003
+#define HC_HAGPBpID_PAUSE       0x00000000
+#define HC_HAGPBpID_JUMP        0x00000001
+#define HC_HAGPBpID_STOP        0x00000002
+#define HC_HAGPBpH_MASK         0x00ffffff
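+
+/*
+ * Illustrative sketch only, not part of the original header: one way the
+ * branch-pointer fields above can be packed into the command pair that
+ * via_align_cmd() in via_dma.c emits.  The helper name and the pause_addr
+ * parameter are assumptions made for this example.
+ */
+#if 0
+static inline void hc_pack_pause(uint32_t pause_addr,
+				 uint32_t *hi, uint32_t *lo)
+{
+	/* Low dword: sub-address, branch type (PAUSE is 0) and low address bits. */
+	*lo = (HC_SubA_HAGPBpL << 24) | HC_HAGPBpID_PAUSE |
+	      (pause_addr & HC_HAGPBpL_MASK);
+	/* High dword: sub-address plus the top address byte. */
+	*hi = (HC_SubA_HAGPBpH << 24) | (pause_addr >> 24);
+}
+#endif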
+
+#define VIA_VIDEO_HEADER5       0xFE040000
+#define VIA_VIDEO_HEADER6       0xFE050000
+#define VIA_VIDEO_HEADER7       0xFE060000
+#define VIA_VIDEOMASK           0xFFFF0000
+#endif
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
new file mode 100644
index 000000000000..7a339dba6a69
--- /dev/null
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -0,0 +1,755 @@
+/* via_dma.c -- DMA support for the VIA Unichrome/Pro
+ *
+ * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Copyright 2004 Digeo, Inc., Palo Alto, CA, U.S.A.
+ * All Rights Reserved.
+ *
+ * Copyright 2004 The Unichrome project.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Tungsten Graphics,
+ *    Erdi Chen,
+ *    Thomas Hellstrom.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "via_drm.h"
+#include "via_drv.h"
+#include "via_3d_reg.h"
+
+#define CMDBUF_ALIGNMENT_SIZE   (0x100)
+#define CMDBUF_ALIGNMENT_MASK   (0x0ff)
+
+/* defines for VIA 3D registers */
+#define VIA_REG_STATUS          0x400
+#define VIA_REG_TRANSET         0x43C
+#define VIA_REG_TRANSPACE       0x440
+
+/* VIA_REG_STATUS(0x400): Engine Status */
+#define VIA_CMD_RGTR_BUSY       0x00000080	/* Command Regulator is busy */
+#define VIA_2D_ENG_BUSY         0x00000001	/* 2D Engine is busy */
+#define VIA_3D_ENG_BUSY         0x00000002	/* 3D Engine is busy */
+#define VIA_VR_QUEUE_BUSY       0x00020000	/* Virtual Queue is busy */
+
+#define SetReg2DAGP(nReg, nData) {				\
+	*((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1;	\
+	*((uint32_t *)(vb) + 1) = (nData);			\
+	vb = ((uint32_t *)vb) + 2;				\
+	dev_priv->dma_low += 8;					\
+}
+
+#define via_flush_write_combine() DRM_MEMORYBARRIER()
+
+#define VIA_OUT_RING_QW(w1,w2)			\
+	*vb++ = (w1);				\
+	*vb++ = (w2);				\
+	dev_priv->dma_low += 8;
+
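+/*
+ * Illustrative sketch only, not part of the original file: the typical
+ * pattern used further below to emit quadwords into the AGP ring.  It
+ * assumes dev_priv and vb are in scope as the macros above require;
+ * via_get_dma() is defined later in this file.
+ */
+#if 0
+	{
+		uint32_t *vb = via_get_dma(dev_priv);
+
+		VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);	/* pad one quadword */
+		SetReg2DAGP(0x0c, 0);	/* HEADER1 write of a 2D register */
+	}
+#endif
+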
+static void via_cmdbuf_start(drm_via_private_t * dev_priv);
+static void via_cmdbuf_pause(drm_via_private_t * dev_priv);
+static void via_cmdbuf_reset(drm_via_private_t * dev_priv);
+static void via_cmdbuf_rewind(drm_via_private_t * dev_priv);
+static int via_wait_idle(drm_via_private_t * dev_priv);
+static void via_pad_cache(drm_via_private_t * dev_priv, int qwords);
+
+/*
+ * Free space in command buffer.
+ */
+
+static uint32_t via_cmdbuf_space(drm_via_private_t * dev_priv)
+{
+	uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
+	uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
+
+	return ((hw_addr <= dev_priv->dma_low) ?
+		(dev_priv->dma_high + hw_addr - dev_priv->dma_low) :
+		(hw_addr - dev_priv->dma_low));
+}
+
+/*
+ * How much does the command regulator lag behind?
+ */
+
+static uint32_t via_cmdbuf_lag(drm_via_private_t * dev_priv)
+{
+	uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
+	uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
+
+	return ((hw_addr <= dev_priv->dma_low) ?
+		(dev_priv->dma_low - hw_addr) :
+		(dev_priv->dma_wrap + dev_priv->dma_low - hw_addr));
+}
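+
+/*
+ * Worked example for the two helpers above (illustrative numbers only):
+ * with dma_low = 0x3000, dma_wrap = dma_high = 0x10000 and a hardware
+ * read pointer hw_addr = 0x1000, the regulator lags by
+ * 0x3000 - 0x1000 = 0x2000 bytes, and 0x10000 + 0x1000 - 0x3000 = 0xe000
+ * bytes of the ring are free for new commands.
+ */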
+
+/*
+ * Check that the given size fits in the buffer, otherwise wait.
+ */
+
+static inline int
+via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size)
+{
+	uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
+	uint32_t cur_addr, hw_addr, next_addr;
+	volatile uint32_t *hw_addr_ptr;
+	uint32_t count;
+	hw_addr_ptr = dev_priv->hw_addr_ptr;
+	cur_addr = dev_priv->dma_low;
+	next_addr = cur_addr + size + 512 * 1024;
+	count = 1000000;
+	do {
+		hw_addr = *hw_addr_ptr - agp_base;
+		if (count-- == 0) {
+			DRM_ERROR
+			    ("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n",
+			     hw_addr, cur_addr, next_addr);
+			return -1;
+		}
+		if  ((cur_addr < hw_addr) && (next_addr >= hw_addr))
+			msleep(1);
+	} while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
+	return 0;
+}
+
+/*
+ * Checks whether the buffer head has reached the end and rewinds the
+ * ring buffer when necessary.
+ *
+ * Returns a virtual pointer to the ring buffer.
+ */
+
+static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv,
+				      unsigned int size)
+{
+	if ((dev_priv->dma_low + size + 4 * CMDBUF_ALIGNMENT_SIZE) >
+	    dev_priv->dma_high) {
+		via_cmdbuf_rewind(dev_priv);
+	}
+	if (via_cmdbuf_wait(dev_priv, size) != 0) {
+		return NULL;
+	}
+
+	return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
+}
+
+int via_dma_cleanup(struct drm_device * dev)
+{
+	if (dev->dev_private) {
+		drm_via_private_t *dev_priv =
+		    (drm_via_private_t *) dev->dev_private;
+
+		if (dev_priv->ring.virtual_start) {
+			via_cmdbuf_reset(dev_priv);
+
+			drm_core_ioremapfree(&dev_priv->ring.map, dev);
+			dev_priv->ring.virtual_start = NULL;
+		}
+
+	}
+
+	return 0;
+}
+
+static int via_initialize(struct drm_device * dev,
+			  drm_via_private_t * dev_priv,
+			  drm_via_dma_init_t * init)
+{
+	if (!dev_priv || !dev_priv->mmio) {
+		DRM_ERROR("via_dma_init called before via_map_init\n");
+		return -EFAULT;
+	}
+
+	if (dev_priv->ring.virtual_start != NULL) {
+		DRM_ERROR("called again without calling cleanup\n");
+		return -EFAULT;
+	}
+
+	if (!dev->agp || !dev->agp->base) {
+		DRM_ERROR("called with no agp memory available\n");
+		return -EFAULT;
+	}
+
+	if (dev_priv->chipset == VIA_DX9_0) {
+		DRM_ERROR("AGP DMA is not supported on this chip\n");
+		return -EINVAL;
+	}
+
+	dev_priv->ring.map.offset = dev->agp->base + init->offset;
+	dev_priv->ring.map.size = init->size;
+	dev_priv->ring.map.type = 0;
+	dev_priv->ring.map.flags = 0;
+	dev_priv->ring.map.mtrr = 0;
+
+	drm_core_ioremap(&dev_priv->ring.map, dev);
+
+	if (dev_priv->ring.map.handle == NULL) {
+		via_dma_cleanup(dev);
+		DRM_ERROR("can not ioremap virtual address for"
+			  " ring buffer\n");
+		return -ENOMEM;
+	}
+
+	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+
+	dev_priv->dma_ptr = dev_priv->ring.virtual_start;
+	dev_priv->dma_low = 0;
+	dev_priv->dma_high = init->size;
+	dev_priv->dma_wrap = init->size;
+	dev_priv->dma_offset = init->offset;
+	dev_priv->last_pause_ptr = NULL;
+	dev_priv->hw_addr_ptr =
+		(volatile uint32_t *)((char *)dev_priv->mmio->handle +
+		init->reg_pause_addr);
+
+	via_cmdbuf_start(dev_priv);
+
+	return 0;
+}
+
+static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	drm_via_dma_init_t *init = data;
+	int retcode = 0;
+
+	switch (init->func) {
+	case VIA_INIT_DMA:
+		if (!DRM_SUSER(DRM_CURPROC))
+			retcode = -EPERM;
+		else
+			retcode = via_initialize(dev, dev_priv, init);
+		break;
+	case VIA_CLEANUP_DMA:
+		if (!DRM_SUSER(DRM_CURPROC))
+			retcode = -EPERM;
+		else
+			retcode = via_dma_cleanup(dev);
+		break;
+	case VIA_DMA_INITIALIZED:
+		retcode = (dev_priv->ring.virtual_start != NULL) ?
+			0 : -EFAULT;
+		break;
+	default:
+		retcode = -EINVAL;
+		break;
+	}
+
+	return retcode;
+}
+
+static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t * cmd)
+{
+	drm_via_private_t *dev_priv;
+	uint32_t *vb;
+	int ret;
+
+	dev_priv = (drm_via_private_t *) dev->dev_private;
+
+	if (dev_priv->ring.virtual_start == NULL) {
+		DRM_ERROR("called without initializing AGP ring buffer.\n");
+		return -EFAULT;
+	}
+
+	if (cmd->size > VIA_PCI_BUF_SIZE) {
+		return -ENOMEM;
+	}
+
+	if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
+		return -EFAULT;
+
+	/*
+	 * Verifying the command stream directly in AGP memory is dead slow.
+	 * Therefore we verify it in a temporary cacheable system-memory
+	 * buffer and copy it to AGP memory when ready.
+	 */
+
+	if ((ret =
+	     via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
+				       cmd->size, dev, 1))) {
+		return ret;
+	}
+
+	vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
+	if (vb == NULL) {
+		return -EAGAIN;
+	}
+
+	memcpy(vb, dev_priv->pci_buf, cmd->size);
+
+	dev_priv->dma_low += cmd->size;
+
+	/*
+	 * Small submissions somehow stall the CPU (AGP cache effects?), so
+	 * pad them to a larger size.
+	 */
+
+	if (cmd->size < 0x100)
+		via_pad_cache(dev_priv, (0x100 - cmd->size) >> 3);
+	via_cmdbuf_pause(dev_priv);
+
+	return 0;
+}
+
+int via_driver_dma_quiescent(struct drm_device * dev)
+{
+	drm_via_private_t *dev_priv = dev->dev_private;
+
+	if (!via_wait_idle(dev_priv)) {
+		return -EBUSY;
+	}
+	return 0;
+}
+
+static int via_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	return via_driver_dma_quiescent(dev);
+}
+
+static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_via_cmdbuffer_t *cmdbuf = data;
+	int ret;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
+
+	ret = via_dispatch_cmdbuffer(dev, cmdbuf);
+	if (ret) {
+		return ret;
+	}
+
+	return 0;
+}
+
+static int via_dispatch_pci_cmdbuffer(struct drm_device * dev,
+				      drm_via_cmdbuffer_t * cmd)
+{
+	drm_via_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	if (cmd->size > VIA_PCI_BUF_SIZE) {
+		return -ENOMEM;
+	}
+	if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
+		return -EFAULT;
+
+	if ((ret =
+	     via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
+				       cmd->size, dev, 0))) {
+		return ret;
+	}
+
+	ret =
+	    via_parse_command_stream(dev, (const uint32_t *)dev_priv->pci_buf,
+				     cmd->size);
+	return ret;
+}
+
+static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_via_cmdbuffer_t *cmdbuf = data;
+	int ret;
+
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
+
+	ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf);
+	if (ret) {
+		return ret;
+	}
+
+	return 0;
+}
+
+static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv,
+					 uint32_t * vb, int qw_count)
+{
+	for (; qw_count > 0; --qw_count) {
+		VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
+	}
+	return vb;
+}
+
+/*
+ * This function is used internally by ring buffer management code.
+ *
+ * Returns a virtual pointer to the ring buffer.
+ */
+static inline uint32_t *via_get_dma(drm_via_private_t * dev_priv)
+{
+	return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
+}
+
+/*
+ * Hooks a segment of data into the tail of the ring-buffer by
+ * modifying the pause address stored in the buffer itself. If
+ * the regulator has already paused, restart it.
+ */
+static int via_hook_segment(drm_via_private_t * dev_priv,
+			    uint32_t pause_addr_hi, uint32_t pause_addr_lo,
+			    int no_pci_fire)
+{
+	int paused, count;
+	volatile uint32_t *paused_at = dev_priv->last_pause_ptr;
+	uint32_t reader, ptr;
+	uint32_t diff;
+
+	paused = 0;
+	via_flush_write_combine();
+	(void) *(volatile uint32_t *)(via_get_dma(dev_priv) -1);
+
+	*paused_at = pause_addr_lo;
+	via_flush_write_combine();
+	(void) *paused_at;
+
+	reader = *(dev_priv->hw_addr_ptr);
+	ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
+		dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
+
+	dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;
+
+	/*
+	 * There is a possibility that the command reader will miss the new
+	 * pause address and pause on the old one. In that case we need to
+	 * program the new start address using PCI.
+	 */
+
+	diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
+	count = 10000000;
+	while (diff == 0 && count--) {
+		paused = (VIA_READ(0x41c) & 0x80000000);
+		if (paused)
+			break;
+		reader = *(dev_priv->hw_addr_ptr);
+		diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
+	}
+
+	paused = VIA_READ(0x41c) & 0x80000000;
+
+	if (paused && !no_pci_fire) {
+		reader = *(dev_priv->hw_addr_ptr);
+		diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
+		diff &= (dev_priv->dma_high - 1);
+		if (diff != 0 && diff < (dev_priv->dma_high >> 1)) {
+			DRM_ERROR("Paused at incorrect address. "
+				  "0x%08x, 0x%08x 0x%08x\n",
+				  ptr, reader, dev_priv->dma_diff);
+		} else if (diff == 0) {
+			/*
+			 * There is a concern that these writes may stall the PCI bus
+			 * if the GPU is not idle. However, idling the GPU first
+			 * doesn't make a difference.
+			 */
+
+			VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
+			VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
+			VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
+			VIA_READ(VIA_REG_TRANSPACE);
+		}
+	}
+	return paused;
+}
+
+static int via_wait_idle(drm_via_private_t * dev_priv)
+{
+	int count = 10000000;
+
+	while (!(VIA_READ(VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && count--);
+
+	while (count-- && (VIA_READ(VIA_REG_STATUS) &
+			   (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY |
+			    VIA_3D_ENG_BUSY))) ;
+	return count;
+}
+
+static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type,
+			       uint32_t addr, uint32_t * cmd_addr_hi,
+			       uint32_t * cmd_addr_lo, int skip_wait)
+{
+	uint32_t agp_base;
+	uint32_t cmd_addr, addr_lo, addr_hi;
+	uint32_t *vb;
+	uint32_t qw_pad_count;
+
+	if (!skip_wait)
+		via_cmdbuf_wait(dev_priv, 2 * CMDBUF_ALIGNMENT_SIZE);
+
+	vb = via_get_dma(dev_priv);
+	VIA_OUT_RING_QW(HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
+			(VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16);
+	agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
+	qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
+	    ((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);
+
+	cmd_addr = (addr) ? addr :
+	    agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
+	addr_lo = ((HC_SubA_HAGPBpL << 24) | (cmd_type & HC_HAGPBpID_MASK) |
+		   (cmd_addr & HC_HAGPBpL_MASK));
+	addr_hi = ((HC_SubA_HAGPBpH << 24) | (cmd_addr >> 24));
+
+	vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1);
+	VIA_OUT_RING_QW(*cmd_addr_hi = addr_hi, *cmd_addr_lo = addr_lo);
+	return vb;
+}
+
+static void via_cmdbuf_start(drm_via_private_t * dev_priv)
+{
+	uint32_t pause_addr_lo, pause_addr_hi;
+	uint32_t start_addr, start_addr_lo;
+	uint32_t end_addr, end_addr_lo;
+	uint32_t command;
+	uint32_t agp_base;
+	uint32_t ptr;
+	uint32_t reader;
+	int count;
+
+	dev_priv->dma_low = 0;
+
+	agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
+	start_addr = agp_base;
+	end_addr = agp_base + dev_priv->dma_high;
+
+	start_addr_lo = ((HC_SubA_HAGPBstL << 24) | (start_addr & 0xFFFFFF));
+	end_addr_lo = ((HC_SubA_HAGPBendL << 24) | (end_addr & 0xFFFFFF));
+	command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
+		   ((end_addr & 0xff000000) >> 16));
+
+	dev_priv->last_pause_ptr =
+	    via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0,
+			  &pause_addr_hi, &pause_addr_lo, 1) - 1;
+
+	via_flush_write_combine();
+	(void) *(volatile uint32_t *)dev_priv->last_pause_ptr;
+
+	VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16));
+	VIA_WRITE(VIA_REG_TRANSPACE, command);
+	VIA_WRITE(VIA_REG_TRANSPACE, start_addr_lo);
+	VIA_WRITE(VIA_REG_TRANSPACE, end_addr_lo);
+
+	VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi);
+	VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo);
+	DRM_WRITEMEMORYBARRIER();
+	VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK);
+	VIA_READ(VIA_REG_TRANSPACE);
+
+	dev_priv->dma_diff = 0;
+
+	count = 10000000;
+	while (!(VIA_READ(0x41c) & 0x80000000) && count--);
+
+	reader = *(dev_priv->hw_addr_ptr);
+	ptr = ((volatile char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
+	    dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
+
+	/*
+	 * This is the difference between where we tell the
+	 * command reader to pause and where it actually pauses.
+	 * This differs between hardware implementations, so we need to
+	 * detect it.
+	 */
+
+	dev_priv->dma_diff = ptr - reader;
+}
+
+static void via_pad_cache(drm_via_private_t * dev_priv, int qwords)
+{
+	uint32_t *vb;
+
+	via_cmdbuf_wait(dev_priv, qwords + 2);
+	vb = via_get_dma(dev_priv);
+	VIA_OUT_RING_QW(HC_HEADER2, HC_ParaType_NotTex << 16);
+	via_align_buffer(dev_priv, vb, qwords);
+}
+
+static inline void via_dummy_bitblt(drm_via_private_t * dev_priv)
+{
+	uint32_t *vb = via_get_dma(dev_priv);
+	SetReg2DAGP(0x0C, (0 | (0 << 16)));
+	SetReg2DAGP(0x10, 0 | (0 << 16));
+	SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000);
+}
+
+static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
+{
+	uint32_t agp_base;
+	uint32_t pause_addr_lo, pause_addr_hi;
+	uint32_t jump_addr_lo, jump_addr_hi;
+	volatile uint32_t *last_pause_ptr;
+	uint32_t dma_low_save1, dma_low_save2;
+
+	agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
+	via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
+		      &jump_addr_lo, 0);
+
+	dev_priv->dma_wrap = dev_priv->dma_low;
+
+	/*
+	 * Wrap command buffer to the beginning.
+	 */
+
+	dev_priv->dma_low = 0;
+	if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0) {
+		DRM_ERROR("via_cmdbuf_jump failed\n");
+	}
+
+	via_dummy_bitblt(dev_priv);
+	via_dummy_bitblt(dev_priv);
+
+	last_pause_ptr =
+	    via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
+			  &pause_addr_lo, 0) - 1;
+	via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
+		      &pause_addr_lo, 0);
+
+	*last_pause_ptr = pause_addr_lo;
+	dma_low_save1 = dev_priv->dma_low;
+
+	/*
+	 * Now, set a trap that will pause the regulator if it tries to rerun the old
+	 * command buffer. (This may happen if via_hook_segment detects a command regulator pause
+	 * and reissues the jump command over PCI, while the regulator has already taken the jump
+	 * and actually paused at the current buffer end.)
+	 * There appears to be no other way to detect this condition, since hw_addr_ptr
+	 * does not seem to get updated immediately when a jump occurs.
+	 */
+
+	last_pause_ptr =
+		via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
+			      &pause_addr_lo, 0) - 1;
+	via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
+		      &pause_addr_lo, 0);
+	*last_pause_ptr = pause_addr_lo;
+
+	dma_low_save2 = dev_priv->dma_low;
+	dev_priv->dma_low = dma_low_save1;
+	via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
+	dev_priv->dma_low = dma_low_save2;
+	via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
+}
+
+
+static void via_cmdbuf_rewind(drm_via_private_t * dev_priv)
+{
+	via_cmdbuf_jump(dev_priv);
+}
+
+static void via_cmdbuf_flush(drm_via_private_t * dev_priv, uint32_t cmd_type)
+{
+	uint32_t pause_addr_lo, pause_addr_hi;
+
+	via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0);
+	via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
+}
+
+static void via_cmdbuf_pause(drm_via_private_t * dev_priv)
+{
+	via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE);
+}
+
+static void via_cmdbuf_reset(drm_via_private_t * dev_priv)
+{
+	via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
+	via_wait_idle(dev_priv);
+}
+
+/*
+ * User interface to the space and lag functions.
+ */
+
+static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_via_cmdbuf_size_t *d_siz = data;
+	int ret = 0;
+	uint32_t tmp_size, count;
+	drm_via_private_t *dev_priv;
+
+	DRM_DEBUG("\n");
+	LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	dev_priv = (drm_via_private_t *) dev->dev_private;
+
+	if (dev_priv->ring.virtual_start == NULL) {
+		DRM_ERROR("called without initializing AGP ring buffer.\n");
+		return -EFAULT;
+	}
+
+	count = 1000000;
+	tmp_size = d_siz->size;
+	switch (d_siz->func) {
+	case VIA_CMDBUF_SPACE:
+		while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
+		       && count--) {
+			if (!d_siz->wait) {
+				break;
+			}
+		}
+		if (!count) {
+			DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
+			ret = -EAGAIN;
+		}
+		break;
+	case VIA_CMDBUF_LAG:
+		while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size)
+		       && count--) {
+			if (!d_siz->wait) {
+				break;
+			}
+		}
+		if (!count) {
+			DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
+			ret = -EAGAIN;
+		}
+		break;
+	default:
+		ret = -EFAULT;
+	}
+	d_siz->size = tmp_size;
+
+	return ret;
+}
+
+struct drm_ioctl_desc via_ioctls[] = {
+	DRM_IOCTL_DEF(DRM_VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_VIA_FREEMEM, via_mem_free, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
+	DRM_IOCTL_DEF(DRM_VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
+	DRM_IOCTL_DEF(DRM_VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
+	DRM_IOCTL_DEF(DRM_VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_VIA_DMA_INIT, via_dma_init, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
+};
+
+int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
new file mode 100644
index 000000000000..409e00afdd07
--- /dev/null
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -0,0 +1,816 @@
+/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
+ *
+ * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Thomas Hellstrom.
+ *    Partially based on code obtained from Digeo Inc.
+ */
+
+
+/*
+ * Unmaps the DMA mappings.
+ * FIXME: Is this a NoOp on x86? Also
+ * FIXME: What happens if this one is called and a pending blit has previously done
+ * the same DMA mappings?
+ */
+
+#include "drmP.h"
+#include "via_drm.h"
+#include "via_drv.h"
+#include "via_dmablit.h"
+
+#include <linux/pagemap.h>
+
+#define VIA_PGDN(x)	     (((unsigned long)(x)) & PAGE_MASK)
+#define VIA_PGOFF(x)	    (((unsigned long)(x)) & ~PAGE_MASK)
+#define VIA_PFN(x)	      ((unsigned long)(x) >> PAGE_SHIFT)
+
+typedef struct _drm_via_descriptor {
+	uint32_t mem_addr;
+	uint32_t dev_addr;
+	uint32_t size;
+	uint32_t next;
+} drm_via_descriptor_t;
+
+
+/*
+ * Unmap a DMA mapping.
+ */
+
+
+
+static void
+via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
+{
+	int num_desc = vsg->num_desc;
+	unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
+	unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
+	drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
+		descriptor_this_page;
+	dma_addr_t next = vsg->chain_start;
+
+	while (num_desc--) {
+		if (descriptor_this_page-- == 0) {
+			cur_descriptor_page--;
+			descriptor_this_page = vsg->descriptors_per_page - 1;
+			desc_ptr = vsg->desc_pages[cur_descriptor_page] +
+				descriptor_this_page;
+		}
+		dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
+		dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
+		next = (dma_addr_t) desc_ptr->next;
+		desc_ptr--;
+	}
+}
+
+/*
+ * If mode = 0, count how many descriptors are needed.
+ * If mode = 1, map the DMA pages for the device, and build and map the descriptors as well.
+ * Descriptors are run in reverse order by the hardware because we are not allowed to update the
+ * 'next' field without syncing calls when the descriptor is already mapped.
+ */
+
+static void
+via_map_blit_for_device(struct pci_dev *pdev,
+		   const drm_via_dmablit_t *xfer,
+		   drm_via_sg_info_t *vsg,
+		   int mode)
+{
+	unsigned cur_descriptor_page = 0;
+	unsigned num_descriptors_this_page = 0;
+	unsigned char *mem_addr = xfer->mem_addr;
+	unsigned char *cur_mem;
+	unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
+	uint32_t fb_addr = xfer->fb_addr;
+	uint32_t cur_fb;
+	unsigned long line_len;
+	unsigned remaining_len;
+	int num_desc = 0;
+	int cur_line;
+	dma_addr_t next = 0 | VIA_DMA_DPR_EC;
+	drm_via_descriptor_t *desc_ptr = NULL;
+
+	if (mode == 1)
+		desc_ptr = vsg->desc_pages[cur_descriptor_page];
+
+	for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {
+
+		line_len = xfer->line_length;
+		cur_fb = fb_addr;
+		cur_mem = mem_addr;
+
+		while (line_len > 0) {
+
+			remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
+			line_len -= remaining_len;
+
+			if (mode == 1) {
+				desc_ptr->mem_addr =
+					dma_map_page(&pdev->dev,
+						     vsg->pages[VIA_PFN(cur_mem) -
+								VIA_PFN(first_addr)],
+						     VIA_PGOFF(cur_mem), remaining_len,
+						     vsg->direction);
+				desc_ptr->dev_addr = cur_fb;
+
+				desc_ptr->size = remaining_len;
+				desc_ptr->next = (uint32_t) next;
+				next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
+						      DMA_TO_DEVICE);
+				desc_ptr++;
+				if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
+					num_descriptors_this_page = 0;
+					desc_ptr = vsg->desc_pages[++cur_descriptor_page];
+				}
+			}
+
+			num_desc++;
+			cur_mem += remaining_len;
+			cur_fb += remaining_len;
+		}
+
+		mem_addr += xfer->mem_stride;
+		fb_addr += xfer->fb_stride;
+	}
+
+	if (mode == 1) {
+		vsg->chain_start = next;
+		vsg->state = dr_via_device_mapped;
+	}
+	vsg->num_desc = num_desc;
+}
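+
+/*
+ * Illustrative sketch only, not part of the original file: the two-pass
+ * call pattern that the mode argument above implies.  The variable names
+ * are assumptions for this example.
+ */
+#if 0
+	via_map_blit_for_device(pdev, xfer, vsg, 0);	/* pass 1: count descriptors */
+	if (via_alloc_desc_pages(vsg))			/* allocate descriptor pages */
+		return -ENOMEM;
+	via_map_blit_for_device(pdev, xfer, vsg, 1);	/* pass 2: map pages and descriptors */
+#endif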
+
+/*
+ * Function that frees up all resources for a blit. It is usable even if the
+ * blit info has only been partially built as long as the status enum is consistent
+ * with the actual status of the used resources.
+ */
+
+
+static void
+via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
+{
+	struct page *page;
+	int i;
+
+	switch(vsg->state) {
+	case dr_via_device_mapped:
+		via_unmap_blit_from_device(pdev, vsg);
+	case dr_via_desc_pages_alloc:
+		for (i=0; i<vsg->num_desc_pages; ++i) {
+			if (vsg->desc_pages[i] != NULL)
+			  free_page((unsigned long)vsg->desc_pages[i]);
+		}
+		kfree(vsg->desc_pages);
+	case dr_via_pages_locked:
+		for (i=0; i<vsg->num_pages; ++i) {
+			if ( NULL != (page = vsg->pages[i])) {
+				if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
+					SetPageDirty(page);
+				page_cache_release(page);
+			}
+		}
+	case dr_via_pages_alloc:
+		vfree(vsg->pages);
+	default:
+		vsg->state = dr_via_sg_init;
+	}
+	if (vsg->bounce_buffer) {
+		vfree(vsg->bounce_buffer);
+		vsg->bounce_buffer = NULL;
+	}
+	vsg->free_on_sequence = 0;
+}
+
+/*
+ * Fire a blit engine.
+ */
+
+static void
+via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+
+	VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
+	VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
+	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
+		  VIA_DMA_CSR_DE);
+	VIA_WRITE(VIA_PCI_DMA_MR0  + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
+	VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
+	VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
+	DRM_WRITEMEMORYBARRIER();
+	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
+	VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04);
+}
+
+/*
+ * Allocate a page pointer array and lock all pages into system memory. The call
+ * will fail here if the calling user does not have access to the submitted address.
+ */
+
+static int
+via_lock_all_dma_pages(drm_via_sg_info_t *vsg,  drm_via_dmablit_t *xfer)
+{
+	int ret;
+	unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
+	vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
+		first_pfn + 1;
+
+	if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
+		return -ENOMEM;
+	memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
+	down_read(&current->mm->mmap_sem);
+	ret = get_user_pages(current, current->mm,
+			     (unsigned long)xfer->mem_addr,
+			     vsg->num_pages,
+			     (vsg->direction == DMA_FROM_DEVICE),
+			     0, vsg->pages, NULL);
+
+	up_read(&current->mm->mmap_sem);
+	if (ret != vsg->num_pages) {
+		if (ret < 0)
+			return ret;
+		vsg->state = dr_via_pages_locked;
+		return -EINVAL;
+	}
+	vsg->state = dr_via_pages_locked;
+	DRM_DEBUG("DMA pages locked\n");
+	return 0;
+}
+
+/*
+ * Allocate DMA-capable memory for the blit descriptor chain, and an array that keeps track of the
+ * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
+ * quite large for some blits, and the pages don't need to be contiguous.
+ */
+
+static int
+via_alloc_desc_pages(drm_via_sg_info_t *vsg)
+{
+	int i;
+
+	vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
+	vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
+		vsg->descriptors_per_page;
+
+	if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL)))
+		return -ENOMEM;
+
+	vsg->state = dr_via_desc_pages_alloc;
+	for (i = 0; i < vsg->num_desc_pages; ++i) {
+		if (NULL == (vsg->desc_pages[i] =
+			     (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
+			return -ENOMEM;
+	}
+	DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
+		  vsg->num_desc);
+	return 0;
+}
+
+static void
+via_abort_dmablit(struct drm_device *dev, int engine)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+
+	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
+}
+
+static void
+via_dmablit_engine_off(struct drm_device *dev, int engine)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+
+	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
+}
+
+
+
+/*
+ * The dmablit part of the IRQ handler. Only reasonably fast work is done here.
+ * The rest, such as unmapping and freeing memory for completed blits, is done in
+ * a separate workqueue task. Basically, the interrupt handler submits a new blit
+ * to the engine, while the workqueue task takes care of the processing associated
+ * with the previous blit.
+ */
+
+void
+via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
+	int cur;
+	int done_transfer;
+	unsigned long irqsave=0;
+	uint32_t status = 0;
+
+	DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
+		  engine, from_irq, (unsigned long) blitq);
+
+	if (from_irq) {
+		spin_lock(&blitq->blit_lock);
+	} else {
+		spin_lock_irqsave(&blitq->blit_lock, irqsave);
+	}
+
+	done_transfer = blitq->is_active &&
+		((status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
+	done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));
+
+	cur = blitq->cur;
+	if (done_transfer) {
+
+		blitq->blits[cur]->aborted = blitq->aborting;
+		blitq->done_blit_handle++;
+		DRM_WAKEUP(blitq->blit_queue + cur);
+
+		cur++;
+		if (cur >= VIA_NUM_BLIT_SLOTS)
+			cur = 0;
+		blitq->cur = cur;
+
+		/*
+		 * Clear transfer done flag.
+		 */
+
+		VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04,  VIA_DMA_CSR_TD);
+
+		blitq->is_active = 0;
+		blitq->aborting = 0;
+		schedule_work(&blitq->wq);
+
+	} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {
+
+		/*
+		 * Abort transfer after one second.
+		 */
+
+		via_abort_dmablit(dev, engine);
+		blitq->aborting = 1;
+		blitq->end = jiffies + DRM_HZ;
+	}
+
+	if (!blitq->is_active) {
+		if (blitq->num_outstanding) {
+			via_fire_dmablit(dev, blitq->blits[cur], engine);
+			blitq->is_active = 1;
+			blitq->cur = cur;
+			blitq->num_outstanding--;
+			blitq->end = jiffies + DRM_HZ;
+			if (!timer_pending(&blitq->poll_timer))
+				mod_timer(&blitq->poll_timer, jiffies + 1);
+		} else {
+			if (timer_pending(&blitq->poll_timer)) {
+				del_timer(&blitq->poll_timer);
+			}
+			via_dmablit_engine_off(dev, engine);
+		}
+	}
+
+	if (from_irq) {
+		spin_unlock(&blitq->blit_lock);
+	} else {
+		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+	}
+}
+
+
+
+/*
+ * Check whether this blit is still active, performing necessary locking.
+ */
+
+static int
+via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
+{
+	unsigned long irqsave;
+	uint32_t slot;
+	int active;
+
+	spin_lock_irqsave(&blitq->blit_lock, irqsave);
+
+	/*
+	 * Allow for handle wraparounds. Handles are unsigned 32-bit counters, so
+	 * the subtractions below test, modulo 2^32, whether the handle lies in the
+	 * window of blits that have been queued but not yet completed.
+	 */
+
+	active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
+		((blitq->cur_blit_handle - handle) <= (1 << 23));
+
+	if (queue && active) {
+		slot = handle - blitq->done_blit_handle + blitq->cur -1;
+		if (slot >= VIA_NUM_BLIT_SLOTS) {
+			slot -= VIA_NUM_BLIT_SLOTS;
+		}
+		*queue = blitq->blit_queue + slot;
+	}
+
+	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+
+	return active;
+}
+
+/*
+ * Sync. Wait for at least three seconds for the blit to be performed.
+ */
+
+static int
+via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
+{
+
+	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
+	wait_queue_head_t *queue;
+	int ret = 0;
+
+	if (via_dmablit_active(blitq, engine, handle, &queue)) {
+		DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
+			    !via_dmablit_active(blitq, engine, handle, NULL));
+	}
+	DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
+		  handle, engine, ret);
+
+	return ret;
+}
+
+
+/*
+ * A timer that regularly polls the blit engine in cases where we don't have interrupts:
+ * a) Broken hardware (typically hardware that doesn't have any video capture facility).
+ * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
+ * The timer and hardware IRQs can and do work in parallel. If the hardware has
+ * IRQs, they will shorten the latency somewhat.
+ */
+
+
+
+static void
+via_dmablit_timer(unsigned long data)
+{
+	drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
+	struct drm_device *dev = blitq->dev;
+	int engine = (int)
+		(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
+
+	DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
+		  (unsigned long) jiffies);
+
+	via_dmablit_handler(dev, engine, 0);
+
+	if (!timer_pending(&blitq->poll_timer)) {
+		mod_timer(&blitq->poll_timer, jiffies + 1);
+
+	       /*
+		* Rerun handler to delete timer if engines are off, and
+		* to shorten abort latency. This is a little nasty.
+		*/
+
+	       via_dmablit_handler(dev, engine, 0);
+
+	}
+}
+
+
+
+
+/*
+ * Workqueue task that frees the data and mappings associated with a blit.
+ * It also wakes up waiting processes. Each of these tasks handles one
+ * blit engine only and is not necessarily run for each interrupt.
+ */
+
+
+static void
+via_dmablit_workqueue(struct work_struct *work)
+{
+	drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
+	struct drm_device *dev = blitq->dev;
+	unsigned long irqsave;
+	drm_via_sg_info_t *cur_sg;
+	int cur_released;
+
+
+	DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
+		  (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));
+
+	spin_lock_irqsave(&blitq->blit_lock, irqsave);
+
+	while(blitq->serviced != blitq->cur) {
+
+		cur_released = blitq->serviced++;
+
+		DRM_DEBUG("Releasing blit slot %d\n", cur_released);
+
+		if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
+			blitq->serviced = 0;
+
+		cur_sg = blitq->blits[cur_released];
+		blitq->num_free++;
+
+		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+
+		DRM_WAKEUP(&blitq->busy_queue);
+
+		via_free_sg_info(dev->pdev, cur_sg);
+		kfree(cur_sg);
+
+		spin_lock_irqsave(&blitq->blit_lock, irqsave);
+	}
+
+	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+}
+
+
+/*
+ * Init all blit engines. Currently we use two, but some hardware has four.
+ */
+
+
+void
+via_init_dmablit(struct drm_device *dev)
+{
+	int i,j;
+	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+	drm_via_blitq_t *blitq;
+
+	pci_set_master(dev->pdev);
+
+	for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
+		blitq = dev_priv->blit_queues + i;
+		blitq->dev = dev;
+		blitq->cur_blit_handle = 0;
+		blitq->done_blit_handle = 0;
+		blitq->head = 0;
+		blitq->cur = 0;
+		blitq->serviced = 0;
+		blitq->num_free = VIA_NUM_BLIT_SLOTS - 1;
+		blitq->num_outstanding = 0;
+		blitq->is_active = 0;
+		blitq->aborting = 0;
+		spin_lock_init(&blitq->blit_lock);
+		for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j) {
+			DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
+		}
+		DRM_INIT_WAITQUEUE(&blitq->busy_queue);
+		INIT_WORK(&blitq->wq, via_dmablit_workqueue);
+		setup_timer(&blitq->poll_timer, via_dmablit_timer,
+				(unsigned long)blitq);
+	}
+}
+
+/*
+ * Build all info and do all mappings required for a blit.
+ */
+
+
+static int
+via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
+{
+	int draw = xfer->to_fb;
+	int ret = 0;
+
+	vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+	vsg->bounce_buffer = NULL;
+
+	vsg->state = dr_via_sg_init;
+
+	if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
+		DRM_ERROR("Zero size bitblt.\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * The check below is a driver limitation, not a hardware one. We
+	 * don't want to lock unused pages, and we don't want to incorporate the
+	 * extra logic needed to avoid them, so make sure there are none.
+	 * (Not a big limitation anyway.)
+	 */
+
+	if ((xfer->mem_stride - xfer->line_length) > 2*PAGE_SIZE) {
+		DRM_ERROR("Too large system memory stride. Stride: %d, "
+			  "Length: %d\n", xfer->mem_stride, xfer->line_length);
+		return -EINVAL;
+	}
+
+	if ((xfer->mem_stride == xfer->line_length) &&
+	   (xfer->fb_stride == xfer->line_length)) {
+		xfer->mem_stride *= xfer->num_lines;
+		xfer->line_length = xfer->mem_stride;
+		xfer->fb_stride = xfer->mem_stride;
+		xfer->num_lines = 1;
+	}
+
+	/*
+	 * Don't lock an arbitrarily large number of pages, since that causes a
+	 * DoS security hole.
+	 */
+
+	if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
+		DRM_ERROR("Too large PCI DMA bitblt.\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * We allow a negative fb stride to allow flipping of images during
+	 * the transfer.
+	 */
+
+	if (xfer->mem_stride < xfer->line_length ||
+		abs(xfer->fb_stride) < xfer->line_length) {
+		DRM_ERROR("Invalid frame-buffer / memory stride.\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * A hardware bug seems to be worked around if system memory addresses start on
+	 * 16-byte boundaries. This seems a bit restrictive, however. VIA has been
+	 * contacted about this. Meanwhile, impose the following restrictions:
+	 */
+
+#ifdef VIA_BUGFREE
+	if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
+	    ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
+		DRM_ERROR("Invalid DRM bitblt alignment.\n");
+		return -EINVAL;
+	}
+#else
+	if ((((unsigned long)xfer->mem_addr & 15) ||
+	      ((unsigned long)xfer->fb_addr & 3)) ||
+	   ((xfer->num_lines > 1) &&
+	   ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
+		DRM_ERROR("Invalid DRM bitblt alignment.\n");
+		return -EINVAL;
+	}
+#endif
+
+	if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
+		DRM_ERROR("Could not lock DMA pages.\n");
+		via_free_sg_info(dev->pdev, vsg);
+		return ret;
+	}
+
+	via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
+	if (0 != (ret = via_alloc_desc_pages(vsg))) {
+		DRM_ERROR("Could not allocate DMA descriptor pages.\n");
+		via_free_sg_info(dev->pdev, vsg);
+		return ret;
+	}
+	via_map_blit_for_device(dev->pdev, xfer, vsg, 1);
+
+	return 0;
+}
+
+
+/*
+ * Reserve one free slot in the blit queue. Wait for up to one second for one
+ * to become available; otherwise -EBUSY is returned.
+ */
+
+static int
+via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
+{
+	int ret=0;
+	unsigned long irqsave;
+
+	DRM_DEBUG("Num free is %d\n", blitq->num_free);
+	spin_lock_irqsave(&blitq->blit_lock, irqsave);
+	while(blitq->num_free == 0) {
+		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+
+		DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
+		if (ret) {
+			return (-EINTR == ret) ? -EAGAIN : ret;
+		}
+
+		spin_lock_irqsave(&blitq->blit_lock, irqsave);
+	}
+
+	blitq->num_free--;
+	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+
+	return 0;
+}
+
+/*
+ * Hand back a free slot if we changed our mind.
+ */
+
+static void
+via_dmablit_release_slot(drm_via_blitq_t *blitq)
+{
+	unsigned long irqsave;
+
+	spin_lock_irqsave(&blitq->blit_lock, irqsave);
+	blitq->num_free++;
+	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+	DRM_WAKEUP( &blitq->busy_queue );
+}
+
+/*
+ * Grab a free slot. Build blit info and queue a blit.
+ */
+
+
+static int
+via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+	drm_via_sg_info_t *vsg;
+	drm_via_blitq_t *blitq;
+	int ret;
+	int engine;
+	unsigned long irqsave;
+
+	if (dev_priv == NULL) {
+		DRM_ERROR("Called without initialization.\n");
+		return -EINVAL;
+	}
+
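+	/*
+	 * Engine 0 is used for blits to the frame buffer, engine 1 for blits
+	 * to system memory.
+	 */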
+	engine = (xfer->to_fb) ? 0 : 1;
+	blitq = dev_priv->blit_queues + engine;
+	if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) {
+		return ret;
+	}
+	if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
+		via_dmablit_release_slot(blitq);
+		return -ENOMEM;
+	}
+	if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
+		via_dmablit_release_slot(blitq);
+		kfree(vsg);
+		return ret;
+	}
+	spin_lock_irqsave(&blitq->blit_lock, irqsave);
+
+	blitq->blits[blitq->head++] = vsg;
+	if (blitq->head >= VIA_NUM_BLIT_SLOTS)
+		blitq->head = 0;
+	blitq->num_outstanding++;
+	xfer->sync.sync_handle = ++blitq->cur_blit_handle;
+
+	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+	xfer->sync.engine = engine;
+
+	via_dmablit_handler(dev, engine, 0);
+
+	return 0;
+}
+
+/*
+ * Sync on a previously submitted blit. Note that the X server uses signals extensively, and
+ * that there is a very high probability that this IOCTL will be interrupted by a signal. In that
+ * case it returns with -EAGAIN so that the signal can be delivered.
+ * The caller should then reissue the IOCTL. This is similar to what is done for drmGetLock().
+ */
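+
+/*
+ * Illustrative user-space sketch (not part of this patch) of how a client
+ * would typically drive the two blit IOCTLs, assuming the DRM_VIA_DMA_BLIT /
+ * DRM_VIA_BLIT_SYNC command numbers and the drm_via_dmablit_t layout from
+ * via_drm.h, and libdrm's generic drmCommandWriteRead()/drmCommandWrite()
+ * wrappers (whose exact retry behaviour may differ between libdrm versions):
+ *
+ *	drm_via_dmablit_t xfer = { ... blit parameters ... };
+ *	int ret;
+ *
+ *	do {
+ *		ret = drmCommandWriteRead(fd, DRM_VIA_DMA_BLIT, &xfer, sizeof(xfer));
+ *	} while (ret == -EAGAIN);
+ *	if (ret == 0) {
+ *		drm_via_blitsync_t sync = xfer.sync;
+ *
+ *		do {
+ *			ret = drmCommandWrite(fd, DRM_VIA_BLIT_SYNC, &sync, sizeof(sync));
+ *		} while (ret == -EAGAIN);
+ *	}
+ */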
+
+int
+via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_via_blitsync_t *sync = data;
+	int err;
+
+	if (sync->engine >= VIA_NUM_BLIT_ENGINES)
+		return -EINVAL;
+
+	err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);
+
+	if (-EINTR == err)
+		err = -EAGAIN;
+
+	return err;
+}
+
+
+/*
+ * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
+ * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
+ * be reissued. See the above IOCTL code.
+ */
+
+int
+via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_via_dmablit_t *xfer = data;
+	int err;
+
+	err = via_dmablit(dev, xfer);
+
+	return err;
+}
diff --git a/drivers/gpu/drm/via/via_dmablit.h b/drivers/gpu/drm/via/via_dmablit.h
new file mode 100644
index 000000000000..7408a547a036
--- /dev/null
+++ b/drivers/gpu/drm/via/via_dmablit.h
@@ -0,0 +1,140 @@
+/* via_dmablit.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro
+ *
+ * Copyright 2005 Thomas Hellstrom.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Thomas Hellstrom.
+ *    Register info from Digeo Inc.
+ */
+
+#ifndef _VIA_DMABLIT_H
+#define _VIA_DMABLIT_H
+
+#include <linux/dma-mapping.h>
+
+#define VIA_NUM_BLIT_ENGINES 2
+#define VIA_NUM_BLIT_SLOTS 8
+
+struct _drm_via_descriptor;
+
+typedef struct _drm_via_sg_info {
+	struct page **pages;
+	unsigned long num_pages;
+	struct _drm_via_descriptor **desc_pages;
+	int num_desc_pages;
+	int num_desc;
+	enum dma_data_direction direction;
+	unsigned char *bounce_buffer;
+	dma_addr_t chain_start;
+	uint32_t free_on_sequence;
+	unsigned int descriptors_per_page;
+	int aborted;
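+	/*
+	 * Teardown progress. The states below are listed in teardown order;
+	 * via_free_sg_info() switches on the current state and falls through
+	 * to release everything acquired so far.
+	 */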
+	enum {
+		dr_via_device_mapped,
+		dr_via_desc_pages_alloc,
+		dr_via_pages_locked,
+		dr_via_pages_alloc,
+		dr_via_sg_init
+	} state;
+} drm_via_sg_info_t;
+
+typedef struct _drm_via_blitq {
+	struct drm_device *dev;
+	uint32_t cur_blit_handle;
+	uint32_t done_blit_handle;
+	unsigned serviced;
+	unsigned head;
+	unsigned cur;
+	unsigned num_free;
+	unsigned num_outstanding;
+	unsigned long end;
+	int aborting;
+	int is_active;
+	drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
+	spinlock_t blit_lock;
+	wait_queue_head_t blit_queue[VIA_NUM_BLIT_SLOTS];
+	wait_queue_head_t busy_queue;
+	struct work_struct wq;
+	struct timer_list poll_timer;
+} drm_via_blitq_t;
+
+
+/*
+ *  PCI DMA Registers
+ *  Channels 2 & 3 don't seem to be implemented in hardware.
+ */
+
+#define VIA_PCI_DMA_MAR0            0xE40   /* Memory Address Register of Channel 0 */
+#define VIA_PCI_DMA_DAR0            0xE44   /* Device Address Register of Channel 0 */
+#define VIA_PCI_DMA_BCR0            0xE48   /* Byte Count Register of Channel 0 */
+#define VIA_PCI_DMA_DPR0            0xE4C   /* Descriptor Pointer Register of Channel 0 */
+
+#define VIA_PCI_DMA_MAR1            0xE50   /* Memory Address Register of Channel 1 */
+#define VIA_PCI_DMA_DAR1            0xE54   /* Device Address Register of Channel 1 */
+#define VIA_PCI_DMA_BCR1            0xE58   /* Byte Count Register of Channel 1 */
+#define VIA_PCI_DMA_DPR1            0xE5C   /* Descriptor Pointer Register of Channel 1 */
+
+#define VIA_PCI_DMA_MAR2            0xE60   /* Memory Address Register of Channel 2 */
+#define VIA_PCI_DMA_DAR2            0xE64   /* Device Address Register of Channel 2 */
+#define VIA_PCI_DMA_BCR2            0xE68   /* Byte Count Register of Channel 2 */
+#define VIA_PCI_DMA_DPR2            0xE6C   /* Descriptor Pointer Register of Channel 2 */
+
+#define VIA_PCI_DMA_MAR3            0xE70   /* Memory Address Register of Channel 3 */
+#define VIA_PCI_DMA_DAR3            0xE74   /* Device Address Register of Channel 3 */
+#define VIA_PCI_DMA_BCR3            0xE78   /* Byte Count Register of Channel 3 */
+#define VIA_PCI_DMA_DPR3            0xE7C   /* Descriptor Pointer Register of Channel 3 */
+
+#define VIA_PCI_DMA_MR0             0xE80   /* Mode Register of Channel 0 */
+#define VIA_PCI_DMA_MR1             0xE84   /* Mode Register of Channel 1 */
+#define VIA_PCI_DMA_MR2             0xE88   /* Mode Register of Channel 2 */
+#define VIA_PCI_DMA_MR3             0xE8C   /* Mode Register of Channel 3 */
+
+#define VIA_PCI_DMA_CSR0            0xE90   /* Command/Status Register of Channel 0 */
+#define VIA_PCI_DMA_CSR1            0xE94   /* Command/Status Register of Channel 1 */
+#define VIA_PCI_DMA_CSR2            0xE98   /* Command/Status Register of Channel 2 */
+#define VIA_PCI_DMA_CSR3            0xE9C   /* Command/Status Register of Channel 3 */
+
+#define VIA_PCI_DMA_PTR             0xEA0   /* Priority Type Register */
+
+/* Define for DMA engine */
+/* DPR */
+#define VIA_DMA_DPR_EC		(1<<1)	/* end of chain */
+#define VIA_DMA_DPR_DDIE	(1<<2)	/* descriptor done interrupt enable */
+#define VIA_DMA_DPR_DT		(1<<3)	/* direction of transfer (RO) */
+
+/* MR */
+#define VIA_DMA_MR_CM		(1<<0)	/* chaining mode */
+#define VIA_DMA_MR_TDIE		(1<<1)	/* transfer done interrupt enable */
+#define VIA_DMA_MR_HENDMACMD		(1<<7) /* ? */
+
+/* CSR */
+#define VIA_DMA_CSR_DE		(1<<0)	/* DMA enable */
+#define VIA_DMA_CSR_TS		(1<<1)	/* transfer start */
+#define VIA_DMA_CSR_TA		(1<<2)	/* transfer abort */
+#define VIA_DMA_CSR_TD		(1<<3)	/* transfer done */
+#define VIA_DMA_CSR_DD		(1<<4)	/* descriptor done */
+
+
+
+#endif
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
new file mode 100644
index 000000000000..80c01cdfa37d
--- /dev/null
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "via_drm.h"
+#include "via_drv.h"
+
+#include "drm_pciids.h"
+
+static int dri_library_name(struct drm_device *dev, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "unichrome");
+}
+
+static struct pci_device_id pciidlist[] = {
+	viadrv_PCI_IDS
+};
+
+static struct drm_driver driver = {
+	.driver_features =
+	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
+	    DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
+	.load = via_driver_load,
+	.unload = via_driver_unload,
+	.context_dtor = via_final_context,
+	.vblank_wait = via_driver_vblank_wait,
+	.irq_preinstall = via_driver_irq_preinstall,
+	.irq_postinstall = via_driver_irq_postinstall,
+	.irq_uninstall = via_driver_irq_uninstall,
+	.irq_handler = via_driver_irq_handler,
+	.dma_quiescent = via_driver_dma_quiescent,
+	.dri_library_name = dri_library_name,
+	.reclaim_buffers = drm_core_reclaim_buffers,
+	.reclaim_buffers_locked = NULL,
+	.reclaim_buffers_idlelocked = via_reclaim_buffers_locked,
+	.lastclose = via_lastclose,
+	.get_map_ofs = drm_core_get_map_ofs,
+	.get_reg_ofs = drm_core_get_reg_ofs,
+	.ioctls = via_ioctls,
+	.fops = {
+		 .owner = THIS_MODULE,
+		 .open = drm_open,
+		 .release = drm_release,
+		 .ioctl = drm_ioctl,
+		 .mmap = drm_mmap,
+		 .poll = drm_poll,
+		 .fasync = drm_fasync,
+	},
+	.pci_driver = {
+		 .name = DRIVER_NAME,
+		 .id_table = pciidlist,
+	},
+
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static int __init via_init(void)
+{
+	driver.num_ioctls = via_max_ioctl;
+	via_init_command_verifier();
+	return drm_init(&driver);
+}
+
+static void __exit via_exit(void)
+{
+	drm_exit(&driver);
+}
+
+module_init(via_init);
+module_exit(via_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
new file mode 100644
index 000000000000..2daae81874cd
--- /dev/null
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _VIA_DRV_H_
+#define _VIA_DRV_H_
+
+#include "drm_sman.h"
+#define DRIVER_AUTHOR	"Various"
+
+#define DRIVER_NAME		"via"
+#define DRIVER_DESC		"VIA Unichrome / Pro"
+#define DRIVER_DATE		"20070202"
+
+#define DRIVER_MAJOR		2
+#define DRIVER_MINOR		11
+#define DRIVER_PATCHLEVEL	1
+
+#include "via_verifier.h"
+
+#include "via_dmablit.h"
+
+#define VIA_PCI_BUF_SIZE 60000
+#define VIA_FIRE_BUF_SIZE  1024
+#define VIA_NUM_IRQS 4
+
+typedef struct drm_via_ring_buffer {
+	drm_local_map_t map;
+	char *virtual_start;
+} drm_via_ring_buffer_t;
+
+typedef uint32_t maskarray_t[5];
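+/*
+ * Layout of a maskarray_t entry (see via_irq.c): [0] enable bit in the
+ * interrupt control register, [1] pending/status bit, [2] optional extra
+ * status register to poll (0 if none), [3] mask applied to that register and
+ * [4] the value that signals completion.
+ */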
+
+typedef struct drm_via_irq {
+	atomic_t irq_received;
+	uint32_t pending_mask;
+	uint32_t enable_mask;
+	wait_queue_head_t irq_queue;
+} drm_via_irq_t;
+
+typedef struct drm_via_private {
+	drm_via_sarea_t *sarea_priv;
+	drm_local_map_t *sarea;
+	drm_local_map_t *fb;
+	drm_local_map_t *mmio;
+	unsigned long agpAddr;
+	wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS];
+	char *dma_ptr;
+	unsigned int dma_low;
+	unsigned int dma_high;
+	unsigned int dma_offset;
+	uint32_t dma_wrap;
+	volatile uint32_t *last_pause_ptr;
+	volatile uint32_t *hw_addr_ptr;
+	drm_via_ring_buffer_t ring;
+	struct timeval last_vblank;
+	int last_vblank_valid;
+	unsigned usec_per_vblank;
+	drm_via_state_t hc_state;
+	char pci_buf[VIA_PCI_BUF_SIZE];
+	const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
+	uint32_t num_fire_offsets;
+	int chipset;
+	drm_via_irq_t via_irqs[VIA_NUM_IRQS];
+	unsigned num_irqs;
+	maskarray_t *irq_masks;
+	uint32_t irq_enable_mask;
+	uint32_t irq_pending_mask;
+	int *irq_map;
+	unsigned int idle_fault;
+	struct drm_sman sman;
+	int vram_initialized;
+	int agp_initialized;
+	unsigned long vram_offset;
+	unsigned long agp_offset;
+	drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
+	uint32_t dma_diff;
+} drm_via_private_t;
+
+enum via_family {
+  VIA_OTHER = 0,     /* Baseline */
+  VIA_PRO_GROUP_A,   /* Another video engine and DMA commands */
+  VIA_DX9_0          /* Same video as pro_group_a, but 3D is unsupported */
+};
+
+/* VIA MMIO register access */
+#define VIA_BASE ((dev_priv->mmio))
+
+#define VIA_READ(reg)		DRM_READ32(VIA_BASE, reg)
+#define VIA_WRITE(reg,val)	DRM_WRITE32(VIA_BASE, reg, val)
+#define VIA_READ8(reg)		DRM_READ8(VIA_BASE, reg)
+#define VIA_WRITE8(reg,val)	DRM_WRITE8(VIA_BASE, reg, val)
+
+extern struct drm_ioctl_desc via_ioctls[];
+extern int via_max_ioctl;
+
+extern int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int via_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv);
+
+extern int via_driver_load(struct drm_device *dev, unsigned long chipset);
+extern int via_driver_unload(struct drm_device *dev);
+
+extern int via_init_context(struct drm_device * dev, int context);
+extern int via_final_context(struct drm_device * dev, int context);
+
+extern int via_do_cleanup_map(struct drm_device * dev);
+extern int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
+
+extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
+extern void via_driver_irq_preinstall(struct drm_device * dev);
+extern void via_driver_irq_postinstall(struct drm_device * dev);
+extern void via_driver_irq_uninstall(struct drm_device * dev);
+
+extern int via_dma_cleanup(struct drm_device * dev);
+extern void via_init_command_verifier(void);
+extern int via_driver_dma_quiescent(struct drm_device * dev);
+extern void via_init_futex(drm_via_private_t * dev_priv);
+extern void via_cleanup_futex(drm_via_private_t * dev_priv);
+extern void via_release_futex(drm_via_private_t * dev_priv, int context);
+
+extern void via_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv);
+extern void via_lastclose(struct drm_device *dev);
+
+extern void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq);
+extern void via_init_dmablit(struct drm_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
new file mode 100644
index 000000000000..c6bb978a1106
--- /dev/null
+++ b/drivers/gpu/drm/via/via_irq.c
@@ -0,0 +1,377 @@
+/* via_irq.c
+ *
+ * Copyright 2004 BEAM Ltd.
+ * Copyright 2002 Tungsten Graphics, Inc.
+ * Copyright 2005 Thomas Hellstrom.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * BEAM LTD, TUNGSTEN GRAPHICS  AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Terry Barnaby <terry1@beam.ltd.uk>
+ *    Keith Whitwell <keith@tungstengraphics.com>
+ *    Thomas Hellstrom <unichrome@shipmail.org>
+ *
+ * This code provides standard DRM access to the Via Unichrome / Pro Vertical blank
+ * interrupt, as well as an infrastructure to handle other interrupts of the chip.
+ * The refresh rate is also calculated for video playback sync purposes.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "via_drm.h"
+#include "via_drv.h"
+
+#define VIA_REG_INTERRUPT       0x200
+
+/* VIA_REG_INTERRUPT */
+#define VIA_IRQ_GLOBAL          (1 << 31)
+#define VIA_IRQ_VBLANK_ENABLE   (1 << 19)
+#define VIA_IRQ_VBLANK_PENDING  (1 << 3)
+#define VIA_IRQ_HQV0_ENABLE     (1 << 11)
+#define VIA_IRQ_HQV1_ENABLE     (1 << 25)
+#define VIA_IRQ_HQV0_PENDING    (1 << 9)
+#define VIA_IRQ_HQV1_PENDING    (1 << 10)
+#define VIA_IRQ_DMA0_DD_ENABLE  (1 << 20)
+#define VIA_IRQ_DMA0_TD_ENABLE  (1 << 21)
+#define VIA_IRQ_DMA1_DD_ENABLE  (1 << 22)
+#define VIA_IRQ_DMA1_TD_ENABLE  (1 << 23)
+#define VIA_IRQ_DMA0_DD_PENDING (1 << 4)
+#define VIA_IRQ_DMA0_TD_PENDING (1 << 5)
+#define VIA_IRQ_DMA1_DD_PENDING (1 << 6)
+#define VIA_IRQ_DMA1_TD_PENDING (1 << 7)
+
+
+/*
+ * Device-specific IRQs go here. This type might need to be extended with
+ * the register if there are multiple IRQ control registers.
+ * Currently we activate the HQV interrupts of Unichrome Pro group A.
+ */
+
+static maskarray_t via_pro_group_a_irqs[] = {
+	{VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
+	 0x00000000},
+	{VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
+	 0x00000000},
+	{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
+	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
+	{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
+	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
+};
+static int via_num_pro_group_a =
+    sizeof(via_pro_group_a_irqs) / sizeof(maskarray_t);
+static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};
+
+static maskarray_t via_unichrome_irqs[] = {
+	{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
+	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
+	{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
+	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
+};
+static int via_num_unichrome = sizeof(via_unichrome_irqs) / sizeof(maskarray_t);
+static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
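+
+/*
+ * The irq maps above translate the fixed drm_via_irq_* numbers from via_drm.h
+ * into indices into the per-chipset mask array; -1 means the interrupt is not
+ * available on that hardware (see via_driver_irq_wait()).
+ */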
+
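+/*
+ * Microsecond difference between two timevals, modulo one second; the vblank
+ * samples it is used on are taken well under a second apart.
+ */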
+static unsigned time_diff(struct timeval *now, struct timeval *then)
+{
+	return (now->tv_usec >= then->tv_usec) ?
+	    now->tv_usec - then->tv_usec :
+	    1000000 - (then->tv_usec - now->tv_usec);
+}
+
+irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
+{
+	struct drm_device *dev = (struct drm_device *) arg;
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	u32 status;
+	int handled = 0;
+	struct timeval cur_vblank;
+	drm_via_irq_t *cur_irq = dev_priv->via_irqs;
+	int i;
+
+	status = VIA_READ(VIA_REG_INTERRUPT);
+	if (status & VIA_IRQ_VBLANK_PENDING) {
+		atomic_inc(&dev->vbl_received);
+		if (!(atomic_read(&dev->vbl_received) & 0x0F)) {
+			do_gettimeofday(&cur_vblank);
+			if (dev_priv->last_vblank_valid) {
+				dev_priv->usec_per_vblank =
+				    time_diff(&cur_vblank,
+					      &dev_priv->last_vblank) >> 4;
+			}
+			dev_priv->last_vblank = cur_vblank;
+			dev_priv->last_vblank_valid = 1;
+		}
+		if (!(atomic_read(&dev->vbl_received) & 0xFF)) {
+			DRM_DEBUG("US per vblank is: %u\n",
+				  dev_priv->usec_per_vblank);
+		}
+		DRM_WAKEUP(&dev->vbl_queue);
+		drm_vbl_send_signals(dev);
+		handled = 1;
+	}
+
+	for (i = 0; i < dev_priv->num_irqs; ++i) {
+		if (status & cur_irq->pending_mask) {
+			atomic_inc(&cur_irq->irq_received);
+			DRM_WAKEUP(&cur_irq->irq_queue);
+			handled = 1;
+			if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
+				via_dmablit_handler(dev, 0, 1);
+			} else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i) {
+				via_dmablit_handler(dev, 1, 1);
+			}
+		}
+		cur_irq++;
+	}
+
+	/* Acknowledge interrupts */
+	VIA_WRITE(VIA_REG_INTERRUPT, status);
+
+	if (handled)
+		return IRQ_HANDLED;
+	else
+		return IRQ_NONE;
+}
+
+static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t * dev_priv)
+{
+	u32 status;
+
+	if (dev_priv) {
+		/* Acknowledge interrupts */
+		status = VIA_READ(VIA_REG_INTERRUPT);
+		VIA_WRITE(VIA_REG_INTERRUPT, status |
+			  dev_priv->irq_pending_mask);
+	}
+}
+
+int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	unsigned int cur_vblank;
+	int ret = 0;
+
+	DRM_DEBUG("\n");
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	viadrv_acknowledge_irqs(dev_priv);
+
+	/* Assume that the user has missed the current sequence number
+	 * by about a day rather than wanting to wait for years
+	 * using vertical blanks...
+	 */
+
+	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
+		    (((cur_vblank = atomic_read(&dev->vbl_received)) -
+		      *sequence) <= (1 << 23)));
+
+	*sequence = cur_vblank;
+	return ret;
+}
+
+static int
+via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequence,
+		    unsigned int *sequence)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	unsigned int cur_irq_sequence;
+	drm_via_irq_t *cur_irq;
+	int ret = 0;
+	maskarray_t *masks;
+	int real_irq;
+
+	DRM_DEBUG("\n");
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	if (irq >= drm_via_irq_num) {
+		DRM_ERROR("Trying to wait on unknown irq %d\n", irq);
+		return -EINVAL;
+	}
+
+	real_irq = dev_priv->irq_map[irq];
+
+	if (real_irq < 0) {
+		DRM_ERROR("Video IRQ %d not available on this hardware.\n",
+			  irq);
+		return -EINVAL;
+	}
+
+	masks = dev_priv->irq_masks;
+	cur_irq = dev_priv->via_irqs + real_irq;
+
+	if (masks[real_irq][2] && !force_sequence) {
+		DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
+			    ((VIA_READ(masks[real_irq][2]) & masks[real_irq][3]) ==
+			     masks[real_irq][4]));
+		cur_irq_sequence = atomic_read(&cur_irq->irq_received);
+	} else {
+		DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
+			    (((cur_irq_sequence =
+			       atomic_read(&cur_irq->irq_received)) -
+			      *sequence) <= (1 << 23)));
+	}
+	*sequence = cur_irq_sequence;
+	return ret;
+}
+
+/*
+ * drm_dma.h hooks
+ */
+
+void via_driver_irq_preinstall(struct drm_device * dev)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	u32 status;
+	drm_via_irq_t *cur_irq;
+	int i;
+
+	DRM_DEBUG("dev_priv: %p\n", dev_priv);
+	if (dev_priv) {
+		cur_irq = dev_priv->via_irqs;
+
+		dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE;
+		dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING;
+
+		if (dev_priv->chipset == VIA_PRO_GROUP_A ||
+		    dev_priv->chipset == VIA_DX9_0) {
+			dev_priv->irq_masks = via_pro_group_a_irqs;
+			dev_priv->num_irqs = via_num_pro_group_a;
+			dev_priv->irq_map = via_irqmap_pro_group_a;
+		} else {
+			dev_priv->irq_masks = via_unichrome_irqs;
+			dev_priv->num_irqs = via_num_unichrome;
+			dev_priv->irq_map = via_irqmap_unichrome;
+		}
+
+		for (i = 0; i < dev_priv->num_irqs; ++i) {
+			atomic_set(&cur_irq->irq_received, 0);
+			cur_irq->enable_mask = dev_priv->irq_masks[i][0];
+			cur_irq->pending_mask = dev_priv->irq_masks[i][1];
+			DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
+			dev_priv->irq_enable_mask |= cur_irq->enable_mask;
+			dev_priv->irq_pending_mask |= cur_irq->pending_mask;
+			cur_irq++;
+
+			DRM_DEBUG("Initializing IRQ %d\n", i);
+		}
+
+		dev_priv->last_vblank_valid = 0;
+
+		/* Clear VSync interrupt regs */
+		status = VIA_READ(VIA_REG_INTERRUPT);
+		VIA_WRITE(VIA_REG_INTERRUPT, status &
+			  ~(dev_priv->irq_enable_mask));
+
+		/* Clear bits if they're already high */
+		viadrv_acknowledge_irqs(dev_priv);
+	}
+}
+
+void via_driver_irq_postinstall(struct drm_device * dev)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	u32 status;
+
+	DRM_DEBUG("\n");
+	if (dev_priv) {
+		status = VIA_READ(VIA_REG_INTERRUPT);
+		VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
+			  | dev_priv->irq_enable_mask);
+
+		/* Some magic, oh for some data sheets! */
+
+		VIA_WRITE8(0x83d4, 0x11);
+		VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
+
+	}
+}
+
+void via_driver_irq_uninstall(struct drm_device * dev)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	u32 status;
+
+	DRM_DEBUG("\n");
+	if (dev_priv) {
+
+		/* Some more magic, oh for some data sheets! */
+
+		VIA_WRITE8(0x83d4, 0x11);
+		VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
+
+		status = VIA_READ(VIA_REG_INTERRUPT);
+		VIA_WRITE(VIA_REG_INTERRUPT, status &
+			  ~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask));
+	}
+}
+
+int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_via_irqwait_t *irqwait = data;
+	struct timeval now;
+	int ret = 0;
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	drm_via_irq_t *cur_irq = dev_priv->via_irqs;
+	int force_sequence;
+
+	if (!dev->irq)
+		return -EINVAL;
+
+	if (irqwait->request.irq >= dev_priv->num_irqs) {
+		DRM_ERROR("Trying to wait on unknown irq %d\n",
+			  irqwait->request.irq);
+		return -EINVAL;
+	}
+
+	cur_irq += irqwait->request.irq;
+
+	switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
+	case VIA_IRQ_RELATIVE:
+		irqwait->request.sequence += atomic_read(&cur_irq->irq_received);
+		irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
+		/* fall through */
+	case VIA_IRQ_ABSOLUTE:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (irqwait->request.type & VIA_IRQ_SIGNAL) {
+		DRM_ERROR("Signals on Via IRQs not implemented yet.\n");
+		return -EINVAL;
+	}
+
+	force_sequence = (irqwait->request.type & VIA_IRQ_FORCE_SEQUENCE);
+
+	ret = via_driver_irq_wait(dev, irqwait->request.irq, force_sequence,
+				  &irqwait->request.sequence);
+	do_gettimeofday(&now);
+	irqwait->reply.tval_sec = now.tv_sec;
+	irqwait->reply.tval_usec = now.tv_usec;
+
+	return ret;
+}
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
new file mode 100644
index 000000000000..a967556be014
--- /dev/null
+++ b/drivers/gpu/drm/via/via_map.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
+ * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "drmP.h"
+#include "via_drm.h"
+#include "via_drv.h"
+
+static int via_do_init_map(struct drm_device * dev, drm_via_init_t * init)
+{
+	drm_via_private_t *dev_priv = dev->dev_private;
+
+	DRM_DEBUG("\n");
+
+	dev_priv->sarea = drm_getsarea(dev);
+	if (!dev_priv->sarea) {
+		DRM_ERROR("could not find sarea!\n");
+		dev->dev_private = (void *)dev_priv;
+		via_do_cleanup_map(dev);
+		return -EINVAL;
+	}
+
+	dev_priv->fb = drm_core_findmap(dev, init->fb_offset);
+	if (!dev_priv->fb) {
+		DRM_ERROR("could not find framebuffer!\n");
+		dev->dev_private = (void *)dev_priv;
+		via_do_cleanup_map(dev);
+		return -EINVAL;
+	}
+	dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
+	if (!dev_priv->mmio) {
+		DRM_ERROR("could not find mmio region!\n");
+		dev->dev_private = (void *)dev_priv;
+		via_do_cleanup_map(dev);
+		return -EINVAL;
+	}
+
+	dev_priv->sarea_priv =
+	    (drm_via_sarea_t *) ((u8 *) dev_priv->sarea->handle +
+				 init->sarea_priv_offset);
+
+	dev_priv->agpAddr = init->agpAddr;
+
+	via_init_futex(dev_priv);
+
+	via_init_dmablit(dev);
+
+	dev->dev_private = (void *)dev_priv;
+	return 0;
+}
+
+int via_do_cleanup_map(struct drm_device * dev)
+{
+	via_dma_cleanup(dev);
+
+	return 0;
+}
+
+int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_via_init_t *init = data;
+
+	DRM_DEBUG("\n");
+
+	switch (init->func) {
+	case VIA_INIT_MAP:
+		return via_do_init_map(dev, init);
+	case VIA_CLEANUP_MAP:
+		return via_do_cleanup_map(dev);
+	}
+
+	return -EINVAL;
+}
+
+int via_driver_load(struct drm_device *dev, unsigned long chipset)
+{
+	drm_via_private_t *dev_priv;
+	int ret = 0;
+
+	dev_priv = drm_calloc(1, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
+	if (dev_priv == NULL)
+		return -ENOMEM;
+
+	dev->dev_private = (void *)dev_priv;
+
+	dev_priv->chipset = chipset;
+
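+	/*
+	 * Two managers, one per memory type (VIA_MEM_VIDEO and VIA_MEM_AGP);
+	 * 12 and 8 are the orders of the sman user and owner hash tables.
+	 */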
+	ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
+	if (ret) {
+		drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
+	}
+	return ret;
+}
+
+int via_driver_unload(struct drm_device *dev)
+{
+	drm_via_private_t *dev_priv = dev->dev_private;
+
+	drm_sman_takedown(&dev_priv->sman);
+
+	drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
new file mode 100644
index 000000000000..e64094916e4f
--- /dev/null
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright 2006 Tungsten Graphics Inc., Bismarck, ND., USA.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+#include "via_drm.h"
+#include "via_drv.h"
+#include "drm_sman.h"
+
+#define VIA_MM_ALIGN_SHIFT 4
+#define VIA_MM_ALIGN_MASK ( (1 << VIA_MM_ALIGN_SHIFT) - 1)
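+/*
+ * The sman pools are managed in 16-byte units: sizes and offsets handed to
+ * drm_sman are shifted right by VIA_MM_ALIGN_SHIFT, and allocation offsets
+ * are shifted back when returned to user space (see via_mem_alloc()).
+ */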
+
+int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_via_agp_t *agp = data;
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_AGP, 0,
+				 agp->size >> VIA_MM_ALIGN_SHIFT);
+
+	if (ret) {
+		DRM_ERROR("AGP memory manager initialisation error\n");
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	dev_priv->agp_initialized = 1;
+	dev_priv->agp_offset = agp->offset;
+	mutex_unlock(&dev->struct_mutex);
+
+	DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
+	return 0;
+}
+
+int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_via_fb_t *fb = data;
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_VIDEO, 0,
+				 fb->size >> VIA_MM_ALIGN_SHIFT);
+
+	if (ret) {
+		DRM_ERROR("VRAM memory manager initialisation error\n");
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	dev_priv->vram_initialized = 1;
+	dev_priv->vram_offset = fb->offset;
+
+	mutex_unlock(&dev->struct_mutex);
+	DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
+
+	return 0;
+
+}
+
+int via_final_context(struct drm_device *dev, int context)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+
+	via_release_futex(dev_priv, context);
+
+	/* Linux specific until context tracking code gets ported to BSD */
+	/* Last context, perform cleanup */
+	if (dev->ctx_count == 1 && dev->dev_private) {
+		DRM_DEBUG("Last Context\n");
+		if (dev->irq)
+			drm_irq_uninstall(dev);
+		via_cleanup_futex(dev_priv);
+		via_do_cleanup_map(dev);
+	}
+	return 1;
+}
+
+void via_lastclose(struct drm_device *dev)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+
+	if (!dev_priv)
+		return;
+
+	mutex_lock(&dev->struct_mutex);
+	drm_sman_cleanup(&dev_priv->sman);
+	dev_priv->vram_initialized = 0;
+	dev_priv->agp_initialized = 0;
+	mutex_unlock(&dev->struct_mutex);
+}
+
+int via_mem_alloc(struct drm_device *dev, void *data,
+		  struct drm_file *file_priv)
+{
+	drm_via_mem_t *mem = data;
+	int retval = 0;
+	struct drm_memblock_item *item;
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	unsigned long tmpSize;
+
+	if (mem->type > VIA_MEM_AGP) {
+		DRM_ERROR("Unknown memory type allocation\n");
+		return -EINVAL;
+	}
+	mutex_lock(&dev->struct_mutex);
+	if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized :
+		      dev_priv->agp_initialized)) {
+		DRM_ERROR
+		    ("Attempt to allocate from uninitialized memory manager.\n");
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
+	item = drm_sman_alloc(&dev_priv->sman, mem->type, tmpSize, 0,
+			      (unsigned long)file_priv);
+	mutex_unlock(&dev->struct_mutex);
+	if (item) {
+		mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
+			       dev_priv->vram_offset : dev_priv->agp_offset) +
+			(item->mm->offset(item->mm, item->mm_info) << VIA_MM_ALIGN_SHIFT);
+		mem->index = item->user_hash.key;
+	} else {
+		mem->offset = 0;
+		mem->size = 0;
+		mem->index = 0;
+		DRM_DEBUG("Video memory allocation failed\n");
+		retval = -ENOMEM;
+	}
+
+	return retval;
+}
+
+int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_via_private_t *dev_priv = dev->dev_private;
+	drm_via_mem_t *mem = data;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = drm_sman_free_key(&dev_priv->sman, mem->index);
+	mutex_unlock(&dev->struct_mutex);
+	DRM_DEBUG("free = 0x%lx\n", mem->index);
+
+	return ret;
+}
+
+
+void via_reclaim_buffers_locked(struct drm_device * dev,
+				struct drm_file *file_priv)
+{
+	drm_via_private_t *dev_priv = dev->dev_private;
+
+	mutex_lock(&dev->struct_mutex);
+	if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
+		mutex_unlock(&dev->struct_mutex);
+		return;
+	}
+
+	if (dev->driver->dma_quiescent) {
+		dev->driver->dma_quiescent(dev);
+	}
+
+	drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
+	mutex_unlock(&dev->struct_mutex);
+	return;
+}
diff --git a/drivers/gpu/drm/via/via_verifier.c b/drivers/gpu/drm/via/via_verifier.c
new file mode 100644
index 000000000000..46a579198747
--- /dev/null
+++ b/drivers/gpu/drm/via/via_verifier.c
@@ -0,0 +1,1116 @@
+/*
+ * Copyright 2004 The Unichrome Project. All Rights Reserved.
+ * Copyright 2005 Thomas Hellstrom. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Thomas Hellstrom 2004, 2005.
+ * This code was written using docs obtained under NDA from VIA Inc.
+ *
+ * Don't run this code directly on an AGP buffer. Due to cache problems it will
+ * be very slow.
+ */
+
+#include "via_3d_reg.h"
+#include "drmP.h"
+#include "drm.h"
+#include "via_drm.h"
+#include "via_verifier.h"
+#include "via_drv.h"
+
+typedef enum {
+	state_command,
+	state_header2,
+	state_header1,
+	state_vheader5,
+	state_vheader6,
+	state_error
+} verifier_state_t;
+
+typedef enum {
+	no_check = 0,
+	check_for_header2,
+	check_for_header1,
+	check_for_header2_err,
+	check_for_header1_err,
+	check_for_fire,
+	check_z_buffer_addr0,
+	check_z_buffer_addr1,
+	check_z_buffer_addr_mode,
+	check_destination_addr0,
+	check_destination_addr1,
+	check_destination_addr_mode,
+	check_for_dummy,
+	check_for_dd,
+	check_texture_addr0,
+	check_texture_addr1,
+	check_texture_addr2,
+	check_texture_addr3,
+	check_texture_addr4,
+	check_texture_addr5,
+	check_texture_addr6,
+	check_texture_addr7,
+	check_texture_addr8,
+	check_texture_addr_mode,
+	check_for_vertex_count,
+	check_number_texunits,
+	forbidden_command
+} hazard_t;
+
+/*
+ * Associates each hazard above with a possible multi-command
+ * sequence. For example, an address that is split over multiple
+ * commands needs to be checked at the first command
+ * that does not include any part of the address.
+ */
+
+static drm_via_sequence_t seqs[] = {
+	no_sequence,
+	no_sequence,
+	no_sequence,
+	no_sequence,
+	no_sequence,
+	no_sequence,
+	z_address,
+	z_address,
+	z_address,
+	dest_address,
+	dest_address,
+	dest_address,
+	no_sequence,
+	no_sequence,
+	tex_address,
+	tex_address,
+	tex_address,
+	tex_address,
+	tex_address,
+	tex_address,
+	tex_address,
+	tex_address,
+	tex_address,
+	tex_address,
+	no_sequence
+};
+
+typedef struct {
+	unsigned int code;
+	hazard_t hz;
+} hz_init_t;
+
+static hz_init_t init_table1[] = {
+	{0xf2, check_for_header2_err},
+	{0xf0, check_for_header1_err},
+	{0xee, check_for_fire},
+	{0xcc, check_for_dummy},
+	{0xdd, check_for_dd},
+	{0x00, no_check},
+	{0x10, check_z_buffer_addr0},
+	{0x11, check_z_buffer_addr1},
+	{0x12, check_z_buffer_addr_mode},
+	{0x13, no_check},
+	{0x14, no_check},
+	{0x15, no_check},
+	{0x23, no_check},
+	{0x24, no_check},
+	{0x33, no_check},
+	{0x34, no_check},
+	{0x35, no_check},
+	{0x36, no_check},
+	{0x37, no_check},
+	{0x38, no_check},
+	{0x39, no_check},
+	{0x3A, no_check},
+	{0x3B, no_check},
+	{0x3C, no_check},
+	{0x3D, no_check},
+	{0x3E, no_check},
+	{0x40, check_destination_addr0},
+	{0x41, check_destination_addr1},
+	{0x42, check_destination_addr_mode},
+	{0x43, no_check},
+	{0x44, no_check},
+	{0x50, no_check},
+	{0x51, no_check},
+	{0x52, no_check},
+	{0x53, no_check},
+	{0x54, no_check},
+	{0x55, no_check},
+	{0x56, no_check},
+	{0x57, no_check},
+	{0x58, no_check},
+	{0x70, no_check},
+	{0x71, no_check},
+	{0x78, no_check},
+	{0x79, no_check},
+	{0x7A, no_check},
+	{0x7B, no_check},
+	{0x7C, no_check},
+	{0x7D, check_for_vertex_count}
+};
+
+static hz_init_t init_table2[] = {
+	{0xf2, check_for_header2_err},
+	{0xf0, check_for_header1_err},
+	{0xee, check_for_fire},
+	{0xcc, check_for_dummy},
+	{0x00, check_texture_addr0},
+	{0x01, check_texture_addr0},
+	{0x02, check_texture_addr0},
+	{0x03, check_texture_addr0},
+	{0x04, check_texture_addr0},
+	{0x05, check_texture_addr0},
+	{0x06, check_texture_addr0},
+	{0x07, check_texture_addr0},
+	{0x08, check_texture_addr0},
+	{0x09, check_texture_addr0},
+	{0x20, check_texture_addr1},
+	{0x21, check_texture_addr1},
+	{0x22, check_texture_addr1},
+	{0x23, check_texture_addr4},
+	{0x2B, check_texture_addr3},
+	{0x2C, check_texture_addr3},
+	{0x2D, check_texture_addr3},
+	{0x2E, check_texture_addr3},
+	{0x2F, check_texture_addr3},
+	{0x30, check_texture_addr3},
+	{0x31, check_texture_addr3},
+	{0x32, check_texture_addr3},
+	{0x33, check_texture_addr3},
+	{0x34, check_texture_addr3},
+	{0x4B, check_texture_addr5},
+	{0x4C, check_texture_addr6},
+	{0x51, check_texture_addr7},
+	{0x52, check_texture_addr8},
+	{0x77, check_texture_addr2},
+	{0x78, no_check},
+	{0x79, no_check},
+	{0x7A, no_check},
+	{0x7B, check_texture_addr_mode},
+	{0x7C, no_check},
+	{0x7D, no_check},
+	{0x7E, no_check},
+	{0x7F, no_check},
+	{0x80, no_check},
+	{0x81, no_check},
+	{0x82, no_check},
+	{0x83, no_check},
+	{0x85, no_check},
+	{0x86, no_check},
+	{0x87, no_check},
+	{0x88, no_check},
+	{0x89, no_check},
+	{0x8A, no_check},
+	{0x90, no_check},
+	{0x91, no_check},
+	{0x92, no_check},
+	{0x93, no_check}
+};
+
+static hz_init_t init_table3[] = {
+	{0xf2, check_for_header2_err},
+	{0xf0, check_for_header1_err},
+	{0xcc, check_for_dummy},
+	{0x00, check_number_texunits}
+};
+
+static hazard_t table1[256];
+static hazard_t table2[256];
+static hazard_t table3[256];
+
+static __inline__ int
+eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
+{
+	if ((buf_end - *buf) >= num_words) {
+		*buf += num_words;
+		return 0;
+	}
+	DRM_ERROR("Illegal termination of DMA command buffer\n");
+	return 1;
+}
+
+/*
+ * Partially stolen from drm_memory.h
+ */
+
+static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
+						    unsigned long offset,
+						    unsigned long size,
+						    struct drm_device * dev)
+{
+	struct drm_map_list *r_list;
+	drm_local_map_t *map = seq->map_cache;
+
+	if (map && map->offset <= offset
+	    && (offset + size) <= (map->offset + map->size)) {
+		return map;
+	}
+
+	list_for_each_entry(r_list, &dev->maplist, head) {
+		map = r_list->map;
+		if (!map)
+			continue;
+		if (map->offset <= offset
+		    && (offset + size) <= (map->offset + map->size)
+		    && !(map->flags & _DRM_RESTRICTED)
+		    && (map->type == _DRM_AGP)) {
+			seq->map_cache = map;
+			return map;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * Require that all AGP texture levels reside in the same AGP map, which
+ * should be mappable by the client. This is not a big restriction.
+ * FIXME: To actually enforce this security policy strictly, drm_rmmap
+ * would have to wait for DMA quiescence before removing an AGP map.
+ * In practice, the via_drm_lookup_agp_map call seems to take very
+ * little CPU time.
+ */
+
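+/*
+ * Concretely (a restatement of via_drm_lookup_agp_map() above, not a
+ * new rule): a texture spanning [lo, hi) is accepted only if some
+ * client-mappable AGP map fully contains it, i.e.
+ *
+ *	map->offset <= lo && hi <= map->offset + map->size
+ *
+ * and the map is neither restricted nor of a non-AGP type.
+ */
+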
+static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq)
+{
+	switch (cur_seq->unfinished) {
+	case z_address:
+		DRM_DEBUG("Z Buffer start address is 0x%x\n", cur_seq->z_addr);
+		break;
+	case dest_address:
+		DRM_DEBUG("Destination start address is 0x%x\n",
+			  cur_seq->d_addr);
+		break;
+	case tex_address:
+		if (cur_seq->agp_texture) {
+			unsigned start =
+			    cur_seq->tex_level_lo[cur_seq->texture];
+			unsigned end = cur_seq->tex_level_hi[cur_seq->texture];
+			unsigned long lo = ~0, hi = 0, tmp;
+			uint32_t *addr, *pitch, *height, tex;
+			unsigned i;
+			int npot;
+
+			if (end > 9)
+				end = 9;
+			if (start > 9)
+				start = 9;
+
+			addr =
+			    &(cur_seq->t_addr[tex = cur_seq->texture][start]);
+			pitch = &(cur_seq->pitch[tex][start]);
+			height = &(cur_seq->height[tex][start]);
+			npot = cur_seq->tex_npot[tex];
+			for (i = start; i <= end; ++i) {
+				tmp = *addr++;
+				if (tmp < lo)
+					lo = tmp;
+				if (i == 0 && npot)
+					tmp += (*height++ * *pitch++);
+				else
+					tmp += (*height++ << *pitch++);
+				if (tmp > hi)
+					hi = tmp;
+			}
+
+			if (!via_drm_lookup_agp_map
+			    (cur_seq, lo, hi - lo, cur_seq->dev)) {
+				DRM_ERROR
+				    ("AGP texture is not in allowed map\n");
+				return 2;
+			}
+		}
+		break;
+	default:
+		break;
+	}
+	cur_seq->unfinished = no_sequence;
+	return 0;
+}
+
+static __inline__ int
+investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t * cur_seq)
+{
+	register uint32_t tmp, *tmp_addr;
+
+	if (cur_seq->unfinished && (cur_seq->unfinished != seqs[hz])) {
+		int ret;
+		if ((ret = finish_current_sequence(cur_seq)))
+			return ret;
+	}
+
+	switch (hz) {
+	case check_for_header2:
+		if (cmd == HALCYON_HEADER2)
+			return 1;
+		return 0;
+	case check_for_header1:
+		if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
+			return 1;
+		return 0;
+	case check_for_header2_err:
+		if (cmd == HALCYON_HEADER2)
+			return 1;
+		DRM_ERROR("Illegal DMA HALCYON_HEADER2 command\n");
+		break;
+	case check_for_header1_err:
+		if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
+			return 1;
+		DRM_ERROR("Illegal DMA HALCYON_HEADER1 command\n");
+		break;
+	case check_for_fire:
+		if ((cmd & HALCYON_FIREMASK) == HALCYON_FIRECMD)
+			return 1;
+		DRM_ERROR("Illegal DMA HALCYON_FIRECMD command\n");
+		break;
+	case check_for_dummy:
+		if (HC_DUMMY == cmd)
+			return 0;
+		DRM_ERROR("Illegal DMA HC_DUMMY command\n");
+		break;
+	case check_for_dd:
+		if (0xdddddddd == cmd)
+			return 0;
+		DRM_ERROR("Illegal DMA 0xdddddddd command\n");
+		break;
+	case check_z_buffer_addr0:
+		cur_seq->unfinished = z_address;
+		cur_seq->z_addr = (cur_seq->z_addr & 0xFF000000) |
+		    (cmd & 0x00FFFFFF);
+		return 0;
+	case check_z_buffer_addr1:
+		cur_seq->unfinished = z_address;
+		cur_seq->z_addr = (cur_seq->z_addr & 0x00FFFFFF) |
+		    ((cmd & 0xFF) << 24);
+		return 0;
+	case check_z_buffer_addr_mode:
+		cur_seq->unfinished = z_address;
+		if ((cmd & 0x0000C000) == 0)
+			return 0;
+		DRM_ERROR("Attempt to place Z buffer in system memory\n");
+		return 2;
+	case check_destination_addr0:
+		cur_seq->unfinished = dest_address;
+		cur_seq->d_addr = (cur_seq->d_addr & 0xFF000000) |
+		    (cmd & 0x00FFFFFF);
+		return 0;
+	case check_destination_addr1:
+		cur_seq->unfinished = dest_address;
+		cur_seq->d_addr = (cur_seq->d_addr & 0x00FFFFFF) |
+		    ((cmd & 0xFF) << 24);
+		return 0;
+	case check_destination_addr_mode:
+		cur_seq->unfinished = dest_address;
+		if ((cmd & 0x0000C000) == 0)
+			return 0;
+		DRM_ERROR
+		    ("Attempt to place 3D drawing buffer in system memory\n");
+		return 2;
+	case check_texture_addr0:
+		cur_seq->unfinished = tex_address;
+		tmp = (cmd >> 24);
+		tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
+		*tmp_addr = (*tmp_addr & 0xFF000000) | (cmd & 0x00FFFFFF);
+		return 0;
+	case check_texture_addr1:
+		cur_seq->unfinished = tex_address;
+		tmp = ((cmd >> 24) - 0x20);
+		tmp += tmp << 1;
+		tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
+		*tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
+		tmp_addr++;
+		*tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF00) << 16);
+		tmp_addr++;
+		*tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF0000) << 8);
+		return 0;
+	case check_texture_addr2:
+		cur_seq->unfinished = tex_address;
+		cur_seq->tex_level_lo[tmp = cur_seq->texture] = cmd & 0x3F;
+		cur_seq->tex_level_hi[tmp] = (cmd & 0xFC0) >> 6;
+		return 0;
+	case check_texture_addr3:
+		cur_seq->unfinished = tex_address;
+		tmp = ((cmd >> 24) - HC_SubA_HTXnL0Pit);
+		if (tmp == 0 &&
+		    (cmd & HC_HTXnEnPit_MASK)) {
+			cur_seq->pitch[cur_seq->texture][tmp] =
+				(cmd & HC_HTXnLnPit_MASK);
+			cur_seq->tex_npot[cur_seq->texture] = 1;
+		} else {
+			cur_seq->pitch[cur_seq->texture][tmp] =
+				(cmd & HC_HTXnLnPitE_MASK) >> HC_HTXnLnPitE_SHIFT;
+			cur_seq->tex_npot[cur_seq->texture] = 0;
+			if (cmd & 0x000FFFFF) {
+				DRM_ERROR
+					("Unimplemented texture level 0 pitch mode.\n");
+				return 2;
+			}
+		}
+		return 0;
+	case check_texture_addr4:
+		cur_seq->unfinished = tex_address;
+		tmp_addr = &cur_seq->t_addr[cur_seq->texture][9];
+		*tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
+		return 0;
+	case check_texture_addr5:
+	case check_texture_addr6:
+		cur_seq->unfinished = tex_address;
+		/*
+		 * Texture width. We don't care since we have the pitch.
+		 */
+		return 0;
+	case check_texture_addr7:
+		cur_seq->unfinished = tex_address;
+		tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
+		tmp_addr[5] = 1 << ((cmd & 0x00F00000) >> 20);
+		tmp_addr[4] = 1 << ((cmd & 0x000F0000) >> 16);
+		tmp_addr[3] = 1 << ((cmd & 0x0000F000) >> 12);
+		tmp_addr[2] = 1 << ((cmd & 0x00000F00) >> 8);
+		tmp_addr[1] = 1 << ((cmd & 0x000000F0) >> 4);
+		tmp_addr[0] = 1 << (cmd & 0x0000000F);
+		return 0;
+	case check_texture_addr8:
+		cur_seq->unfinished = tex_address;
+		tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
+		tmp_addr[9] = 1 << ((cmd & 0x0000F000) >> 12);
+		tmp_addr[8] = 1 << ((cmd & 0x00000F00) >> 8);
+		tmp_addr[7] = 1 << ((cmd & 0x000000F0) >> 4);
+		tmp_addr[6] = 1 << (cmd & 0x0000000F);
+		return 0;
+	case check_texture_addr_mode:
+		cur_seq->unfinished = tex_address;
+		if (2 == (tmp = cmd & 0x00000003)) {
+			DRM_ERROR
+			    ("Attempt to fetch texture from system memory.\n");
+			return 2;
+		}
+		cur_seq->agp_texture = (tmp == 3);
+		cur_seq->tex_palette_size[cur_seq->texture] =
+		    (cmd >> 16) & 0x000000007;
+		return 0;
+	case check_for_vertex_count:
+		cur_seq->vertex_count = cmd & 0x0000FFFF;
+		return 0;
+	case check_number_texunits:
+		cur_seq->multitex = (cmd >> 3) & 1;
+		return 0;
+	default:
+		DRM_ERROR("Illegal DMA data: 0x%x\n", cmd);
+		return 2;
+	}
+	return 2;
+}
+
+static __inline__ int
+via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end,
+		    drm_via_state_t * cur_seq)
+{
+	drm_via_private_t *dev_priv =
+	    (drm_via_private_t *) cur_seq->dev->dev_private;
+	uint32_t a_fire, bcmd, dw_count;
+	int ret = 0;
+	int have_fire;
+	const uint32_t *buf = *buffer;
+
+	while (buf < buf_end) {
+		have_fire = 0;
+		if ((buf_end - buf) < 2) {
+			DRM_ERROR
+			    ("Unexpected termination of primitive list.\n");
+			ret = 1;
+			break;
+		}
+		if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdB)
+			break;
+		bcmd = *buf++;
+		if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdA) {
+			DRM_ERROR("Expected Vertex List A command, got 0x%x\n",
+				  *buf);
+			ret = 1;
+			break;
+		}
+		a_fire =
+		    *buf++ | HC_HPLEND_MASK | HC_HPMValidN_MASK |
+		    HC_HE3Fire_MASK;
+
+		/*
+		 * How many dwords per vertex?
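+		 *
+		 * Bits 7 and 8 of the B command each contribute one dword
+		 * per vertex (two when multitexturing is enabled); bits 9
+		 * through 14 each contribute one more dword.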
+		 */
+
+		if (cur_seq->agp && ((bcmd & (0xF << 11)) == 0)) {
+			DRM_ERROR("Illegal B command vertex data for AGP.\n");
+			ret = 1;
+			break;
+		}
+
+		dw_count = 0;
+		if (bcmd & (1 << 7))
+			dw_count += (cur_seq->multitex) ? 2 : 1;
+		if (bcmd & (1 << 8))
+			dw_count += (cur_seq->multitex) ? 2 : 1;
+		if (bcmd & (1 << 9))
+			dw_count++;
+		if (bcmd & (1 << 10))
+			dw_count++;
+		if (bcmd & (1 << 11))
+			dw_count++;
+		if (bcmd & (1 << 12))
+			dw_count++;
+		if (bcmd & (1 << 13))
+			dw_count++;
+		if (bcmd & (1 << 14))
+			dw_count++;
+
+		while (buf < buf_end) {
+			if (*buf == a_fire) {
+				if (dev_priv->num_fire_offsets >=
+				    VIA_FIRE_BUF_SIZE) {
+					DRM_ERROR("Fire offset buffer full.\n");
+					ret = 1;
+					break;
+				}
+				dev_priv->fire_offsets[dev_priv->
+						       num_fire_offsets++] =
+				    buf;
+				have_fire = 1;
+				buf++;
+				if (buf < buf_end && *buf == a_fire)
+					buf++;
+				break;
+			}
+			if ((*buf == HALCYON_HEADER2) ||
+			    ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD)) {
+				DRM_ERROR("Missing Vertex Fire command, "
+					  "stray Vertex Fire command, or verifier "
+					  "lost sync.\n");
+				ret = 1;
+				break;
+			}
+			if ((ret = eat_words(&buf, buf_end, dw_count)))
+				break;
+		}
+		if (buf >= buf_end && !have_fire) {
+			DRM_ERROR("Missing Vertex Fire command or verifier "
+				  "lost sync.\n");
+			ret = 1;
+			break;
+		}
+		if (cur_seq->agp && ((buf - cur_seq->buf_start) & 0x01)) {
+			DRM_ERROR("AGP Primitive list end misaligned.\n");
+			ret = 1;
+			break;
+		}
+	}
+	*buffer = buf;
+	return ret;
+}
+
+static __inline__ verifier_state_t
+via_check_header2(uint32_t const **buffer, const uint32_t * buf_end,
+		  drm_via_state_t * hc_state)
+{
+	uint32_t cmd;
+	int hz_mode;
+	hazard_t hz;
+	const uint32_t *buf = *buffer;
+	const hazard_t *hz_table;
+
+	if ((buf_end - buf) < 2) {
+		DRM_ERROR
+		    ("Illegal termination of DMA HALCYON_HEADER2 sequence.\n");
+		return state_error;
+	}
+	buf++;
+	cmd = (*buf++ & 0xFFFF0000) >> 16;
+
+	switch (cmd) {
+	case HC_ParaType_CmdVdata:
+		if (via_check_prim_list(&buf, buf_end, hc_state))
+			return state_error;
+		*buffer = buf;
+		return state_command;
+	case HC_ParaType_NotTex:
+		hz_table = table1;
+		break;
+	case HC_ParaType_Tex:
+		hc_state->texture = 0;
+		hz_table = table2;
+		break;
+	case (HC_ParaType_Tex | (HC_SubType_Tex1 << 8)):
+		hc_state->texture = 1;
+		hz_table = table2;
+		break;
+	case (HC_ParaType_Tex | (HC_SubType_TexGeneral << 8)):
+		hz_table = table3;
+		break;
+	case HC_ParaType_Auto:
+		if (eat_words(&buf, buf_end, 2))
+			return state_error;
+		*buffer = buf;
+		return state_command;
+	case (HC_ParaType_Palette | (HC_SubType_Stipple << 8)):
+		if (eat_words(&buf, buf_end, 32))
+			return state_error;
+		*buffer = buf;
+		return state_command;
+	case (HC_ParaType_Palette | (HC_SubType_TexPalette0 << 8)):
+	case (HC_ParaType_Palette | (HC_SubType_TexPalette1 << 8)):
+		DRM_ERROR("Texture palettes are rejected because there is "
+			  "no information on how to determine their size.\n");
+		return state_error;
+	case (HC_ParaType_Palette | (HC_SubType_FogTable << 8)):
+		DRM_ERROR("Fog factor palettes are rejected because there is "
+			  "no information on how to determine their size.\n");
+		return state_error;
+	default:
+
+		/*
+		 * There are some unimplemented HC_ParaTypes here that need
+		 * to be implemented if the Mesa driver is extended.
+		 */
+
+		DRM_ERROR("Invalid or unimplemented HALCYON_HEADER2 "
+			  "DMA subcommand: 0x%x. Previous dword: 0x%x\n",
+			  cmd, *(buf - 2));
+		*buffer = buf;
+		return state_error;
+	}
+
+	while (buf < buf_end) {
+		cmd = *buf++;
+		if ((hz = hz_table[cmd >> 24])) {
+			if ((hz_mode = investigate_hazard(cmd, hz, hc_state))) {
+				if (hz_mode == 1) {
+					buf--;
+					break;
+				}
+				return state_error;
+			}
+		} else if (hc_state->unfinished &&
+			   finish_current_sequence(hc_state)) {
+			return state_error;
+		}
+	}
+	if (hc_state->unfinished && finish_current_sequence(hc_state)) {
+		return state_error;
+	}
+	*buffer = buf;
+	return state_command;
+}
+
+static __inline__ verifier_state_t
+via_parse_header2(drm_via_private_t * dev_priv, uint32_t const **buffer,
+		  const uint32_t * buf_end, int *fire_count)
+{
+	uint32_t cmd;
+	const uint32_t *buf = *buffer;
+	const uint32_t *next_fire;
+	int burst = 0;
+
+	next_fire = dev_priv->fire_offsets[*fire_count];
+	buf++;
+	cmd = (*buf & 0xFFFF0000) >> 16;
+	VIA_WRITE(HC_REG_TRANS_SET + HC_REG_BASE, *buf++);
+	switch (cmd) {
+	case HC_ParaType_CmdVdata:
+		while ((buf < buf_end) &&
+		       (*fire_count < dev_priv->num_fire_offsets) &&
+		       (*buf & HC_ACMD_MASK) == HC_ACMD_HCmdB) {
+			while (buf <= next_fire) {
+				VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
+					  (burst & 63), *buf++);
+				burst += 4;
+			}
+			if ((buf < buf_end)
+			    && ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD))
+				buf++;
+
+			if (++(*fire_count) < dev_priv->num_fire_offsets)
+				next_fire = dev_priv->fire_offsets[*fire_count];
+		}
+		break;
+	default:
+		while (buf < buf_end) {
+
+			if (*buf == HC_HEADER2 ||
+			    (*buf & HALCYON_HEADER1MASK) == HALCYON_HEADER1 ||
+			    (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5 ||
+			    (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
+				break;
+
+			VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
+				  (burst & 63), *buf++);
+			burst += 4;
+		}
+	}
+	*buffer = buf;
+	return state_command;
+}
+
+static __inline__ int verify_mmio_address(uint32_t address)
+{
+	if ((address > 0x3FF) && (address < 0xC00)) {
+		DRM_ERROR("Invalid VIDEO DMA command. "
+			  "Attempt to access 3D- or command burst area.\n");
+		return 1;
+	} else if ((address > 0xCFF) && (address < 0x1300)) {
+		DRM_ERROR("Invalid VIDEO DMA command. "
+			  "Attempt to access PCI DMA area.\n");
+		return 1;
+	} else if (address > 0x13FF) {
+		DRM_ERROR("Invalid VIDEO DMA command. "
+			  "Attempt to access VGA registers.\n");
+		return 1;
+	}
+	return 0;
+}
+
+static __inline__ int
+verify_video_tail(uint32_t const **buffer, const uint32_t * buf_end,
+		  uint32_t dwords)
+{
+	const uint32_t *buf = *buffer;
+
+	if (buf_end - buf < dwords) {
+		DRM_ERROR("Illegal termination of video command.\n");
+		return 1;
+	}
+	while (dwords--) {
+		if (*buf++) {
+			DRM_ERROR("Illegal video command tail.\n");
+			return 1;
+		}
+	}
+	*buffer = buf;
+	return 0;
+}
+
+static __inline__ verifier_state_t
+via_check_header1(uint32_t const **buffer, const uint32_t * buf_end)
+{
+	uint32_t cmd;
+	const uint32_t *buf = *buffer;
+	verifier_state_t ret = state_command;
+
+	while (buf < buf_end) {
+		cmd = *buf;
+		if ((cmd > ((0x3FF >> 2) | HALCYON_HEADER1)) &&
+		    (cmd < ((0xC00 >> 2) | HALCYON_HEADER1))) {
+			if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
+				break;
+			DRM_ERROR("Invalid HALCYON_HEADER1 command. "
+				  "Attempt to access 3D- or command burst area.\n");
+			ret = state_error;
+			break;
+		} else if (cmd > ((0xCFF >> 2) | HALCYON_HEADER1)) {
+			if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
+				break;
+			DRM_ERROR("Invalid HALCYON_HEADER1 command. "
+				  "Attempt to access VGA registers.\n");
+			ret = state_error;
+			break;
+		} else {
+			buf += 2;
+		}
+	}
+	*buffer = buf;
+	return ret;
+}
+
+static __inline__ verifier_state_t
+via_parse_header1(drm_via_private_t * dev_priv, uint32_t const **buffer,
+		  const uint32_t * buf_end)
+{
+	register uint32_t cmd;
+	const uint32_t *buf = *buffer;
+
+	while (buf < buf_end) {
+		cmd = *buf;
+		if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
+			break;
+		VIA_WRITE((cmd & ~HALCYON_HEADER1MASK) << 2, *++buf);
+		buf++;
+	}
+	*buffer = buf;
+	return state_command;
+}
+
+static __inline__ verifier_state_t
+via_check_vheader5(uint32_t const **buffer, const uint32_t * buf_end)
+{
+	uint32_t data;
+	const uint32_t *buf = *buffer;
+
+	if (buf_end - buf < 4) {
+		DRM_ERROR("Illegal termination of video header5 command\n");
+		return state_error;
+	}
+
+	data = *buf++ & ~VIA_VIDEOMASK;
+	if (verify_mmio_address(data))
+		return state_error;
+
+	data = *buf++;
+	if (*buf++ != 0x00F50000) {
+		DRM_ERROR("Illegal header5 header data\n");
+		return state_error;
+	}
+	if (*buf++ != 0x00000000) {
+		DRM_ERROR("Illegal header5 header data\n");
+		return state_error;
+	}
+	if (eat_words(&buf, buf_end, data))
+		return state_error;
+	if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
+		return state_error;
+	*buffer = buf;
+	return state_command;
+
+}
+
+static __inline__ verifier_state_t
+via_parse_vheader5(drm_via_private_t * dev_priv, uint32_t const **buffer,
+		   const uint32_t * buf_end)
+{
+	uint32_t addr, count, i;
+	const uint32_t *buf = *buffer;
+
+	addr = *buf++ & ~VIA_VIDEOMASK;
+	i = count = *buf;
+	buf += 3;
+	while (i--) {
+		VIA_WRITE(addr, *buf++);
+	}
+	if (count & 3)
+		buf += 4 - (count & 3);
+	*buffer = buf;
+	return state_command;
+}
+
+static __inline__ verifier_state_t
+via_check_vheader6(uint32_t const **buffer, const uint32_t * buf_end)
+{
+	uint32_t data;
+	const uint32_t *buf = *buffer;
+	uint32_t i;
+
+	if (buf_end - buf < 4) {
+		DRM_ERROR("Illegal termination of video header6 command\n");
+		return state_error;
+	}
+	buf++;
+	data = *buf++;
+	if (*buf++ != 0x00F60000) {
+		DRM_ERROR("Illegal header6 header data\n");
+		return state_error;
+	}
+	if (*buf++ != 0x00000000) {
+		DRM_ERROR("Illegal header6 header data\n");
+		return state_error;
+	}
+	if ((buf_end - buf) < (data << 1)) {
+		DRM_ERROR("Illegal termination of video header6 command\n");
+		return state_error;
+	}
+	for (i = 0; i < data; ++i) {
+		if (verify_mmio_address(*buf++))
+			return state_error;
+		buf++;
+	}
+	data <<= 1;
+	if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
+		return state_error;
+	*buffer = buf;
+	return state_command;
+}
+
+static __inline__ verifier_state_t
+via_parse_vheader6(drm_via_private_t * dev_priv, uint32_t const **buffer,
+		   const uint32_t * buf_end)
+{
+
+	uint32_t addr, count, i;
+	const uint32_t *buf = *buffer;
+
+	i = count = *++buf;
+	buf += 3;
+	while (i--) {
+		addr = *buf++;
+		VIA_WRITE(addr, *buf++);
+	}
+	count <<= 1;
+	if (count & 3)
+		buf += 4 - (count & 3);
+	*buffer = buf;
+	return state_command;
+}
+
+int
+via_verify_command_stream(const uint32_t * buf, unsigned int size,
+			  struct drm_device * dev, int agp)
+{
+
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	drm_via_state_t *hc_state = &dev_priv->hc_state;
+	drm_via_state_t saved_state = *hc_state;
+	uint32_t cmd;
+	const uint32_t *buf_end = buf + (size >> 2);
+	verifier_state_t state = state_command;
+	int cme_video;
+	int supported_3d;
+
+	cme_video = (dev_priv->chipset == VIA_PRO_GROUP_A ||
+		     dev_priv->chipset == VIA_DX9_0);
+
+	supported_3d = dev_priv->chipset != VIA_DX9_0;
+
+	hc_state->dev = dev;
+	hc_state->unfinished = no_sequence;
+	hc_state->map_cache = NULL;
+	hc_state->agp = agp;
+	hc_state->buf_start = buf;
+	dev_priv->num_fire_offsets = 0;
+
+	while (buf < buf_end) {
+
+		switch (state) {
+		case state_header2:
+			state = via_check_header2(&buf, buf_end, hc_state);
+			break;
+		case state_header1:
+			state = via_check_header1(&buf, buf_end);
+			break;
+		case state_vheader5:
+			state = via_check_vheader5(&buf, buf_end);
+			break;
+		case state_vheader6:
+			state = via_check_vheader6(&buf, buf_end);
+			break;
+		case state_command:
+			if ((HALCYON_HEADER2 == (cmd = *buf)) &&
+			    supported_3d)
+				state = state_header2;
+			else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
+				state = state_header1;
+			else if (cme_video
+				 && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
+				state = state_vheader5;
+			else if (cme_video
+				 && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
+				state = state_vheader6;
+			else if ((cmd == HALCYON_HEADER2) && !supported_3d) {
+				DRM_ERROR("Accelerated 3D is not supported on this chipset yet.\n");
+				state = state_error;
+			} else {
+				DRM_ERROR
+				    ("Invalid or unimplemented DMA HEADER command: 0x%x\n",
+				     cmd);
+				state = state_error;
+			}
+			break;
+		case state_error:
+		default:
+			*hc_state = saved_state;
+			return -EINVAL;
+		}
+	}
+	if (state == state_error) {
+		*hc_state = saved_state;
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int
+via_parse_command_stream(struct drm_device * dev, const uint32_t * buf,
+			 unsigned int size)
+{
+
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	uint32_t cmd;
+	const uint32_t *buf_end = buf + (size >> 2);
+	verifier_state_t state = state_command;
+	int fire_count = 0;
+
+	while (buf < buf_end) {
+
+		switch (state) {
+		case state_header2:
+			state =
+			    via_parse_header2(dev_priv, &buf, buf_end,
+					      &fire_count);
+			break;
+		case state_header1:
+			state = via_parse_header1(dev_priv, &buf, buf_end);
+			break;
+		case state_vheader5:
+			state = via_parse_vheader5(dev_priv, &buf, buf_end);
+			break;
+		case state_vheader6:
+			state = via_parse_vheader6(dev_priv, &buf, buf_end);
+			break;
+		case state_command:
+			if (HALCYON_HEADER2 == (cmd = *buf))
+				state = state_header2;
+			else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
+				state = state_header1;
+			else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
+				state = state_vheader5;
+			else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
+				state = state_vheader6;
+			else {
+				DRM_ERROR
+				    ("Invalid or unimplemented DMA HEADER command: 0x%x\n",
+				     cmd);
+				state = state_error;
+			}
+			break;
+		case state_error:
+		default:
+			return -EINVAL;
+		}
+	}
+	if (state == state_error) {
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void
+setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size)
+{
+	int i;
+
+	for (i = 0; i < 256; ++i) {
+		table[i] = forbidden_command;
+	}
+
+	for (i = 0; i < size; ++i) {
+		table[init_table[i].code] = init_table[i].hz;
+	}
+}
+
+void via_init_command_verifier(void)
+{
+	setup_hazard_table(init_table1, table1,
+			   sizeof(init_table1) / sizeof(hz_init_t));
+	setup_hazard_table(init_table2, table2,
+			   sizeof(init_table2) / sizeof(hz_init_t));
+	setup_hazard_table(init_table3, table3,
+			   sizeof(init_table3) / sizeof(hz_init_t));
+}
diff --git a/drivers/gpu/drm/via/via_verifier.h b/drivers/gpu/drm/via/via_verifier.h
new file mode 100644
index 000000000000..d6f8214b69f5
--- /dev/null
+++ b/drivers/gpu/drm/via/via_verifier.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2004 The Unichrome Project. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE UNICHROME PROJECT, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Thomas Hellström 2004.
+ */
+
+#ifndef _VIA_VERIFIER_H_
+#define _VIA_VERIFIER_H_
+
+typedef enum {
+	no_sequence = 0,
+	z_address,
+	dest_address,
+	tex_address
+} drm_via_sequence_t;
+
+typedef struct {
+	unsigned texture;
+	uint32_t z_addr;
+	uint32_t d_addr;
+	uint32_t t_addr[2][10];
+	uint32_t pitch[2][10];
+	uint32_t height[2][10];
+	uint32_t tex_level_lo[2];
+	uint32_t tex_level_hi[2];
+	uint32_t tex_palette_size[2];
+	uint32_t tex_npot[2];
+	drm_via_sequence_t unfinished;
+	int agp_texture;
+	int multitex;
+	struct drm_device *dev;
+	drm_local_map_t *map_cache;
+	uint32_t vertex_count;
+	int agp;
+	const uint32_t *buf_start;
+} drm_via_state_t;
+
+extern int via_verify_command_stream(const uint32_t * buf, unsigned int size,
+				     struct drm_device * dev, int agp);
+extern int via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
+				    unsigned int size);
+
+#endif
diff --git a/drivers/gpu/drm/via/via_video.c b/drivers/gpu/drm/via/via_video.c
new file mode 100644
index 000000000000..6ec04ac12459
--- /dev/null
+++ b/drivers/gpu/drm/via/via_video.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2005 Thomas Hellstrom. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Thomas Hellstrom 2005.
+ *
+ * Video and XvMC related functions.
+ */
+
+#include "drmP.h"
+#include "via_drm.h"
+#include "via_drv.h"
+
+void via_init_futex(drm_via_private_t * dev_priv)
+{
+	unsigned int i;
+
+	DRM_DEBUG("\n");
+
+	for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
+		DRM_INIT_WAITQUEUE(&(dev_priv->decoder_queue[i]));
+		XVMCLOCKPTR(dev_priv->sarea_priv, i)->lock = 0;
+	}
+}
+
+void via_cleanup_futex(drm_via_private_t * dev_priv)
+{
+}
+
+void via_release_futex(drm_via_private_t * dev_priv, int context)
+{
+	unsigned int i;
+	volatile int *lock;
+
+	if (!dev_priv->sarea_priv)
+		return;
+
+	for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
+		lock = (volatile int *)XVMCLOCKPTR(dev_priv->sarea_priv, i);
+		if ((_DRM_LOCKING_CONTEXT(*lock) == context)) {
+			if (_DRM_LOCK_IS_HELD(*lock)
+			    && (*lock & _DRM_LOCK_CONT)) {
+				DRM_WAKEUP(&(dev_priv->decoder_queue[i]));
+			}
+			*lock = 0;
+		}
+	}
+}
+
+int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+	drm_via_futex_t *fx = data;
+	volatile int *lock;
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	drm_via_sarea_t *sAPriv = dev_priv->sarea_priv;
+	int ret = 0;
+
+	DRM_DEBUG("\n");
+
+	if (fx->lock >= VIA_NR_XVMC_LOCKS)
+		return -EFAULT;
+
+	lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx->lock);
+
+	switch (fx->func) {
+	case VIA_FUTEX_WAIT:
+		DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock],
+			    (fx->ms / 10) * (DRM_HZ / 100), *lock != fx->val);
+		return ret;
+	case VIA_FUTEX_WAKE:
+		DRM_WAKEUP(&(dev_priv->decoder_queue[fx->lock]));
+		return 0;
+	}
+	return 0;
+}