author    Dave Airlie <airlied@redhat.com>  2014-02-06 12:04:31 +1000
committer Dave Airlie <airlied@redhat.com>  2014-02-06 12:04:31 +1000
commit    9ca5d4b4e1d4416b6d01804be843f8e39845c73d (patch)
tree      da4619d822217c904b9d1064101cfe25ab20518a /drivers/gpu/drm
parent    9df5a9b02fb59a4148bc93e0d20731c72aaffa5f (diff)
parent    cd9a21a831af0af7539a0e37e4455da03df7cf82 (diff)
download  linux-9ca5d4b4e1d4416b6d01804be843f8e39845c73d.tar.gz
Merge tag 'vmwgfx-fixes-3.14-2014-02-05' of git://people.freedesktop.org/~thomash/linux into drm-next
A couple of vmwgfx fixes together with missing bits of legacy device
emulation to facilitate old user-space drivers on new devices.

The shader emulation bits are a bit large, but since they mostly touch the
new device code, regressions are unlikely. I figure the gain of having
this from the start clearly outweighs the risk of adding these bits at
this point.
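
To make the shader-emulation portion of the diff easier to follow: legacy
SVGA_3D_CMD_SHADER_DEFINE/DESTROY commands from old user-space are turned
into guest-backed shader objects, tracked per open file in a small manager
(vmwgfx_shader.c). The sketch below is illustrative only, with simplified
names; the real code uses drm_open_hash and TTM object handles.

    /*
     * Hedged sketch of the compat-shader staging scheme, simplified from
     * the vmwgfx_shader.c hunks in this diff. Not the driver code itself.
     */
    enum compat_state { COMMITTED, STAGED_ADD, STAGED_DEL };

    /* One hash key per shader: low 24 bits user key, shader type above. */
    static unsigned long compat_key(u32 user_key, u32 shader_type)
    {
            return user_key | (shader_type << 24);
    }

    /*
     * Lifecycle, tied to the execbuf that carries the legacy commands:
     *  - SHADER_DEFINE: create a GB shader, hash the key, stage STAGED_ADD.
     *  - SHADER_DESTROY: a STAGED_ADD entry is freed outright; a COMMITTED
     *    entry is unhashed and staged STAGED_DEL.
     *  - execbuf success: STAGED_ADD -> COMMITTED; STAGED_DEL entries freed.
     *  - execbuf failure: STAGED_ADD entries freed; STAGED_DEL re-hashed.
     */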

Pull request of 2014-02-05

* tag 'vmwgfx-fixes-3.14-2014-02-05' of git://people.freedesktop.org/~thomash/linux:
  vmwgfx: Fix unitialized stack read in vmw_setup_otable_base
  drm/vmwgfx: Reemit context bindings when necessary v2
  drm/vmwgfx: Detect old user-space drivers and set up legacy emulation v2
  drm/vmwgfx: Emulate legacy shaders on guest-backed devices v2
  drm/vmwgfx: Fix legacy surface reference size copyback
  drm/vmwgfx: Fix SET_SHADER_CONST emulation on guest-backed devices
  drm/vmwgfx: Fix regression caused by "drm/ttm: make ttm reservation calls behave like reservation calls"
  drm/vmwgfx: Don't commit staged bindings if execbuf fails
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/vmwgfx/svga3d_reg.h       24
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c  144
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c        7
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h       35
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  330
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c     93
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_mob.c        1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c  11
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c   467
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c    6
10 files changed, 988 insertions, 130 deletions
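
A note on the rebind mechanism that accounts for most of the
vmwgfx_context.c and vmwgfx_execbuf.c churn below: every scrub function
gains a rebind flag, since the same FIFO command either unbinds
(SVGA3D_INVALID_ID) or rebinds (the resource id), and a per-binding
scrubbed flag marks bindings that must be reemitted on the next execbuf.
A minimal sketch, assuming a hypothetical emit_set_binding() helper in
place of the per-type FIFO code:

    /* Illustrative only; the real functions are vmw_context_scrub_*()
     * and vmw_context_rebind_all() in the diff below. */
    static int binding_emit(struct vmw_ctx_bindinfo *bi, bool rebind)
    {
            /* One command serves both directions: the resource id
             * rebinds, SVGA3D_INVALID_ID scrubs. */
            u32 id = rebind ? bi->res->id : SVGA3D_INVALID_ID;

            return emit_set_binding(bi->ctx->id, bi->bt, id); /* hypothetical */
    }

    static int rebind_all(struct list_head *bindings)
    {
            struct vmw_ctx_binding *entry;
            int ret;

            list_for_each_entry(entry, bindings, ctx_list) {
                    if (!entry->bi.scrubbed)
                            continue;       /* still bound on the device */
                    ret = binding_emit(&entry->bi, true);
                    if (ret)
                            return ret;
                    entry->bi.scrubbed = false;
            }
            return 0;
    }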
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
index d95335cb90bd..b645647b7776 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -2583,4 +2583,28 @@ typedef union {
    float  f;
 } SVGA3dDevCapResult;
 
+typedef enum {
+   SVGA3DCAPS_RECORD_UNKNOWN        = 0,
+   SVGA3DCAPS_RECORD_DEVCAPS_MIN    = 0x100,
+   SVGA3DCAPS_RECORD_DEVCAPS        = 0x100,
+   SVGA3DCAPS_RECORD_DEVCAPS_MAX    = 0x1ff,
+} SVGA3dCapsRecordType;
+
+typedef
+struct SVGA3dCapsRecordHeader {
+   uint32 length;
+   SVGA3dCapsRecordType type;
+}
+SVGA3dCapsRecordHeader;
+
+typedef
+struct SVGA3dCapsRecord {
+   SVGA3dCapsRecordHeader header;
+   uint32 data[1];
+}
+SVGA3dCapsRecord;
+
+
+typedef uint32 SVGA3dCapPair[2];
+
 #endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 82c41daebc0e..9426c53fb483 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -37,7 +37,7 @@ struct vmw_user_context {
 
 
 
-typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *);
+typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
 
 static void vmw_user_context_free(struct vmw_resource *res);
 static struct vmw_resource *
@@ -50,9 +50,11 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
 				 bool readback,
 				 struct ttm_validate_buffer *val_buf);
 static int vmw_gb_context_destroy(struct vmw_resource *res);
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi);
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi);
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi);
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+					   bool rebind);
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
 static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
 static uint64_t vmw_user_context_size;
 
@@ -111,10 +113,14 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
 
 	if (res->func->destroy == vmw_gb_context_destroy) {
 		mutex_lock(&dev_priv->cmdbuf_mutex);
+		mutex_lock(&dev_priv->binding_mutex);
+		(void) vmw_context_binding_state_kill
+			(&container_of(res, struct vmw_user_context, res)->cbs);
 		(void) vmw_gb_context_destroy(res);
 		if (dev_priv->pinned_bo != NULL &&
 		    !dev_priv->query_cid_valid)
 			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
+		mutex_unlock(&dev_priv->binding_mutex);
 		mutex_unlock(&dev_priv->cmdbuf_mutex);
 		return;
 	}
@@ -328,7 +334,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
 	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
 
 	mutex_lock(&dev_priv->binding_mutex);
-	vmw_context_binding_state_kill(&uctx->cbs);
+	vmw_context_binding_state_scrub(&uctx->cbs);
 
 	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
 
@@ -378,10 +384,6 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDestroyGBContext body;
 	} *cmd;
-	struct vmw_user_context *uctx =
-		container_of(res, struct vmw_user_context, res);
-
-	BUG_ON(!list_empty(&uctx->cbs.list));
 
 	if (likely(res->id == -1))
 		return 0;
@@ -528,8 +530,9 @@ out_unlock:
  * vmw_context_scrub_shader - scrub a shader binding from a context.
  *
  * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
  */
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
 {
 	struct vmw_private *dev_priv = bi->ctx->dev_priv;
 	struct {
@@ -548,7 +551,8 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = bi->ctx->id;
 	cmd->body.type = bi->i1.shader_type;
-	cmd->body.shid = SVGA3D_INVALID_ID;
+	cmd->body.shid =
+		cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
 	return 0;
@@ -559,8 +563,10 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
  * from a context.
  *
  * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
  */
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+					   bool rebind)
 {
 	struct vmw_private *dev_priv = bi->ctx->dev_priv;
 	struct {
@@ -579,7 +585,8 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = bi->ctx->id;
 	cmd->body.type = bi->i1.rt_type;
-	cmd->body.target.sid = SVGA3D_INVALID_ID;
+	cmd->body.target.sid =
+		cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
 	cmd->body.target.face = 0;
 	cmd->body.target.mipmap = 0;
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
@@ -591,11 +598,13 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
  * vmw_context_scrub_texture - scrub a texture binding from a context.
  *
  * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
  *
  * TODO: Possibly complement this function with a function that takes
  * a list of texture bindings and combines them to a single command.
  */
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
+				     bool rebind)
 {
 	struct vmw_private *dev_priv = bi->ctx->dev_priv;
 	struct {
@@ -619,7 +628,8 @@ static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
 	cmd->body.c.cid = bi->ctx->id;
 	cmd->body.s1.stage = bi->i1.texture_stage;
 	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
-	cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID;
+	cmd->body.s1.value =
+		cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
 	return 0;
@@ -692,6 +702,7 @@ int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
 		vmw_context_binding_drop(loc);
 
 	loc->bi = *bi;
+	loc->bi.scrubbed = false;
 	list_add_tail(&loc->ctx_list, &cbs->list);
 	INIT_LIST_HEAD(&loc->res_list);
 
@@ -727,12 +738,11 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
 	if (loc->bi.ctx != NULL)
 		vmw_context_binding_drop(loc);
 
-	loc->bi = *bi;
-	list_add_tail(&loc->ctx_list, &cbs->list);
-	if (bi->res != NULL)
+	if (bi->res != NULL) {
+		loc->bi = *bi;
+		list_add_tail(&loc->ctx_list, &cbs->list);
 		list_add_tail(&loc->res_list, &bi->res->binding_head);
-	else
-		INIT_LIST_HEAD(&loc->res_list);
+	}
 }
 
 /**
@@ -746,7 +756,10 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
  */
 static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
 {
-	(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi);
+	if (!cb->bi.scrubbed) {
+		(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
+		cb->bi.scrubbed = true;
+	}
 	vmw_context_binding_drop(cb);
 }
 
@@ -768,6 +781,27 @@ static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
 }
 
 /**
+ * vmw_context_binding_state_scrub - Scrub all bindings associated with a
+ * struct vmw_ctx_binding state structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker.
+ */
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_ctx_binding *entry;
+
+	list_for_each_entry(entry, &cbs->list, ctx_list) {
+		if (!entry->bi.scrubbed) {
+			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+			entry->bi.scrubbed = true;
+		}
+	}
+}
+
+/**
  * vmw_context_binding_res_list_kill - Kill all bindings on a
  * resource binding list
  *
@@ -785,6 +819,27 @@ void vmw_context_binding_res_list_kill(struct list_head *head)
 }
 
 /**
+ * vmw_context_binding_res_list_scrub - Scrub all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Scrub all bindings associated with a specific resource. Typically
+ * called before the resource is evicted.
+ */
+void vmw_context_binding_res_list_scrub(struct list_head *head)
+{
+	struct vmw_ctx_binding *entry;
+
+	list_for_each_entry(entry, head, res_list) {
+		if (!entry->bi.scrubbed) {
+			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+			entry->bi.scrubbed = true;
+		}
+	}
+}
+
+/**
  * vmw_context_binding_state_transfer - Commit staged binding info
  *
  * @ctx: Pointer to context to commit the staged binding info to.
@@ -803,3 +858,50 @@ void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
 	list_for_each_entry_safe(entry, next, &from->list, ctx_list)
 		vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
 }
+
+/**
+ * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
+ *
+ * @ctx: The context resource
+ *
+ * Walks through the context binding list and rebinds all scrubbed
+ * resources.
+ */
+int vmw_context_rebind_all(struct vmw_resource *ctx)
+{
+	struct vmw_ctx_binding *entry;
+	struct vmw_user_context *uctx =
+		container_of(ctx, struct vmw_user_context, res);
+	struct vmw_ctx_binding_state *cbs = &uctx->cbs;
+	int ret;
+
+	list_for_each_entry(entry, &cbs->list, ctx_list) {
+		if (likely(!entry->bi.scrubbed))
+			continue;
+
+		if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
+			    SVGA3D_INVALID_ID))
+			continue;
+
+		ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
+		if (unlikely(ret != 0))
+			return ret;
+
+		entry->bi.scrubbed = false;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_context_binding_list - Return a list of context bindings
+ *
+ * @ctx: The context resource
+ *
+ * Returns the current list of bindings of the given context. Note that
+ * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ */
+struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
+{
+	return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 9893328f8fdc..3bdc0adc656d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -941,6 +941,7 @@ static void vmw_postclose(struct drm_device *dev,
 		drm_master_put(&vmw_fp->locked_master);
 	}
 
+	vmw_compat_shader_man_destroy(vmw_fp->shman);
 	ttm_object_file_release(&vmw_fp->tfile);
 	kfree(vmw_fp);
 }
@@ -960,11 +961,17 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
 	if (unlikely(vmw_fp->tfile == NULL))
 		goto out_no_tfile;
 
+	vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
+	if (IS_ERR(vmw_fp->shman))
+		goto out_no_shman;
+
 	file_priv->driver_priv = vmw_fp;
 	dev_priv->bdev.dev_mapping = dev->dev_mapping;
 
 	return 0;
 
+out_no_shman:
+	ttm_object_file_release(&vmw_fp->tfile);
 out_no_tfile:
 	kfree(vmw_fp);
 	return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 554e7fa33082..ecaa302a6154 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -75,10 +75,14 @@
 #define VMW_RES_FENCE ttm_driver_type3
 #define VMW_RES_SHADER ttm_driver_type4
 
+struct vmw_compat_shader_manager;
+
 struct vmw_fpriv {
 	struct drm_master *locked_master;
 	struct ttm_object_file *tfile;
 	struct list_head fence_events;
+	bool gb_aware;
+	struct vmw_compat_shader_manager *shman;
 };
 
 struct vmw_dma_buffer {
@@ -272,6 +276,7 @@ struct vmw_ctx_bindinfo {
 	struct vmw_resource *ctx;
 	struct vmw_resource *res;
 	enum vmw_ctx_binding_type bt;
+	bool scrubbed;
 	union {
 		SVGA3dShaderType shader_type;
 		SVGA3dRenderTargetType rt_type;
@@ -318,7 +323,7 @@ struct vmw_sw_context{
 	struct drm_open_hash res_ht;
 	bool res_ht_initialized;
 	bool kernel; /**< is the called made from the kernel */
-	struct ttm_object_file *tfile;
+	struct vmw_fpriv *fp;
 	struct list_head validate_nodes;
 	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
 	uint32_t cur_reloc;
@@ -336,6 +341,7 @@ struct vmw_sw_context{
 	bool needs_post_query_barrier;
 	struct vmw_resource *error_resource;
 	struct vmw_ctx_binding_state staged_bindings;
+	struct list_head staged_shaders;
 };
 
 struct vmw_legacy_display;
@@ -569,6 +575,8 @@ struct vmw_user_resource_conv;
 
 extern void vmw_resource_unreference(struct vmw_resource **p_res);
 extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res);
 extern int vmw_resource_validate(struct vmw_resource *res);
 extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
 extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
@@ -957,6 +965,9 @@ extern void
 vmw_context_binding_state_transfer(struct vmw_resource *res,
 				   struct vmw_ctx_binding_state *cbs);
 extern void vmw_context_binding_res_list_kill(struct list_head *head);
+extern void vmw_context_binding_res_list_scrub(struct list_head *head);
+extern int vmw_context_rebind_all(struct vmw_resource *ctx);
+extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
 
 /*
  * Surface management - vmwgfx_surface.c
@@ -991,6 +1002,28 @@ extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
 				   struct drm_file *file_priv);
 extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
 				    struct drm_file *file_priv);
+extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
+				    SVGA3dShaderType shader_type,
+				    u32 *user_key);
+extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
+				      struct list_head *list);
+extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
+				      struct list_head *list);
+extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+				    u32 user_key,
+				    SVGA3dShaderType shader_type,
+				    struct list_head *list);
+extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+				 u32 user_key, const void *bytecode,
+				 SVGA3dShaderType shader_type,
+				 size_t size,
+				 struct ttm_object_file *tfile,
+				 struct list_head *list);
+extern struct vmw_compat_shader_manager *
+vmw_compat_shader_man_create(struct vmw_private *dev_priv);
+extern void
+vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man);
+
 
 /**
  * Inline helper functions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 7a5f1eb55c5a..269b85cc875a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -114,8 +114,10 @@ static void vmw_resource_list_unreserve(struct list_head *list,
 		 * persistent context binding tracker.
 		 */
 		if (unlikely(val->staged_bindings)) {
-			vmw_context_binding_state_transfer
-				(val->res, val->staged_bindings);
+			if (!backoff) {
+				vmw_context_binding_state_transfer
+					(val->res, val->staged_bindings);
+			}
 			kfree(val->staged_bindings);
 			val->staged_bindings = NULL;
 		}
@@ -178,6 +180,44 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
 }
 
 /**
+ * vmw_resource_context_res_add - Put resources previously bound to a context on
+ * the validation list
+ *
+ * @dev_priv: Pointer to a device private structure
+ * @sw_context: Pointer to a software context used for this command submission
+ * @ctx: Pointer to the context resource
+ *
+ * This function puts all resources that were previously bound to @ctx on
+ * the resource validation list. This is part of the context state reemission
+ */
+static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+					struct vmw_sw_context *sw_context,
+					struct vmw_resource *ctx)
+{
+	struct list_head *binding_list;
+	struct vmw_ctx_binding *entry;
+	int ret = 0;
+	struct vmw_resource *res;
+
+	mutex_lock(&dev_priv->binding_mutex);
+	binding_list = vmw_context_binding_list(ctx);
+
+	list_for_each_entry(entry, binding_list, ctx_list) {
+		res = vmw_resource_reference_unless_doomed(entry->bi.res);
+		if (unlikely(res == NULL))
+			continue;
+
+		ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
+		vmw_resource_unreference(&res);
+		if (unlikely(ret != 0))
+			break;
+	}
+
+	mutex_unlock(&dev_priv->binding_mutex);
+	return ret;
+}
+
+/**
  * vmw_resource_relocation_add - Add a relocation to the relocation list
  *
  * @list: Pointer to head of relocation list.
@@ -233,8 +273,12 @@ static void vmw_resource_relocations_apply(uint32_t *cb,
 {
 	struct vmw_resource_relocation *rel;
 
-	list_for_each_entry(rel, list, head)
-		cb[rel->offset] = rel->res->id;
+	list_for_each_entry(rel, list, head) {
+		if (likely(rel->res != NULL))
+			cb[rel->offset] = rel->res->id;
+		else
+			cb[rel->offset] = SVGA_3D_CMD_NOP;
+	}
 }
 
 static int vmw_cmd_invalid(struct vmw_private *dev_priv,
@@ -379,22 +423,27 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
 }
 
 /**
- * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it
  * on the resource validate list unless it's already there.
  *
  * @dev_priv: Pointer to a device private structure.
  * @sw_context: Pointer to the software context.
  * @res_type: Resource type.
  * @converter: User-space visisble type specific information.
- * @id: Pointer to the location in the command buffer currently being
+ * @id: user-space resource id handle.
+ * @id_loc: Pointer to the location in the command buffer currently being
  * parsed from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validalidation node. Populated
+ * on exit.
  */
-static int vmw_cmd_res_check(struct vmw_private *dev_priv,
-			     struct vmw_sw_context *sw_context,
-			     enum vmw_res_type res_type,
-			     const struct vmw_user_resource_conv *converter,
-			     uint32_t *id,
-			     struct vmw_resource_val_node **p_val)
+static int
+vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
+			 struct vmw_sw_context *sw_context,
+			 enum vmw_res_type res_type,
+			 const struct vmw_user_resource_conv *converter,
+			 uint32_t id,
+			 uint32_t *id_loc,
+			 struct vmw_resource_val_node **p_val)
 {
 	struct vmw_res_cache_entry *rcache =
 		&sw_context->res_cache[res_type];
@@ -402,7 +451,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
 	struct vmw_resource_val_node *node;
 	int ret;
 
-	if (*id == SVGA3D_INVALID_ID) {
+	if (id == SVGA3D_INVALID_ID) {
 		if (p_val)
 			*p_val = NULL;
 		if (res_type == vmw_res_context) {
@@ -417,7 +466,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
 	 * resource
 	 */
 
-	if (likely(rcache->valid && *id == rcache->handle)) {
+	if (likely(rcache->valid && id == rcache->handle)) {
 		const struct vmw_resource *res = rcache->res;
 
 		rcache->node->first_usage = false;
@@ -426,28 +475,28 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
 
 		return vmw_resource_relocation_add
 			(&sw_context->res_relocations, res,
-			 id - sw_context->buf_start);
+			 id_loc - sw_context->buf_start);
 	}
 
 	ret = vmw_user_resource_lookup_handle(dev_priv,
-					      sw_context->tfile,
-					      *id,
+					      sw_context->fp->tfile,
+					      id,
 					      converter,
 					      &res);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use resource 0x%08x.\n",
-			  (unsigned) *id);
+			  (unsigned) id);
 		dump_stack();
 		return ret;
 	}
 
 	rcache->valid = true;
 	rcache->res = res;
-	rcache->handle = *id;
+	rcache->handle = id;
 
 	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
 					  res,
-					  id - sw_context->buf_start);
+					  id_loc - sw_context->buf_start);
 	if (unlikely(ret != 0))
 		goto out_no_reloc;
 
@@ -459,7 +508,11 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
 	if (p_val)
 		*p_val = node;
 
-	if (node->first_usage && res_type == vmw_res_context) {
+	if (dev_priv->has_mob && node->first_usage &&
+	    res_type == vmw_res_context) {
+		ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
+		if (unlikely(ret != 0))
+			goto out_no_reloc;
 		node->staged_bindings =
 			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
 		if (node->staged_bindings == NULL) {
@@ -481,6 +534,59 @@ out_no_reloc:
 }
 
 /**
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * on the resource validate list unless it's already there.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @converter: User-space visisble type specific information.
+ * @id_loc: Pointer to the location in the command buffer currently being
+ * parsed from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validalidation node. Populated
+ * on exit.
+ */
+static int
+vmw_cmd_res_check(struct vmw_private *dev_priv,
+		  struct vmw_sw_context *sw_context,
+		  enum vmw_res_type res_type,
+		  const struct vmw_user_resource_conv *converter,
+		  uint32_t *id_loc,
+		  struct vmw_resource_val_node **p_val)
+{
+	return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
+					converter, *id_loc, id_loc, p_val);
+}
+
+/**
+ * vmw_rebind_contexts - Rebind all resources previously bound to
+ * referenced contexts.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Rebind context binding points that have been scrubbed because of eviction.
+ */
+static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
+{
+	struct vmw_resource_val_node *val;
+	int ret;
+
+	list_for_each_entry(val, &sw_context->resource_list, head) {
+		if (likely(!val->staged_bindings))
+			continue;
+
+		ret = vmw_context_rebind_all(val->res);
+		if (unlikely(ret != 0)) {
+			if (ret != -ERESTARTSYS)
+				DRM_ERROR("Failed to rebind context.\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/**
  * vmw_cmd_cid_check - Check a command header for valid context information.
  *
  * @dev_priv: Pointer to a device private structure.
@@ -767,7 +873,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 	struct vmw_relocation *reloc;
 	int ret;
 
-	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use MOB buffer.\n");
 		return -EINVAL;
@@ -828,7 +934,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	struct vmw_relocation *reloc;
 	int ret;
 
-	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use GMR region.\n");
 		return -EINVAL;
@@ -1127,7 +1233,8 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 
 	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
 
-	vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
+	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
+			     header);
 
 out_no_surface:
 	vmw_dmabuf_unreference(&vmw_bo);
@@ -1478,6 +1585,98 @@ static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
 				 &cmd->body.sid, NULL);
 }
 
+
+/**
+ * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
+				 struct vmw_sw_context *sw_context,
+				 SVGA3dCmdHeader *header)
+{
+	struct vmw_shader_define_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDefineShader body;
+	} *cmd;
+	int ret;
+	size_t size;
+
+	cmd = container_of(header, struct vmw_shader_define_cmd,
+			   header);
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->body.cid,
+				NULL);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (unlikely(!dev_priv->has_mob))
+		return 0;
+
+	size = cmd->header.size - sizeof(cmd->body);
+	ret = vmw_compat_shader_add(sw_context->fp->shman,
+				    cmd->body.shid, cmd + 1,
+				    cmd->body.type, size,
+				    sw_context->fp->tfile,
+				    &sw_context->staged_shaders);
+	if (unlikely(ret != 0))
+		return ret;
+
+	return vmw_resource_relocation_add(&sw_context->res_relocations,
+					   NULL, &cmd->header.id -
+					   sw_context->buf_start);
+
+	return 0;
+}
+
+/**
+ * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
+				  struct vmw_sw_context *sw_context,
+				  SVGA3dCmdHeader *header)
+{
+	struct vmw_shader_destroy_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdDestroyShader body;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_shader_destroy_cmd,
+			   header);
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->body.cid,
+				NULL);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (unlikely(!dev_priv->has_mob))
+		return 0;
+
+	ret = vmw_compat_shader_remove(sw_context->fp->shman,
+				       cmd->body.shid,
+				       cmd->body.type,
+				       &sw_context->staged_shaders);
+	if (unlikely(ret != 0))
+		return ret;
+
+	return vmw_resource_relocation_add(&sw_context->res_relocations,
+					   NULL, &cmd->header.id -
+					   sw_context->buf_start);
+
+	return 0;
+}
+
 /**
  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
  * command
@@ -1509,10 +1708,18 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
 	if (dev_priv->has_mob) {
 		struct vmw_ctx_bindinfo bi;
 		struct vmw_resource_val_node *res_node;
-
-		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
-					user_shader_converter,
-					&cmd->body.shid, &res_node);
+		u32 shid = cmd->body.shid;
+
+		if (shid != SVGA3D_INVALID_ID)
+			(void) vmw_compat_shader_lookup(sw_context->fp->shman,
+							cmd->body.type,
+							&shid);
+
+		ret = vmw_cmd_compat_res_check(dev_priv, sw_context,
+					       vmw_res_shader,
+					       user_shader_converter,
+					       shid,
+					       &cmd->body.shid, &res_node);
 		if (unlikely(ret != 0))
 			return ret;
 
@@ -1527,6 +1734,39 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
 }
 
 /**
+ * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
+				    struct vmw_sw_context *sw_context,
+				    SVGA3dCmdHeader *header)
+{
+	struct vmw_set_shader_const_cmd {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdSetShaderConst body;
+	} *cmd;
+	int ret;
+
+	cmd = container_of(header, struct vmw_set_shader_const_cmd,
+			   header);
+
+	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+				user_context_converter, &cmd->body.cid,
+				NULL);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (dev_priv->has_mob)
+		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
+
+	return 0;
+}
+
+/**
  * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
  * command
  *
@@ -1634,14 +1874,14 @@ static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 		    true, false, false),
 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
 		    false, false, false),
-	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check,
-		    true, true, false),
-	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check,
-		    true, true, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
+		    true, false, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
+		    true, false, false),
 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
 		    true, false, false),
-	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check,
-		    true, true, false),
+	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
+		    true, false, false),
 	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
 		    true, false, false),
 	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
@@ -2171,7 +2411,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	} else
 		sw_context->kernel = true;
 
-	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
+	sw_context->fp = vmw_fpriv(file_priv);
 	sw_context->cur_reloc = 0;
 	sw_context->cur_val_buf = 0;
 	sw_context->fence_flags = 0;
@@ -2188,16 +2428,17 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 			goto out_unlock;
 		sw_context->res_ht_initialized = true;
 	}
+	INIT_LIST_HEAD(&sw_context->staged_shaders);
 
 	INIT_LIST_HEAD(&resource_list);
 	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
 				command_size);
 	if (unlikely(ret != 0))
-		goto out_err;
+		goto out_err_nores;
 
 	ret = vmw_resources_reserve(sw_context);
 	if (unlikely(ret != 0))
-		goto out_err;
+		goto out_err_nores;
 
 	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
 	if (unlikely(ret != 0))
@@ -2225,6 +2466,12 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 		goto out_err;
 	}
 
+	if (dev_priv->has_mob) {
+		ret = vmw_rebind_contexts(sw_context);
+		if (unlikely(ret != 0))
+			goto out_err;
+	}
+
 	cmd = vmw_fifo_reserve(dev_priv, command_size);
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving fifo space for commands.\n");
@@ -2276,6 +2523,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 	}
 
 	list_splice_init(&sw_context->resource_list, &resource_list);
+	vmw_compat_shaders_commit(sw_context->fp->shman,
+				  &sw_context->staged_shaders);
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 
 	/*
@@ -2289,10 +2538,11 @@ int vmw_execbuf_process(struct drm_file *file_priv,
 out_unlock_binding:
 	mutex_unlock(&dev_priv->binding_mutex);
 out_err:
-	vmw_resource_relocations_free(&sw_context->res_relocations);
-	vmw_free_relocations(sw_context);
 	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
+out_err_nores:
 	vmw_resource_list_unreserve(&sw_context->resource_list, true);
+	vmw_resource_relocations_free(&sw_context->res_relocations);
+	vmw_free_relocations(sw_context);
 	vmw_clear_validations(sw_context);
 	if (unlikely(dev_priv->pinned_bo != NULL &&
 		     !dev_priv->query_cid_valid))
@@ -2301,6 +2551,8 @@ out_unlock:
 	list_splice_init(&sw_context->resource_list, &resource_list);
 	error_resource = sw_context->error_resource;
 	sw_context->error_resource = NULL;
+	vmw_compat_shaders_revert(sw_context->fp->shman,
+				  &sw_context->staged_shaders);
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 
 	/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 116c49736763..f9881f9e62bd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -29,12 +29,18 @@
 #include <drm/vmwgfx_drm.h>
 #include "vmwgfx_kms.h"
 
+struct svga_3d_compat_cap {
+	SVGA3dCapsRecordHeader header;
+	SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX];
+};
+
 int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
 	struct drm_vmw_getparam_arg *param =
 	    (struct drm_vmw_getparam_arg *)data;
+	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
 
 	switch (param->param) {
 	case DRM_VMW_PARAM_NUM_STREAMS:
@@ -60,6 +66,11 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
 		const struct vmw_fifo_state *fifo = &dev_priv->fifo;
 
+		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
+			param->value = SVGA3D_HWVERSION_WS8_B1;
+			break;
+		}
+
 		param->value =
 			ioread32(fifo_mem +
 				 ((fifo->capabilities &
@@ -69,17 +80,26 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 		break;
 	}
 	case DRM_VMW_PARAM_MAX_SURF_MEMORY:
-		param->value = dev_priv->memory_size;
+		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
+		    !vmw_fp->gb_aware)
+			param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2;
+		else
+			param->value = dev_priv->memory_size;
 		break;
 	case DRM_VMW_PARAM_3D_CAPS_SIZE:
-		if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
-			param->value = SVGA3D_DEVCAP_MAX;
+		if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
+		    vmw_fp->gb_aware)
+			param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
+		else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
+			param->value = sizeof(struct svga_3d_compat_cap) +
+				sizeof(uint32_t);
 		else
 			param->value = (SVGA_FIFO_3D_CAPS_LAST -
-					SVGA_FIFO_3D_CAPS + 1);
-		param->value *= sizeof(uint32_t);
+					SVGA_FIFO_3D_CAPS + 1) *
+				sizeof(uint32_t);
 		break;
 	case DRM_VMW_PARAM_MAX_MOB_MEMORY:
+		vmw_fp->gb_aware = true;
 		param->value = dev_priv->max_mob_pages * PAGE_SIZE;
 		break;
 	default:
@@ -91,6 +111,38 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
+static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
+			       size_t size)
+{
+	struct svga_3d_compat_cap *compat_cap =
+		(struct svga_3d_compat_cap *) bounce;
+	unsigned int i;
+	size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs);
+	unsigned int max_size;
+
+	if (size < pair_offset)
+		return -EINVAL;
+
+	max_size = (size - pair_offset) / sizeof(SVGA3dCapPair);
+
+	if (max_size > SVGA3D_DEVCAP_MAX)
+		max_size = SVGA3D_DEVCAP_MAX;
+
+	compat_cap->header.length =
+		(pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
+	compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
+
+	mutex_lock(&dev_priv->hw_mutex);
+	for (i = 0; i < max_size; ++i) {
+		vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
+		compat_cap->pairs[i][0] = i;
+		compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+	}
+	mutex_unlock(&dev_priv->hw_mutex);
+
+	return 0;
+}
+
 
 int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv)
@@ -104,41 +156,49 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 	void *bounce;
 	int ret;
 	bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
+	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
 
 	if (unlikely(arg->pad64 != 0)) {
 		DRM_ERROR("Illegal GET_3D_CAP argument.\n");
 		return -EINVAL;
 	}
 
-	if (gb_objects)
-		size = SVGA3D_DEVCAP_MAX;
+	if (gb_objects && vmw_fp->gb_aware)
+		size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
+	else if (gb_objects)
+		size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t);
 	else
-		size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1);
-
-	size *= sizeof(uint32_t);
+		size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) *
+			sizeof(uint32_t);
 
 	if (arg->max_size < size)
 		size = arg->max_size;
 
-	bounce = vmalloc(size);
+	bounce = vzalloc(size);
 	if (unlikely(bounce == NULL)) {
 		DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
 		return -ENOMEM;
 	}
 
-	if (gb_objects) {
-		int i;
+	if (gb_objects && vmw_fp->gb_aware) {
+		int i, num;
 		uint32_t *bounce32 = (uint32_t *) bounce;
 
+		num = size / sizeof(uint32_t);
+		if (num > SVGA3D_DEVCAP_MAX)
+			num = SVGA3D_DEVCAP_MAX;
+
 		mutex_lock(&dev_priv->hw_mutex);
-		for (i = 0; i < SVGA3D_DEVCAP_MAX; ++i) {
+		for (i = 0; i < num; ++i) {
 			vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
 			*bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
 		}
 		mutex_unlock(&dev_priv->hw_mutex);
-
+	} else if (gb_objects) {
+		ret = vmw_fill_compat_cap(dev_priv, bounce, size);
+		if (unlikely(ret != 0))
+			goto out_err;
 	} else {
-
 		fifo_mem = dev_priv->mmio_virt;
 		memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
 	}
@@ -146,6 +206,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 	ret = copy_to_user(buffer, bounce, size);
 	if (ret)
 		ret = -EFAULT;
+out_err:
 	vfree(bounce);
 
 	if (unlikely(ret != 0))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 4910e7b81811..d4a5a19cb8c3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -134,6 +134,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
+		ret = -ENOMEM;
 		goto out_no_fifo;
 	}
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 6fdd82d42f65..2aa4bc6a4d60 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -88,6 +88,11 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
 	return res;
 }
 
+struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res)
+{
+	return kref_get_unless_zero(&res->kref) ? res : NULL;
+}
 
 /**
  * vmw_resource_release_id - release a resource id to the id manager.
@@ -136,8 +141,12 @@ static void vmw_resource_release(struct kref *kref)
 		vmw_dmabuf_unreference(&res->backup);
 	}
 
-	if (likely(res->hw_destroy != NULL))
+	if (likely(res->hw_destroy != NULL)) {
 		res->hw_destroy(res);
+		mutex_lock(&dev_priv->binding_mutex);
+		vmw_context_binding_res_list_kill(&res->binding_head);
+		mutex_unlock(&dev_priv->binding_mutex);
+	}
 
 	id = res->id;
 	if (res->res_free != NULL)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 1457ec4b7125..217d941b8176 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -29,6 +29,8 @@
 #include "vmwgfx_resource_priv.h"
 #include "ttm/ttm_placement.h"
 
+#define VMW_COMPAT_SHADER_HT_ORDER 12
+
 struct vmw_shader {
 	struct vmw_resource res;
 	SVGA3dShaderType type;
@@ -40,6 +42,50 @@ struct vmw_user_shader {
 	struct vmw_shader shader;
 };
 
+/**
+ * enum vmw_compat_shader_state - Staging state for compat shaders
+ */
+enum vmw_compat_shader_state {
+	VMW_COMPAT_COMMITED,
+	VMW_COMPAT_ADD,
+	VMW_COMPAT_DEL
+};
+
+/**
+ * struct vmw_compat_shader - Metadata for compat shaders.
+ *
+ * @handle: The TTM handle of the guest backed shader.
+ * @tfile: The struct ttm_object_file the guest backed shader is registered
+ * with.
+ * @hash: Hash item for lookup.
+ * @head: List head for staging lists or the compat shader manager list.
+ * @state: Staging state.
+ *
+ * The structure is protected by the cmdbuf lock.
+ */
+struct vmw_compat_shader {
+	u32 handle;
+	struct ttm_object_file *tfile;
+	struct drm_hash_item hash;
+	struct list_head head;
+	enum vmw_compat_shader_state state;
+};
+
+/**
+ * struct vmw_compat_shader_manager - Compat shader manager.
+ *
+ * @shaders: Hash table containing staged and commited compat shaders
+ * @list: List of commited shaders.
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * @shaders and @list are protected by the cmdbuf mutex for now.
+ */
+struct vmw_compat_shader_manager {
+	struct drm_open_hash shaders;
+	struct list_head list;
+	struct vmw_private *dev_priv;
+};
+
 static void vmw_user_shader_free(struct vmw_resource *res);
 static struct vmw_resource *
 vmw_user_shader_base_to_res(struct ttm_base_object *base);
@@ -258,7 +304,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
 		return 0;
 
 	mutex_lock(&dev_priv->binding_mutex);
-	vmw_context_binding_res_list_kill(&res->binding_head);
+	vmw_context_binding_res_list_scrub(&res->binding_head);
 
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {
@@ -325,13 +371,81 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
 					 TTM_REF_USAGE);
 }
 
+int vmw_shader_alloc(struct vmw_private *dev_priv,
+		     struct vmw_dma_buffer *buffer,
+		     size_t shader_size,
+		     size_t offset,
+		     SVGA3dShaderType shader_type,
+		     struct ttm_object_file *tfile,
+		     u32 *handle)
+{
+	struct vmw_user_shader *ushader;
+	struct vmw_resource *res, *tmp;
+	int ret;
+
+	/*
+	 * Approximate idr memory usage with 128 bytes. It will be limited
+	 * by maximum number_of shaders anyway.
+	 */
+	if (unlikely(vmw_user_shader_size == 0))
+		vmw_user_shader_size =
+			ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;
+
+	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+				   vmw_user_shader_size,
+				   false, true);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Out of graphics memory for shader "
+				  "creation.\n");
+		goto out;
+	}
+
+	ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
+	if (unlikely(ushader == NULL)) {
+		ttm_mem_global_free(vmw_mem_glob(dev_priv),
+				    vmw_user_shader_size);
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	res = &ushader->shader.res;
+	ushader->base.shareable = false;
+	ushader->base.tfile = NULL;
+
+	/*
+	 * From here on, the destructor takes over resource freeing.
+	 */
+
+	ret = vmw_gb_shader_init(dev_priv, res, shader_size,
+				 offset, shader_type, buffer,
+				 vmw_user_shader_free);
+	if (unlikely(ret != 0))
+		goto out;
+
+	tmp = vmw_resource_reference(res);
+	ret = ttm_base_object_init(tfile, &ushader->base, false,
+				   VMW_RES_SHADER,
+				   &vmw_user_shader_base_release, NULL);
+
+	if (unlikely(ret != 0)) {
+		vmw_resource_unreference(&tmp);
+		goto out_err;
+	}
+
+	if (handle)
+		*handle = ushader->base.hash.key;
+out_err:
+	vmw_resource_unreference(&res);
+out:
+	return ret;
+}
+
+
 int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct vmw_user_shader *ushader;
-	struct vmw_resource *res;
-	struct vmw_resource *tmp;
 	struct drm_vmw_shader_create_arg *arg =
 		(struct drm_vmw_shader_create_arg *)data;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
@@ -373,69 +487,324 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
 		goto out_bad_arg;
 	}
 
-	/*
-	 * Approximate idr memory usage with 128 bytes. It will be limited
-	 * by maximum number_of shaders anyway.
-	 */
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		goto out_bad_arg;
 
-	if (unlikely(vmw_user_shader_size == 0))
-		vmw_user_shader_size = ttm_round_pot(sizeof(*ushader))
-			+ 128;
+	ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
+			       shader_type, tfile, &arg->shader_handle);
 
-	ret = ttm_read_lock(&vmaster->lock, true);
+	ttm_read_unlock(&vmaster->lock);
+out_bad_arg:
+	vmw_dmabuf_unreference(&buffer);
+	return ret;
+}
+
+/**
+ * vmw_compat_shader_lookup - Look up a compat shader
+ *
+ * @man: Pointer to the compat shader manager.
+ * @shader_type: The shader type, that combined with the user_key identifies
+ * the shader.
+ * @user_key: On entry, this should be a pointer to the user_key.
+ * On successful exit, it will contain the guest-backed shader's TTM handle.
+ *
+ * Returns 0 on success. Non-zero on failure, in which case the value pointed
+ * to by @user_key is unmodified.
+ */
+int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
+			     SVGA3dShaderType shader_type,
+			     u32 *user_key)
+{
+	struct drm_hash_item *hash;
+	int ret;
+	unsigned long key = *user_key | (shader_type << 24);
+
+	ret = drm_ht_find_item(&man->shaders, key, &hash);
 	if (unlikely(ret != 0))
 		return ret;
 
-	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
-				   vmw_user_shader_size,
-				   false, true);
-	if (unlikely(ret != 0)) {
-		if (ret != -ERESTARTSYS)
-			DRM_ERROR("Out of graphics memory for shader"
-				  " creation.\n");
-		goto out_unlock;
+	*user_key = drm_hash_entry(hash, struct vmw_compat_shader,
+				   hash)->handle;
+
+	return 0;
+}
+
+/**
+ * vmw_compat_shader_free - Free a compat shader.
+ *
+ * @man: Pointer to the compat shader manager.
+ * @entry: Pointer to a struct vmw_compat_shader.
+ *
+ * Frees a struct vmw_compat_shder entry and drops its reference to the
+ * guest backed shader.
+ */
+static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man,
+				   struct vmw_compat_shader *entry)
+{
+	list_del(&entry->head);
+	WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash));
+	WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle,
+					  TTM_REF_USAGE));
+	kfree(entry);
+}
+
+/**
+ * vmw_compat_shaders_commit - Commit a list of compat shader actions.
+ *
+ * @man: Pointer to the compat shader manager.
+ * @list: Caller's list of compat shader actions.
+ *
+ * This function commits a list of compat shader additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions has commited the fifo contents to the device.
+ */
+void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
+			       struct list_head *list)
+{
+	struct vmw_compat_shader *entry, *next;
+
+	list_for_each_entry_safe(entry, next, list, head) {
+		list_del(&entry->head);
+		switch (entry->state) {
+		case VMW_COMPAT_ADD:
+			entry->state = VMW_COMPAT_COMMITED;
+			list_add_tail(&entry->head, &man->list);
+			break;
+		case VMW_COMPAT_DEL:
+			ttm_ref_object_base_unref(entry->tfile, entry->handle,
+						  TTM_REF_USAGE);
+			kfree(entry);
+			break;
+		default:
+			BUG();
+			break;
+		}
 	}
+}
 
-	ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
-	if (unlikely(ushader == NULL)) {
-		ttm_mem_global_free(vmw_mem_glob(dev_priv),
-				    vmw_user_shader_size);
-		ret = -ENOMEM;
-		goto out_unlock;
+/**
+ * vmw_compat_shaders_revert - Revert a list of compat shader actions
+ *
+ * @man: Pointer to the compat shader manager.
+ * @list: Caller's list of compat shader actions.
+ *
+ * This function reverts a list of compat shader additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions failed for some reason, and the command stream was never
+ * submitted.
+ */
+void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
+			       struct list_head *list)
+{
+	struct vmw_compat_shader *entry, *next;
+	int ret;
+
+	list_for_each_entry_safe(entry, next, list, head) {
+		switch (entry->state) {
+		case VMW_COMPAT_ADD:
+			vmw_compat_shader_free(man, entry);
+			break;
+		case VMW_COMPAT_DEL:
+			ret = drm_ht_insert_item(&man->shaders, &entry->hash);
+			list_del(&entry->head);
+			list_add_tail(&entry->head, &man->list);
+			entry->state = VMW_COMPAT_COMMITED;
+			break;
+		default:
+			BUG();
+			break;
+		}
 	}
+}
 
-	res = &ushader->shader.res;
-	ushader->base.shareable = false;
-	ushader->base.tfile = NULL;
+/**
+ * vmw_compat_shader_remove - Stage a compat shader for removal.
+ *
+ * @man: Pointer to the compat shader manager
+ * @user_key: The key that is used to identify the shader. The key is
+ * unique to the shader type.
+ * @shader_type: Shader type.
+ * @list: Caller's list of staged shader actions.
+ *
+ * This function stages a compat shader for removal and removes the key from
+ * the shader manager's hash table. If the shader was previously only staged
+ * for addition it is completely removed (But the execbuf code may keep a
+ * reference if it was bound to a context between addition and removal). If
+ * it was previously commited to the manager, it is staged for removal.
+ */
+int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+			     u32 user_key, SVGA3dShaderType shader_type,
+			     struct list_head *list)
+{
+	struct vmw_compat_shader *entry;
+	struct drm_hash_item *hash;
+	int ret;
 
-	/*
-	 * From here on, the destructor takes over resource freeing.
-	 */
+	ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24),
+			       &hash);
+	if (likely(ret != 0))
+		return -EINVAL;
 
-	ret = vmw_gb_shader_init(dev_priv, res, arg->size,
-				 arg->offset, shader_type, buffer,
-				 vmw_user_shader_free);
+	entry = drm_hash_entry(hash, struct vmw_compat_shader, hash);
+
+	switch (entry->state) {
+	case VMW_COMPAT_ADD:
+		vmw_compat_shader_free(man, entry);
+		break;
+	case VMW_COMPAT_COMMITED:
+		(void) drm_ht_remove_item(&man->shaders, &entry->hash);
+		list_del(&entry->head);
+		entry->state = VMW_COMPAT_DEL;
+		list_add_tail(&entry->head, list);
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_compat_shader_add - Create a compat shader and add the
+ * key to the manager
+ *
+ * @man: Pointer to the compat shader manager
+ * @user_key: The key that is used to identify the shader. The key is
+ * unique to the shader type.
+ * @bytecode: Pointer to the bytecode of the shader.
+ * @shader_type: Shader type.
+ * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
+ * to be created with.
+ * @list: Caller's list of staged shader actions.
+ *
+ * Note that only the key is added to the shader manager's hash table.
+ * The shader is not yet added to the shader manager's list of shaders.
+ */
+int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+			  u32 user_key, const void *bytecode,
+			  SVGA3dShaderType shader_type,
+			  size_t size,
+			  struct ttm_object_file *tfile,
+			  struct list_head *list)
+{
+	struct vmw_dma_buffer *buf;
+	struct ttm_bo_kmap_obj map;
+	bool is_iomem;
+	struct vmw_compat_shader *compat;
+	u32 handle;
+	int ret;
+
+	if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16)
+		return -EINVAL;
+
+	/* Allocate and pin a DMA buffer */
+	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+	if (unlikely(buf == NULL))
+		return -ENOMEM;
+
+	ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement,
+			      true, vmw_dmabuf_bo_free);
 	if (unlikely(ret != 0))
-		goto out_unlock;
+		goto out;
 
-	tmp = vmw_resource_reference(res);
-	ret = ttm_base_object_init(tfile, &ushader->base, false,
-				   VMW_RES_SHADER,
-				   &vmw_user_shader_base_release, NULL);
+	ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
+	if (unlikely(ret != 0))
+		goto no_reserve;
 
+	/* Map and copy shader bytecode. */
+	ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
+			  &map);
 	if (unlikely(ret != 0)) {
-		vmw_resource_unreference(&tmp);
-		goto out_err;
+		ttm_bo_unreserve(&buf->base);
+		goto no_reserve;
 	}
 
-	arg->shader_handle = ushader->base.hash.key;
-out_err:
-	vmw_resource_unreference(&res);
-out_unlock:
-	ttm_read_unlock(&vmaster->lock);
-out_bad_arg:
-	vmw_dmabuf_unreference(&buffer);
+	memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
+	WARN_ON(is_iomem);
+
+	ttm_bo_kunmap(&map);
+	ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
+	WARN_ON(ret != 0);
+	ttm_bo_unreserve(&buf->base);
+
+	/* Create a guest-backed shader container backed by the dma buffer */
+	ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type,
+			       tfile, &handle);
+	vmw_dmabuf_unreference(&buf);
+	if (unlikely(ret != 0))
+		goto no_reserve;
+	/*
+	 * Create a compat shader structure and stage it for insertion
+	 * in the manager
+	 */
+	compat = kzalloc(sizeof(*compat), GFP_KERNEL);
+	if (compat == NULL)
+		goto no_compat;
+
+	compat->hash.key = user_key |  (shader_type << 24);
+	ret = drm_ht_insert_item(&man->shaders, &compat->hash);
+	if (unlikely(ret != 0))
+		goto out_invalid_key;
+
+	compat->state = VMW_COMPAT_ADD;
+	compat->handle = handle;
+	compat->tfile = tfile;
+	list_add_tail(&compat->head, list);
 
+	return 0;
+
+out_invalid_key:
+	kfree(compat);
+no_compat:
+	ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+no_reserve:
+out:
 	return ret;
+}
+
+/**
+ * vmw_compat_shader_man_create - Create a compat shader manager
+ *
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * Typically done at file open time. If successful returns a pointer to a
+ * compat shader manager. Otherwise returns an error pointer.
+ */
+struct vmw_compat_shader_manager *
+vmw_compat_shader_man_create(struct vmw_private *dev_priv)
+{
+	struct vmw_compat_shader_manager *man;
+	int ret;
+
+	man = kzalloc(sizeof(*man), GFP_KERNEL);
+
+	man->dev_priv = dev_priv;
+	INIT_LIST_HEAD(&man->list);
+	ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER);
+	if (ret == 0)
+		return man;
+
+	kfree(man);
+	return ERR_PTR(ret);
+}
+
+/**
+ * vmw_compat_shader_man_destroy - Destroy a compat shader manager
+ *
+ * @man: Pointer to the shader manager to destroy.
+ *
+ * Typically done at file close time.
+ */
+void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man)
+{
+	struct vmw_compat_shader *entry, *next;
+
+	mutex_lock(&man->dev_priv->cmdbuf_mutex);
+	list_for_each_entry_safe(entry, next, &man->list, head)
+		vmw_compat_shader_free(man, entry);
 
+	mutex_unlock(&man->dev_priv->cmdbuf_mutex);
+	kfree(man);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 979da1c246a5..82468d902915 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -908,8 +908,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
 	    rep->size_addr;
 
 	if (user_sizes)
-		ret = copy_to_user(user_sizes, srf->sizes,
-				   srf->num_sizes * sizeof(*srf->sizes));
+		ret = copy_to_user(user_sizes, &srf->base_size,
+				   sizeof(srf->base_size));
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("copy_to_user failed %p %u\n",
 			  user_sizes, srf->num_sizes);
@@ -1111,7 +1111,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
 		return 0;
 
 	mutex_lock(&dev_priv->binding_mutex);
-	vmw_context_binding_res_list_kill(&res->binding_head);
+	vmw_context_binding_res_list_scrub(&res->binding_head);
 
 	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 	if (unlikely(cmd == NULL)) {