Diffstat (limited to 'drivers/gpu/drm/panfrost')
 drivers/gpu/drm/panfrost/Makefile                 |   1
 drivers/gpu/drm/panfrost/TODO                     |  15
 drivers/gpu/drm/panfrost/panfrost_devfreq.c       |  22
 drivers/gpu/drm/panfrost/panfrost_devfreq.h       |   1
 drivers/gpu/drm/panfrost/panfrost_device.c        |  28
 drivers/gpu/drm/panfrost/panfrost_device.h        |  31
 drivers/gpu/drm/panfrost/panfrost_drv.c           | 196
 drivers/gpu/drm/panfrost/panfrost_gem.c           | 142
 drivers/gpu/drm/panfrost/panfrost_gem.h           |  23
 drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c  | 110
 drivers/gpu/drm/panfrost/panfrost_gpu.c           |   2
 drivers/gpu/drm/panfrost/panfrost_job.c           |  62
 drivers/gpu/drm/panfrost/panfrost_mmu.c           | 442
 drivers/gpu/drm/panfrost/panfrost_mmu.h           |   9
 14 files changed, 847 insertions(+), 237 deletions(-)
diff --git a/drivers/gpu/drm/panfrost/Makefile b/drivers/gpu/drm/panfrost/Makefile
index ecf0864cb515..b71935862417 100644
--- a/drivers/gpu/drm/panfrost/Makefile
+++ b/drivers/gpu/drm/panfrost/Makefile
@@ -5,6 +5,7 @@ panfrost-y := \
 	panfrost_device.o \
 	panfrost_devfreq.o \
 	panfrost_gem.o \
+	panfrost_gem_shrinker.o \
 	panfrost_gpu.o \
 	panfrost_job.o \
 	panfrost_mmu.o \
diff --git a/drivers/gpu/drm/panfrost/TODO b/drivers/gpu/drm/panfrost/TODO
index c2e44add37d8..536a0d4f8d29 100644
--- a/drivers/gpu/drm/panfrost/TODO
+++ b/drivers/gpu/drm/panfrost/TODO
@@ -6,22 +6,7 @@
   - Bifrost specific feature and issue handling
   - Coherent DMA support
 
-- Support for 2MB pages. The io-pgtable code already supports this. Finishing
-  support involves either copying or adapting the iommu API to handle passing
-  aligned addresses and sizes to the io-pgtable code.
-
-- Per FD address space support. The h/w supports multiple addresses spaces.
-  The hard part is handling when more address spaces are needed than what
-  the h/w provides.
-
-- Support pinning pages on demand (GPU page faults).
-
 - Support userspace controlled GPU virtual addresses. Needed for Vulkan. (Tomeu)
 
-- Support for madvise and a shrinker.
-
 - Compute job support. So called 'compute only' jobs need to be plumbed up to
   userspace.
-
-- Performance counter support. (Boris)
-
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index db798532b0b6..a1f5fa6a742a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -39,7 +39,7 @@ static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
 	 * If frequency scaling from low to high, adjust voltage first.
 	 * If frequency scaling from high to low, adjust frequency first.
 	 */
-	if (old_clk_rate < target_rate) {
+	if (old_clk_rate < target_rate && pfdev->regulator) {
 		err = regulator_set_voltage(pfdev->regulator, target_volt,
 					    target_volt);
 		if (err) {
@@ -53,12 +53,14 @@ static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
 	if (err) {
 		dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate,
 			err);
-		regulator_set_voltage(pfdev->regulator, pfdev->devfreq.cur_volt,
-				      pfdev->devfreq.cur_volt);
+		if (pfdev->regulator)
+			regulator_set_voltage(pfdev->regulator,
+					      pfdev->devfreq.cur_volt,
+					      pfdev->devfreq.cur_volt);
 		return err;
 	}
 
-	if (old_clk_rate > target_rate) {
+	if (old_clk_rate > target_rate && pfdev->regulator) {
 		err = regulator_set_voltage(pfdev->regulator, target_volt,
 					    target_volt);
 		if (err)
@@ -136,9 +138,6 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
 	int ret;
 	struct dev_pm_opp *opp;
 
-	if (!pfdev->regulator)
-		return 0;
-
 	ret = dev_pm_opp_of_add_table(&pfdev->pdev->dev);
 	if (ret == -ENODEV) /* Optional, continue without devfreq */
 		return 0;
@@ -157,17 +156,24 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
 	dev_pm_opp_put(opp);
 
 	pfdev->devfreq.devfreq = devm_devfreq_add_device(&pfdev->pdev->dev,
-			&panfrost_devfreq_profile, "simple_ondemand", NULL);
+			&panfrost_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
+			NULL);
 	if (IS_ERR(pfdev->devfreq.devfreq)) {
 		DRM_DEV_ERROR(&pfdev->pdev->dev, "Couldn't initialize GPU devfreq\n");
 		ret = PTR_ERR(pfdev->devfreq.devfreq);
 		pfdev->devfreq.devfreq = NULL;
+		dev_pm_opp_of_remove_table(&pfdev->pdev->dev);
 		return ret;
 	}
 
 	return 0;
 }
 
+void panfrost_devfreq_fini(struct panfrost_device *pfdev)
+{
+	dev_pm_opp_of_remove_table(&pfdev->pdev->dev);
+}
+
 void panfrost_devfreq_resume(struct panfrost_device *pfdev)
 {
 	int i;
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.h b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
index eb999531ed90..e3bc63e82843 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.h
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
@@ -5,6 +5,7 @@
 #define __PANFROST_DEVFREQ_H__
 
 int panfrost_devfreq_init(struct panfrost_device *pfdev);
+void panfrost_devfreq_fini(struct panfrost_device *pfdev);
 
 void panfrost_devfreq_resume(struct panfrost_device *pfdev);
 void panfrost_devfreq_suspend(struct panfrost_device *pfdev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index 8a111d7c0200..46b0b02e4289 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -5,7 +5,6 @@
 #include <linux/clk.h>
 #include <linux/reset.h>
 #include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
 #include <linux/regulator/consumer.h>
 
 #include "panfrost_device.h"
@@ -123,8 +122,9 @@ int panfrost_device_init(struct panfrost_device *pfdev)
 	mutex_init(&pfdev->sched_lock);
 	mutex_init(&pfdev->reset_lock);
 	INIT_LIST_HEAD(&pfdev->scheduled_jobs);
+	INIT_LIST_HEAD(&pfdev->as_lru_list);
 
-	spin_lock_init(&pfdev->hwaccess_lock);
+	spin_lock_init(&pfdev->as_lock);
 
 	err = panfrost_clk_init(pfdev);
 	if (err) {
@@ -164,14 +164,6 @@ int panfrost_device_init(struct panfrost_device *pfdev)
 	if (err)
 		goto err_out4;
 
-	/* runtime PM will wake us up later */
-	panfrost_gpu_power_off(pfdev);
-
-	pm_runtime_set_active(pfdev->dev);
-	pm_runtime_get_sync(pfdev->dev);
-	pm_runtime_mark_last_busy(pfdev->dev);
-	pm_runtime_put_autosuspend(pfdev->dev);
-
 	err = panfrost_perfcnt_init(pfdev);
 	if (err)
 		goto err_out5;
@@ -254,18 +246,22 @@ const char *panfrost_exception_name(struct panfrost_device *pfdev, u32 exception
 	return "UNKNOWN";
 }
 
+void panfrost_device_reset(struct panfrost_device *pfdev)
+{
+	panfrost_gpu_soft_reset(pfdev);
+
+	panfrost_gpu_power_on(pfdev);
+	panfrost_mmu_reset(pfdev);
+	panfrost_job_enable_interrupts(pfdev);
+}
+
 #ifdef CONFIG_PM
 int panfrost_device_resume(struct device *dev)
 {
 	struct platform_device *pdev = to_platform_device(dev);
 	struct panfrost_device *pfdev = platform_get_drvdata(pdev);
 
-	panfrost_gpu_soft_reset(pfdev);
-
-	/* TODO: Re-enable all other address spaces */
-	panfrost_gpu_power_on(pfdev);
-	panfrost_mmu_enable(pfdev, 0);
-	panfrost_job_enable_interrupts(pfdev);
+	panfrost_device_reset(pfdev);
 	panfrost_devfreq_resume(pfdev);
 
 	return 0;
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 83cc01cafde1..9c39b9794811 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -5,6 +5,8 @@
 #ifndef __PANFROST_DEVICE_H__
 #define __PANFROST_DEVICE_H__
 
+#include <linux/atomic.h>
+#include <linux/io-pgtable.h>
 #include <linux/spinlock.h>
 #include <drm/drm_device.h>
 #include <drm/drm_mm.h>
@@ -43,6 +45,7 @@ struct panfrost_features {
 	u32 js_features[16];
 
 	u32 nr_core_groups;
+	u32 thread_tls_alloc;
 
 	unsigned long hw_features[64 / BITS_PER_LONG];
 	unsigned long hw_issues[64 / BITS_PER_LONG];
@@ -60,11 +63,6 @@ struct panfrost_device {
 	struct drm_device *ddev;
 	struct platform_device *pdev;
 
-	spinlock_t hwaccess_lock;
-
-	struct drm_mm mm;
-	spinlock_t mm_lock;
-
 	void __iomem *iomem;
 	struct clk *clock;
 	struct clk *bus_clock;
@@ -73,7 +71,11 @@ struct panfrost_device {
 
 	struct panfrost_features features;
 
-	struct panfrost_mmu *mmu;
+	spinlock_t as_lock;
+	unsigned long as_in_use_mask;
+	unsigned long as_alloc_mask;
+	struct list_head as_lru_list;
+
 	struct panfrost_job_slot *js;
 
 	struct panfrost_job *jobs[NUM_JOB_SLOTS];
@@ -84,6 +86,10 @@ struct panfrost_device {
 	struct mutex sched_lock;
 	struct mutex reset_lock;
 
+	struct mutex shrinker_lock;
+	struct list_head shrinker_list;
+	struct shrinker shrinker;
+
 	struct {
 		struct devfreq *devfreq;
 		struct thermal_cooling_device *cooling;
@@ -93,10 +99,22 @@ struct panfrost_device {
 	} devfreq;
 };
 
+struct panfrost_mmu {
+	struct io_pgtable_cfg pgtbl_cfg;
+	struct io_pgtable_ops *pgtbl_ops;
+	int as;
+	atomic_t as_count;
+	struct list_head list;
+};
+
 struct panfrost_file_priv {
 	struct panfrost_device *pfdev;
 
 	struct drm_sched_entity sched_entity[NUM_JOB_SLOTS];
+
+	struct panfrost_mmu mmu;
+	struct drm_mm mm;
+	spinlock_t mm_lock;
 };
 
 static inline struct panfrost_device *to_panfrost_device(struct drm_device *ddev)
@@ -127,6 +145,7 @@ int panfrost_unstable_ioctl_check(void);
 
 int panfrost_device_init(struct panfrost_device *pfdev);
 void panfrost_device_fini(struct panfrost_device *pfdev);
+void panfrost_device_reset(struct panfrost_device *pfdev);
 
 int panfrost_device_resume(struct device *dev);
 int panfrost_device_suspend(struct device *dev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 85b4b51b6a0d..bc2ddeb55f5d 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -32,10 +32,42 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
 	if (param->pad != 0)
 		return -EINVAL;
 
+#define PANFROST_FEATURE(name, member)			\
+	case DRM_PANFROST_PARAM_ ## name:		\
+		param->value = pfdev->features.member;	\
+		break
+#define PANFROST_FEATURE_ARRAY(name, member, max)			\
+	case DRM_PANFROST_PARAM_ ## name ## 0 ...			\
+		DRM_PANFROST_PARAM_ ## name ## max:			\
+		param->value = pfdev->features.member[param->param -	\
+			DRM_PANFROST_PARAM_ ## name ## 0];		\
+		break
+
 	switch (param->param) {
-	case DRM_PANFROST_PARAM_GPU_PROD_ID:
-		param->value = pfdev->features.id;
-		break;
+		PANFROST_FEATURE(GPU_PROD_ID, id);
+		PANFROST_FEATURE(GPU_REVISION, revision);
+		PANFROST_FEATURE(SHADER_PRESENT, shader_present);
+		PANFROST_FEATURE(TILER_PRESENT, tiler_present);
+		PANFROST_FEATURE(L2_PRESENT, l2_present);
+		PANFROST_FEATURE(STACK_PRESENT, stack_present);
+		PANFROST_FEATURE(AS_PRESENT, as_present);
+		PANFROST_FEATURE(JS_PRESENT, js_present);
+		PANFROST_FEATURE(L2_FEATURES, l2_features);
+		PANFROST_FEATURE(CORE_FEATURES, core_features);
+		PANFROST_FEATURE(TILER_FEATURES, tiler_features);
+		PANFROST_FEATURE(MEM_FEATURES, mem_features);
+		PANFROST_FEATURE(MMU_FEATURES, mmu_features);
+		PANFROST_FEATURE(THREAD_FEATURES, thread_features);
+		PANFROST_FEATURE(MAX_THREADS, max_threads);
+		PANFROST_FEATURE(THREAD_MAX_WORKGROUP_SZ,
+				thread_max_workgroup_sz);
+		PANFROST_FEATURE(THREAD_MAX_BARRIER_SZ,
+				thread_max_barrier_sz);
+		PANFROST_FEATURE(COHERENCY_FEATURES, coherency_features);
+		PANFROST_FEATURE_ARRAY(TEXTURE_FEATURES, texture_features, 3);
+		PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15);
+		PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups);
+		PANFROST_FEATURE(THREAD_TLS_ALLOC, thread_tls_alloc);
 	default:
 		return -EINVAL;
 	}
@@ -46,29 +78,26 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
 static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
 		struct drm_file *file)
 {
-	int ret;
-	struct drm_gem_shmem_object *shmem;
+	struct panfrost_gem_object *bo;
 	struct drm_panfrost_create_bo *args = data;
 
-	if (!args->size || args->flags || args->pad)
+	if (!args->size || args->pad ||
+	    (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
 		return -EINVAL;
 
-	shmem = drm_gem_shmem_create_with_handle(file, dev, args->size,
-						 &args->handle);
-	if (IS_ERR(shmem))
-		return PTR_ERR(shmem);
+	/* Heaps should never be executable */
+	if ((args->flags & PANFROST_BO_HEAP) &&
+	    !(args->flags & PANFROST_BO_NOEXEC))
+		return -EINVAL;
 
-	ret = panfrost_mmu_map(to_panfrost_bo(&shmem->base));
-	if (ret)
-		goto err_free;
+	bo = panfrost_gem_create_with_handle(file, dev, args->size, args->flags,
+					     &args->handle);
+	if (IS_ERR(bo))
+		return PTR_ERR(bo);
 
-	args->offset = to_panfrost_bo(&shmem->base)->node.start << PAGE_SHIFT;
+	args->offset = bo->node.start << PAGE_SHIFT;
 
 	return 0;
-
-err_free:
-	drm_gem_handle_delete(file, args->handle);
-	return ret;
 }
 
 /**
@@ -245,7 +274,7 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
 	if (!gem_obj)
 		return -ENOENT;
 
-	ret = reservation_object_wait_timeout_rcu(gem_obj->resv, true,
+	ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true,
 						  true, timeout);
 	if (!ret)
 		ret = timeout ? -ETIMEDOUT : -EBUSY;
@@ -273,6 +302,10 @@ static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
 		return -ENOENT;
 	}
 
+	/* Don't allow mmapping of heap objects as pages are not pinned. */
+	if (to_panfrost_bo(gem_obj)->is_heap)
+		return -EINVAL;
+
 	ret = drm_gem_create_mmap_offset(gem_obj);
 	if (ret == 0)
 		args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
@@ -301,6 +334,38 @@ static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
 	return 0;
 }
 
+static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
+				  struct drm_file *file_priv)
+{
+	struct drm_panfrost_madvise *args = data;
+	struct panfrost_device *pfdev = dev->dev_private;
+	struct drm_gem_object *gem_obj;
+
+	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
+	if (!gem_obj) {
+		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
+		return -ENOENT;
+	}
+
+	args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
+
+	if (args->retained) {
+		struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj);
+
+		mutex_lock(&pfdev->shrinker_lock);
+
+		if (args->madv == PANFROST_MADV_DONTNEED)
+			list_add_tail(&bo->base.madv_list, &pfdev->shrinker_list);
+		else if (args->madv == PANFROST_MADV_WILLNEED)
+			list_del_init(&bo->base.madv_list);
+
+		mutex_unlock(&pfdev->shrinker_lock);
+	}
+
+	drm_gem_object_put_unlocked(gem_obj);
+	return 0;
+}
+
 int panfrost_unstable_ioctl_check(void)
 {
 	if (!unstable_ioctls)
@@ -309,9 +374,36 @@ int panfrost_unstable_ioctl_check(void)
 	return 0;
 }
 
+#define PFN_4G		(SZ_4G >> PAGE_SHIFT)
+#define PFN_4G_MASK	(PFN_4G - 1)
+#define PFN_16M		(SZ_16M >> PAGE_SHIFT)
+
+static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
+					 unsigned long color,
+					 u64 *start, u64 *end)
+{
+	/* Executable buffers can't start or end on a 4GB boundary */
+	if (!(color & PANFROST_BO_NOEXEC)) {
+		u64 next_seg;
+
+		if ((*start & PFN_4G_MASK) == 0)
+			(*start)++;
+
+		if ((*end & PFN_4G_MASK) == 0)
+			(*end)--;
+
+		next_seg = ALIGN(*start, PFN_4G);
+		if (next_seg - *start <= PFN_16M)
+			*start = next_seg + 1;
+
+		*end = min(*end, ALIGN(*start, PFN_4G) - 1);
+	}
+}
+
 static int
 panfrost_open(struct drm_device *dev, struct drm_file *file)
 {
+	int ret;
 	struct panfrost_device *pfdev = dev->dev_private;
 	struct panfrost_file_priv *panfrost_priv;
 
@@ -322,7 +414,28 @@ panfrost_open(struct drm_device *dev, struct drm_file *file)
 	panfrost_priv->pfdev = pfdev;
 	file->driver_priv = panfrost_priv;
 
-	return panfrost_job_open(panfrost_priv);
+	spin_lock_init(&panfrost_priv->mm_lock);
+
+	/* 4G enough for now. can be 48-bit */
+	drm_mm_init(&panfrost_priv->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
+	panfrost_priv->mm.color_adjust = panfrost_drm_mm_color_adjust;
+
+	ret = panfrost_mmu_pgtable_alloc(panfrost_priv);
+	if (ret)
+		goto err_pgtable;
+
+	ret = panfrost_job_open(panfrost_priv);
+	if (ret)
+		goto err_job;
+
+	return 0;
+
+err_job:
+	panfrost_mmu_pgtable_free(panfrost_priv);
+err_pgtable:
+	drm_mm_takedown(&panfrost_priv->mm);
+	kfree(panfrost_priv);
+	return ret;
 }
 
 static void
@@ -333,6 +446,8 @@ panfrost_postclose(struct drm_device *dev, struct drm_file *file)
 	panfrost_perfcnt_close(panfrost_priv);
 	panfrost_job_close(panfrost_priv);
 
+	panfrost_mmu_pgtable_free(panfrost_priv);
+	drm_mm_takedown(&panfrost_priv->mm);
 	kfree(panfrost_priv);
 }
 
@@ -352,13 +467,18 @@ static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
 	PANFROST_IOCTL(GET_BO_OFFSET,	get_bo_offset,	DRM_RENDER_ALLOW),
 	PANFROST_IOCTL(PERFCNT_ENABLE,	perfcnt_enable,	DRM_RENDER_ALLOW),
 	PANFROST_IOCTL(PERFCNT_DUMP,	perfcnt_dump,	DRM_RENDER_ALLOW),
+	PANFROST_IOCTL(MADVISE,		madvise,	DRM_RENDER_ALLOW),
 };
 
 DEFINE_DRM_GEM_SHMEM_FOPS(panfrost_drm_driver_fops);
 
+/*
+ * Panfrost driver version:
+ * - 1.0 - initial interface
+ * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO
+ */
 static struct drm_driver panfrost_drm_driver = {
-	.driver_features	= DRIVER_RENDER | DRIVER_GEM | DRIVER_PRIME |
-				  DRIVER_SYNCOBJ,
+	.driver_features	= DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
 	.open			= panfrost_open,
 	.postclose		= panfrost_postclose,
 	.ioctls			= panfrost_drm_driver_ioctls,
@@ -368,7 +488,7 @@ static struct drm_driver panfrost_drm_driver = {
 	.desc			= "panfrost DRM",
 	.date			= "20180908",
 	.major			= 1,
-	.minor			= 0,
+	.minor			= 1,
 
 	.gem_create_object	= panfrost_gem_create_object,
 	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
@@ -400,14 +520,8 @@ static int panfrost_probe(struct platform_device *pdev)
 	ddev->dev_private = pfdev;
 	pfdev->ddev = ddev;
 
-	spin_lock_init(&pfdev->mm_lock);
-
-	/* 4G enough for now. can be 48-bit */
-	drm_mm_init(&pfdev->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
-
-	pm_runtime_use_autosuspend(pfdev->dev);
-	pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */
-	pm_runtime_enable(pfdev->dev);
+	mutex_init(&pfdev->shrinker_lock);
+	INIT_LIST_HEAD(&pfdev->shrinker_list);
 
 	err = panfrost_device_init(pfdev);
 	if (err) {
@@ -423,16 +537,26 @@ static int panfrost_probe(struct platform_device *pdev)
 		goto err_out1;
 	}
 
+	pm_runtime_set_active(pfdev->dev);
+	pm_runtime_mark_last_busy(pfdev->dev);
+	pm_runtime_enable(pfdev->dev);
+	pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */
+	pm_runtime_use_autosuspend(pfdev->dev);
+
 	/*
 	 * Register the DRM device with the core and the connectors with
 	 * sysfs
 	 */
 	err = drm_dev_register(ddev, 0);
 	if (err < 0)
-		goto err_out1;
+		goto err_out2;
+
+	panfrost_gem_shrinker_init(ddev);
 
 	return 0;
 
+err_out2:
+	panfrost_devfreq_fini(pfdev);
 err_out1:
 	panfrost_device_fini(pfdev);
 err_out0:
@@ -447,10 +571,14 @@ static int panfrost_remove(struct platform_device *pdev)
 	struct drm_device *ddev = pfdev->ddev;
 
 	drm_dev_unregister(ddev);
+	panfrost_gem_shrinker_cleanup(ddev);
+
 	pm_runtime_get_sync(pfdev->dev);
-	pm_runtime_put_sync_autosuspend(pfdev->dev);
-	pm_runtime_disable(pfdev->dev);
+	panfrost_devfreq_fini(pfdev);
 	panfrost_device_fini(pfdev);
+	pm_runtime_put_sync_suspend(pfdev->dev);
+	pm_runtime_disable(pfdev->dev);
+
 	drm_dev_put(ddev);
 	return 0;
 }
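
The uapi side of the new CREATE_BO flags and the MADVISE ioctl lives in
include/uapi/drm/panfrost_drm.h. A minimal userspace sketch of the v1.1
interface, assuming that header is installed; the render-node path and
the sizes are hypothetical:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include "panfrost_drm.h" /* kernel uapi header; install path varies */

    int main(void)
    {
        int fd = open("/dev/dri/renderD128", O_RDWR); /* hypothetical node */
        if (fd < 0)
            return 1;

        /* Heap BOs grow on GPU page fault and must be NOEXEC: the flags
         * check in panfrost_ioctl_create_bo() rejects HEAP alone. */
        struct drm_panfrost_create_bo create = {
            .size = 4 << 20,  /* heaps are rounded up to 2MB multiples */
            .flags = PANFROST_BO_HEAP | PANFROST_BO_NOEXEC,
        };
        if (ioctl(fd, DRM_IOCTL_PANFROST_CREATE_BO, &create))
            return 1;
        printf("GPU VA: 0x%llx\n", (unsigned long long)create.offset);

        /* Mark the BO purgeable: while madv is DONTNEED the new shrinker
         * may reclaim its pages under memory pressure. */
        struct drm_panfrost_madvise madv = {
            .handle = create.handle,
            .madv = PANFROST_MADV_DONTNEED,
        };
        if (ioctl(fd, DRM_IOCTL_PANFROST_MADVISE, &madv))
            return 1;

        /* Flip back to WILLNEED before reuse; retained == 0 means the
         * backing store was purged and the contents are gone. */
        madv.madv = PANFROST_MADV_WILLNEED;
        if (ioctl(fd, DRM_IOCTL_PANFROST_MADVISE, &madv))
            return 1;
        printf("retained: %u\n", madv.retained);
        return 0;
    }

Note that heap BOs cannot be mmapped or pinned (panfrost_ioctl_mmap_bo()
and panfrost_gem_pin() refuse them); their pages only materialize through
the GPU page fault path added in panfrost_mmu.c below.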
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index b46416be5a54..acb07fe06580 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -19,20 +19,95 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
 	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
 	struct panfrost_device *pfdev = obj->dev->dev_private;
 
+	if (bo->sgts) {
+		int i;
+		int n_sgt = bo->base.base.size / SZ_2M;
+
+		for (i = 0; i < n_sgt; i++) {
+			if (bo->sgts[i].sgl) {
+				dma_unmap_sg(pfdev->dev, bo->sgts[i].sgl,
+					     bo->sgts[i].nents, DMA_BIDIRECTIONAL);
+				sg_free_table(&bo->sgts[i]);
+			}
+		}
+		kfree(bo->sgts);
+	}
+
+	mutex_lock(&pfdev->shrinker_lock);
+	if (!list_empty(&bo->base.madv_list))
+		list_del(&bo->base.madv_list);
+	mutex_unlock(&pfdev->shrinker_lock);
+
+	drm_gem_shmem_free_object(obj);
+}
+
+static int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+{
+	int ret;
+	size_t size = obj->size;
+	u64 align;
+	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+	unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0;
+	struct panfrost_file_priv *priv = file_priv->driver_priv;
+
+	/*
+	 * Executable buffers cannot cross a 16MB boundary as the program
+	 * counter is 24-bits. We assume executable buffers will be less than
+	 * 16MB and aligning executable buffers to their size will avoid
+	 * crossing a 16MB boundary.
+	 */
+	if (!bo->noexec)
+		align = size >> PAGE_SHIFT;
+	else
+		align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
+
+	bo->mmu = &priv->mmu;
+	spin_lock(&priv->mm_lock);
+	ret = drm_mm_insert_node_generic(&priv->mm, &bo->node,
+					 size >> PAGE_SHIFT, align, color, 0);
+	spin_unlock(&priv->mm_lock);
+	if (ret)
+		return ret;
+
+	if (!bo->is_heap) {
+		ret = panfrost_mmu_map(bo);
+		if (ret) {
+			spin_lock(&priv->mm_lock);
+			drm_mm_remove_node(&bo->node);
+			spin_unlock(&priv->mm_lock);
+		}
+	}
+	return ret;
+}
+
+static void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
+{
+	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+	struct panfrost_file_priv *priv = file_priv->driver_priv;
+
 	if (bo->is_mapped)
 		panfrost_mmu_unmap(bo);
 
-	spin_lock(&pfdev->mm_lock);
-	drm_mm_remove_node(&bo->node);
-	spin_unlock(&pfdev->mm_lock);
+	spin_lock(&priv->mm_lock);
+	if (drm_mm_node_allocated(&bo->node))
+		drm_mm_remove_node(&bo->node);
+	spin_unlock(&priv->mm_lock);
+}
 
-	drm_gem_shmem_free_object(obj);
+static int panfrost_gem_pin(struct drm_gem_object *obj)
+{
+	if (to_panfrost_bo(obj)->is_heap)
+		return -EINVAL;
+
+	return drm_gem_shmem_pin(obj);
 }
 
 static const struct drm_gem_object_funcs panfrost_gem_funcs = {
 	.free = panfrost_gem_free_object,
+	.open = panfrost_gem_open,
+	.close = panfrost_gem_close,
 	.print_info = drm_gem_shmem_print_info,
-	.pin = drm_gem_shmem_pin,
+	.pin = panfrost_gem_pin,
 	.unpin = drm_gem_shmem_unpin,
 	.get_sg_table = drm_gem_shmem_get_sg_table,
 	.vmap = drm_gem_shmem_vmap,
@@ -50,10 +125,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = {
  */
 struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
 {
-	int ret;
-	struct panfrost_device *pfdev = dev->dev_private;
 	struct panfrost_gem_object *obj;
-	u64 align;
 
 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 	if (!obj)
@@ -61,21 +133,42 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
 
 	obj->base.base.funcs = &panfrost_gem_funcs;
 
-	size = roundup(size, PAGE_SIZE);
-	align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0;
+	return &obj->base.base;
+}
 
-	spin_lock(&pfdev->mm_lock);
-	ret = drm_mm_insert_node_generic(&pfdev->mm, &obj->node,
-					 size >> PAGE_SHIFT, align, 0, 0);
-	spin_unlock(&pfdev->mm_lock);
+struct panfrost_gem_object *
+panfrost_gem_create_with_handle(struct drm_file *file_priv,
+				struct drm_device *dev, size_t size,
+				u32 flags,
+				uint32_t *handle)
+{
+	int ret;
+	struct drm_gem_shmem_object *shmem;
+	struct panfrost_gem_object *bo;
+
+	/* Round up heap allocations to 2MB to keep fault handling simple */
+	if (flags & PANFROST_BO_HEAP)
+		size = roundup(size, SZ_2M);
+
+	shmem = drm_gem_shmem_create(dev, size);
+	if (IS_ERR(shmem))
+		return ERR_CAST(shmem);
+
+	bo = to_panfrost_bo(&shmem->base);
+	bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
+	bo->is_heap = !!(flags & PANFROST_BO_HEAP);
+
+	/*
+	 * Register the object in the file's handle (idr) table; the
+	 * returned handle is the id userspace sees.
+	 */
+	ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
+	/* drop reference from allocate - handle holds it now. */
+	drm_gem_object_put_unlocked(&shmem->base);
 	if (ret)
-		goto free_obj;
+		return ERR_PTR(ret);
 
-	return &obj->base.base;
-
-free_obj:
-	kfree(obj);
-	return ERR_PTR(ret);
+	return bo;
 }
 
 struct drm_gem_object *
@@ -84,17 +177,14 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev,
 				   struct sg_table *sgt)
 {
 	struct drm_gem_object *obj;
-	struct panfrost_gem_object *pobj;
+	struct panfrost_gem_object *bo;
 
 	obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
 	if (IS_ERR(obj))
 		return ERR_CAST(obj);
 
-	pobj = to_panfrost_bo(obj);
-
-	obj->resv = attach->dmabuf->resv;
-
-	panfrost_mmu_map(pobj);
+	bo = to_panfrost_bo(obj);
+	bo->noexec = true;
 
 	return obj;
 }
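
The alignment choice in panfrost_gem_open() above rests on a small
arithmetic fact: a power-of-two-sized block aligned to its own size, with
size <= 16MB, can never straddle a 16MB boundary, which is what the
24-bit program counter requires of executable buffers. A standalone check
of that invariant (plain C, hypothetical values; the driver works in GPU
pages, this sketch in bytes):

    #include <assert.h>
    #include <stdint.h>

    #define SZ_16M (16ull << 20)

    int main(void)
    {
        uint64_t size, start;

        /* For every power-of-two size up to 16MB, every size-aligned
         * start in the first 4GB stays within one 16MB window. */
        for (size = 4096; size <= SZ_16M; size <<= 1)
            for (start = 0; start < (1ull << 32); start += size)
                assert(start / SZ_16M == (start + size - 1) / SZ_16M);
        return 0;
    }

Avoiding the 4GB boundaries themselves is handled separately, by the
panfrost_drm_mm_color_adjust() callback added to panfrost_drv.c above.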
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index 6dbcaba020fc..50920819cc16 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -7,11 +7,17 @@
 #include <drm/drm_gem_shmem_helper.h>
 #include <drm/drm_mm.h>
 
+struct panfrost_mmu;
+
 struct panfrost_gem_object {
 	struct drm_gem_shmem_object base;
+	struct sg_table *sgts;
 
+	struct panfrost_mmu *mmu;
 	struct drm_mm_node node;
-	bool is_mapped;
+	bool is_mapped		:1;
+	bool noexec		:1;
+	bool is_heap		:1;
 };
 
 static inline
@@ -20,6 +26,12 @@ struct  panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
 	return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
 }
 
+static inline
+struct  panfrost_gem_object *drm_mm_node_to_panfrost_bo(struct drm_mm_node *node)
+{
+	return container_of(node, struct panfrost_gem_object, node);
+}
+
 struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);
 
 struct drm_gem_object *
@@ -27,4 +39,13 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev,
 				   struct dma_buf_attachment *attach,
 				   struct sg_table *sgt);
 
+struct panfrost_gem_object *
+panfrost_gem_create_with_handle(struct drm_file *file_priv,
+				struct drm_device *dev, size_t size,
+				u32 flags,
+				uint32_t *handle);
+
+void panfrost_gem_shrinker_init(struct drm_device *dev);
+void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
+
 #endif /* __PANFROST_GEM_H__ */
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
new file mode 100644
index 000000000000..458f0fa68111
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019 Arm Ltd.
+ *
+ * Based on msm_gem_shrinker.c:
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/list.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_gem_shmem_helper.h>
+
+#include "panfrost_device.h"
+#include "panfrost_gem.h"
+#include "panfrost_mmu.h"
+
+static unsigned long
+panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
+{
+	struct panfrost_device *pfdev =
+		container_of(shrinker, struct panfrost_device, shrinker);
+	struct drm_gem_shmem_object *shmem;
+	unsigned long count = 0;
+
+	if (!mutex_trylock(&pfdev->shrinker_lock))
+		return 0;
+
+	list_for_each_entry(shmem, &pfdev->shrinker_list, madv_list) {
+		if (drm_gem_shmem_is_purgeable(shmem))
+			count += shmem->base.size >> PAGE_SHIFT;
+	}
+
+	mutex_unlock(&pfdev->shrinker_lock);
+
+	return count;
+}
+
+static bool panfrost_gem_purge(struct drm_gem_object *obj)
+{
+	struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+	if (!mutex_trylock(&shmem->pages_lock))
+		return false;
+
+	panfrost_mmu_unmap(to_panfrost_bo(obj));
+	drm_gem_shmem_purge_locked(obj);
+
+	mutex_unlock(&shmem->pages_lock);
+	return true;
+}
+
+static unsigned long
+panfrost_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
+{
+	struct panfrost_device *pfdev =
+		container_of(shrinker, struct panfrost_device, shrinker);
+	struct drm_gem_shmem_object *shmem, *tmp;
+	unsigned long freed = 0;
+
+	if (!mutex_trylock(&pfdev->shrinker_lock))
+		return SHRINK_STOP;
+
+	list_for_each_entry_safe(shmem, tmp, &pfdev->shrinker_list, madv_list) {
+		if (freed >= sc->nr_to_scan)
+			break;
+		if (drm_gem_shmem_is_purgeable(shmem) &&
+		    panfrost_gem_purge(&shmem->base)) {
+			freed += shmem->base.size >> PAGE_SHIFT;
+			list_del_init(&shmem->madv_list);
+		}
+	}
+
+	mutex_unlock(&pfdev->shrinker_lock);
+
+	if (freed > 0)
+		pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);
+
+	return freed;
+}
+
+/**
+ * panfrost_gem_shrinker_init - Initialize panfrost shrinker
+ * @dev: DRM device
+ *
+ * This function registers and sets up the panfrost shrinker.
+ */
+void panfrost_gem_shrinker_init(struct drm_device *dev)
+{
+	struct panfrost_device *pfdev = dev->dev_private;
+	pfdev->shrinker.count_objects = panfrost_gem_shrinker_count;
+	pfdev->shrinker.scan_objects = panfrost_gem_shrinker_scan;
+	pfdev->shrinker.seeks = DEFAULT_SEEKS;
+	WARN_ON(register_shrinker(&pfdev->shrinker));
+}
+
+/**
+ * panfrost_gem_shrinker_cleanup - Clean up panfrost shrinker
+ * @dev: DRM device
+ *
+ * This function unregisters the panfrost shrinker.
+ */
+void panfrost_gem_shrinker_cleanup(struct drm_device *dev)
+{
+	struct panfrost_device *pfdev = dev->dev_private;
+
+	if (pfdev->shrinker.nr_deferred) {
+		unregister_shrinker(&pfdev->shrinker);
+	}
+}
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index 20ab333fc925..f67ed925c0ef 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -232,6 +232,8 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
 	pfdev->features.stack_present = gpu_read(pfdev, GPU_STACK_PRESENT_LO);
 	pfdev->features.stack_present |= (u64)gpu_read(pfdev, GPU_STACK_PRESENT_HI) << 32;
 
+	pfdev->features.thread_tls_alloc = gpu_read(pfdev, GPU_THREAD_TLS_ALLOC);
+
 	gpu_id = gpu_read(pfdev, GPU_ID);
 	pfdev->features.revision = gpu_id & 0xffff;
 	pfdev->features.id = gpu_id >> 16;
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 9bb9260d9181..a58551668d9a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -6,7 +6,7 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <drm/gpu_scheduler.h>
 #include <drm/panfrost_drm.h>
 
@@ -141,7 +141,6 @@ static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
 static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
 {
 	struct panfrost_device *pfdev = job->pfdev;
-	unsigned long flags;
 	u32 cfg;
 	u64 jc_head = job->jc;
 	int ret;
@@ -150,11 +149,14 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
 	if (ret < 0)
 		return;
 
-	if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js))))
-		goto end;
+	if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js)))) {
+		pm_runtime_put_sync_autosuspend(pfdev->dev);
+		return;
+	}
+
+	cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu);
 
 	panfrost_devfreq_record_transition(pfdev, js);
-	spin_lock_irqsave(&pfdev->hwaccess_lock, flags);
 
 	job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
 	job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);
@@ -163,8 +165,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
 
 	/* start MMU, medium priority, cache clean/flush on end, clean/flush on
 	 * start */
-	/* TODO: different address spaces */
-	cfg = JS_CONFIG_THREAD_PRI(8) |
+	cfg |= JS_CONFIG_THREAD_PRI(8) |
 		JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
 		JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;
 
@@ -184,12 +185,6 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
 				job, js, jc_head);
 
 	job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
-
-	spin_unlock_irqrestore(&pfdev->hwaccess_lock, flags);
-
-end:
-	pm_runtime_mark_last_busy(pfdev->dev);
-	pm_runtime_put_autosuspend(pfdev->dev);
 }
 
 static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
@@ -199,7 +194,7 @@ static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
 	int i;
 
 	for (i = 0; i < bo_count; i++)
-		implicit_fences[i] = reservation_object_get_excl_rcu(bos[i]->resv);
+		implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv);
 }
 
 static void panfrost_attach_object_fences(struct drm_gem_object **bos,
@@ -209,7 +204,7 @@ static void panfrost_attach_object_fences(struct drm_gem_object **bos,
 	int i;
 
 	for (i = 0; i < bo_count; i++)
-		reservation_object_add_excl_fence(bos[i]->resv, fence);
+		dma_resv_add_excl_fence(bos[i]->resv, fence);
 }
 
 int panfrost_job_push(struct panfrost_job *job)
@@ -368,6 +363,7 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
 	struct panfrost_job *job = to_panfrost_job(sched_job);
 	struct panfrost_device *pfdev = job->pfdev;
 	int js = panfrost_job_get_slot(job);
+	unsigned long flags;
 	int i;
 
 	/*
@@ -377,8 +373,9 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
 	if (dma_fence_is_signaled(job->done_fence))
 		return;
 
-	dev_err(pfdev->dev, "gpu sched timeout, js=%d, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
+	dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
 		js,
+		job_read(pfdev, JS_CONFIG(js)),
 		job_read(pfdev, JS_STATUS(js)),
 		job_read(pfdev, JS_HEAD_LO(js)),
 		job_read(pfdev, JS_TAIL_LO(js)),
@@ -392,15 +389,19 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
 	if (sched_job)
 		drm_sched_increase_karma(sched_job);
 
+	spin_lock_irqsave(&pfdev->js->job_lock, flags);
+	for (i = 0; i < NUM_JOB_SLOTS; i++) {
+		if (pfdev->jobs[i]) {
+			pm_runtime_put_noidle(pfdev->dev);
+			pfdev->jobs[i] = NULL;
+		}
+	}
+	spin_unlock_irqrestore(&pfdev->js->job_lock, flags);
+
 	/* panfrost_core_dump(pfdev); */
 
 	panfrost_devfreq_record_transition(pfdev, js);
-	panfrost_gpu_soft_reset(pfdev);
-
-	/* TODO: Re-enable all other address spaces */
-	panfrost_mmu_enable(pfdev, 0);
-	panfrost_gpu_power_on(pfdev);
-	panfrost_job_enable_interrupts(pfdev);
+	panfrost_device_reset(pfdev);
 
 	for (i = 0; i < NUM_JOB_SLOTS; i++)
 		drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);
@@ -453,8 +454,21 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
 		}
 
 		if (status & JOB_INT_MASK_DONE(j)) {
-			panfrost_devfreq_record_transition(pfdev, j);
-			dma_fence_signal(pfdev->jobs[j]->done_fence);
+			struct panfrost_job *job;
+
+			spin_lock(&pfdev->js->job_lock);
+			job = pfdev->jobs[j];
+			/* Only NULL if job timeout occurred */
+			if (job) {
+				pfdev->jobs[j] = NULL;
+
+				panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
+				panfrost_devfreq_record_transition(pfdev, j);
+
+				dma_fence_signal_locked(job->done_fence);
+				pm_runtime_put_autosuspend(pfdev->dev);
+			}
+			spin_unlock(&pfdev->js->job_lock);
 		}
 
 		status &= ~mask;
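
panfrost_job_hw_submit() now takes a reference on the submitting file's
address space via panfrost_mmu_as_get(), and the DONE interrupt drops it
with panfrost_mmu_as_put(), so a hardware AS becomes reclaimable once all
of its in-flight jobs retire. A much-simplified, single-threaded model of
that allocator (hypothetical user-space code; the real one in
panfrost_mmu.c below holds pfdev->as_lock and reprograms the MMU when a
slot changes hands):

    #include <assert.h>
    #include <string.h>

    #define NUM_AS 8                      /* hypothetical as_present pool */

    struct ctx { int as; int as_count; }; /* one per open DRM file */

    static struct ctx *slot[NUM_AS];      /* hw slot -> owner, NULL if free */
    static int lru[NUM_AS], lru_len;      /* lru[0] is most recently used */

    static void lru_touch(int as)         /* move a slot to the MRU end */
    {
        int i = 0;

        while (lru[i] != as)
            i++;
        memmove(&lru[1], &lru[0], i * sizeof(lru[0]));
        lru[0] = as;
    }

    static int as_get(struct ctx *c)
    {
        int i, as = -1;

        if (c->as >= 0) {                 /* fast path: still resident */
            c->as_count++;
            lru_touch(c->as);
            return c->as;
        }
        for (i = 0; i < NUM_AS; i++)      /* prefer a free slot */
            if (!slot[i]) {
                as = i;
                lru[lru_len++] = as;
                break;
            }
        if (as < 0) {                     /* steal the LRU idle slot */
            for (i = lru_len - 1; i >= 0; i--)
                if (slot[lru[i]]->as_count == 0) {
                    as = lru[i];
                    break;
                }
            assert(as >= 0);              /* the driver WARNs here too */
            slot[as]->as = -1;            /* evict the previous owner */
        }
        slot[as] = c;
        c->as = as;
        c->as_count = 1;                  /* the new job holds a reference */
        lru_touch(as);
        return as;
    }

    static void as_put(struct ctx *c)     /* job done: slot may be reclaimed */
    {
        assert(c->as_count > 0);
        c->as_count--;
    }

    int main(void)
    {
        struct ctx a = { -1, 0 }, b = { -1, 0 };

        assert(as_get(&a) != as_get(&b)); /* distinct files, distinct slots */
        as_put(&a);
        as_put(&b);
        return 0;
    }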
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 6e8145c36e93..6010f9ee7c1f 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -1,7 +1,9 @@
 // SPDX-License-Identifier:	GPL-2.0
 /* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+#include <linux/atomic.h>
 #include <linux/bitfield.h>
 #include <linux/delay.h>
+#include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
@@ -9,6 +11,7 @@
 #include <linux/iommu.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/shmem_fs.h>
 #include <linux/sizes.h>
 
 #include "panfrost_device.h"
@@ -20,12 +23,6 @@
 #define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
 #define mmu_read(dev, reg) readl(dev->iomem + reg)
 
-struct panfrost_mmu {
-	struct io_pgtable_cfg pgtbl_cfg;
-	struct io_pgtable_ops *pgtbl_ops;
-	struct mutex lock;
-};
-
 static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
 {
 	int ret;
@@ -83,13 +80,11 @@ static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
 }
 
 
-static int mmu_hw_do_operation(struct panfrost_device *pfdev, u32 as_nr,
-		u64 iova, size_t size, u32 op)
+static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
+				      u64 iova, size_t size, u32 op)
 {
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&pfdev->hwaccess_lock, flags);
+	if (as_nr < 0)
+		return 0;
 
 	if (op != AS_COMMAND_UNLOCK)
 		lock_region(pfdev, as_nr, iova, size);
@@ -98,21 +93,29 @@ static int mmu_hw_do_operation(struct panfrost_device *pfdev, u32 as_nr,
 	write_cmd(pfdev, as_nr, op);
 
 	/* Wait for the flush to complete */
-	ret = wait_ready(pfdev, as_nr);
+	return wait_ready(pfdev, as_nr);
+}
 
-	spin_unlock_irqrestore(&pfdev->hwaccess_lock, flags);
+static int mmu_hw_do_operation(struct panfrost_device *pfdev,
+			       struct panfrost_mmu *mmu,
+			       u64 iova, size_t size, u32 op)
+{
+	int ret;
 
+	spin_lock(&pfdev->as_lock);
+	ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
+	spin_unlock(&pfdev->as_lock);
 	return ret;
 }
 
-void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr)
+static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
 {
-	struct io_pgtable_cfg *cfg = &pfdev->mmu->pgtbl_cfg;
+	int as_nr = mmu->as;
+	struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;
 	u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
 	u64 memattr = cfg->arm_mali_lpae_cfg.memattr;
 
-	mmu_write(pfdev, MMU_INT_CLEAR, ~0);
-	mmu_write(pfdev, MMU_INT_MASK, ~0);
+	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
 
 	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
 	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);
@@ -126,8 +129,10 @@ void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr)
 	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
 }
 
-static void mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
+static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
 {
+	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
+
 	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
 	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);
 
@@ -137,6 +142,80 @@ static void mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
 	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
 }
 
+u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
+{
+	int as;
+
+	spin_lock(&pfdev->as_lock);
+
+	as = mmu->as;
+	if (as >= 0) {
+		int en = atomic_inc_return(&mmu->as_count);
+		WARN_ON(en >= NUM_JOB_SLOTS);
+
+		list_move(&mmu->list, &pfdev->as_lru_list);
+		goto out;
+	}
+
+	/* Check for a free AS */
+	as = ffz(pfdev->as_alloc_mask);
+	if (!(BIT(as) & pfdev->features.as_present)) {
+		struct panfrost_mmu *lru_mmu;
+
+		list_for_each_entry_reverse(lru_mmu, &pfdev->as_lru_list, list) {
+			if (!atomic_read(&lru_mmu->as_count))
+				break;
+		}
+		WARN_ON(&lru_mmu->list == &pfdev->as_lru_list);
+
+		list_del_init(&lru_mmu->list);
+		as = lru_mmu->as;
+
+		WARN_ON(as < 0);
+		lru_mmu->as = -1;
+	}
+
+	/* Assign the free or reclaimed AS to the FD */
+	mmu->as = as;
+	set_bit(as, &pfdev->as_alloc_mask);
+	atomic_set(&mmu->as_count, 1);
+	list_add(&mmu->list, &pfdev->as_lru_list);
+
+	dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask);
+
+	panfrost_mmu_enable(pfdev, mmu);
+
+out:
+	spin_unlock(&pfdev->as_lock);
+	return as;
+}
+
+void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
+{
+	atomic_dec(&mmu->as_count);
+	WARN_ON(atomic_read(&mmu->as_count) < 0);
+}
+
+void panfrost_mmu_reset(struct panfrost_device *pfdev)
+{
+	struct panfrost_mmu *mmu, *mmu_tmp;
+
+	spin_lock(&pfdev->as_lock);
+
+	pfdev->as_alloc_mask = 0;
+
+	list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) {
+		mmu->as = -1;
+		atomic_set(&mmu->as_count, 0);
+		list_del_init(&mmu->list);
+	}
+
+	spin_unlock(&pfdev->as_lock);
+
+	mmu_write(pfdev, MMU_INT_CLEAR, ~0);
+	mmu_write(pfdev, MMU_INT_MASK, ~0);
+}
+
 static size_t get_pgsize(u64 addr, size_t size)
 {
 	if (addr & (SZ_2M - 1) || size < SZ_2M)
@@ -145,53 +224,69 @@ static size_t get_pgsize(u64 addr, size_t size)
 	return SZ_2M;
 }
 
-int panfrost_mmu_map(struct panfrost_gem_object *bo)
+void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
+			      struct panfrost_mmu *mmu,
+			      u64 iova, size_t size)
 {
-	struct drm_gem_object *obj = &bo->base.base;
-	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
-	struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
-	u64 iova = bo->node.start << PAGE_SHIFT;
-	unsigned int count;
-	struct scatterlist *sgl;
-	struct sg_table *sgt;
-	int ret;
+	if (mmu->as < 0)
+		return;
 
-	if (WARN_ON(bo->is_mapped))
-		return 0;
+	pm_runtime_get_noresume(pfdev->dev);
 
-	sgt = drm_gem_shmem_get_pages_sgt(obj);
-	if (WARN_ON(IS_ERR(sgt)))
-		return PTR_ERR(sgt);
+	/* Flush the PTs only if we're already awake */
+	if (pm_runtime_active(pfdev->dev))
+		mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);
 
-	ret = pm_runtime_get_sync(pfdev->dev);
-	if (ret < 0)
-		return ret;
+	pm_runtime_put_sync_autosuspend(pfdev->dev);
+}
 
-	mutex_lock(&pfdev->mmu->lock);
+static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
+		      u64 iova, int prot, struct sg_table *sgt)
+{
+	unsigned int count;
+	struct scatterlist *sgl;
+	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
+	u64 start_iova = iova;
 
 	for_each_sg(sgt->sgl, sgl, sgt->nents, count) {
 		unsigned long paddr = sg_dma_address(sgl);
 		size_t len = sg_dma_len(sgl);
 
-		dev_dbg(pfdev->dev, "map: iova=%llx, paddr=%lx, len=%zx", iova, paddr, len);
+		dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);
 
 		while (len) {
 			size_t pgsize = get_pgsize(iova | paddr, len);
 
-			ops->map(ops, iova, paddr, pgsize, IOMMU_WRITE | IOMMU_READ);
+			ops->map(ops, iova, paddr, pgsize, prot);
 			iova += pgsize;
 			paddr += pgsize;
 			len -= pgsize;
 		}
 	}
 
-	mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
-			    bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);
+	panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);
+
+	return 0;
+}
+
+int panfrost_mmu_map(struct panfrost_gem_object *bo)
+{
+	struct drm_gem_object *obj = &bo->base.base;
+	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
+	struct sg_table *sgt;
+	int prot = IOMMU_READ | IOMMU_WRITE;
 
-	mutex_unlock(&pfdev->mmu->lock);
+	if (WARN_ON(bo->is_mapped))
+		return 0;
+
+	if (bo->noexec)
+		prot |= IOMMU_NOEXEC;
 
-	pm_runtime_mark_last_busy(pfdev->dev);
-	pm_runtime_put_autosuspend(pfdev->dev);
+	sgt = drm_gem_shmem_get_pages_sgt(obj);
+	if (WARN_ON(IS_ERR(sgt)))
+		return PTR_ERR(sgt);
+
+	mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt);
 	bo->is_mapped = true;
 
 	return 0;
@@ -201,51 +296,34 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
 {
 	struct drm_gem_object *obj = &bo->base.base;
 	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
-	struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
+	struct io_pgtable_ops *ops = bo->mmu->pgtbl_ops;
 	u64 iova = bo->node.start << PAGE_SHIFT;
 	size_t len = bo->node.size << PAGE_SHIFT;
 	size_t unmapped_len = 0;
-	int ret;
 
 	if (WARN_ON(!bo->is_mapped))
 		return;
 
-	dev_dbg(pfdev->dev, "unmap: iova=%llx, len=%zx", iova, len);
-
-	ret = pm_runtime_get_sync(pfdev->dev);
-	if (ret < 0)
-		return;
-
-	mutex_lock(&pfdev->mmu->lock);
+	dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len);
 
 	while (unmapped_len < len) {
 		size_t unmapped_page;
 		size_t pgsize = get_pgsize(iova, len - unmapped_len);
 
-		unmapped_page = ops->unmap(ops, iova, pgsize, NULL);
-		if (!unmapped_page)
-			break;
-
-		iova += unmapped_page;
-		unmapped_len += unmapped_page;
+		if (ops->iova_to_phys(ops, iova)) {
+			unmapped_page = ops->unmap(ops, iova, pgsize, NULL);
+			WARN_ON(unmapped_page != pgsize);
+		}
+		iova += pgsize;
+		unmapped_len += pgsize;
 	}
 
-	mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
-			    bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);
-
-	mutex_unlock(&pfdev->mmu->lock);
-
-	pm_runtime_mark_last_busy(pfdev->dev);
-	pm_runtime_put_autosuspend(pfdev->dev);
+	panfrost_mmu_flush_range(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, len);
 	bo->is_mapped = false;
 }
 
 static void mmu_tlb_inv_context_s1(void *cookie)
-{
-	struct panfrost_device *pfdev = cookie;
-
-	mmu_hw_do_operation(pfdev, 0, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
-}
+{}
 
 static void mmu_tlb_sync_context(void *cookie)
 {
@@ -271,6 +349,167 @@ static const struct iommu_flush_ops mmu_tlb_ops = {
 	.tlb_flush_leaf = mmu_tlb_flush_leaf,
 };
 
+int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv)
+{
+	struct panfrost_mmu *mmu = &priv->mmu;
+	struct panfrost_device *pfdev = priv->pfdev;
+
+	INIT_LIST_HEAD(&mmu->list);
+	mmu->as = -1;
+
+	mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
+		.pgsize_bitmap	= SZ_4K | SZ_2M,
+		.ias		= FIELD_GET(0xff, pfdev->features.mmu_features),
+		.oas		= FIELD_GET(0xff00, pfdev->features.mmu_features),
+		.tlb		= &mmu_tlb_ops,
+		.iommu_dev	= pfdev->dev,
+	};
+
+	mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
+					      priv);
+	if (!mmu->pgtbl_ops)
+		return -EINVAL;
+
+	return 0;
+}
+
+void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
+{
+	struct panfrost_device *pfdev = priv->pfdev;
+	struct panfrost_mmu *mmu = &priv->mmu;
+
+	spin_lock(&pfdev->as_lock);
+	if (mmu->as >= 0) {
+		pm_runtime_get_noresume(pfdev->dev);
+		if (pm_runtime_active(pfdev->dev))
+			panfrost_mmu_disable(pfdev, mmu->as);
+		pm_runtime_put_autosuspend(pfdev->dev);
+
+		clear_bit(mmu->as, &pfdev->as_alloc_mask);
+		clear_bit(mmu->as, &pfdev->as_in_use_mask);
+		list_del(&mmu->list);
+	}
+	spin_unlock(&pfdev->as_lock);
+
+	free_io_pgtable_ops(mmu->pgtbl_ops);
+}
+
+static struct drm_mm_node *addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
+{
+	struct drm_mm_node *node = NULL;
+	u64 offset = addr >> PAGE_SHIFT;
+	struct panfrost_mmu *mmu;
+
+	spin_lock(&pfdev->as_lock);
+	list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
+		struct panfrost_file_priv *priv;
+		if (as != mmu->as)
+			continue;
+
+		priv = container_of(mmu, struct panfrost_file_priv, mmu);
+		drm_mm_for_each_node(node, &priv->mm) {
+			if (offset >= node->start && offset < (node->start + node->size))
+				goto out;
+		}
+	}
+
+out:
+	spin_unlock(&pfdev->as_lock);
+	return node;
+}
+
+#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
+
+int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr)
+{
+	int ret, i;
+	struct drm_mm_node *node;
+	struct panfrost_gem_object *bo;
+	struct address_space *mapping;
+	pgoff_t page_offset;
+	struct sg_table *sgt;
+	struct page **pages;
+
+	node = addr_to_drm_mm_node(pfdev, as, addr);
+	if (!node)
+		return -ENOENT;
+
+	bo = drm_mm_node_to_panfrost_bo(node);
+	if (!bo->is_heap) {
+		dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
+			 node->start << PAGE_SHIFT);
+		return -EINVAL;
+	}
+	WARN_ON(bo->mmu->as != as);
+
+	/* Assume 2MB alignment and size multiple */
+	addr &= ~((u64)SZ_2M - 1);
+	page_offset = addr >> PAGE_SHIFT;
+	page_offset -= node->start;
+
+	mutex_lock(&bo->base.pages_lock);
+
+	if (!bo->base.pages) {
+		bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
+				     sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
+		if (!bo->sgts) {
+			mutex_unlock(&bo->base.pages_lock);
+			return -ENOMEM;
+		}
+
+		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
+				       sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
+		if (!pages) {
+			kfree(bo->sgts);
+			bo->sgts = NULL;
+			mutex_unlock(&bo->base.pages_lock);
+			return -ENOMEM;
+		}
+		bo->base.pages = pages;
+		bo->base.pages_use_count = 1;
+	} else
+		pages = bo->base.pages;
+
+	mapping = bo->base.base.filp->f_mapping;
+	mapping_set_unevictable(mapping);
+
+	for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
+		pages[i] = shmem_read_mapping_page(mapping, i);
+		if (IS_ERR(pages[i])) {
+			mutex_unlock(&bo->base.pages_lock);
+			ret = PTR_ERR(pages[i]);
+			goto err_pages;
+		}
+	}
+
+	mutex_unlock(&bo->base.pages_lock);
+
+	sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
+	ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
+					NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
+	if (ret)
+		goto err_pages;
+
+	if (!dma_map_sg(pfdev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL)) {
+		ret = -EINVAL;
+		goto err_map;
+	}
+
+	mmu_map_sg(pfdev, bo->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
+
+	bo->is_mapped = true;
+
+	dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
+
+	return 0;
+
+err_map:
+	sg_free_table(sgt);
+err_pages:
+	drm_gem_shmem_put_pages(&bo->base);
+	return ret;
+}
+
 static const char *access_type_name(struct panfrost_device *pfdev,
 		u32 fault_status)
 {
@@ -295,13 +534,19 @@ static const char *access_type_name(struct panfrost_device *pfdev,
 static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
 {
 	struct panfrost_device *pfdev = data;
-	u32 status = mmu_read(pfdev, MMU_INT_STAT);
-	int i;
 
-	if (!status)
+	if (!mmu_read(pfdev, MMU_INT_STAT))
 		return IRQ_NONE;
 
-	dev_err(pfdev->dev, "mmu irq status=%x\n", status);
+	mmu_write(pfdev, MMU_INT_MASK, 0);
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
+{
+	struct panfrost_device *pfdev = data;
+	u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
+	int i, ret;
 
 	for (i = 0; status; i++) {
 		u32 mask = BIT(i) | BIT(i + 16);
@@ -323,6 +568,18 @@ static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
 		access_type = (fault_status >> 8) & 0x3;
 		source_id = (fault_status >> 16);
 
+		/* Page fault only */
+		if ((status & mask) == BIT(i)) {
+			WARN_ON(exception_type < 0xC1 || exception_type > 0xC4);
+
+			ret = panfrost_mmu_map_fault_addr(pfdev, i, addr);
+			if (!ret) {
+				mmu_write(pfdev, MMU_INT_CLEAR, BIT(i));
+				status &= ~mask;
+				continue;
+			}
+		}
+
 		/* terminal fault, print info about the fault */
 		dev_err(pfdev->dev,
 			"Unhandled Page fault in AS%d at VA 0x%016llX\n"
@@ -345,50 +602,26 @@ static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
 		status &= ~mask;
 	}
 
+	mmu_write(pfdev, MMU_INT_MASK, ~0);
 	return IRQ_HANDLED;
 };
 
 int panfrost_mmu_init(struct panfrost_device *pfdev)
 {
-	struct io_pgtable_ops *pgtbl_ops;
 	int err, irq;
 
-	pfdev->mmu = devm_kzalloc(pfdev->dev, sizeof(*pfdev->mmu), GFP_KERNEL);
-	if (!pfdev->mmu)
-		return -ENOMEM;
-
-	mutex_init(&pfdev->mmu->lock);
-
 	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
 	if (irq <= 0)
 		return -ENODEV;
 
-	err = devm_request_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
-			       IRQF_SHARED, "mmu", pfdev);
+	err = devm_request_threaded_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
+					panfrost_mmu_irq_handler_thread,
+					IRQF_SHARED, "mmu", pfdev);
 
 	if (err) {
 		dev_err(pfdev->dev, "failed to request mmu irq");
 		return err;
 	}
-	mmu_write(pfdev, MMU_INT_CLEAR, ~0);
-	mmu_write(pfdev, MMU_INT_MASK, ~0);
-
-	pfdev->mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
-		.pgsize_bitmap	= SZ_4K | SZ_2M,
-		.ias		= FIELD_GET(0xff, pfdev->features.mmu_features),
-		.oas		= FIELD_GET(0xff00, pfdev->features.mmu_features),
-		.tlb		= &mmu_tlb_ops,
-		.iommu_dev	= pfdev->dev,
-	};
-
-	pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &pfdev->mmu->pgtbl_cfg,
-					 pfdev);
-	if (!pgtbl_ops)
-		return -ENOMEM;
-
-	pfdev->mmu->pgtbl_ops = pgtbl_ops;
-
-	panfrost_mmu_enable(pfdev, 0);
 
 	return 0;
 }
@@ -396,7 +629,4 @@ int panfrost_mmu_init(struct panfrost_device *pfdev)
 void panfrost_mmu_fini(struct panfrost_device *pfdev)
 {
 	mmu_write(pfdev, MMU_INT_MASK, 0);
-	mmu_disable(pfdev, 0);
-
-	free_io_pgtable_ops(pfdev->mmu->pgtbl_ops);
 }
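
panfrost_mmu_map_fault_addr() above grows heap BOs in 2MB granules: the
faulting address is rounded down to a 2MB boundary, NUM_FAULT_PAGES (512)
pages are read in from shmem, gathered into that granule's sg_table, and
mapped read/write with IOMMU_NOEXEC. A small sketch of the index
arithmetic, with hypothetical addresses:

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define SZ_2M (2ull << 20)
    #define NUM_FAULT_PAGES (SZ_2M >> PAGE_SHIFT)  /* 512 pages per granule */

    int main(void)
    {
        uint64_t node_start = 0x8000000ull >> PAGE_SHIFT; /* BO base, GPU pages */
        uint64_t addr = 0x8345678;                        /* faulting GPU VA */
        uint64_t page_offset, sgt_idx;

        addr &= ~(SZ_2M - 1);                    /* granule base: 0x8200000 */
        page_offset = (addr >> PAGE_SHIFT) - node_start;
        sgt_idx = page_offset / NUM_FAULT_PAGES; /* which bo->sgts[] entry */

        assert(addr == 0x8200000);
        assert(page_offset == 512);              /* second granule of the BO */
        assert(sgt_idx == 1);
        return 0;
    }

Because panfrost_gem_create_with_handle() rounds heap sizes up to a 2MB
multiple, every fault fills a whole granule, which keeps the sgts[]
bookkeeping and the unmap path in panfrost_gem_free_object() simple.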
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h
index f5878d86a5ce..7c5b6775ae23 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.h
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h
@@ -5,13 +5,20 @@
 #define __PANFROST_MMU_H__
 
 struct panfrost_gem_object;
+struct panfrost_file_priv;
+struct panfrost_mmu;
 
 int panfrost_mmu_map(struct panfrost_gem_object *bo);
 void panfrost_mmu_unmap(struct panfrost_gem_object *bo);
 
 int panfrost_mmu_init(struct panfrost_device *pfdev);
 void panfrost_mmu_fini(struct panfrost_device *pfdev);
+void panfrost_mmu_reset(struct panfrost_device *pfdev);
 
-void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr);
+u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
+void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
+
+int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv);
+void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv);
 
 #endif