From c734ee22f9c339cf946262ac06269d1605bed8a7 Mon Sep 17 00:00:00 2001
From: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Date: Wed, 6 Mar 2019 16:07:04 +0100
Subject: [PATCH] drm/panfrost: Rework uapi

Change to have a single job per submit ioctl, and use fences to
synchronize jobs.

Signed-off-by: Tomeu Vizoso <tomeu.vizoso@collabora.com>
---
 drivers/gpu/drm/panfrost/panfrost_drv.c | 255 +++++++++++++-----------
 drivers/gpu/drm/panfrost/panfrost_gem.c |  41 ----
 drivers/gpu/drm/panfrost/panfrost_job.c |  65 +++---
 drivers/gpu/drm/panfrost/panfrost_job.h |  10 +-
 include/uapi/drm/panfrost_drm.h         | 201 +++++++++++--------
 5 files changed, 286 insertions(+), 286 deletions(-)

diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 2749bfb80ecd3..528907105a9c6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -6,6 +6,7 @@
 #include <linux/pagemap.h>
 #include <drm/panfrost_drm.h>
 #include <drm/drm_ioctl.h>
+#include <drm/drm_syncobj.h>
 
 #include "panfrost_device.h"
 #include "panfrost_gem.h"
@@ -22,7 +23,7 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
 		return -EINVAL;
 
 	switch(param->param) {
-	case PANFROST_PARAM_GPU_ID:
+	case DRM_PANFROST_PARAM_GPU_ID:
 		param->value = pfdev->features.id;
 		break;
 	default:
@@ -32,17 +33,17 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct
 	return 0;
 }
 
-static int panfrost_ioctl_gem_new(struct drm_device *dev, void *data,
+static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
 		struct drm_file *file)
 {
 	struct drm_gem_shmem_object *shmem;
-	struct drm_panfrost_gem_new *args = data;
+	struct drm_panfrost_create_bo *args = data;
 	struct address_space *mapping;
 
 	if (args->size == 0)
 		return -EINVAL;
 
-	// FIXME need to handle flags
+	// FIXME: validate BO flags once any are defined; reject unknown bits
 //	if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
 //			    ETNA_BO_FORCE_MMU))
 //		return -EINVAL;
@@ -69,76 +70,15 @@ static int panfrost_ioctl_gem_new(struct drm_device *dev, void *data,
 	return 0;
 }
 
-static inline ktime_t to_ktime(struct drm_panfrost_timespec timeout)
-{
-	return ktime_set(timeout.tv_sec, timeout.tv_nsec);
-}
-
-static int panfrost_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
-		struct drm_file *file)
-{
-	struct drm_panfrost_gem_cpu_prep *args = data;
-	struct drm_gem_object *obj;
-	ktime_t timeout = to_ktime(args->timeout);
-	int ret;
-
-	if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
-		return -EINVAL;
-
-	obj = drm_gem_object_lookup(file, args->handle);
-	if (!obj)
-		return -ENOENT;
-
-	ret = panfrost_gem_cpu_prep(obj, args->op, &timeout);
-
-	drm_gem_object_put_unlocked(obj);
-
-	return ret;
-}
-
-static int panfrost_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
-		struct drm_file *file)
-{
-	struct drm_panfrost_gem_cpu_fini *args = data;
-	struct drm_gem_object *obj;
-	int ret;
-
-	if (args->flags)
-		return -EINVAL;
-
-	obj = drm_gem_object_lookup(file, args->handle);
-	if (!obj)
-		return -ENOENT;
-
-	ret = panfrost_gem_cpu_fini(obj);
-
-	drm_gem_object_put_unlocked(obj);
-
-	return ret;
-}
-
-static int panfrost_ioctl_gem_info(struct drm_device *dev, void *data,
-		struct drm_file *file)
-{
-	struct drm_panfrost_gem_info *args = data;
-
-	if (args->pad)
-		return -EINVAL;
-
-	// FIXME Maybe we need imported objects to be mapped?
-	return drm_gem_dumb_map_offset(file, dev, args->handle, &args->offset);
-}
-
 /**
  * panfrost_lookup_bos() - Sets up job->bo[] with the GEM objects
  * referenced by the job.
  * @dev: DRM device
  * @file_priv: DRM file for this fd
+ * @args: IOCTL args
  * @job: job being set up
  *
- * The command validator needs to reference BOs by their index within
- * the submitted job's BO list.  This does the validation of the job's
- * BO list and reference counting for the lifetime of the job.
+ * Resolve handles from userspace to BOs and attach them to job.
  *
  * Note that this function doesn't need to unreference the BOs on
  * failure, because that will happen at panfrost_job_cleanup() time.
@@ -146,14 +86,14 @@ static int panfrost_ioctl_gem_info(struct drm_device *dev, void *data,
 static int
 panfrost_lookup_bos(struct drm_device *dev,
 		  struct drm_file *file_priv,
-		  struct drm_panfrost_gem_submit_atom *atom,
+		  struct drm_panfrost_submit *args,
 		  struct panfrost_job *job)
 {
 	u32 *handles;
 	int ret = 0;
 	int i;
 
-	job->bo_count = atom->bo_handle_count;
+	job->bo_count = args->bo_handle_count;
 
 	if (!job->bo_count)
 		return 0;
@@ -174,7 +114,7 @@ panfrost_lookup_bos(struct drm_device *dev,
 	}
 
 	if (copy_from_user(handles,
-			   (void __user *)(uintptr_t)atom->bo_handles,
+			   (void __user *)(uintptr_t)args->bo_handles,
 			   job->bo_count * sizeof(u32))) {
 		ret = -EFAULT;
 		DRM_DEBUG("Failed to copy in GEM handles\n");
@@ -202,64 +142,135 @@ panfrost_lookup_bos(struct drm_device *dev,
 	return ret;
 }
 
-static int panfrost_ioctl_gem_submit(struct drm_device *dev, void *data,
+static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
 		struct drm_file *file)
 {
 	struct panfrost_device *pfdev = dev->dev_private;
-	struct drm_panfrost_gem_submit *args = data;
-	struct drm_panfrost_gem_submit_atom *atoms;
+	struct drm_panfrost_submit *args = data;
+	struct drm_syncobj *sync_out;
 	struct panfrost_job *job;
-	u32 latest_flush_id;
-	int i, ret = 0;
+	int ret = 0;
+
+	job = kzalloc(sizeof(*job), GFP_KERNEL);
+	if (!job)
+		return -ENOMEM;
+
+	kref_init(&job->refcount);
+
+	ret = drm_syncobj_find_fence(file, args->in_sync, 0, 0,
+				     &job->in_fence);
+	if (ret && ret != -ENOENT)
+		goto fail;
+
+	job->pfdev = pfdev;
+	job->jc = args->jc;
+	job->requirements = args->requirements;
+	job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
+	job->file_priv = file->driver_priv;
+
+	ret = panfrost_lookup_bos(dev, file, args, job);
+	if (ret)
+		goto fail;
+
+	ret = panfrost_job_push(job);
+	if (ret)
+		goto fail;
+
+	/* Update the return sync object for the job */
+	sync_out = drm_syncobj_find(file, args->out_sync);
+	if (sync_out) {
+		drm_syncobj_replace_fence(sync_out, job->render_done_fence);
+		drm_syncobj_put(sync_out);
+	}
+
+fail:
+	panfrost_job_put(job);
+
+	return ret;
+}
+
+static unsigned long nsecs_to_jiffies_timeout(const u64 n)
+{
+	/* nsecs_to_jiffies64() does not guard against overflow */
+	if (NSEC_PER_SEC % HZ &&
+	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
+		return MAX_JIFFY_OFFSET;
+
+	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
+}
+
+int
+panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	long ret;
+	struct drm_panfrost_wait_bo *args = data;
+	struct drm_gem_object *gem_obj;
+	struct panfrost_gem_object *bo;
+	ktime_t start = ktime_get();
+	u64 delta_ns;
+	unsigned long timeout_jiffies =
+		nsecs_to_jiffies_timeout(args->timeout_ns);
 
 	if (args->pad != 0)
 		return -EINVAL;
 
-	if (args->nr_atoms == 0)
+	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
+	if (!gem_obj) {
+		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
 		return -EINVAL;
+	}
+	bo = to_panfrost_bo(gem_obj);
 
-	atoms = kvmalloc_array(args->nr_atoms, sizeof(*atoms), GFP_KERNEL);
-	if (!atoms)
-		return -ENOMEM;
+	ret = reservation_object_wait_timeout_rcu(bo->resv,
+						  true, true,
+						  timeout_jiffies);
 
-	ret = copy_from_user(atoms, u64_to_user_ptr(args->atoms),
-			     args->nr_atoms * sizeof(*atoms));
-	if (ret) {
-		ret = -EFAULT;
-		goto err_free_atoms;
+	if (ret == 0)
+		ret = -ETIME;
+	else if (ret > 0)
+		ret = 0;
+
+	/* Decrement the user's timeout, in case we got interrupted
+	 * such that the ioctl will be restarted.
+	 */
+	delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
+	if (delta_ns < args->timeout_ns)
+		args->timeout_ns -= delta_ns;
+	else
+		args->timeout_ns = 0;
+
+	/* Asked to wait beyond the jiffie/scheduler precision? */
+	if (ret == -ETIME && args->timeout_ns)
+		ret = -EAGAIN;
+
+	drm_gem_object_put_unlocked(gem_obj);
+
+	return ret;
+}
+
+static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct drm_panfrost_mmap_bo *args = data;
+	struct drm_gem_object *gem_obj;
+	int ret;
+
+	if (args->flags != 0) {
+		DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
+		return -EINVAL;
 	}
 
-	latest_flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
-
-	for (i = 0; i < args->nr_atoms; i++) {
-		job = kzalloc(sizeof(*job), GFP_KERNEL);
-		if (!job)
-			return -ENOMEM;
-
-		kref_init(&job->refcount);
-
-		job->pfdev = pfdev;
-		job->file_priv = file->driver_priv;
-		job->jc = atoms[i].jc;
-		job->atom_nr = atoms[i].atom_nr;
-		job->requirements = atoms[i].requirements;
-		job->flush_id = latest_flush_id;
-		job->deps[0].atom_nr = atoms[i].deps[0].atom_nr;
-		job->deps[0].type = atoms[i].deps[0].type;
-		job->deps[1].atom_nr = atoms[i].deps[1].atom_nr;
-		job->deps[1].type = atoms[i].deps[1].type;
-
-		ret = panfrost_lookup_bos(dev, file, &atoms[i], job);
-		if (ret)
-			goto err_free_atoms;
-
-		ret = panfrost_job_push(job);
-		if (ret)
-			goto err_free_atoms;
+	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
+	if (!gem_obj) {
+		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
+		return -ENOENT;
 	}
 
-err_free_atoms:
-	kvfree(atoms);
+	ret = drm_gem_create_mmap_offset(gem_obj);
+	if (ret == 0)
+		args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+	drm_gem_object_put_unlocked(gem_obj);
 
 	return ret;
 }
@@ -312,16 +323,20 @@ panfrost_postclose(struct drm_device *dev, struct drm_file *file)
 	kfree(panfrost_priv);
 }
 
+/* DRM_AUTH is required on SUBMIT for now, while all clients share a single
+ * address space.  Note that render nodes would be able to submit jobs that
+ * could access BOs from clients authenticated with the master node.
+ */
 static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
 #define PANFROST_IOCTL(n, func, flags) \
 	DRM_IOCTL_DEF_DRV(PANFROST_##n, panfrost_ioctl_##func, flags)
-	PANFROST_IOCTL(GET_PARAM,     get_param,     DRM_RENDER_ALLOW),
-	PANFROST_IOCTL(GEM_NEW,       gem_new,       DRM_RENDER_ALLOW),
-	PANFROST_IOCTL(GEM_INFO,      gem_info,      DRM_RENDER_ALLOW),
-	PANFROST_IOCTL(GEM_CPU_PREP,  gem_cpu_prep,  DRM_RENDER_ALLOW),
-	PANFROST_IOCTL(GEM_CPU_FINI,  gem_cpu_fini,  DRM_RENDER_ALLOW),
-	PANFROST_IOCTL(GEM_SUBMIT,    gem_submit,    DRM_RENDER_ALLOW),
-	PANFROST_IOCTL(GET_BO_OFFSET, get_bo_offset, DRM_RENDER_ALLOW),
+
+	PANFROST_IOCTL(SUBMIT,		submit,		DRM_RENDER_ALLOW | DRM_AUTH),
+	PANFROST_IOCTL(WAIT_BO,		wait_bo,	DRM_RENDER_ALLOW),
+	PANFROST_IOCTL(CREATE_BO,	create_bo,	DRM_RENDER_ALLOW),
+	PANFROST_IOCTL(MMAP_BO,		mmap_bo,	DRM_RENDER_ALLOW),
+	PANFROST_IOCTL(GET_PARAM,	get_param,	DRM_RENDER_ALLOW),
+	PANFROST_IOCTL(GET_BO_OFFSET,	get_bo_offset,	DRM_RENDER_ALLOW),
 };
 
 DEFINE_DRM_GEM_SHMEM_FOPS(panfrost_drm_driver_fops);
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 9f74b4d063da7..341c085d50ad6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -13,22 +13,6 @@
 #include "panfrost_gem.h"
 #include "panfrost_mmu.h"
 
-static unsigned long timeout_to_jiffies(const ktime_t *timeout)
-{
-	ktime_t now = ktime_get();
-	unsigned long remaining_jiffies;
-
-	if (ktime_compare(*timeout, now) < 0) {
-		remaining_jiffies = 0;
-	} else {
-		ktime_t rem = ktime_sub(*timeout, now);
-		struct timespec ts = ktime_to_timespec(rem);
-		remaining_jiffies = timespec_to_jiffies(&ts);
-	}
-
-	return remaining_jiffies;
-}
-
 /* Called DRM core on the last userspace/kernel unreference of the
  * BO.
  */
@@ -121,28 +105,3 @@ struct reservation_object *panfrost_prime_res_obj(struct drm_gem_object *obj)
 
 	return bo->resv;
 }
-
-int panfrost_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
-{
-	bool write = !!(op & ETNA_PREP_WRITE);
-	unsigned long remain =
-		op & ETNA_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
-	long ret;
-
-	ret = reservation_object_wait_timeout_rcu(obj->resv, write,
-						  true,  remain);
-	if (ret == 0)
-		return remain == 0 ? -EBUSY : -ETIMEDOUT;
-	else if (ret < 0)
-		return ret;
-
-	/* TODO cache maintenance */
-
-	return 0;
-}
-
-int panfrost_gem_cpu_fini(struct drm_gem_object *obj)
-{
-	/* TODO cache maintenance */
-	return 0;
-}
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index a8b56616f8066..d58916960b2d9 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -396,8 +396,6 @@ int panfrost_job_push(struct panfrost_job *job)
 
 	drm_sched_entity_push_job(&job->base, entity);
 
-	list_add(&job->head, &pfdev->scheduled_jobs);
-
 	mutex_unlock(&pfdev->sched_lock);
 
 	panfrost_attach_object_fences(job->bos, job->bo_count,
@@ -412,28 +410,36 @@ int panfrost_job_push(struct panfrost_job *job)
 #endif
 
 static void
-panfrost_job_free(struct drm_sched_job *sched_job)
+panfrost_job_cleanup(struct kref *ref)
 {
-	struct panfrost_job *job = to_panfrost_job(sched_job);
-	struct panfrost_device *pfdev = job->pfdev;
+	struct panfrost_job *job = container_of(ref, struct panfrost_job,
+						refcount);
+	unsigned int i;
 
-	mutex_lock(&pfdev->sched_lock);
-	list_del(&job->head);
-	mutex_unlock(&pfdev->sched_lock);
+	dma_fence_put(job->in_fence);
+	dma_fence_put(job->done_fence);
+	dma_fence_put(job->render_done_fence);
 
-	drm_sched_job_cleanup(sched_job);
+	for (i = 0; i < job->bo_count; i++)
+		drm_gem_object_put_unlocked(&job->bos[i]->base.base);
+	kvfree(job->bos);
+
+	kfree(job);
 }
 
-struct panfrost_job *panfrost_job_lookup(struct panfrost_device *pfdev, int atom_nr)
+void panfrost_job_put(struct panfrost_job *job)
 {
-	struct panfrost_job *job;
+	kref_put(&job->refcount, panfrost_job_cleanup);
+}
 
-	list_for_each_entry(job, &pfdev->scheduled_jobs, head) {
-		if (job->atom_nr == atom_nr)
-			return job;
-	}
+static void
+panfrost_job_free(struct drm_sched_job *sched_job)
+{
+	struct panfrost_job *job = to_panfrost_job(sched_job);
 
-	return NULL;
+	drm_sched_job_cleanup(sched_job);
+
+	panfrost_job_put(job);
 }
 
 static struct dma_fence *
@@ -441,25 +447,15 @@ panfrost_job_dependency(struct drm_sched_job *sched_job,
 		   struct drm_sched_entity *s_entity)
 {
 	struct panfrost_job *job = to_panfrost_job(sched_job);
-	struct panfrost_device *pfdev = job->pfdev;
-	struct panfrost_job *dep;
-	struct dma_fence *fence = NULL;
-	int i;
+	struct dma_fence *fence;
 
-	mutex_lock(&pfdev->sched_lock);
-	for (i = 0; i < 2 && !fence; i++) {
-		if (!job->deps[i].atom_nr)
-			continue;
-		dep = panfrost_job_lookup(pfdev, job->deps[i].atom_nr);
-		if (!dep)
-			continue; /* Dep might have finished already */
-		/* TODO: Handle job->deps[i].type */
-		if (!dma_fence_is_signaled(dep->render_done_fence))
-			fence = dep->render_done_fence;
+	fence = job->in_fence;
+	if (fence) {
+		job->in_fence = NULL;
+		return fence;
 	}
-	mutex_unlock(&pfdev->sched_lock);
 
-	return fence;
+	return NULL;
 }
 
 static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
@@ -621,11 +617,12 @@ void panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
 	struct panfrost_device *pfdev = panfrost_priv->pfdev;
 	struct panfrost_job_slot *js = pfdev->js;
 	struct drm_sched_rq *rq;
-	int i;
+	int ret, i;
 
 	for (i = 0; i < NUM_JOB_SLOTS; i++) {
 		rq = &js->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
-		drm_sched_entity_init(&panfrost_priv->sched_entity[i], &rq, 1, NULL);
+		ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i], &rq, 1, NULL);
+		WARN_ON(ret);
 	}
 }
 
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
index b7d5f312acd7a..cd7102c33879a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.h
+++ b/drivers/gpu/drm/panfrost/panfrost_job.h
@@ -21,10 +21,13 @@ struct panfrost_job {
 	struct panfrost_device *pfdev;
 	struct panfrost_file_priv *file_priv;
 
+	/* An optional fence userspace can pass in for the job to depend on. */
+	struct dma_fence *in_fence;
+
+	/* fence to be signaled by IRQ handler when the job is complete. */
 	struct dma_fence *done_fence;
 
 	__u64 jc;
-	__u32 atom_nr;
 	__u32 requirements;
 	__u32 flush_id;
 
@@ -32,10 +35,6 @@ struct panfrost_job {
 	u32 bo_count;
 
 	struct dma_fence *render_done_fence;
-
-	struct drm_panfrost_gem_submit_atom_dep deps[2];
-
-	struct list_head head;
 };
 
 int panfrost_job_init(struct panfrost_device *pfdev);
@@ -43,5 +42,6 @@ void panfrost_job_fini(struct panfrost_device *pfdev);
 void panfrost_job_open(struct panfrost_file_priv *panfrost_priv);
 void panfrost_job_close(struct panfrost_file_priv *panfrost_priv);
 int panfrost_job_push(struct panfrost_job *job);
+void panfrost_job_put(struct panfrost_job *job);
 
 #endif
diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h
index d4d271e372066..bdf17063d3467 100644
--- a/include/uapi/drm/panfrost_drm.h
+++ b/include/uapi/drm/panfrost_drm.h
@@ -1,8 +1,29 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/* Copyright 2018, Linaro, Ltd., Rob Herring <robh@kernel.org> */
+/*
+ * Copyright © 2014-2018 Broadcom
+ * Copyright © 2019 Collabora ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
 
-#ifndef __PANFROST_DRM_H__
-#define __PANFROST_DRM_H__
+#ifndef _PANFROST_DRM_H_
+#define _PANFROST_DRM_H_
 
 #include "drm.h"
 
@@ -10,27 +31,74 @@
 extern "C" {
 #endif
 
-/* timeouts are specified in clock-monotonic absolute times (to simplify
- * restarting interrupted ioctls).  The following struct is logically the
- * same as 'struct timespec' but 32/64b ABI safe.
+#define DRM_PANFROST_SUBMIT			0x00
+#define DRM_PANFROST_WAIT_BO			0x01
+#define DRM_PANFROST_CREATE_BO			0x02
+#define DRM_PANFROST_MMAP_BO			0x03
+#define DRM_PANFROST_GET_PARAM			0x04
+#define DRM_PANFROST_GET_BO_OFFSET		0x05
+
+#define DRM_IOCTL_PANFROST_SUBMIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_SUBMIT, struct drm_panfrost_submit)
+#define DRM_IOCTL_PANFROST_WAIT_BO		DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_WAIT_BO, struct drm_panfrost_wait_bo)
+#define DRM_IOCTL_PANFROST_CREATE_BO		DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_CREATE_BO, struct drm_panfrost_create_bo)
+#define DRM_IOCTL_PANFROST_MMAP_BO		DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MMAP_BO, struct drm_panfrost_mmap_bo)
+#define DRM_IOCTL_PANFROST_GET_PARAM		DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_PARAM, struct drm_panfrost_get_param)
+#define DRM_IOCTL_PANFROST_GET_BO_OFFSET	DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_BO_OFFSET, struct drm_panfrost_get_bo_offset)
+
+#define PANFROST_JD_REQ_FS (1 << 0)
+
+/**
+ * struct drm_panfrost_submit - ioctl argument for submitting commands to the 3D
+ * engine.
+ *
+ * This asks the kernel to have the GPU execute a render command list.
  */
-struct drm_panfrost_timespec {
-	__s64 tv_sec;          /* seconds */
-	__s64 tv_nsec;         /* nanoseconds */
-};
+struct drm_panfrost_submit {
 
-#define PANFROST_PARAM_GPU_ID		0x01
+	/** Address to GPU mapping of job descriptor */
+	__u64 jc;
 
-struct drm_panfrost_get_param {
-	__u32 param;	/* in */
+	/** An optional sync object to wait on before starting this job. */
+	__u32 in_sync;
+
+	/** An optional sync object to place the completion fence in. */
+	__u32 out_sync;
+
+	/** Pointer to a u32 array of the BOs that are referenced by the job. */
+	__u64 bo_handles;
+
+	/** Number of BO handles passed in (size is that times 4). */
+	__u32 bo_handle_count;
+
+	/** A combination of PANFROST_JD_REQ_* */
+	__u32 requirements;
+};
+
+/**
+ * struct drm_panfrost_wait_bo - ioctl argument for waiting for
+ * completion of the last DRM_PANFROST_SUBMIT_CL on a BO.
+ *
+ * This is useful for cases where multiple processes might be
+ * rendering to a BO and you want to wait for all rendering to be
+ * completed.
+ */
+struct drm_panfrost_wait_bo {
+	__u32 handle;
 	__u32 pad;
-	__u64 value;	/* out */
+	__u64 timeout_ns;
 };
 
-struct drm_panfrost_gem_new {
-	__u64 size;           /* in */
-	__u32 flags;          /* in, mask of ETNA_BO_x */
-	__u32 handle;         /* out */
+/**
+ * struct drm_panfrost_create_bo - ioctl argument for creating Panfrost BOs.
+ *
+ * There are currently no values for the flags argument, but it may be
+ * used in a future extension.
+ */
+struct drm_panfrost_create_bo {
+	__u32 size;
+	__u32 flags;
+	/** Returned GEM handle for the BO. */
+	__u32 handle;
 	/**
 	 * Returned offset for the BO in the GPU address space.  This offset
 	 * is private to the DRM fd and is valid for the lifetime of the GEM
@@ -39,90 +107,51 @@ struct drm_panfrost_gem_new {
 	 * This offset value will always be nonzero, since various HW
 	 * units treat 0 specially.
 	 */
-	__u64 offset;
-};
-struct drm_panfrost_gem_info {
-	__u32 handle;         /* in */
-	__u32 pad;
-	__u64 offset;         /* out, offset to pass to mmap() */
+	__u32 offset;
 };
 
 /**
- * Returns the offset for the BO in the GPU address space for this DRM fd.
- * This is the same value returned by drm_panfrost_gem_new, if that was called
- * from this DRM fd.
+ * struct drm_panfrost_mmap_bo - ioctl argument for mapping Panfrost BOs.
+ *
+ * This doesn't actually perform an mmap.  Instead, it returns the
+ * offset you need to use in an mmap on the DRM device node.  This
+ * means that tools like valgrind end up knowing about the mapped
+ * memory.
+ *
+ * There are currently no values for the flags argument, but it may be
+ * used in a future extension.
  */
-struct drm_panfrost_get_bo_offset {
+struct drm_panfrost_mmap_bo {
+	/** Handle for the object being mapped. */
 	__u32 handle;
-	__u32 pad;
+	__u32 flags;
+	/** offset into the drm node to use for subsequent mmap call. */
 	__u64 offset;
 };
 
-
-#define ETNA_PREP_READ        0x01
-#define ETNA_PREP_WRITE       0x02
-#define ETNA_PREP_NOSYNC      0x04
-
-struct drm_panfrost_gem_cpu_prep {
-	__u32 handle;         /* in */
-	__u32 op;             /* in, mask of ETNA_PREP_x */
-	struct drm_panfrost_timespec timeout;   /* in */
+enum drm_panfrost_param {
+	DRM_PANFROST_PARAM_GPU_ID,
 };
 
-struct drm_panfrost_gem_cpu_fini {
-	__u32 handle;         /* in */
-	__u32 flags;          /* in, placeholder for now, no defined values */
+struct drm_panfrost_get_param {
+	__u32 param;
+	__u32 pad;
+	__u64 value;
 };
 
-/*
- * Cmdstream Submission:
+/**
+ * Returns the offset for the BO in the GPU address space for this DRM fd.
+ * This is the same value returned by drm_panfrost_create_bo, if that was called
+ * from this DRM fd.
  */
-
-#define PANFROST_JD_REQ_FS (1 << 0)
-
-#define PANFROST_DEP_TYPE_ORDER	0x01
-#define PANFROST_DEP_TYPE_DATA	0x02
-
-struct drm_panfrost_gem_submit_atom_dep {
-	__u32 atom_nr;	/* job ID of dependency */
-	__u32 type;	/* one of PANFROST_DEP_TYPE_* */
-};
-
-struct drm_panfrost_gem_submit_atom {
-	__u64 jc;           /* in, address to GPU mapping of job descriptor */
-	__u32 atom_nr;      /* in, job ID */
-	__u32 requirements; /* in, a combination of PANFROST_JD_REQ_* */
-	__u64 bo_handles;
-	__u32 bo_handle_count;
-	struct drm_panfrost_gem_submit_atom_dep deps[2];
-};
-
-struct drm_panfrost_gem_submit {
-	__u32 nr_atoms;         /* in, number of submit_atom */
+struct drm_panfrost_get_bo_offset {
+	__u32 handle;
 	__u32 pad;
-	__u64 atoms;            /* in, ptr to array of submit_atom */
+	__u64 offset;
 };
 
-
-
-#define DRM_PANFROST_GET_PARAM		0x00
-#define DRM_PANFROST_GEM_NEW		0x01
-#define DRM_PANFROST_GEM_INFO		0x02
-#define DRM_PANFROST_GEM_CPU_PREP	0x03
-#define DRM_PANFROST_GEM_CPU_FINI	0x04
-#define DRM_PANFROST_GEM_SUBMIT		0x05
-#define DRM_PANFROST_GET_BO_OFFSET	0x06
-
-#define DRM_IOCTL_PANFROST_GET_PARAM	DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_PARAM, struct drm_panfrost_get_param)
-#define DRM_IOCTL_PANFROST_GEM_NEW	DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GEM_NEW, struct drm_panfrost_gem_new)
-#define DRM_IOCTL_PANFROST_GEM_INFO	DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GEM_INFO, struct drm_panfrost_gem_info)
-#define DRM_IOCTL_PANFROST_GEM_CPU_PREP	DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_GEM_CPU_PREP, struct drm_panfrost_gem_cpu_prep)
-#define DRM_IOCTL_PANFROST_GEM_CPU_FINI	DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_GEM_CPU_FINI, struct drm_panfrost_gem_cpu_fini)
-#define DRM_IOCTL_PANFROST_GEM_SUBMIT	DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_GEM_SUBMIT, struct drm_panfrost_gem_submit)
-#define DRM_IOCTL_PANFROST_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_BO_OFFSET, struct drm_panfrost_get_bo_offset)
-
 #if defined(__cplusplus)
 }
 #endif
 
-#endif /* __PANFROST_DRM_H__ */
+#endif /* _PANFROST_DRM_H_ */
-- 
GitLab