Skip to content
Snippets Groups Projects
Commit c4079f8d authored by Tomeu Vizoso's avatar Tomeu Vizoso
Browse files

drm/panfrost: Add dependencies between atoms

parent c3180ff1
No related merge requests found
...@@ -69,6 +69,7 @@ int panfrost_device_init(struct panfrost_device *pfdev) ...@@ -69,6 +69,7 @@ int panfrost_device_init(struct panfrost_device *pfdev)
struct resource *res; struct resource *res;
mutex_init(&pfdev->sched_lock); mutex_init(&pfdev->sched_lock);
INIT_LIST_HEAD(&pfdev->scheduled_jobs);
err = panfrost_clk_init(pfdev); err = panfrost_clk_init(pfdev);
if (err) { if (err) {
......
...@@ -66,6 +66,7 @@ struct panfrost_device { ...@@ -66,6 +66,7 @@ struct panfrost_device {
struct panfrost_job_slot *js; struct panfrost_job_slot *js;
struct panfrost_job *jobs[3]; struct panfrost_job *jobs[3];
struct list_head scheduled_jobs;
struct mutex sched_lock; struct mutex sched_lock;
}; };
......
...@@ -244,6 +244,10 @@ static int panfrost_ioctl_gem_submit(struct drm_device *dev, void *data, ...@@ -244,6 +244,10 @@ static int panfrost_ioctl_gem_submit(struct drm_device *dev, void *data,
job->atom_nr = atoms[i].atom_nr; job->atom_nr = atoms[i].atom_nr;
job->requirements = atoms[i].requirements; job->requirements = atoms[i].requirements;
job->flush_id = latest_flush_id; job->flush_id = latest_flush_id;
job->deps[0].atom_nr = atoms[i].deps[0].atom_nr;
job->deps[0].type = atoms[i].deps[0].type;
job->deps[1].atom_nr = atoms[i].deps[1].atom_nr;
job->deps[1].type = atoms[i].deps[1].type;
ret = panfrost_lookup_bos(dev, file, &atoms[i], job); ret = panfrost_lookup_bos(dev, file, &atoms[i], job);
if (ret) if (ret)
......
...@@ -379,12 +379,16 @@ int panfrost_job_push(struct panfrost_job *job) ...@@ -379,12 +379,16 @@ int panfrost_job_push(struct panfrost_job *job)
ret = panfrost_lock_bo_reservations(job->bos, job->bo_count, ret = panfrost_lock_bo_reservations(job->bos, job->bo_count,
&acquire_ctx); &acquire_ctx);
if (ret) if (ret) {
mutex_unlock(&pfdev->sched_lock);
return ret; return ret;
}
ret = drm_sched_job_init(&job->base, entity, NULL); ret = drm_sched_job_init(&job->base, entity, NULL);
if (ret) if (ret) {
mutex_unlock(&pfdev->sched_lock);
goto unlock; goto unlock;
}
job->render_done_fence = dma_fence_get(&job->base.s_fence->finished); job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);
...@@ -392,6 +396,8 @@ int panfrost_job_push(struct panfrost_job *job) ...@@ -392,6 +396,8 @@ int panfrost_job_push(struct panfrost_job *job)
drm_sched_entity_push_job(&job->base, entity); drm_sched_entity_push_job(&job->base, entity);
list_add(&job->head, &pfdev->scheduled_jobs);
mutex_unlock(&pfdev->sched_lock); mutex_unlock(&pfdev->sched_lock);
panfrost_attach_object_fences(job->bos, job->bo_count, panfrost_attach_object_fences(job->bos, job->bo_count,
...@@ -408,23 +414,54 @@ int panfrost_job_push(struct panfrost_job *job) ...@@ -408,23 +414,54 @@ int panfrost_job_push(struct panfrost_job *job)
static void static void
panfrost_job_free(struct drm_sched_job *sched_job) panfrost_job_free(struct drm_sched_job *sched_job)
{ {
struct panfrost_job *job = to_panfrost_job(sched_job);
struct panfrost_device *pfdev = job->pfdev;
mutex_lock(&pfdev->sched_lock);
list_del(&job->head);
mutex_unlock(&pfdev->sched_lock);
drm_sched_job_cleanup(sched_job); drm_sched_job_cleanup(sched_job);
} }
/** struct panfrost_job *panfrost_job_lookup(struct panfrost_device *pfdev, int atom_nr)
* Returns the fences that the bin or render job depends on, one by one. {
* panfrost_job_run() won't be called until all of them have been signaled. struct panfrost_job *job;
*/
list_for_each_entry(job, &pfdev->scheduled_jobs, head) {
if (job->atom_nr == atom_nr)
return job;
}
return NULL;
}
static struct dma_fence * static struct dma_fence *
panfrost_job_dependency(struct drm_sched_job *sched_job, panfrost_job_dependency(struct drm_sched_job *sched_job,
struct drm_sched_entity *s_entity) struct drm_sched_entity *s_entity)
{ {
struct panfrost_job *job = to_panfrost_job(sched_job);
struct panfrost_device *pfdev = job->pfdev;
struct panfrost_job *dep;
struct dma_fence *fence = NULL; struct dma_fence *fence = NULL;
int i;
mutex_lock(&pfdev->sched_lock);
for (i = 0; i < 2 && !fence; i++) {
if (!job->deps[i].atom_nr)
continue;
dep = panfrost_job_lookup(pfdev, job->deps[i].atom_nr);
if (!dep)
continue; /* Dep might have finished already */
/* TODO: Handle job->deps[i].type */
if (!dma_fence_is_signaled(dep->render_done_fence))
fence = dep->render_done_fence;
}
mutex_unlock(&pfdev->sched_lock);
return fence; return fence;
} }
static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job) static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
{ {
struct panfrost_job *job = to_panfrost_job(sched_job); struct panfrost_job *job = to_panfrost_job(sched_job);
...@@ -432,6 +469,9 @@ static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job) ...@@ -432,6 +469,9 @@ static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
int slot = panfrost_job_get_slot(job); int slot = panfrost_job_get_slot(job);
struct dma_fence *fence = NULL; struct dma_fence *fence = NULL;
if (unlikely(job->base.s_fence->finished.error))
return NULL;
pfdev->jobs[slot] = job; pfdev->jobs[slot] = job;
fence = panfrost_fence_create(pfdev, slot); fence = panfrost_fence_create(pfdev, slot);
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
#ifndef __PANFROST_JOB_H__ #ifndef __PANFROST_JOB_H__
#define __PANFROST_JOB_H__ #define __PANFROST_JOB_H__
#include <uapi/drm/panfrost_drm.h>
#include <drm/gpu_scheduler.h> #include <drm/gpu_scheduler.h>
#define NUM_JOB_SLOTS 2 /* Don't need 3rd one until we have compute support */ #define NUM_JOB_SLOTS 2 /* Don't need 3rd one until we have compute support */
...@@ -31,6 +32,10 @@ struct panfrost_job { ...@@ -31,6 +32,10 @@ struct panfrost_job {
u32 bo_count; u32 bo_count;
struct dma_fence *render_done_fence; struct dma_fence *render_done_fence;
struct drm_panfrost_gem_submit_atom_dep deps[2];
struct list_head head;
}; };
int panfrost_job_init(struct panfrost_device *pfdev); int panfrost_job_init(struct panfrost_device *pfdev);
......
...@@ -80,12 +80,21 @@ struct drm_panfrost_gem_cpu_fini { ...@@ -80,12 +80,21 @@ struct drm_panfrost_gem_cpu_fini {
#define PANFROST_JD_REQ_FS (1 << 0) #define PANFROST_JD_REQ_FS (1 << 0)
/* Dependency kinds; NOTE(review): the kernel side currently ignores the type
 * (see the TODO in panfrost_job_dependency) — confirm intended semantics. */
#define PANFROST_DEP_TYPE_ORDER 0x01
#define PANFROST_DEP_TYPE_DATA 0x02
/* Describes one dependency of a submitted atom on an earlier atom.
 * atom_nr == 0 means the slot is unused (the scheduler skips it). */
struct drm_panfrost_gem_submit_atom_dep {
__u32 atom_nr; /* job ID of dependency */
__u32 type; /* one of PANFROST_DEP_TYPE_* */
};
/* UAPI description of a single atom in a gem_submit ioctl. */
struct drm_panfrost_gem_submit_atom {
	__u64 jc;           /* in, address to GPU mapping of job descriptor */
	__u32 atom_nr;      /* in, job ID */
	__u32 requirements; /* in, a combination of PANFROST_JD_REQ_* */
	__u64 bo_handles;
	__u32 bo_handle_count;
	/* in, up to two dependencies on previously submitted atoms */
	struct drm_panfrost_gem_submit_atom_dep deps[2];
};
struct drm_panfrost_gem_submit { struct drm_panfrost_gem_submit {
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment