Commit e5cc66df authored by Marek Vasut, committed by Lucas Stach
Browse files

etnaviv: Rework locking



Replace the per-screen locking of flushing with per-context one and
add per-context lock around command stream buffer accesses, to prevent
cross-context flushing from corrupting these command stream buffers.
Signed-off-by: Marek Vasut <marex@denx.de>
parent 0c38c545
Pipeline #71642 passed with stages in 34 minutes
......@@ -324,6 +324,7 @@ etna_clear_blt(struct pipe_context *pctx, unsigned buffers,
const union pipe_color_union *color, double depth, unsigned stencil)
{
struct etna_context *ctx = etna_context(pctx);
mtx_lock(&ctx->lock);
etna_set_state(ctx->stream, VIVS_GL_FLUSH_CACHE, 0x00000c23);
etna_set_state(ctx->stream, VIVS_TS_FLUSH_CACHE, VIVS_TS_FLUSH_CACHE_FLUSH);
......@@ -344,9 +345,9 @@ etna_clear_blt(struct pipe_context *pctx, unsigned buffers,
etna_set_state(ctx->stream, VIVS_GL_FLUSH_CACHE, 0x00000c23);
else
etna_set_state(ctx->stream, VIVS_GL_FLUSH_CACHE, 0x00000002);
mtx_unlock(&ctx->lock);
}
static bool
etna_try_blt_blit(struct pipe_context *pctx,
const struct pipe_blit_info *blit_info)
......@@ -416,6 +417,7 @@ etna_try_blt_blit(struct pipe_context *pctx,
return true;
}
mtx_lock(&ctx->lock);
/* Kick off BLT here */
if (src == dst && src_lev->ts_compress_fmt < 0) {
/* Resolve-in-place */
......@@ -510,6 +512,7 @@ etna_try_blt_blit(struct pipe_context *pctx,
dst->seqno++;
dst_lev->ts_valid = false;
mtx_unlock(&ctx->lock);
return true;
}
......
......@@ -63,7 +63,7 @@ etna_context_destroy(struct pipe_context *pctx)
struct etna_context *ctx = etna_context(pctx);
struct etna_screen *screen = ctx->screen;
mtx_lock(&screen->lock);
mtx_lock(&ctx->lock);
if (ctx->used_resources_read) {
/*
......@@ -94,7 +94,7 @@ etna_context_destroy(struct pipe_context *pctx)
_mesa_set_destroy(ctx->used_resources_write, NULL);
}
mtx_unlock(&screen->lock);
mtx_unlock(&ctx->lock);
if (ctx->dummy_rt)
etna_bo_del(ctx->dummy_rt);
......@@ -118,6 +118,8 @@ etna_context_destroy(struct pipe_context *pctx)
if (ctx->in_fence_fd != -1)
close(ctx->in_fence_fd);
mtx_destroy(&ctx->lock);
FREE(pctx);
}
......@@ -265,6 +267,8 @@ etna_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
if (!etna_state_update(ctx))
return;
mtx_lock(&ctx->lock);
/*
* Figure out the buffers/features we need:
*/
......@@ -339,6 +343,7 @@ etna_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info)
* draw op has caused the hang. */
etna_stall(ctx->stream, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
}
mtx_unlock(&ctx->lock);
if (DBG_ENABLED(ETNA_DBG_FLUSH_ALL))
pctx->flush(pctx, NULL, 0);
......@@ -414,7 +419,7 @@ etna_flush(struct pipe_context *pctx, struct pipe_fence_handle **fence,
struct etna_screen *screen = ctx->screen;
int out_fence_fd = -1;
mtx_lock(&screen->lock);
mtx_lock(&ctx->lock);
list_for_each_entry(struct etna_hw_query, hq, &ctx->active_hw_queries, node)
etna_hw_query_suspend(hq, ctx);
......@@ -452,9 +457,8 @@ etna_flush(struct pipe_context *pctx, struct pipe_fence_handle **fence,
}
_mesa_set_clear(ctx->used_resources_write, NULL);
mtx_unlock(&screen->lock);
etna_reset_gpu_state(ctx);
mtx_unlock(&ctx->lock);
}
static void
......@@ -512,6 +516,8 @@ etna_context_create(struct pipe_screen *pscreen, void *priv, unsigned flags)
if (!ctx->used_resources_write)
goto fail;
mtx_init(&ctx->lock, mtx_recursive);
/* context ctxate setup */
ctx->specs = screen->specs;
ctx->screen = screen;
......
......@@ -194,6 +194,8 @@ struct etna_context {
/* set of resources used by currently-unsubmitted renders */
struct set *used_resources_read;
struct set *used_resources_write;
mtx_t lock;
};
static inline struct etna_context *
......
......@@ -645,7 +645,7 @@ etna_resource_used(struct etna_context *ctx, struct pipe_resource *prsc,
if (!prsc)
return;
mtx_lock(&screen->lock);
mtx_lock(&ctx->lock);
rsc = etna_resource(prsc);
......@@ -682,7 +682,7 @@ etna_resource_used(struct etna_context *ctx, struct pipe_resource *prsc,
_mesa_set_add(rsc->pending_ctx, ctx);
}
mtx_unlock(&screen->lock);
mtx_unlock(&ctx->lock);
}
bool
......
......@@ -392,6 +392,7 @@ etna_clear_rs(struct pipe_context *pctx, unsigned buffers,
const union pipe_color_union *color, double depth, unsigned stencil)
{
struct etna_context *ctx = etna_context(pctx);
mtx_lock(&ctx->lock);
/* Flush color and depth cache before clearing anything.
* This is especially important when coming from another surface, as
......@@ -437,6 +438,7 @@ etna_clear_rs(struct pipe_context *pctx, unsigned buffers,
etna_blit_clear_zs_rs(pctx, ctx->framebuffer_s.zsbuf, buffers, depth, stencil);
etna_stall(ctx->stream, SYNC_RECIPIENT_RA, SYNC_RECIPIENT_PE);
mtx_unlock(&ctx->lock);
}
static bool
......@@ -647,6 +649,8 @@ etna_try_rs_blit(struct pipe_context *pctx,
width & (w_align - 1) || height & (h_align - 1))
goto manual;
mtx_lock(&ctx->lock);
/* Always flush color and depth cache together before resolving. This works
* around artifacts that appear in some cases when scanning out a texture
* directly after it has been rendered to, such as rendering an animated web
......@@ -736,6 +740,7 @@ etna_try_rs_blit(struct pipe_context *pctx,
dst->seqno++;
dst_lev->ts_valid = false;
ctx->dirty |= ETNA_DIRTY_DERIVE_TS;
mtx_unlock(&ctx->lock);
return true;
......
......@@ -84,8 +84,6 @@ etna_screen_destroy(struct pipe_screen *pscreen)
{
struct etna_screen *screen = etna_screen(pscreen);
mtx_destroy(&screen->lock);
if (screen->perfmon)
etna_perfmon_del(screen->perfmon);
......@@ -956,8 +954,6 @@ etna_screen_create(struct etna_device *dev, struct etna_gpu *gpu,
if (screen->drm_version >= ETNA_DRM_VERSION_PERFMON)
etna_pm_query_setup(screen);
mtx_init(&screen->lock, mtx_recursive);
return pscreen;
fail:
......
......@@ -85,8 +85,6 @@ struct etna_screen {
uint32_t drm_version;
mtx_t lock;
nir_shader_compiler_options options;
};
......
......@@ -310,7 +310,9 @@ etna_texture_barrier(struct pipe_context *pctx, unsigned flags)
struct etna_context *ctx = etna_context(pctx);
/* clear color and texture cache to make sure that texture unit reads
* what has been written */
mtx_lock(&ctx->lock);
etna_set_state(ctx->stream, VIVS_GL_FLUSH_CACHE, VIVS_GL_FLUSH_CACHE_COLOR | VIVS_GL_FLUSH_CACHE_TEXTURE);
mtx_unlock(&ctx->lock);
}
uint32_t
......
......@@ -355,7 +355,7 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
* current GPU usage (reads must wait for GPU writes, writes must have
* exclusive access to the buffer).
*/
mtx_lock(&screen->lock);
mtx_lock(&ctx->lock);
if ((trans->rsc && (etna_resource(trans->rsc)->status & ETNA_PENDING_WRITE)) ||
(!trans->rsc &&
......@@ -369,7 +369,7 @@ etna_transfer_map(struct pipe_context *pctx, struct pipe_resource *prsc,
}
}
mtx_unlock(&screen->lock);
mtx_unlock(&ctx->lock);
if (usage & PIPE_TRANSFER_READ)
prep_flags |= DRM_ETNA_PREP_READ;
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment