Commit ff7896a3 authored by Emma Anholt

vc4: Switch to using a separate ioctl for making shaders.

This gives the kernel a chance to validate and lock down the data,
without having to deal with mmap zapping.

With this, GLBenchmark stops failing on texture relocations; previously we'd
recycled a shader BO as another shader and failed to revalidate it, since we
weren't clearing the cached validation state on mmap faults.
parent e42cfe5d
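
For readers outside the driver, here is a minimal sketch of the userspace flow this commit introduces: instead of allocating a BO, mmapping it, and memcpying QPU instructions into it, the instructions are handed to the kernel in a single DRM_IOCTL_VC4_CREATE_SHADER_BO call so they can be validated before they ever become executable. The helper name, the use of libdrm's drmIoctl(), and the error handling are illustrative assumptions, not part of the patch itself.

/* Sketch only: upload QPU instructions through the new shader-creation
 * ioctl.  Assumes the drm_vc4_create_shader_bo definition added below
 * and libdrm's drmIoctl(); returns 0 on failure.
 */
#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>
#include "vc4_drm.h"

static uint32_t
upload_shader(int fd, const uint64_t *qpu_insts, uint32_t inst_count)
{
        struct drm_vc4_create_shader_bo create = {
                .size = inst_count * sizeof(uint64_t),
                .data = (uintptr_t)qpu_insts,
                /* flags and pad are left zero, as the ioctl requires. */
        };

        if (drmIoctl(fd, DRM_IOCTL_VC4_CREATE_SHADER_BO, &create) != 0) {
                fprintf(stderr, "shader BO creation failed\n");
                return 0;
        }

        /* The returned handle can be referenced by later submits, but the
         * BO's contents can no longer be rewritten from userspace.
         */
        return create.handle;
}

The vc4_bo_alloc_shader() hunk below does essentially this, plus the driver's BO bookkeeping and a simulator fallback that goes through a dumb buffer instead.
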
 /*
- * Copyright © 2014 Broadcom
+ * Copyright © 2014-2015 Broadcom
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -381,15 +381,57 @@ vc4_bo_get_dmabuf(struct vc4_bo *bo)
 }

 struct vc4_bo *
-vc4_bo_alloc_mem(struct vc4_screen *screen, const void *data, uint32_t size,
-                 const char *name)
+vc4_bo_alloc_shader(struct vc4_screen *screen, const void *data, uint32_t size)
 {
-        void *map;
         struct vc4_bo *bo;
+        int ret;
+
+        bo = CALLOC_STRUCT(vc4_bo);
+        if (!bo)
+                return NULL;
+
+        pipe_reference_init(&bo->reference, 1);
+        bo->screen = screen;
+        bo->size = align(size, 4096);
+        bo->name = "code";
+        bo->private = false; /* Make sure it doesn't go back to the cache. */
+
+        if (!using_vc4_simulator) {
+                struct drm_vc4_create_shader_bo create = {
+                        .size = size,
+                        .data = (uintptr_t)data,
+                };
+
+                ret = drmIoctl(screen->fd, DRM_IOCTL_VC4_CREATE_SHADER_BO,
+                               &create);
+                bo->handle = create.handle;
+        } else {
+                struct drm_mode_create_dumb create;
+                memset(&create, 0, sizeof(create));
+
+                create.width = 128;
+                create.bpp = 8;
+                create.height = (size + 127) / 128;
+
+                ret = drmIoctl(screen->fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
+                bo->handle = create.handle;
+                assert(create.size >= size);
+
+                vc4_bo_map(bo);
+                memcpy(bo->map, data, size);
+        }
+
+        if (ret != 0) {
+                fprintf(stderr, "create shader ioctl failure\n");
+                abort();
+        }
+
+        screen->bo_count++;
+        screen->bo_size += bo->size;
+        if (dump_stats) {
+                fprintf(stderr, "Allocated shader %dkb:\n", size / 1024);
+                vc4_bo_dump_stats(screen);
+        }

-        bo = vc4_bo_alloc(screen, size, name);
-        map = vc4_bo_map(bo);
-        memcpy(map, data, size);

         return bo;
 }
......
@@ -58,8 +58,8 @@ struct vc4_bo {
 struct vc4_bo *vc4_bo_alloc(struct vc4_screen *screen, uint32_t size,
                             const char *name);
-struct vc4_bo *vc4_bo_alloc_mem(struct vc4_screen *screen, const void *data,
-                                uint32_t size, const char *name);
+struct vc4_bo *vc4_bo_alloc_shader(struct vc4_screen *screen, const void *data,
+                                   uint32_t size);
 void vc4_bo_last_unreference(struct vc4_bo *bo);
 void vc4_bo_last_unreference_locked_timed(struct vc4_bo *bo, time_t time);
 struct vc4_bo *vc4_bo_open_name(struct vc4_screen *screen, uint32_t name,
......
@@ -31,12 +31,14 @@
 #define DRM_VC4_WAIT_BO 0x02
 #define DRM_VC4_CREATE_BO 0x03
 #define DRM_VC4_MMAP_BO 0x04
+#define DRM_VC4_CREATE_SHADER_BO 0x05

 #define DRM_IOCTL_VC4_SUBMIT_CL DRM_IOWR( DRM_COMMAND_BASE + DRM_VC4_SUBMIT_CL, struct drm_vc4_submit_cl)
 #define DRM_IOCTL_VC4_WAIT_SEQNO DRM_IOWR( DRM_COMMAND_BASE + DRM_VC4_WAIT_SEQNO, struct drm_vc4_wait_seqno)
 #define DRM_IOCTL_VC4_WAIT_BO DRM_IOWR( DRM_COMMAND_BASE + DRM_VC4_WAIT_BO, struct drm_vc4_wait_bo)
 #define DRM_IOCTL_VC4_CREATE_BO DRM_IOWR( DRM_COMMAND_BASE + DRM_VC4_CREATE_BO, struct drm_vc4_create_bo)
 #define DRM_IOCTL_VC4_MMAP_BO DRM_IOWR( DRM_COMMAND_BASE + DRM_VC4_MMAP_BO, struct drm_vc4_mmap_bo)
+#define DRM_IOCTL_VC4_CREATE_SHADER_BO DRM_IOWR( DRM_COMMAND_BASE + DRM_VC4_CREATE_SHADER_BO, struct drm_vc4_create_shader_bo)

 struct drm_vc4_submit_rcl_surface {
         uint32_t hindex; /* Handle index, or ~0 if not present. */
@@ -182,6 +184,29 @@ struct drm_vc4_create_bo {
         uint32_t pad;
 };

+/**
+ * struct drm_vc4_create_shader_bo - ioctl argument for creating VC4
+ * shader BOs.
+ *
+ * Since allowing a shader to be overwritten while it's also being
+ * executed from would allow privilege escalation, shaders must be
+ * created using this ioctl, and they can't be mmapped later.
+ */
+struct drm_vc4_create_shader_bo {
+        /* Size of the data argument. */
+        uint32_t size;
+        /* Flags, currently must be 0. */
+        uint32_t flags;
+
+        /* Pointer to the data. */
+        uint64_t data;
+
+        /** Returned GEM handle for the BO. */
+        uint32_t handle;
+        /* Pad, must be 0. */
+        uint32_t pad;
+};
+
 /**
  * struct drm_vc4_mmap_bo - ioctl argument for mapping VC4 BOs.
  *
......
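
The struct drm_vc4_create_shader_bo comment above states that shader BOs cannot be mmapped after creation, since rewriting a shader that may still be executing would be a privilege escalation. A hedged sketch of what that contract looks like from userspace follows; whether the refusal happens at the DRM_IOCTL_VC4_MMAP_BO ioctl or at the subsequent mmap() call is not specified by this patch, so the sketch checks both steps, and try_map_shader is an illustrative name.

/* Sketch: per the header comment above, a shader BO created with
 * DRM_IOCTL_VC4_CREATE_SHADER_BO is not expected to be mappable.
 * Exactly where the kernel rejects the attempt is an assumption here.
 */
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <xf86drm.h>
#include "vc4_drm.h"

static void *
try_map_shader(int fd, uint32_t shader_handle, size_t size)
{
        struct drm_vc4_mmap_bo map = {
                .handle = shader_handle, /* from DRM_IOCTL_VC4_CREATE_SHADER_BO */
        };

        if (drmIoctl(fd, DRM_IOCTL_VC4_MMAP_BO, &map) != 0)
                return NULL; /* refused at the ioctl */

        /* Even with an offset in hand, mapping a shader BO writable is
         * expected to fail, keeping the validated contents immutable. */
        return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd, (off_t)map.offset);
}
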
@@ -2277,9 +2277,8 @@ vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
         }

         copy_uniform_state_to_shader(shader, c);
-        shader->bo = vc4_bo_alloc_mem(vc4->screen, c->qpu_insts,
-                                      c->qpu_inst_count * sizeof(uint64_t),
-                                      "code");
+        shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts,
+                                         c->qpu_inst_count * sizeof(uint64_t));

         /* Copy the compiler UBO range state to the compiled shader, dropping
          * out arrays that were never referenced by an indirect load.
......