Commit 2baf7ece authored by Antonio Argenziano, committed by Chris Wilson

lib: Move __gem_context_create to common ioctl wrapper library.

This patch adds a context creation ioctl wrapper that returns the error
for the caller to consume. Several tests that already implemented this
locally have been changed to use the new library function.
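
As a rough usage sketch (the helper names below all come from this patch;
the surrounding test scaffolding is only illustrative):

	static void example(void)
	{
		int fd = drm_open_driver(DRIVER_INTEL);
		uint32_t ctx_id;
		int err;

		/* Skip the test when the kernel exposes no context support. */
		gem_require_contexts(fd);

		/* Error-returning variant: the caller decides how to react. */
		err = __gem_context_create(fd, &ctx_id);
		igt_assert_eq(err, 0);
		gem_context_destroy(fd, ctx_id);

		/* Asserting variant for tests that simply need a context. */
		ctx_id = gem_context_create(fd);
		gem_context_destroy(fd, ctx_id);

		close(fd);
	}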

v2:
	- Add gem_require_contexts() to check for context support (Chris)

v3:
	- Add gem_has_contexts to check for context support and change
	  gem_require_contexts to skip if context support is not available.
	  (Chris)

v4:
	- Cosmetic changes and use lib function in gem_ctx_create where
	  possible. (Michal)

v5:
	- Use gem_require_contexts() in tests and fixtures. (Chris)
Signed-off-by: Antonio Argenziano <antonio.argenziano@intel.com>

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
parent 3fd9b578
......@@ -64,7 +64,7 @@ static uint32_t batch(int fd)
return handle;
}
static uint32_t __gem_context_create(int fd)
static uint32_t __gem_context_create_local(int fd)
{
struct drm_i915_gem_context_create create;
......@@ -101,7 +101,7 @@ static int loop(unsigned ring,
execbuf.flags |= LOCAL_I915_EXEC_HANDLE_LUT;
execbuf.flags |= LOCAL_I915_EXEC_NO_RELOC;
if (mode != DEFAULT) {
execbuf.rsvd1 = __gem_context_create(fd);
execbuf.rsvd1 = __gem_context_create_local(fd);
if (execbuf.rsvd1 == 0)
return 77;
}
......@@ -125,7 +125,7 @@ static int loop(unsigned ring,
uint32_t ctx = 0;
if (mode != DEFAULT && mode != NOP) {
execbuf.rsvd1 = __gem_context_create(fd);
execbuf.rsvd1 = __gem_context_create_local(fd);
ctx = gem_context_create(fd);
}
......
......@@ -105,7 +105,7 @@ static double elapsed(const struct timespec *start, const struct timespec *end)
return 1e3*(end->tv_sec - start->tv_sec) + 1e-6*(end->tv_nsec - start->tv_nsec);
}
static uint32_t __gem_context_create(int fd)
static uint32_t __gem_context_create_local(int fd)
{
struct drm_i915_gem_context_create arg = {};
drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &arg);
......@@ -216,7 +216,7 @@ static double replay(const char *filename, long nop, long range)
num_ctx = new_ctx;
}
ctx[t->handle] = __gem_context_create(fd);
ctx[t->handle] = __gem_context_create_local(fd);
break;
}
case DEL_CTX:
......
......@@ -43,6 +43,52 @@
* software features improving submission model (context priority).
*/
/**
* gem_has_contexts:
* @fd: open i915 drm file descriptor
*
* Queries whether context creation is supported or not.
*
* Returns: Context creation availability.
*/
bool gem_has_contexts(int fd)
{
uint32_t ctx_id = 0;
__gem_context_create(fd, &ctx_id);
if (ctx_id)
gem_context_destroy(fd, ctx_id);
return ctx_id;
}
/**
* gem_require_contexts:
* @fd: open i915 drm file descriptor
*
* This helper will automatically skip the test on platforms where context
* support is not available.
*/
void gem_require_contexts(int fd)
{
igt_require(gem_has_contexts(fd));
}
int __gem_context_create(int fd, uint32_t *ctx_id)
{
struct drm_i915_gem_context_create create;
int err = 0;
memset(&create, 0, sizeof(create));
if (igt_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create) == 0)
*ctx_id = create.ctx_id;
else
err = -errno;
errno = 0;
return err;
}
/**
* gem_context_create:
* @fd: open i915 drm file descriptor
......@@ -55,18 +101,12 @@
*/
uint32_t gem_context_create(int fd)
{
struct drm_i915_gem_context_create create;
uint32_t ctx_id;
memset(&create, 0, sizeof(create));
if (igt_ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create)) {
int err = -errno;
igt_skip_on(err == -ENODEV || errno == -EINVAL);
igt_assert_eq(err, 0);
}
igt_assert(create.ctx_id != 0);
errno = 0;
igt_assert_eq(__gem_context_create(fd, &ctx_id), 0);
igt_assert(ctx_id != 0);
return create.ctx_id;
return ctx_id;
}
int __gem_context_destroy(int fd, uint32_t ctx_id)
......
......@@ -25,10 +25,15 @@
#define GEM_CONTEXT_H
uint32_t gem_context_create(int fd);
int __gem_context_create(int fd, uint32_t *ctx_id);
void gem_context_destroy(int fd, uint32_t ctx_id);
int __gem_context_destroy(int fd, uint32_t ctx_id);
bool gem_has_contexts(int fd);
void gem_require_contexts(int fd);
void gem_context_require_bannable(int fd);
void gem_context_require_param(int fd, uint64_t param);
void gem_context_get_param(int fd, struct drm_i915_gem_context_param *p);
void gem_context_set_param(int fd, struct drm_i915_gem_context_param *p);
int __gem_context_set_param(int fd, struct drm_i915_gem_context_param *p);
......
......@@ -416,8 +416,10 @@ igt_main
igt_require(err == 0);
}
igt_subtest("i915-to-amd")
igt_subtest("i915-to-amd") {
gem_require_contexts(i915);
i915_to_amd(i915, amd, device);
}
igt_subtest("amd-to-i915")
amd_to_i915(i915, amd, device);
......
......@@ -45,6 +45,8 @@ igt_main
igt_fixture {
fd = drm_open_driver_render(DRIVER_INTEL);
gem_require_contexts(fd);
ctx_id = gem_context_create(fd);
/* Make sure a proper destroy works first */
gem_context_destroy(fd, ctx_id);
......
......@@ -45,7 +45,7 @@ static unsigned all_nengine;
static unsigned ppgtt_engines[16];
static unsigned ppgtt_nengine;
static int __gem_context_create(int fd, struct drm_i915_gem_context_create *arg)
static int __gem_context_create_local(int fd, struct drm_i915_gem_context_create *arg)
{
int ret = 0;
if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, arg))
......@@ -233,7 +233,6 @@ static uint64_t total_avail_mem(unsigned mode)
static void maximum(int fd, int ncpus, unsigned mode)
{
struct drm_i915_gem_context_create create;
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_object2 obj[2];
......@@ -241,8 +240,8 @@ static void maximum(int fd, int ncpus, unsigned mode)
unsigned ctx_size = context_size(fd);
uint32_t *contexts = NULL;
unsigned long count = 0;
uint32_t ctx_id;
memset(&create, 0, sizeof(create));
do {
int err;
......@@ -255,14 +254,14 @@ static void maximum(int fd, int ncpus, unsigned mode)
err = -ENOMEM;
if (avail_mem > (count + 1) * ctx_size)
err = __gem_context_create(fd, &create);
err = __gem_context_create(fd, &ctx_id);
if (err) {
igt_info("Created %lu contexts, before failing with '%s' [%d]\n",
count, strerror(-err), -err);
break;
}
contexts[count++] = create.ctx_id;
contexts[count++] = ctx_id;
} while (1);
igt_require(count);
......@@ -320,10 +319,7 @@ igt_main
fd = drm_open_driver(DRIVER_INTEL);
igt_require_gem(fd);
memset(&create, 0, sizeof(create));
igt_require(__gem_context_create(fd, &create) == 0);
gem_context_destroy(fd, create.ctx_id);
gem_require_contexts(fd);
for_each_engine(fd, engine) {
if (engine == 0)
......@@ -347,7 +343,7 @@ igt_main
memset(&create, 0, sizeof(create));
create.ctx_id = rand();
create.pad = 0;
igt_assert_eq(__gem_context_create(fd, &create), 0);
igt_assert_eq(__gem_context_create_local(fd, &create), 0);
igt_assert(create.ctx_id != 0);
gem_context_destroy(fd, create.ctx_id);
}
......@@ -356,7 +352,7 @@ igt_main
memset(&create, 0, sizeof(create));
create.ctx_id = rand();
create.pad = 1;
igt_assert_eq(__gem_context_create(fd, &create), -EINVAL);
igt_assert_eq(__gem_context_create_local(fd, &create), -EINVAL);
}
igt_subtest("maximum-mem")
......
......@@ -158,11 +158,9 @@ igt_main
fd = drm_open_driver_render(DRIVER_INTEL);
igt_require_gem(fd);
handle = gem_create(fd, 4096);
gem_require_contexts(fd);
/* check that we can create contexts. */
ctx_id = gem_context_create(fd);
gem_context_destroy(fd, ctx_id);
handle = gem_create(fd, 4096);
gem_write(fd, handle, 0, batch, sizeof(batch));
}
......
......@@ -38,6 +38,8 @@ igt_main
igt_fixture {
fd = drm_open_driver_render(DRIVER_INTEL);
gem_require_contexts(fd);
ctx = gem_context_create(fd);
}
......
......@@ -45,19 +45,6 @@
#define INTERRUPTIBLE 1
static int __gem_context_create(int fd, uint32_t *ctx_id)
{
struct drm_i915_gem_context_create arg;
int ret = 0;
memset(&arg, 0, sizeof(arg));
if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &arg))
ret = -errno;
*ctx_id = arg.ctx_id;
return ret;
}
static double elapsed(const struct timespec *start, const struct timespec *end)
{
return ((end->tv_sec - start->tv_sec) +
......@@ -78,8 +65,7 @@ static void single(int fd, uint32_t handle,
gem_require_ring(fd, e->exec_id | e->flags);
igt_require(__gem_context_create(fd, &contexts[0]) == 0);
for (n = 1; n < 64; n++)
for (n = 0; n < 64; n++)
contexts[n] = gem_context_create(fd);
memset(&obj, 0, sizeof(obj));
......@@ -153,6 +139,8 @@ igt_main
fd = drm_open_driver(DRIVER_INTEL);
igt_require_gem(fd);
gem_require_contexts(fd);
light = gem_create(fd, 4096);
gem_write(fd, light, 0, &bbe, sizeof(bbe));
......
......@@ -124,6 +124,8 @@ static void single(const char *name, bool all_engines)
igt_require_gem(fd);
igt_require(gem_can_store_dword(fd, 0));
gem_require_contexts(fd);
gen = intel_gen(intel_get_drm_devid(fd));
num_engines = 0;
......@@ -371,6 +373,10 @@ static void threads(void)
struct thread data;
data.fd = drm_open_driver_render(DRIVER_INTEL);
igt_require_gem(data.fd);
gem_require_contexts(data.fd);
data.num_ctx = get_num_contexts(data.fd, false);
data.all_ctx = malloc(data.num_ctx * sizeof(uint32_t));
igt_assert(data.all_ctx);
......
......@@ -273,17 +273,6 @@ static void test_inflight_suspend(int fd)
trigger_reset(fd);
}
static uint32_t __gem_context_create(int fd)
{
struct drm_i915_gem_context_create create;
memset(&create, 0, sizeof(create));
if (ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create))
return 0;
return create.ctx_id;
}
static void test_inflight_contexts(int fd)
{
struct drm_i915_gem_execbuffer2 execbuf;
......@@ -294,8 +283,9 @@ static void test_inflight_contexts(int fd)
int fence[64];
igt_require(gem_has_exec_fence(fd));
gem_require_contexts(fd);
ctx[0] = __gem_context_create(fd);
ctx[0] = gem_context_create(fd);
igt_require(ctx[0]);
for (unsigned int n = 1; n < ARRAY_SIZE(ctx); n++)
ctx[n] = gem_context_create(fd);
......
......@@ -55,15 +55,6 @@ static bool ignore_engine(int fd, unsigned engine)
return false;
}
static uint32_t __gem_context_create(int fd)
{
struct drm_i915_gem_context_create arg;
memset(&arg, 0, sizeof(arg));
drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &arg);
return arg.ctx_id;
}
static void xchg_obj(void *array, unsigned i, unsigned j)
{
struct drm_i915_gem_exec_object2 *obj = array;
......@@ -130,8 +121,7 @@ static void wide(int fd, int ring_size, int timeout, unsigned int flags)
LOCAL_I915_EXEC_HANDLE_LUT);
if (flags & CONTEXTS) {
exec[e].execbuf.rsvd1 = __gem_context_create(fd);
igt_require(exec[e].execbuf.rsvd1);
exec[e].execbuf.rsvd1 = gem_context_create(fd);
}
exec[e].exec[0].handle = gem_create(fd, 4096);
......@@ -174,7 +164,7 @@ static void wide(int fd, int ring_size, int timeout, unsigned int flags)
if (flags & CONTEXTS) {
gem_context_destroy(fd, exec[e].execbuf.rsvd1);
exec[e].execbuf.rsvd1 = __gem_context_create(fd);
exec[e].execbuf.rsvd1 = gem_context_create(fd);
}
exec[e].reloc.presumed_offset = exec[e].exec[1].offset;
......@@ -358,8 +348,10 @@ igt_main
igt_subtest("wide-all")
wide(device, ring_size, 20, 0);
igt_subtest("wide-contexts")
igt_subtest("wide-contexts") {
gem_require_contexts(device);
wide(device, ring_size, 20, CONTEXTS);
}
igt_fixture {
igt_stop_hang_detector();
......
......@@ -1528,22 +1528,22 @@ igt_main
}
}
igt_subtest("long-history") {
long ring_size = measure_ring_size(i915) - 1;
igt_subtest_group {
long ring_size = 0;
igt_info("Ring size: %ld batches\n", ring_size);
igt_require(ring_size);
test_long_history(i915, ring_size, 0);
}
igt_fixture {
ring_size = measure_ring_size(i915) - 1;
igt_info("Ring size: %ld batches\n", ring_size);
igt_require(ring_size);
igt_subtest("expired-history") {
long ring_size = measure_ring_size(i915) - 1;
gem_require_contexts(i915);
}
igt_info("Ring size: %ld batches\n", ring_size);
igt_require(ring_size);
igt_subtest("long-history")
test_long_history(i915, ring_size, 0);
test_long_history(i915, ring_size, EXPIRED);
igt_subtest("expired-history")
test_long_history(i915, ring_size, EXPIRED);
}
igt_subtest("flip") {
......
......@@ -486,6 +486,7 @@ igt_main
igt_subtest_group {
igt_fixture {
gem_require_contexts(device);
igt_require(gem_scheduler_has_preemption(device));
}
......
......@@ -357,19 +357,6 @@ static void xchg(void *array, unsigned i, unsigned j)
u[j] = tmp;
}
static int __gem_context_create(int fd, uint32_t *ctx_id)
{
struct drm_i915_gem_context_create arg;
int ret = 0;
memset(&arg, 0, sizeof(arg));
if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &arg))
ret = -errno;
*ctx_id = arg.ctx_id;
return ret;
}
static void sequential(int fd, uint32_t handle, unsigned flags, int timeout)
{
const int ncpus = flags & FORKED ? sysconf(_SC_NPROCESSORS_ONLN) : 1;
......@@ -381,6 +368,8 @@ static void sequential(int fd, uint32_t handle, unsigned flags, int timeout)
double time, sum;
unsigned n;
gem_require_contexts(fd);
results = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
igt_assert(results != MAP_FAILED);
......@@ -699,6 +688,7 @@ igt_main
igt_subtest_group {
igt_fixture {
gem_require_contexts(device);
igt_require(gem_scheduler_has_ctx_priority(device));
igt_require(gem_scheduler_has_preemption(device));
}
......
......@@ -55,22 +55,6 @@ static void check_bo(int fd, uint32_t handle, int pass)
munmap(map, 4096);
}
static uint32_t __gem_context_create(int fd)
{
struct drm_i915_gem_context_create arg;
memset(&arg, 0, sizeof(arg));
if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &arg) == 0)
gem_context_destroy(fd, arg.ctx_id);
return arg.ctx_id;
}
static void gem_require_context(int fd)
{
igt_require(__gem_context_create(fd));
}
static bool ignore_engine(int fd, unsigned engine)
{
if (engine == 0)
......@@ -189,7 +173,7 @@ static void all(int fd, unsigned engine, unsigned flags)
int i;
if (flags & CONTEXTS)
gem_require_context(fd);
gem_require_contexts(fd);
if (flags & FDS)
igt_require(gen > 5);
......
......@@ -56,19 +56,6 @@ static void noop(struct noop *n,
gem_execbuf(n->fd, &execbuf);
}
static int __gem_context_create(int fd, uint32_t *ctx_id)
{
struct drm_i915_gem_context_create arg;
int ret = 0;
memset(&arg, 0, sizeof(arg));
if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &arg))
ret = -errno;
*ctx_id = arg.ctx_id;
return ret;
}
static int fls(uint64_t x)
{
int t;
......@@ -215,8 +202,9 @@ igt_main
const unsigned int ncontexts = 1024;
uint32_t contexts[ncontexts];
igt_require(__gem_context_create(no.fd, &contexts[0]) == 0);
for (n = 1; n < ncontexts; n++)
gem_require_contexts(no.fd);
for (n = 0; n < ncontexts; n++)
contexts[n] = gem_context_create(no.fd);
igt_until_timeout(timeout) {
......
......@@ -1001,8 +1001,11 @@ igt_main
fd = drm_open_driver_master(DRIVER_INTEL);
gem_submission_print_method(fd);
gem_scheduler_print_capability(fd);
igt_require_gem(fd);
gem_require_mmap_wc(fd);
gem_require_contexts(fd);
igt_fork_hang_detector(fd);
}
......
......@@ -79,19 +79,6 @@ static void verify_reloc(int fd, uint32_t handle,
}
}
static int __gem_context_create(int fd, uint32_t *ctx_id)
{
struct drm_i915_gem_context_create arg;
int ret = 0;
memset(&arg, 0, sizeof(arg));
if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &arg))
ret = -errno;
*ctx_id = arg.ctx_id;
return ret;
}
static bool ignore_engine(int fd, unsigned engine)
{
if (engine == 0)
......@@ -244,10 +231,8 @@ static void whisper(int fd, unsigned engine, unsigned flags)
if (flags & FDS)
igt_require(gen >= 6);
if (flags & CONTEXTS) {
igt_require(__gem_context_create(fd, &contexts[0]) == 0);
gem_context_destroy(fd, contexts[0]);
}
if (flags & CONTEXTS)
gem_require_contexts(fd);
if (flags & HANG)
init_hang(&hang);
......
......@@ -453,6 +453,9 @@ igt_main
flags & MOCS_NON_DEFAULT_CTX ? "-ctx": "",
flags & MOCS_DIRTY_VALUES ? "-dirty" : "",
e->name) {
if (flags & (MOCS_NON_DEFAULT_CTX | MOCS_DIRTY_VALUES))
gem_require_contexts(fd);
run_test(fd, e->exec_id | e->flags, flags, mode);
}
}
......
......@@ -227,7 +227,8 @@ int main(int argc, char **argv)
igt_fixture {
fd = drm_open_driver(DRIVER_INTEL);
igt_require_gem(fd);
igt_assert(fd >= 0);
gem_require_contexts(fd);
}
igt_subtest("retire-vma-not-inactive")
......
......@@ -114,7 +114,10 @@ static int has_engine(int fd,
static void check_context(const struct intel_execution_engine *e)
{
int fd = drm_open_driver(DRIVER_INTEL);
gem_require_contexts(fd);
igt_require(has_engine(fd, gem_context_create(fd), e));
close(fd);
}
......
......@@ -839,6 +839,7 @@ igt_main
igt_subtest_group {
igt_fixture {
gem_require_contexts(fd);
igt_require(gem_scheduler_has_ctx_priority(fd));
igt_require(gem_scheduler_has_preemption(fd));
}
......
......@@ -182,8 +182,11 @@ static void check_workarounds(int fd, enum operation op, unsigned int flags)
if (flags & FD)
fd = reopen(fd);
if (flags & CONTEXT)
if (flags & CONTEXT) {
gem_require_contexts(fd);
ctx = gem_context_create(fd);
}
igt_assert_eq(workaround_fail_count(fd, ctx), 0);
......
......@@ -1456,8 +1456,10 @@ igt_main
* Check that reported usage is correct when PMU is
* enabled after two batches are running.
*/
igt_subtest_f("busy-double-start-%s", e->name)
igt_subtest_f("busy-double-start-%s", e->name) {
gem_require_contexts(fd);
busy_double_start(fd, e);
}
/**
* Check that the PMU can be safely enabled in face of
......