Commit 0a1fc45e authored by Chris Wilson

igt/gem_busy: Prevent banning when running multiple hang tests
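
Injecting several hangs in quick succession can get the context banned,
after which its subsequent execbufs are rejected. Add igt_allow_hang()
and igt_disallow_hang() to disable the ban period (and, unless
HANG_ALLOW_CAPTURE is set, error capture) for the duration of a group
of hang subtests and to restore the previous settings afterwards, and
use them around gem_busy's hang tests.

A minimal sketch of the intended pattern (the subtest name and the
direct igt_hang_ctx() call are illustrative only; gem_busy's hang
subtests go through its own basic()/one() helpers):

    igt_hang_t hang;

    igt_fixture {
        /* save ban period, then disable banning/error capture */
        hang = igt_allow_hang(fd, 0, 0);
    }

    igt_subtest("hang-render") {
        igt_hang_t h = igt_hang_ctx(fd, 0, I915_EXEC_RENDER, 0, NULL);

        /* interact with the hanging batch here */

        igt_post_hang_ring(fd, h);
    }

    igt_fixture {
        /* restore the saved ban period and error capture */
        igt_disallow_hang(fd, hang);
    }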

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
parent c947bfe5
......@@ -70,6 +70,19 @@ static bool has_gpu_reset(int fd)
return once;
}
static void eat_error_state(void)
{
int fd, ret;
fd = igt_debugfs_open("i915_error_state", O_WRONLY);
do {
ret = write(fd, "", 1);
if (ret < 0)
ret = -errno;
} while (ret == -EINTR || ret == -EAGAIN);
close(fd);
}
/**
* igt_require_hang_ring:
* @fd: open i915 drm file descriptor
......@@ -110,6 +123,62 @@ void igt_require_hang_ring(int fd, int ring)
igt_require(has_gpu_reset(fd));
}
igt_hang_t igt_allow_hang(int fd, unsigned ctx, unsigned flags)
{
struct local_i915_gem_context_param param;
unsigned ban;
if (!igt_check_boolean_env_var("IGT_HANG", true))
igt_skip("hang injection disabled by user");
gem_context_require_ban_period(fd);
if (!igt_check_boolean_env_var("IGT_HANG_WITHOUT_RESET", false))
igt_require(has_gpu_reset(fd));
param.context = ctx;
param.size = 0;
if ((flags & HANG_ALLOW_CAPTURE) == 0) {
param.param = LOCAL_CONTEXT_PARAM_NO_ERROR_CAPTURE;
param.value = 1;
/* Older kernels may not have NO_ERROR_CAPTURE, in which case
* we just eat the error state in post-hang (and hope we eat
* the right one).
*/
__gem_context_set_param(fd, &param);
}
param.param = LOCAL_CONTEXT_PARAM_BAN_PERIOD;
param.value = 0;
gem_context_get_param(fd, &param);
ban = param.value;
if ((flags & HANG_ALLOW_BAN) == 0) {
param.param = LOCAL_CONTEXT_PARAM_BAN_PERIOD;
param.value = 0;
gem_context_set_param(fd, &param);
}
return (struct igt_hang){ 0, ctx, ban, flags };
}
void igt_disallow_hang(int fd, igt_hang_t arg)
{
struct local_i915_gem_context_param param;
param.context = arg.ctx;
param.size = 0;
param.param = LOCAL_CONTEXT_PARAM_BAN_PERIOD;
param.value = arg.ban;
gem_context_set_param(fd, &param);
if ((arg.flags & HANG_ALLOW_CAPTURE) == 0) {
param.param = LOCAL_CONTEXT_PARAM_NO_ERROR_CAPTURE;
param.value = 0;
if (__gem_context_set_param(fd, &param))
eat_error_state();
}
}
/**
* igt_hang_ring_ctx:
* @fd: open i915 drm file descriptor
......@@ -118,18 +187,18 @@ void igt_require_hang_ring(int fd, int ring)
* @flags: set of flags to control execution
*
* This helper function injects a hanging batch associated with @ctx into @ring.
* It returns a #igt_hang_ring_t structure which must be passed to
* It returns a #igt_hang_t structure which must be passed to
* igt_post_hang_ring() for hang post-processing (after the gpu hang
* interaction has been tested).
*
* Returns:
* Structure with helper internal state for igt_post_hang_ring().
*/
igt_hang_ring_t igt_hang_ctx(int fd,
uint32_t ctx,
int ring,
unsigned flags,
uint64_t *offset)
igt_hang_t igt_hang_ctx(int fd,
uint32_t ctx,
int ring,
unsigned flags,
uint64_t *offset)
{
struct drm_i915_gem_relocation_entry reloc;
struct drm_i915_gem_execbuffer2 execbuf;
......@@ -214,7 +283,7 @@ igt_hang_ring_t igt_hang_ctx(int fd,
if (offset)
*offset = exec.offset;
return (struct igt_hang_ring){ exec.handle, ctx, ban, flags };
return (igt_hang_t){ exec.handle, ctx, ban, flags };
}
/**
......@@ -223,30 +292,17 @@ igt_hang_ring_t igt_hang_ctx(int fd,
* @ring: execbuf ring flag
*
* This helper function injects a hanging batch into @ring. It returns a
* #igt_hang_ring_t structure which must be passed to igt_post_hang_ring() for
* #igt_hang_t structure which must be passed to igt_post_hang_ring() for
* hang post-processing (after the gpu hang interaction has been tested).
*
* Returns:
* Structure with helper internal state for igt_post_hang_ring().
*/
igt_hang_ring_t igt_hang_ring(int fd, int ring)
igt_hang_t igt_hang_ring(int fd, int ring)
{
return igt_hang_ctx(fd, 0, ring, 0, NULL);
}
static void eat_error_state(void)
{
int fd, ret;
fd = igt_debugfs_open("i915_error_state", O_WRONLY);
do {
ret = write(fd, "", 1);
if (ret < 0)
ret = -errno;
} while (ret == -EINTR || ret == -EAGAIN);
close(fd);
}
/**
* igt_post_hang_ring:
* @fd: open i915 drm file descriptor
......@@ -255,7 +311,7 @@ static void eat_error_state(void)
* This function does the necessary post-processing after a gpu hang injected
* with igt_hang_ring().
*/
void igt_post_hang_ring(int fd, struct igt_hang_ring arg)
void igt_post_hang_ring(int fd, igt_hang_t arg)
{
struct local_i915_gem_context_param param;
......
......@@ -28,25 +28,28 @@
void igt_require_hang_ring(int fd, int ring);
typedef struct igt_hang_ring {
typedef struct igt_hang {
unsigned handle;
unsigned ctx;
unsigned ban;
unsigned flags;
} igt_hang_ring_t;
} igt_hang_t;
igt_hang_t igt_allow_hang(int fd, unsigned ctx, unsigned flags);
void igt_disallow_hang(int fd, igt_hang_t arg);
#define HANG_POISON 0xc5c5c5c5
struct igt_hang_ring igt_hang_ctx(int fd,
uint32_t ctx,
int ring,
unsigned flags,
uint64_t *offset);
igt_hang_t igt_hang_ctx(int fd,
uint32_t ctx,
int ring,
unsigned flags,
uint64_t *offset);
#define HANG_ALLOW_BAN 1
#define HANG_ALLOW_CAPTURE 2
struct igt_hang_ring igt_hang_ring(int fd, int ring);
void igt_post_hang_ring(int fd, struct igt_hang_ring arg);
igt_hang_t igt_hang_ring(int fd, int ring);
void igt_post_hang_ring(int fd, igt_hang_t arg);
void igt_force_gpu_reset(void);
......
......@@ -139,7 +139,7 @@ const uint32_t *batch;
static uint64_t submit_hang(int fd, unsigned ring_id)
{
uint64_t offset;
igt_hang_ring_t hang;
igt_hang_t hang;
hang = igt_hang_ctx(fd, 0, ring_id, HANG_ALLOW_CAPTURE, &offset);
......
......@@ -604,90 +604,97 @@ igt_main
fd = drm_open_driver_master(DRIVER_INTEL);
}
igt_fixture {
igt_fork_hang_detector(fd);
}
for (e = intel_execution_engines; e->name; e++) {
igt_subtest_group {
igt_subtest_f("%sbusy-%s",
e->exec_id == 0 ? "basic-" : "",
e->name) {
igt_require(gem_has_ring(fd, e->exec_id | e->flags));
gem_quiescent_gpu(fd);
basic(fd, e->exec_id | e->flags, 0);
}
}
}
igt_subtest_group {
int gen = 0;
igt_fixture {
igt_require(has_extended_busy_ioctl(fd));
gem_require_mmap_wc(fd);
gen = intel_gen(intel_get_drm_devid(fd));
igt_fork_hang_detector(fd);
}
for (e = intel_execution_engines; e->name; e++) {
/* default exec-id is purely symbolic */
if (e->exec_id == 0)
continue;
igt_subtest_f("extended-%s", e->name) {
gem_require_ring(fd, e->exec_id | e->flags);
igt_skip_on_f(gen == 6 &&
e->exec_id == I915_EXEC_BSD,
"MI_STORE_DATA broken on gen6 bsd\n");
gem_quiescent_gpu(fd);
one(fd, e->exec_id, e->flags, 0);
gem_quiescent_gpu(fd);
igt_subtest_group {
igt_subtest_f("%sbusy-%s",
e->exec_id == 0 ? "basic-" : "",
e->name) {
igt_require(gem_has_ring(fd, e->exec_id | e->flags));
gem_quiescent_gpu(fd);
basic(fd, e->exec_id | e->flags, 0);
}
}
}
for (e = intel_execution_engines; e->name; e++) {
/* default exec-id is purely symbolic */
if (e->exec_id == 0)
continue;
igt_subtest_group {
int gen = 0;
igt_subtest_f("extended-parallel-%s", e->name) {
gem_require_ring(fd, e->exec_id | e->flags);
igt_skip_on_f(gen == 6 &&
e->exec_id == I915_EXEC_BSD,
"MI_STORE_DATA broken on gen6 bsd\n");
gem_quiescent_gpu(fd);
one(fd, e->exec_id, e->flags, PARALLEL);
gem_quiescent_gpu(fd);
igt_fixture {
igt_require(has_extended_busy_ioctl(fd));
gem_require_mmap_wc(fd);
gen = intel_gen(intel_get_drm_devid(fd));
}
}
}
igt_subtest_group {
igt_fixture {
igt_require(has_extended_busy_ioctl(fd));
igt_require(has_semaphores(fd));
for (e = intel_execution_engines; e->name; e++) {
/* default exec-id is purely symbolic */
if (e->exec_id == 0)
continue;
igt_subtest_f("extended-%s", e->name) {
gem_require_ring(fd, e->exec_id | e->flags);
igt_skip_on_f(gen == 6 &&
e->exec_id == I915_EXEC_BSD,
"MI_STORE_DATA broken on gen6 bsd\n");
gem_quiescent_gpu(fd);
one(fd, e->exec_id, e->flags, 0);
gem_quiescent_gpu(fd);
}
}
for (e = intel_execution_engines; e->name; e++) {
/* default exec-id is purely symbolic */
if (e->exec_id == 0)
continue;
igt_subtest_f("extended-parallel-%s", e->name) {
gem_require_ring(fd, e->exec_id | e->flags);
igt_skip_on_f(gen == 6 &&
e->exec_id == I915_EXEC_BSD,
"MI_STORE_DATA broken on gen6 bsd\n");
gem_quiescent_gpu(fd);
one(fd, e->exec_id, e->flags, PARALLEL);
gem_quiescent_gpu(fd);
}
}
}
for (e = intel_execution_engines; e->name; e++) {
/* default exec-id is purely symbolic */
if (e->exec_id == 0)
continue;
igt_subtest_group {
igt_fixture {
igt_require(has_extended_busy_ioctl(fd));
igt_require(has_semaphores(fd));
}
for (e = intel_execution_engines; e->name; e++) {
/* default exec-id is purely symbolic */
if (e->exec_id == 0)
continue;
igt_subtest_f("extended-semaphore-%s", e->name)
semaphore(fd, e->exec_id, e->flags);
igt_subtest_f("extended-semaphore-%s", e->name)
semaphore(fd, e->exec_id, e->flags);
}
}
}
igt_subtest_group {
igt_subtest("close-race")
close_race(fd);
}
igt_fixture {
igt_stop_hang_detector();
igt_fixture {
igt_stop_hang_detector();
}
}
for (e = intel_execution_engines; e->name; e++) {
igt_subtest_group {
igt_subtest_group {
igt_hang_t hang;
igt_fixture {
hang = igt_allow_hang(fd, 0, 0);
}
for (e = intel_execution_engines; e->name; e++) {
igt_subtest_f("%shang-%s",
e->exec_id == 0 ? "basic-" : "",
e->name) {
......@@ -696,32 +703,36 @@ igt_main
basic(fd, e->exec_id | e->flags, HANG);
}
}
}
igt_subtest_group {
int gen = 0;
igt_fixture {
igt_require(has_extended_busy_ioctl(fd));
gem_require_mmap_wc(fd);
gen = intel_gen(intel_get_drm_devid(fd));
}
igt_subtest_group {
int gen = 0;
for (e = intel_execution_engines; e->name; e++) {
/* default exec-id is purely symbolic */
if (e->exec_id == 0)
continue;
igt_fixture {
igt_require(has_extended_busy_ioctl(fd));
gem_require_mmap_wc(fd);
gen = intel_gen(intel_get_drm_devid(fd));
}
igt_subtest_f("extended-hang-%s", e->name) {
gem_require_ring(fd, e->exec_id | e->flags);
igt_skip_on_f(gen == 6 &&
e->exec_id == I915_EXEC_BSD,
"MI_STORE_DATA broken on gen6 bsd\n");
gem_quiescent_gpu(fd);
one(fd, e->exec_id, e->flags, HANG);
gem_quiescent_gpu(fd);
for (e = intel_execution_engines; e->name; e++) {
/* default exec-id is purely symbolic */
if (e->exec_id == 0)
continue;
igt_subtest_f("extended-hang-%s", e->name) {
gem_require_ring(fd, e->exec_id | e->flags);
igt_skip_on_f(gen == 6 &&
e->exec_id == I915_EXEC_BSD,
"MI_STORE_DATA broken on gen6 bsd\n");
gem_quiescent_gpu(fd);
one(fd, e->exec_id, e->flags, HANG);
gem_quiescent_gpu(fd);
}
}
}
igt_fixture {
igt_disallow_hang(fd, hang);
}
}
igt_fixture {
......
......@@ -856,7 +856,7 @@ static void buffers_fini(struct buffers *b)
}
typedef void (*do_copy)(struct buffers *b, drm_intel_bo *dst, drm_intel_bo *src);
typedef struct igt_hang_ring (*do_hang)(void);
typedef igt_hang_t (*do_hang)(void);
static void render_copy_bo(struct buffers *b, drm_intel_bo *dst, drm_intel_bo *src)
{
......@@ -940,27 +940,27 @@ static void wc_copy_bo(struct buffers *b, drm_intel_bo *dst, drm_intel_bo *src)
munmap(s, size);
}
static struct igt_hang_ring no_hang(void)
static igt_hang_t no_hang(void)
{
return (struct igt_hang_ring){0, 0};
return (igt_hang_t){0, 0};
}
static struct igt_hang_ring bcs_hang(void)
static igt_hang_t bcs_hang(void)
{
return igt_hang_ring(fd, I915_EXEC_BLT);
}
static struct igt_hang_ring rcs_hang(void)
static igt_hang_t rcs_hang(void)
{
return igt_hang_ring(fd, I915_EXEC_RENDER);
}
static struct igt_hang_ring all_hang(void)
static igt_hang_t all_hang(void)
{
uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_object2 obj;
struct igt_hang_ring hang;
igt_hang_t hang;
unsigned engine;
memset(&obj, 0, sizeof(obj));
......@@ -992,7 +992,7 @@ static void do_basic0(struct buffers *buffers,
buffers->mode->set_bo(buffers, buffers->src[0], 0xdeadbeef);
for (int i = 0; i < buffers->count; i++) {
struct igt_hang_ring hang = do_hang_func();
igt_hang_t hang = do_hang_func();
do_copy_func(buffers, buffers->dst[i], buffers->src[0]);
buffers->mode->cmp_bo(buffers, buffers->dst[i], 0xdeadbeef);
......@@ -1008,7 +1008,7 @@ static void do_basic1(struct buffers *buffers,
gem_quiescent_gpu(fd);
for (int i = 0; i < buffers->count; i++) {
struct igt_hang_ring hang = do_hang_func();
igt_hang_t hang = do_hang_func();
buffers->mode->set_bo(buffers, buffers->src[i], i);
buffers->mode->set_bo(buffers, buffers->dst[i], ~i);
......@@ -1025,7 +1025,7 @@ static void do_basicN(struct buffers *buffers,
do_copy do_copy_func,
do_hang do_hang_func)
{
struct igt_hang_ring hang;
igt_hang_t hang;
gem_quiescent_gpu(fd);
......@@ -1051,7 +1051,7 @@ static void do_overwrite_source(struct buffers *buffers,
do_copy do_copy_func,
do_hang do_hang_func)
{
struct igt_hang_ring hang;
igt_hang_t hang;
int i;
gem_quiescent_gpu(fd);
......@@ -1075,7 +1075,7 @@ static void do_overwrite_source_read(struct buffers *buffers,
int do_rcs)
{
const int half = buffers->count/2;
struct igt_hang_ring hang;
igt_hang_t hang;
int i;
gem_quiescent_gpu(fd);
......@@ -1119,7 +1119,7 @@ static void do_overwrite_source__rev(struct buffers *buffers,
do_copy do_copy_func,
do_hang do_hang_func)
{
struct igt_hang_ring hang;
igt_hang_t hang;
int i;
gem_quiescent_gpu(fd);
......@@ -1141,7 +1141,7 @@ static void do_overwrite_source__one(struct buffers *buffers,
do_copy do_copy_func,
do_hang do_hang_func)
{
struct igt_hang_ring hang;
igt_hang_t hang;
gem_quiescent_gpu(fd);
buffers->mode->set_bo(buffers, buffers->src[0], 0);
......@@ -1159,7 +1159,7 @@ static void do_intermix(struct buffers *buffers,
int do_rcs)
{
const int half = buffers->count/2;
struct igt_hang_ring hang;
igt_hang_t hang;
int i;
gem_quiescent_gpu(fd);
......@@ -1213,7 +1213,7 @@ static void do_early_read(struct buffers *buffers,
do_copy do_copy_func,
do_hang do_hang_func)
{
struct igt_hang_ring hang;
igt_hang_t hang;
int i;
gem_quiescent_gpu(fd);
......@@ -1231,7 +1231,7 @@ static void do_read_read_bcs(struct buffers *buffers,
do_copy do_copy_func,
do_hang do_hang_func)
{
struct igt_hang_ring hang;
igt_hang_t hang;
int i;
gem_quiescent_gpu(fd);
......@@ -1252,7 +1252,7 @@ static void do_write_read_bcs(struct buffers *buffers,
do_copy do_copy_func,
do_hang do_hang_func)
{
struct igt_hang_ring hang;
igt_hang_t hang;
int i;
gem_quiescent_gpu(fd);
......@@ -1272,7 +1272,7 @@ static void do_read_read_rcs(struct buffers *buffers,
do_copy do_copy_func,
do_hang do_hang_func)
{
struct igt_hang_ring hang;
igt_hang_t hang;
int i;
gem_quiescent_gpu(fd);
......@@ -1293,7 +1293,7 @@ static void do_write_read_rcs(struct buffers *buffers,
do_copy do_copy_func,
do_hang do_hang_func)
{
struct igt_hang_ring hang;
igt_hang_t hang;
int i;
gem_quiescent_gpu(fd);
......@@ -1313,7 +1313,7 @@ static void do_gpu_read_after_write(struct buffers *buffers,
do_copy do_copy_func,
do_hang do_hang_func)
{
struct igt_hang_ring hang;
igt_hang_t hang;
int i;
gem_quiescent_gpu(fd);
......
......@@ -201,7 +201,7 @@ igt_main
* the last context is leaked at every reset.
*/
for (i = 0; i < 20; i++) {
igt_hang_ring_t hang = igt_hang_ring(fd, I915_EXEC_RENDER);
igt_hang_t hang = igt_hang_ring(fd, I915_EXEC_RENDER);
igt_assert(exec(fd, handle, I915_EXEC_RENDER, 0) == 0);
igt_assert(exec(fd, handle, I915_EXEC_RENDER, ctx_id) == 0);
igt_post_hang_ring(fd, hang);
......
......@@ -136,7 +136,7 @@ static int __gem_wait(int fd, uint32_t handle, int64_t timeout)
static void test_wait(int fd)
{
igt_hang_ring_t hang;
igt_hang_t hang;
/* If the request we wait on completes due to a hang (even for
* that request), the user expects the return value to 0 (success).
......
......@@ -118,14 +118,14 @@ verify_small_read(drm_intel_bo *bo, uint32_t val)
}
}
typedef struct igt_hang_ring (*do_hang)(int fd);
typedef igt_hang_t (*do_hang)(int fd);
static struct igt_hang_ring no_hang(int fd)
static igt_hang_t no_hang(int fd)
{
return (struct igt_hang_ring){0};
return (igt_hang_t){0};
}
static struct igt_hang_ring bcs_hang(int fd)
static igt_hang_t bcs_hang(int fd)
{
return igt_hang_ring(fd, batch->gen >= 6 ? I915_EXEC_BLT : I915_EXEC_DEFAULT);
}
......@@ -136,7 +136,7 @@ static void do_test(int fd, int cache_level,
drm_intel_bo *tmp[2],
int loop, do_hang do_hang_func)
{
struct igt_hang_ring hang;
igt_hang_t hang;
if (cache_level != -1) {
gem_set_caching(fd, tmp[0]->handle, cache_level);
......
......@@ -184,18 +184,18 @@ static void reloc_and_emit(int fd, drm_intel_bo *target_bo, bool faulting_reloc)
gem_close(fd, handle_relocs);
}
static struct igt_hang_ring no_hang(int fd)
static igt_hang_t no_hang(int fd)
{
return (struct igt_hang_ring){0};
return (igt_hang_t){0};
}
static struct igt_hang_ring bcs_hang(int fd)
static igt_hang_t bcs_hang(int fd)
{
return igt_hang_ring(fd, I915_EXEC_BLT);
}
static void do_test(int fd, bool faulting_reloc,
struct igt_hang_ring (*do_hang)(int fd))
igt_hang_t (*do_hang)(int fd))
{
uint32_t tiling_mode = I915_TILING_X;
unsigned long pitch, act_size;
......@@ -214,7 +214,7 @@ static void do_test(int fd, bool faulting_reloc,
create_special_bo();
for (i = 0; i < NUM_TARGET_BOS; i++) {
struct igt_hang_ring hang;
igt_hang_t hang;
pc_target_bo[i] = drm_intel_bo_alloc(bufmgr, "special batch", 4096, 4096);
emit_dummy_load(pitch);
......
......@@ -156,7 +156,7 @@ static void inject_hang(int fd, uint32_t ctx,
const struct intel_execution_engine *e,
unsigned flags)
{
igt_hang_ring_t hang;
igt_hang_t hang;
clock_gettime(CLOCK_MONOTONIC, &ts_injected);
......@@ -547,7 +547,7 @@ static void test_close_pending_fork(const struct intel_execution_engine *e,
const bool reverse)
{
int fd = drm_open_driver(DRIVER_INTEL);
igt_hang_ring_t hang;
igt_hang_t hang;
int pid;
assert_reset_status(fd, fd, 0, RS_NO_ERROR);
......
......@@ -88,7 +88,7 @@ static void run_test(int fd, unsigned ring, unsigned flags)
struct drm_i915_gem_exec_object2 obj[2];
struct drm_i915_gem_relocation_entry reloc[1024];
struct drm_i915_gem_execbuffer2 execbuf;
struct igt_hang_ring hang;
igt_hang_t hang;
uint32_t *batch, *b;
int i;
......
......@@ -348,7 +348,7 @@ static void test_evict_hang(int fd)
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_object2 object;
igt_hang_ring_t hang;
igt_hang_t hang;
uint64_t expected;
memset(&object, 0, sizeof(object));
......
......@@ -790,12 +790,12 @@ static void set_y_tiling(struct test_output *o, int fb_idx)
drmFree(r);
}
static igt_hang_ring_t hang_gpu(int fd)
static igt_hang_t hang_gpu(int fd)
{
return igt_hang_ring(fd, I915_EXEC_DEFAULT);
}
static void unhang_gpu(int fd, igt_hang_ring_t hang)
static void unhang_gpu(int fd, igt_hang_t hang)
{
igt_post_hang_ring(fd, hang);
}
......@@ -842,7 +842,7 @@ static unsigned int run_test_step(struct test_output *o)
bool do_vblank;
struct vblank_reply vbl_reply;
unsigned int target_seq;
igt_hang_ring_t hang;
igt_hang_t hang;
target_seq = o->vblank_state.seq_step;
/* Absolute waits only works once we have a frame counter. */
......@@ -1253,7 +1253,7 @@ static unsigned int wait_for_events(struct test_output *o)
static unsigned event_loop(struct test_output *o, unsigned duration_ms)
{
unsigned long start, end;
igt_hang_ring_t hang;
igt_hang_t hang;
int count = 0;
memset(&hang, 0, sizeof(hang));
......
......@@ -229,7 +229,7 @@ igt_main
}
igt_subtest_f("hang-read-crc-pipe-%c", 'A'+i) {
igt_hang_ring_t hang =
igt_hang_t hang =
igt_hang_ring(data.drm_fd, I915_EXEC_RENDER);
test_read_crc(&data, i, 0);
igt_post_hang_ring(data.drm_fd, hang);
......