Commit 140a67c1 authored by Antonio Argenziano, committed by Chris Wilson

lib/gt: Make use of dummyload library to create recursive batch

A hanging batch is nothing more than a spinning batch that never gets
stopped, so re-use the routines implemented in dummyload.c.

v2: Let caller decide spin loop size
v3: Only use loose loops for hangs (Chris)
v4: No requires
v5: Free the spinner
v6: Chamelium exists.

Signed-off-by: Antonio Argenziano <antonio.argenziano@intel.com> #v3
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Antonio Argenziano <antonio.argenziano@intel.com>
parent caea9c5b
......@@ -25,6 +25,7 @@
#include <signal.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include "intel_reg.h"
#include "drmtest.h"
......
......@@ -31,7 +31,6 @@
#include <intel_bufmgr.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stddef.h>
#include <sys/time.h>
#include <i915/gem_submission.h>
......@@ -39,12 +38,6 @@
extern drm_intel_bo **trash_bos;
extern int num_trash_bos;
/* signal interrupt helpers */
#define MSEC_PER_SEC (1000)
#define USEC_PER_SEC (1000*MSEC_PER_SEC)
#define NSEC_PER_SEC (1000*USEC_PER_SEC)
/* signal interrupt helpers */
#define gettid() syscall(__NR_gettid)
#define sigev_notify_thread_id _sigev_un._tid
......@@ -295,104 +288,6 @@ void igt_set_module_param_int(const char *name, int val);
int igt_terminate_process(int sig, const char *comm);
void igt_lsof(const char *dpath);
/*
* This list data structure is a verbatim copy from wayland-util.h from the
* Wayland project; except that wl_ prefix has been removed.
*/
struct igt_list {
struct igt_list *prev;
struct igt_list *next;
};
#define __IGT_INIT_LIST(name) { &(name), &(name) }
#define IGT_LIST(name) struct igt_list name = __IGT_INIT_LIST(name)
static inline void igt_list_init(struct igt_list *list)
{
list->prev = list;
list->next = list;
}
static inline void __igt_list_add(struct igt_list *list,
struct igt_list *prev,
struct igt_list *next)
{
next->prev = list;
list->next = next;
list->prev = prev;
prev->next = list;
}
static inline void igt_list_add(struct igt_list *elm, struct igt_list *list)
{
__igt_list_add(elm, list, list->next);
}
static inline void igt_list_add_tail(struct igt_list *elm,
struct igt_list *list)
{
__igt_list_add(elm, list->prev, list);
}
static inline void __igt_list_del(struct igt_list *prev, struct igt_list *next)
{
next->prev = prev;
prev->next = next;
}
static inline void igt_list_del(struct igt_list *elm)
{
__igt_list_del(elm->prev, elm->next);
}
static inline void igt_list_move(struct igt_list *elm, struct igt_list *list)
{
igt_list_del(elm);
igt_list_add(elm, list);
}
static inline void igt_list_move_tail(struct igt_list *elm,
struct igt_list *list)
{
igt_list_del(elm);
igt_list_add_tail(elm, list);
}
static inline bool igt_list_empty(const struct igt_list *list)
{
return list->next == list;
}
#define container_of(ptr, sample, member) \
(typeof(sample))((char *)(ptr) - offsetof(typeof(*sample), member))
#define igt_list_first_entry(head, pos, member) \
container_of((head)->next, (pos), member)
#define igt_list_last_entry(head, pos, member) \
container_of((head)->prev, (pos), member)
#define igt_list_next_entry(pos, member) \
container_of((pos)->member.next, (pos), member)
#define igt_list_prev_entry(pos, member) \
container_of((pos)->member.prev, (pos), member)
#define igt_list_for_each(pos, head, member) \
for (pos = igt_list_first_entry(head, pos, member); \
&pos->member != (head); \
pos = igt_list_next_entry(pos, member))
#define igt_list_for_each_reverse(pos, head, member) \
for (pos = igt_list_last_entry(head, pos, member); \
&pos->member != (head); \
pos = igt_list_prev_entry(pos, member))
#define igt_list_for_each_safe(pos, tmp, head, member) \
for (pos = igt_list_first_entry(head, pos, member), \
tmp = igt_list_next_entry(pos, member); \
&pos->member != (head); \
pos = tmp, tmp = igt_list_next_entry(pos, member))
#define igt_hweight(x) \
__builtin_choose_expr(sizeof(x) == 8, \
__builtin_popcountll(x), \
......
......@@ -38,8 +38,9 @@
#include "igt_chamelium.h"
#include "igt_core.h"
#include "igt_aux.h"
#include "igt_kms.h"
#include "igt_frame.h"
#include "igt_list.h"
#include "igt_kms.h"
#include "igt_rc.h"
/**
......
......@@ -1005,4 +1005,8 @@ void igt_kmsg(const char *format, ...);
#define READ_ONCE(x) (*(volatile typeof(x) *)(&(x)))
#define MSEC_PER_SEC (1000)
#define USEC_PER_SEC (1000*MSEC_PER_SEC)
#define NSEC_PER_SEC (1000*USEC_PER_SEC)
#endif /* IGT_CORE_H */
......@@ -28,7 +28,8 @@
#include <stdint.h>
#include <time.h>
#include "igt_aux.h"
#include "igt_core.h"
#include "igt_list.h"
#include "i915_drm.h"
typedef struct igt_spin {
......
......@@ -40,6 +40,7 @@
#include "ioctl_wrappers.h"
#include "intel_reg.h"
#include "intel_chipset.h"
#include "igt_dummyload.h"
/**
* SECTION:igt_gt
......@@ -265,20 +266,11 @@ static bool has_ctx_exec(int fd, unsigned ring, uint32_t ctx)
* Returns:
* Structure with helper internal state for igt_post_hang_ring().
*/
igt_hang_t igt_hang_ctx(int fd,
uint32_t ctx,
int ring,
unsigned flags,
uint64_t *offset)
igt_hang_t igt_hang_ctx(int fd, uint32_t ctx, int ring, unsigned flags)
{
struct drm_i915_gem_relocation_entry reloc;
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_object2 exec;
struct drm_i915_gem_context_param param;
uint32_t b[16];
igt_spin_t *spin;
unsigned ban;
unsigned len;
int gen;
igt_require_hang_ring(fd, ring);
......@@ -302,52 +294,12 @@ igt_hang_t igt_hang_ctx(int fd,
if ((flags & HANG_ALLOW_BAN) == 0)
context_set_ban(fd, ctx, 0);
memset(&reloc, 0, sizeof(reloc));
memset(&exec, 0, sizeof(exec));
memset(&execbuf, 0, sizeof(execbuf));
exec.handle = gem_create(fd, 4096);
exec.relocation_count = 1;
exec.relocs_ptr = to_user_pointer(&reloc);
memset(b, 0xc5, sizeof(b));
len = 0;
gen = intel_gen(intel_get_drm_devid(fd));
if (gen >= 8) {
b[len++] = MI_BATCH_BUFFER_START | 1 << 8 | 1;
b[len++] = 0;
b[len++] = 0;
} else if (gen >= 6) {
b[len++] = MI_BATCH_BUFFER_START | 1 << 8;
b[len++] = 0;
} else {
b[len++] = MI_BATCH_BUFFER_START | 2 << 6;
b[len] = 0;
if (gen < 4) {
b[len] |= 1;
reloc.delta = 1;
}
len++;
}
b[len++] = MI_BATCH_BUFFER_END;
b[len] = MI_NOOP;
gem_write(fd, exec.handle, 0, b, sizeof(b));
reloc.offset = sizeof(uint32_t);
reloc.target_handle = exec.handle;
reloc.read_domains = I915_GEM_DOMAIN_COMMAND;
execbuf.buffers_ptr = to_user_pointer(&exec);
execbuf.buffer_count = 1;
execbuf.flags = ring;
i915_execbuffer2_set_context_id(execbuf, ctx);
gem_execbuf(fd, &execbuf);
if (offset)
*offset = exec.offset;
spin = __igt_spin_batch_new(fd,
.ctx = ctx,
.engine = ring,
.flags = IGT_SPIN_NO_PREEMPTION);
return (igt_hang_t){ exec.handle, ctx, ban, flags };
return (igt_hang_t){ spin, ctx, ban, flags };
}
/**
......@@ -364,7 +316,7 @@ igt_hang_t igt_hang_ctx(int fd,
*/
igt_hang_t igt_hang_ring(int fd, int ring)
{
return igt_hang_ctx(fd, 0, ring, 0, NULL);
return igt_hang_ctx(fd, 0, ring, 0);
}
/**
......@@ -377,11 +329,11 @@ igt_hang_t igt_hang_ring(int fd, int ring)
*/
void igt_post_hang_ring(int fd, igt_hang_t arg)
{
if (arg.handle == 0)
if (!arg.spin)
return;
gem_sync(fd, arg.handle);
gem_close(fd, arg.handle);
gem_sync(fd, arg.spin->handle); /* Wait until it hangs */
igt_spin_batch_free(fd, arg.spin);
context_set_ban(fd, arg.ctx, arg.ban);
......
......@@ -25,6 +25,7 @@
#define IGT_GT_H
#include "igt_debugfs.h"
#include "igt_dummyload.h"
#include "igt_core.h"
#include "i915_drm.h"
......@@ -32,7 +33,7 @@
void igt_require_hang_ring(int fd, int ring);
typedef struct igt_hang {
unsigned handle;
igt_spin_t *spin;
unsigned ctx;
unsigned ban;
unsigned flags;
......@@ -43,11 +44,7 @@ void igt_disallow_hang(int fd, igt_hang_t arg);
#define HANG_POISON 0xc5c5c5c5
igt_hang_t igt_hang_ctx(int fd,
uint32_t ctx,
int ring,
unsigned flags,
uint64_t *offset);
igt_hang_t igt_hang_ctx(int fd, uint32_t ctx, int ring, unsigned flags);
#define HANG_ALLOW_BAN 1
#define HANG_ALLOW_CAPTURE 2
......
......@@ -24,9 +24,10 @@
#include <signal.h>
#include <errno.h>
#include "igt_aux.h"
#include "igt_core.h"
#include "igt_sysfs.h"
#include "igt_kmod.h"
#include "igt_sysfs.h"
/**
* SECTION:igt_kmod
......
......@@ -26,7 +26,7 @@
#include <libkmod.h>
#include "igt_aux.h"
#include "igt_list.h"
bool igt_kmod_is_loaded(const char *mod_name);
void igt_kmod_list_loaded(void);
......
/*
* Copyright © 2015 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*/
#ifndef IGT_LIST_H
#define IGT_LIST_H
#include <stdbool.h>
#include <stddef.h>
/*
 * Embedded doubly-linked list, adopted from wl_list in Wayland's
 * wayland-util.h (with the wl_ prefix dropped).  Nodes are linked into
 * a circular chain whose sentinel doubles as the list head, so the
 * empty-list and end-of-iteration checks are plain pointer compares.
 */
struct igt_list {
	struct igt_list *prev;	/* towards the head */
	struct igt_list *next;	/* towards the tail */
};

/* Static initialiser: a lone sentinel pointing at itself. */
#define __IGT_INIT_LIST(name) { &(name), &(name) }
#define IGT_LIST(name) struct igt_list name = __IGT_INIT_LIST(name)

/* Runtime counterpart of IGT_LIST(): make @list an empty list. */
static inline void igt_list_init(struct igt_list *list)
{
	list->next = list;
	list->prev = list;
}

/* Splice @elem between the two adjacent nodes @before and @after. */
static inline void __igt_list_add(struct igt_list *elem,
				  struct igt_list *before,
				  struct igt_list *after)
{
	elem->prev = before;
	elem->next = after;
	before->next = elem;
	after->prev = elem;
}

/* Insert @elm immediately after the head, i.e. at the front of @list. */
static inline void igt_list_add(struct igt_list *elm, struct igt_list *list)
{
	__igt_list_add(elm, list, list->next);
}

/* Insert @elm immediately before the head, i.e. at the back of @list. */
static inline void igt_list_add_tail(struct igt_list *elm,
				     struct igt_list *list)
{
	__igt_list_add(elm, list->prev, list);
}

/* Link @before directly to @after, dropping whatever sat between them. */
static inline void __igt_list_del(struct igt_list *before,
				  struct igt_list *after)
{
	before->next = after;
	after->prev = before;
}

/* Unlink @elm from its list; @elm's own pointers are left stale. */
static inline void igt_list_del(struct igt_list *elm)
{
	__igt_list_del(elm->prev, elm->next);
}

/* Unlink @elm and re-insert it at the front of @list. */
static inline void igt_list_move(struct igt_list *elm, struct igt_list *list)
{
	igt_list_del(elm);
	igt_list_add(elm, list);
}

/* Unlink @elm and re-insert it at the back of @list. */
static inline void igt_list_move_tail(struct igt_list *elm,
				      struct igt_list *list)
{
	igt_list_del(elm);
	igt_list_add_tail(elm, list);
}

/* An empty list is a sentinel that still points at itself. */
static inline bool igt_list_empty(const struct igt_list *list)
{
	return list->next == list;
}

/* Recover the containing structure from an embedded list node. */
#define container_of(ptr, sample, member) \
	(typeof(sample))((char *)(ptr) - offsetof(typeof(*sample), member))

#define igt_list_first_entry(head, pos, member) \
	container_of((head)->next, (pos), member)
#define igt_list_last_entry(head, pos, member) \
	container_of((head)->prev, (pos), member)
#define igt_list_next_entry(pos, member) \
	container_of((pos)->member.next, (pos), member)
#define igt_list_prev_entry(pos, member) \
	container_of((pos)->member.prev, (pos), member)

/* Walk head-to-tail; @pos must not be unlinked while iterating. */
#define igt_list_for_each(pos, head, member) \
	for (pos = igt_list_first_entry(head, pos, member); \
	     &pos->member != (head); \
	     pos = igt_list_next_entry(pos, member))

/* Walk tail-to-head. */
#define igt_list_for_each_reverse(pos, head, member) \
	for (pos = igt_list_last_entry(head, pos, member); \
	     &pos->member != (head); \
	     pos = igt_list_prev_entry(pos, member))

/* Walk head-to-tail; safe against unlinking @pos thanks to the @tmp lookahead. */
#define igt_list_for_each_safe(pos, tmp, head, member) \
	for (pos = igt_list_first_entry(head, pos, member), \
	     tmp = igt_list_next_entry(pos, member); \
	     &pos->member != (head); \
	     pos = tmp, tmp = igt_list_next_entry(pos, member))
#endif /* IGT_LIST_H */
......@@ -199,9 +199,12 @@ static void test_error_state_capture(unsigned ring_id,
clear_error_state();
hang = igt_hang_ctx(device, 0, ring_id, HANG_ALLOW_CAPTURE, &offset);
batch = gem_mmap__cpu(device, hang.handle, 0, 4096, PROT_READ);
gem_set_domain(device, hang.handle, I915_GEM_DOMAIN_CPU, 0);
hang = igt_hang_ctx(device, 0, ring_id, HANG_ALLOW_CAPTURE);
offset = hang.spin->obj[1].offset;
batch = gem_mmap__cpu(device, hang.spin->handle, 0, 4096, PROT_READ);
gem_set_domain(device, hang.spin->handle, I915_GEM_DOMAIN_CPU, 0);
igt_post_hang_ring(device, hang);
check_error_state(ring_name, offset, batch);
......
......@@ -946,30 +946,19 @@ static igt_hang_t rcs_hang(void)
static igt_hang_t all_hang(void)
{
uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_object2 obj;
igt_hang_t hang;
igt_hang_t hang = igt_hang_ring(fd, I915_EXEC_RENDER);
unsigned engine;
memset(&obj, 0, sizeof(obj));
obj.handle = gem_create(fd, 4096);
gem_write(fd, obj.handle, 0, &bbe, sizeof(&bbe));
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&obj);
execbuf.buffer_count = 1;
for_each_physical_engine(fd, engine) {
hang = igt_hang_ring(fd, engine);
struct drm_i915_gem_execbuffer2 eb = hang.spin->execbuf;
execbuf.flags = engine;
__gem_execbuf(fd, &execbuf);
if (engine == I915_EXEC_RENDER)
continue;
gem_close(fd, hang.handle);
eb.flags = engine;
__gem_execbuf(fd, &eb);
}
hang.handle = obj.handle;
return hang;
}
......
......@@ -423,7 +423,7 @@ static void preempt(int fd, unsigned ring, unsigned flags)
gem_context_set_priority(fd, ctx[HI], MAX_PRIO);
if (flags & HANG_LP)
hang = igt_hang_ctx(fd, ctx[LO], ring, 0, NULL);
hang = igt_hang_ctx(fd, ctx[LO], ring, 0);
for (int n = 0; n < ARRAY_SIZE(spin); n++) {
if (flags & NEW_CTX) {
......@@ -730,7 +730,7 @@ static void preemptive_hang(int fd, unsigned ring)
gem_context_destroy(fd, ctx[LO]);
}
hang = igt_hang_ctx(fd, ctx[HI], ring, 0, NULL);
hang = igt_hang_ctx(fd, ctx[HI], ring, 0);
igt_post_hang_ring(fd, hang);
for (int n = 0; n < ARRAY_SIZE(spin); n++) {
......
......@@ -399,7 +399,7 @@ test_hang(int fd)
last_pattern = next_pattern;
next_pattern = (next_pattern + 1) % ARRAY_SIZE(patterns);
} while (gem_bo_busy(fd, hang.handle));
} while (gem_bo_busy(fd, hang.spin->handle));
igt_post_hang_ring(fd, hang);
......
......@@ -164,7 +164,7 @@ static void inject_hang(int fd, uint32_t ctx,
clock_gettime(CLOCK_MONOTONIC, &ts_injected);
hang = igt_hang_ctx(fd, ctx, e->exec_id | e->flags, flags & BAN, NULL);
hang = igt_hang_ctx(fd, ctx, e->exec_id | e->flags, flags & BAN);
if ((flags & ASYNC) == 0)
igt_post_hang_ring(fd, hang);
}
......@@ -546,7 +546,7 @@ static void test_close_pending_fork(const struct intel_execution_engine *e,
assert_reset_status(fd, fd, 0, RS_NO_ERROR);
hang = igt_hang_ctx(fd, 0, e->exec_id | e->flags, 0, NULL);
hang = igt_hang_ctx(fd, 0, e->exec_id | e->flags, 0);
sleep(1);
/* Avoid helpers as we need to kill the child
......
......@@ -208,7 +208,7 @@ static void hang(int fd, uint64_t alloc)
gem_execbuf(fd, &execbuf);
}
gem_close(fd, igt_hang_ring(fd, 0).handle);
gem_close(fd, igt_hang_ring(fd, 0).spin->handle);
for (int i = 0; i <= count; i++)
gem_madvise(fd, obj[i].handle, I915_MADV_DONTNEED);
munmap(obj, obj_size);
......
......@@ -359,11 +359,12 @@ static void test_evict_hang(int fd)
execbuf.buffers_ptr = to_user_pointer(&object);
execbuf.buffer_count = 1;
hang = igt_hang_ctx(fd, 0, 0, 0, (uint64_t *)&expected);
object.offset = expected;
object.flags = EXEC_OBJECT_PINNED;
hang = igt_hang_ctx(fd, 0, 0, 0);
expected = hang.spin->obj[1].offset;
/* Replace the hung batch with ourselves, forcing an eviction */
object.offset = expected;
object.flags = EXEC_OBJECT_PINNED;
gem_execbuf(fd, &execbuf);
igt_assert_eq_u64(object.offset, expected);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment