Commit 897d21d1 authored by Dominik Grzegorzek's avatar Dominik Grzegorzek Committed by Chris Wilson

i915/gem_caching: Remove libdrm dependency

Thanks to intel_bb we are able to remove libdrm from gem_caching.
Signed-off-by: Dominik Grzegorzek <dominik.grzegorzek@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
parent 41e2dcb9
...@@ -39,7 +39,6 @@ ...@@ -39,7 +39,6 @@
#include "i915/gem.h" #include "i915/gem.h"
#include "igt.h" #include "igt.h"
#include "intel_bufmgr.h"
IGT_TEST_DESCRIPTION("Test snoop consistency when touching partial" IGT_TEST_DESCRIPTION("Test snoop consistency when touching partial"
" cachelines."); " cachelines.");
...@@ -49,50 +48,83 @@ IGT_TEST_DESCRIPTION("Test snoop consistency when touching partial" ...@@ -49,50 +48,83 @@ IGT_TEST_DESCRIPTION("Test snoop consistency when touching partial"
* *
*/ */
/* Size of the scratch/staging objects; four whole pages. */
#define BO_SIZE (4*4096)
#define PAGE_SIZE 4096

/*
 * Per-test context, threaded through every helper instead of the
 * file-scope globals the libdrm version used.
 */
typedef struct {
	int fd;			/* DRM device file descriptor */
	uint32_t devid;		/* PCI device id, for generation checks */
	struct buf_ops *bops;	/* intel_buf allocator backing the test */
} data_t;
/**
 * __try_gtt_map_first - map a buffer, preferring the GTT aperture
 * @data: test context (provides the DRM fd)
 * @buf: buffer object to map
 * @write_enable: non-zero to request a writable mapping
 *
 * Attempts a GTT mmap first; if that is unavailable (e.g. no mappable
 * aperture on newer platforms), falls back to a device-coherent mapping.
 * The fallback helper asserts on failure, so the returned pointer is
 * always valid.  Caller must munmap() with buf->surface[0].size.
 */
static void *__try_gtt_map_first(data_t *data, struct intel_buf *buf,
				 int write_enable)
{
	uint8_t *ptr;
	unsigned int prot = PROT_READ | (write_enable ? PROT_WRITE : 0);

	ptr = __gem_mmap__gtt(data->fd, buf->handle, buf->surface[0].size, prot);
	if (!ptr) {
		ptr = gem_mmap__device_coherent(data->fd, buf->handle,
						0, buf->surface[0].size, prot);
	}
	return ptr;
}
static void static void
copy_bo(drm_intel_bo *src, drm_intel_bo *dst) copy_bo(struct intel_bb *ibb, struct intel_buf *src, struct intel_buf *dst)
{ {
BLIT_COPY_BATCH_START(0); bool has_64b_reloc;
OUT_BATCH((3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */ has_64b_reloc = ibb->gen >= 8;
4096);
OUT_BATCH(0 << 16 | 0); intel_bb_out(ibb,
OUT_BATCH((BO_SIZE/4096) << 16 | 1024); XY_SRC_COPY_BLT_CMD |
OUT_RELOC_FENCED(dst, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0); XY_SRC_COPY_BLT_WRITE_ALPHA |
OUT_BATCH(0 << 16 | 0); XY_SRC_COPY_BLT_WRITE_RGB |
OUT_BATCH(4096); (6 + 2 * has_64b_reloc));
OUT_RELOC_FENCED(src, I915_GEM_DOMAIN_RENDER, 0, 0);
ADVANCE_BATCH(); intel_bb_out(ibb, (3 << 24) | /* 32 bits */
(0xcc << 16) | /* copy ROP */
intel_batchbuffer_flush(batch); 4096);
intel_bb_out(ibb, 0 << 16 | 0);
intel_bb_out(ibb, (BO_SIZE/4096) << 16 | 1024);
intel_bb_emit_reloc_fenced(ibb, dst->handle,
I915_GEM_DOMAIN_RENDER,
I915_GEM_DOMAIN_RENDER,
0, 0x0);
intel_bb_out(ibb, 0 << 16 | 0);
intel_bb_out(ibb, 4096);
intel_bb_emit_reloc_fenced(ibb, src->handle,
I915_GEM_DOMAIN_RENDER,
0, 0, 0x0);
/* Mark the end of the buffer. */
intel_bb_out(ibb, MI_BATCH_BUFFER_END);
intel_bb_ptr_align(ibb, 8);
intel_bb_flush_blit(ibb);
intel_bb_sync(ibb);
} }
static void static void
blt_bo_fill(drm_intel_bo *tmp_bo, drm_intel_bo *bo, uint8_t val) blt_bo_fill(data_t *data, struct intel_bb *ibb, struct intel_buf *tmp_bo,
struct intel_buf *bo, uint8_t val)
{ {
uint8_t *gtt_ptr; uint8_t *gtt_ptr;
int i; int i;
do_or_die(drm_intel_gem_bo_map_gtt(tmp_bo)); gtt_ptr = __try_gtt_map_first(data, tmp_bo, 1);
gtt_ptr = tmp_bo->virtual;
for (i = 0; i < BO_SIZE; i++) for (i = 0; i < BO_SIZE; i++)
gtt_ptr[i] = val; gtt_ptr[i] = val;
drm_intel_gem_bo_unmap_gtt(tmp_bo); munmap(gtt_ptr, tmp_bo->surface[0].size);
igt_drop_caches_set(fd, DROP_BOUND); igt_drop_caches_set(data->fd, DROP_BOUND);
copy_bo(tmp_bo, bo); copy_bo(ibb, tmp_bo, bo);
} }
#define MAX_BLT_SIZE 128 #define MAX_BLT_SIZE 128
...@@ -102,6 +134,9 @@ blt_bo_fill(drm_intel_bo *tmp_bo, drm_intel_bo *bo, uint8_t val) ...@@ -102,6 +134,9 @@ blt_bo_fill(drm_intel_bo *tmp_bo, drm_intel_bo *bo, uint8_t val)
#define TEST_BOTH (TEST_READ | TEST_WRITE) #define TEST_BOTH (TEST_READ | TEST_WRITE)
igt_main igt_main
{ {
struct intel_buf *scratch_buf, *staging_buf;
struct intel_bb *ibb;
data_t data = {0, };
unsigned flags = TEST_BOTH; unsigned flags = TEST_BOTH;
int i, j; int i, j;
uint8_t *cpu_ptr; uint8_t *cpu_ptr;
...@@ -110,29 +145,30 @@ igt_main ...@@ -110,29 +145,30 @@ igt_main
igt_fixture { igt_fixture {
srandom(0xdeadbeef); srandom(0xdeadbeef);
fd = drm_open_driver(DRIVER_INTEL); data.fd = drm_open_driver(DRIVER_INTEL);
igt_require_gem(fd); igt_require_gem(data.fd);
gem_require_blitter(fd); gem_require_blitter(data.fd);
gem_require_caching(fd); gem_require_caching(data.fd);
devid = intel_get_drm_devid(fd); data.devid = intel_get_drm_devid(data.fd);
if (IS_GEN2(devid)) /* chipset only handles cached -> uncached */ if (IS_GEN2(data.devid)) /* chipset only handles cached -> uncached */
flags &= ~TEST_READ; flags &= ~TEST_READ;
if (IS_BROADWATER(devid) || IS_CRESTLINE(devid)) { if (IS_BROADWATER(data.devid) || IS_CRESTLINE(data.devid)) {
/* chipset is completely fubar */ /* chipset is completely fubar */
igt_info("coherency broken on i965g/gm\n"); igt_info("coherency broken on i965g/gm\n");
flags = 0; flags = 0;
} }
data.bops = buf_ops_create(data.fd);
ibb = intel_bb_create(data.fd, PAGE_SIZE);
bufmgr = drm_intel_bufmgr_gem_init(fd, 4096); scratch_buf = intel_buf_create(data.bops, BO_SIZE/4, 1,
batch = intel_batchbuffer_alloc(bufmgr, devid); 32, 0, I915_TILING_NONE, 0);
/* overallocate the buffers we're actually using because */ gem_set_caching(data.fd, scratch_buf->handle, 1);
scratch_bo = drm_intel_bo_alloc(bufmgr, "scratch bo", BO_SIZE, 4096);
gem_set_caching(fd, scratch_bo->handle, 1);
staging_bo = drm_intel_bo_alloc(bufmgr, "staging bo", BO_SIZE, 4096); staging_buf = intel_buf_create(data.bops, BO_SIZE/4, 1,
32, 0, I915_TILING_NONE, 0);
} }
igt_subtest("reads") { igt_subtest("reads") {
...@@ -144,19 +180,20 @@ igt_main ...@@ -144,19 +180,20 @@ igt_main
uint8_t val0 = i; uint8_t val0 = i;
int start, len; int start, len;
blt_bo_fill(staging_bo, scratch_bo, i); blt_bo_fill(&data, ibb, staging_buf, scratch_buf, i);
start = random() % BO_SIZE; start = random() % BO_SIZE;
len = random() % (BO_SIZE-start) + 1; len = random() % (BO_SIZE-start) + 1;
drm_intel_bo_map(scratch_bo, false); cpu_ptr = gem_mmap__cpu(data.fd, scratch_buf->handle,
cpu_ptr = scratch_bo->virtual; 0, scratch_buf->surface[0].size,
PROT_READ);
for (j = 0; j < len; j++) { for (j = 0; j < len; j++) {
igt_assert_f(cpu_ptr[j] == val0, igt_assert_f(cpu_ptr[j] == val0,
"mismatch at %i, got: %i, expected: %i\n", "mismatch at %i, got: %i, expected: %i\n",
j, cpu_ptr[j], val0); j, cpu_ptr[j], val0);
} }
drm_intel_bo_unmap(scratch_bo); munmap(cpu_ptr, scratch_buf->surface[0].size);
igt_progress("partial reads test: ", i, ROUNDS); igt_progress("partial reads test: ", i, ROUNDS);
} }
...@@ -171,20 +208,20 @@ igt_main ...@@ -171,20 +208,20 @@ igt_main
uint8_t val0 = i, val1; uint8_t val0 = i, val1;
int start, len; int start, len;
blt_bo_fill(staging_bo, scratch_bo, val0); blt_bo_fill(&data, ibb, staging_buf, scratch_buf, val0);
start = random() % BO_SIZE; start = random() % BO_SIZE;
len = random() % (BO_SIZE-start) + 1; len = random() % (BO_SIZE-start) + 1;
val1 = val0 + 63; val1 = val0 + 63;
drm_intel_bo_map(scratch_bo, true); cpu_ptr = gem_mmap__cpu(data.fd, scratch_buf->handle,
cpu_ptr = scratch_bo->virtual; 0, scratch_buf->surface[0].size,
memset(cpu_ptr + start, val1, len); PROT_READ | PROT_WRITE);
drm_intel_bo_unmap(scratch_bo);
copy_bo(scratch_bo, staging_bo); memset(cpu_ptr + start, val1, len);
do_or_die(drm_intel_gem_bo_map_gtt(staging_bo)); munmap(cpu_ptr, scratch_buf->surface[0].size);
gtt_ptr = staging_bo->virtual; copy_bo(ibb, scratch_buf, staging_buf);
gtt_ptr = __try_gtt_map_first(&data, staging_buf, 0);
for (j = 0; j < start; j++) { for (j = 0; j < start; j++) {
igt_assert_f(gtt_ptr[j] == val0, igt_assert_f(gtt_ptr[j] == val0,
...@@ -201,7 +238,7 @@ igt_main ...@@ -201,7 +238,7 @@ igt_main
"mismatch at %i, partial=[%d+%d] got: %i, expected: %i\n", "mismatch at %i, partial=[%d+%d] got: %i, expected: %i\n",
j, start, len, gtt_ptr[j], val0); j, start, len, gtt_ptr[j], val0);
} }
drm_intel_gem_bo_unmap_gtt(staging_bo); munmap(gtt_ptr, staging_buf->surface[0].size);
igt_progress("partial writes test: ", i, ROUNDS); igt_progress("partial writes test: ", i, ROUNDS);
} }
...@@ -216,38 +253,41 @@ igt_main ...@@ -216,38 +253,41 @@ igt_main
uint8_t val0 = i, val1, val2; uint8_t val0 = i, val1, val2;
int start, len; int start, len;
blt_bo_fill(staging_bo, scratch_bo, val0); blt_bo_fill(&data, ibb, staging_buf, scratch_buf, val0);
/* partial read */ /* partial read */
start = random() % BO_SIZE; start = random() % BO_SIZE;
len = random() % (BO_SIZE-start) + 1; len = random() % (BO_SIZE-start) + 1;
do_or_die(drm_intel_bo_map(scratch_bo, false)); cpu_ptr = gem_mmap__cpu(data.fd, scratch_buf->handle,
cpu_ptr = scratch_bo->virtual; 0, scratch_buf->surface[0].size,
PROT_READ);
for (j = 0; j < len; j++) { for (j = 0; j < len; j++) {
igt_assert_f(cpu_ptr[j] == val0, igt_assert_f(cpu_ptr[j] == val0,
"mismatch in read at %i, got: %i, expected: %i\n", "mismatch in read at %i, got: %i, expected: %i\n",
j, cpu_ptr[j], val0); j, cpu_ptr[j], val0);
} }
drm_intel_bo_unmap(scratch_bo); munmap(cpu_ptr, scratch_buf->surface[0].size);
/* Change contents through gtt to make the pread cachelines /* Change contents through gtt to make the pread cachelines
* stale. */ * stale. */
val1 = i + 17; val1 = i + 17;
blt_bo_fill(staging_bo, scratch_bo, val1); blt_bo_fill(&data, ibb, staging_buf, scratch_buf, val1);
/* partial write */ /* partial write */
start = random() % BO_SIZE; start = random() % BO_SIZE;
len = random() % (BO_SIZE-start) + 1; len = random() % (BO_SIZE-start) + 1;
val2 = i + 63; val2 = i + 63;
do_or_die(drm_intel_bo_map(scratch_bo, false)); cpu_ptr = gem_mmap__cpu(data.fd, scratch_buf->handle,
cpu_ptr = scratch_bo->virtual; 0, scratch_buf->surface[0].size,
PROT_READ);
memset(cpu_ptr + start, val2, len); memset(cpu_ptr + start, val2, len);
copy_bo(scratch_bo, staging_bo); copy_bo(ibb, scratch_buf, staging_buf);
do_or_die(drm_intel_gem_bo_map_gtt(staging_bo)); gtt_ptr = __try_gtt_map_first(&data, staging_buf, 0);
gtt_ptr = staging_bo->virtual;
for (j = 0; j < start; j++) { for (j = 0; j < start; j++) {
igt_assert_f(gtt_ptr[j] == val1, igt_assert_f(gtt_ptr[j] == val1,
...@@ -264,16 +304,18 @@ igt_main ...@@ -264,16 +304,18 @@ igt_main
"mismatch at %i, partial=[%d+%d] got: %i, expected: %i\n", "mismatch at %i, partial=[%d+%d] got: %i, expected: %i\n",
j, start, len, gtt_ptr[j], val1); j, start, len, gtt_ptr[j], val1);
} }
drm_intel_gem_bo_unmap_gtt(staging_bo); munmap(cpu_ptr, scratch_buf->handle);
drm_intel_bo_unmap(scratch_bo); munmap(gtt_ptr, staging_buf->handle);
igt_progress("partial read/writes test: ", i, ROUNDS); igt_progress("partial read/writes test: ", i, ROUNDS);
} }
} }
igt_fixture { igt_fixture {
drm_intel_bufmgr_destroy(bufmgr); intel_bb_destroy(ibb);
intel_buf_destroy(scratch_buf);
close(fd); intel_buf_destroy(staging_buf);
buf_ops_destroy(data.bops);
close(data.fd);
} }
} }
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment