Commit f52e7ec7 authored by Ville Syrjälä

Replace __gem_mmap__{cpu,gtt,wc}() + igt_assert() with gem_mmap__{cpu,gtt,wc}()

gem_mmap__{cpu,gtt,wc}() already have the assert built in, so replace
__gem_mmap__{cpu,gtt,wc}() + igt_assert() with them.
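
For reference, the asserting gem_mmap__* wrappers in IGT's lib/ioctl_wrappers.c
are just thin shims over the __gem_mmap__* variants; roughly, for the gtt
flavour (a sketch of the shape, not the library code verbatim):

void *gem_mmap__gtt(int fd, uint32_t handle, uint64_t size, unsigned prot)
{
	/* non-asserting variant, returns NULL on failure */
	void *ptr = __gem_mmap__gtt(fd, handle, size, prot);

	/* the check the callers below used to open-code as igt_assert(ptr) */
	igt_assert(ptr);

	return ptr;
}

so after the conversion callers no longer need an open-coded NULL check.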

Mostly done with Coccinelle, with some manual help:
@@
identifier I;
expression E1, E2, E3, E4, E5, E6;
@@
(
-  I = __gem_mmap__gtt(E1, E2, E3, E4);
+  I = gem_mmap__gtt(E1, E2, E3, E4);
...
-  igt_assert(I);
|
-  I = __gem_mmap__cpu(E1, E2, E3, E4, E5);
+  I = gem_mmap__cpu(E1, E2, E3, E4, E5);
...
-  igt_assert(I);
|
-  I = __gem_mmap__wc(E1, E2, E3, E4, E5);
+  I = gem_mmap__wc(E1, E2, E3, E4, E5);
...
-  igt_assert(I);
)
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Stochastically-reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
parent b8a77dd6
@@ -177,8 +177,7 @@ static int run(int object, int batch, int count, int reps)
fd = drm_open_driver(DRIVER_INTEL);
handle = gem_create(fd, size);
- buf = __gem_mmap__cpu(fd, handle, 0, size, PROT_WRITE);
- igt_assert(buf);
+ buf = gem_mmap__cpu(fd, handle, 0, size, PROT_WRITE);
gen = intel_gen(intel_get_drm_devid(fd));
has_64bit_reloc = gen >= 8;
@@ -115,18 +115,15 @@ int main(int argc, char **argv)
handle = gem_create(fd, OBJECT_SIZE);
switch (map) {
case CPU:
- ptr = __gem_mmap__cpu(fd, handle, 0, OBJECT_SIZE, PROT_WRITE);
- igt_assert(ptr);
+ ptr = gem_mmap__cpu(fd, handle, 0, OBJECT_SIZE, PROT_WRITE);
gem_set_domain(fd, handle, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
break;
case GTT:
- ptr = __gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_WRITE);
- igt_assert(ptr);
+ ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_WRITE);
gem_set_domain(fd, handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
break;
case WC:
- ptr = __gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_WRITE);
- igt_assert(ptr);
+ ptr = gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_WRITE);
gem_set_domain(fd, handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
break;
default:
@@ -252,8 +252,7 @@ static void draw_rect_mmap_cpu(int fd, struct buf_data *buf, struct rect *rect,
if (tiling != I915_TILING_NONE)
igt_require(intel_gen(intel_get_drm_devid(fd)) >= 5);
- ptr = __gem_mmap__cpu(fd, buf->handle, 0, buf->size, 0);
- igt_assert(ptr);
+ ptr = gem_mmap__cpu(fd, buf->handle, 0, buf->size, 0);
switch (tiling) {
case I915_TILING_NONE:
@@ -281,8 +280,7 @@ static void draw_rect_mmap_gtt(int fd, struct buf_data *buf, struct rect *rect,
gem_set_domain(fd, buf->handle, I915_GEM_DOMAIN_GTT,
I915_GEM_DOMAIN_GTT);
- ptr = __gem_mmap__gtt(fd, buf->handle, buf->size, PROT_READ | PROT_WRITE);
- igt_assert(ptr);
+ ptr = gem_mmap__gtt(fd, buf->handle, buf->size, PROT_READ | PROT_WRITE);
draw_rect_ptr_linear(ptr, buf->stride, rect, color, buf->bpp);
@@ -303,9 +301,8 @@ static void draw_rect_mmap_wc(int fd, struct buf_data *buf, struct rect *rect,
if (tiling != I915_TILING_NONE)
igt_require(intel_gen(intel_get_drm_devid(fd)) >= 5);
- ptr = __gem_mmap__wc(fd, buf->handle, 0, buf->size,
+ ptr = gem_mmap__wc(fd, buf->handle, 0, buf->size,
PROT_READ | PROT_WRITE);
- igt_assert(ptr);
switch (tiling) {
case I915_TILING_NONE:
@@ -745,12 +745,11 @@ static void create_cairo_surface__blit(int fd, struct igt_fb *fb)
I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
/* Setup cairo context */
- blit->linear.map = __gem_mmap__cpu(fd,
+ blit->linear.map = gem_mmap__cpu(fd,
blit->linear.handle,
0,
blit->linear.size,
PROT_READ | PROT_WRITE);
- igt_assert(blit->linear.map);
cairo_format = drm_format_to_cairo(fb->drm_format);
fb->cairo_surface =
@@ -774,8 +773,7 @@ static void destroy_cairo_surface__gtt(void *arg)
static void create_cairo_surface__gtt(int fd, struct igt_fb *fb)
{
- void *ptr = __gem_mmap__gtt(fd, fb->gem_handle, fb->size, PROT_READ | PROT_WRITE);
- igt_assert(ptr);
+ void *ptr = gem_mmap__gtt(fd, fb->gem_handle, fb->size, PROT_READ | PROT_WRITE);
fb->cairo_surface =
cairo_image_surface_create_for_data(ptr,
@@ -56,20 +56,18 @@ test_fence_restore(int fd, bool tiled2untiled, bool hibernate)
handle_tiled = gem_create(fd, OBJECT_SIZE);
/* Access the buffer objects in the order we want to have the laid out. */
- ptr1 = __gem_mmap__gtt(fd, handle1, OBJECT_SIZE, PROT_READ | PROT_WRITE);
- igt_assert(ptr1);
+ ptr1 = gem_mmap__gtt(fd, handle1, OBJECT_SIZE, PROT_READ | PROT_WRITE);
for (i = 0; i < OBJECT_SIZE/sizeof(uint32_t); i++)
ptr1[i] = i;
- ptr_tiled = __gem_mmap__gtt(fd, handle_tiled, OBJECT_SIZE, PROT_READ | PROT_WRITE);
- igt_assert(ptr_tiled);
+ ptr_tiled = gem_mmap__gtt(fd, handle_tiled, OBJECT_SIZE,
+ PROT_READ | PROT_WRITE);
if (tiled2untiled)
gem_set_tiling(fd, handle_tiled, I915_TILING_X, 2048);
for (i = 0; i < OBJECT_SIZE/sizeof(uint32_t); i++)
ptr_tiled[i] = i;
- ptr2 = __gem_mmap__gtt(fd, handle2, OBJECT_SIZE, PROT_READ | PROT_WRITE);
- igt_assert(ptr2);
+ ptr2 = gem_mmap__gtt(fd, handle2, OBJECT_SIZE, PROT_READ | PROT_WRITE);
for (i = 0; i < OBJECT_SIZE/sizeof(uint32_t); i++)
ptr2[i] = i;
@@ -471,10 +471,8 @@ static void cpu_copy_bo(drm_intel_bo *dst, drm_intel_bo *src)
gem_set_domain(fd, src->handle, I915_GEM_DOMAIN_CPU, 0);
gem_set_domain(fd, dst->handle, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
- s = __gem_mmap__cpu(fd, src->handle, 0, size, PROT_READ);
- igt_assert(s);
- d = __gem_mmap__cpu(fd, dst->handle, 0, size, PROT_WRITE);
- igt_assert(d);
+ s = gem_mmap__cpu(fd, src->handle, 0, size, PROT_READ);
+ d = gem_mmap__cpu(fd, dst->handle, 0, size, PROT_WRITE);
memcpy(d, s, size);
@@ -490,10 +488,8 @@ static void gtt_copy_bo(drm_intel_bo *dst, drm_intel_bo *src)
gem_set_domain(fd, src->handle, I915_GEM_DOMAIN_GTT, 0);
gem_set_domain(fd, dst->handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
- s = __gem_mmap__gtt(fd, src->handle, size, PROT_READ);
- igt_assert(s);
- d = __gem_mmap__gtt(fd, dst->handle, size, PROT_WRITE);
- igt_assert(d);
+ s = gem_mmap__gtt(fd, src->handle, size, PROT_READ);
+ d = gem_mmap__gtt(fd, dst->handle, size, PROT_WRITE);
memcpy(d, s, size);
@@ -509,10 +505,8 @@ static void wc_copy_bo(drm_intel_bo *dst, drm_intel_bo *src)
gem_set_domain(fd, src->handle, I915_GEM_DOMAIN_GTT, 0);
gem_set_domain(fd, dst->handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
- s = __gem_mmap__wc(fd, src->handle, 0, size, PROT_READ);
- igt_assert(s);
- d = __gem_mmap__wc(fd, dst->handle, 0, size, PROT_WRITE);
- igt_assert(d);
+ s = gem_mmap__wc(fd, src->handle, 0, size, PROT_READ);
+ d = gem_mmap__wc(fd, dst->handle, 0, size, PROT_WRITE);
memcpy(d, s, size);
@@ -115,9 +115,8 @@ static void run_on_ring(int fd, unsigned ring_id, const char *ring_name)
igt_progress(buf, split, BATCH_SIZE/8 - 1);
handle_new = gem_create(fd, BATCH_SIZE);
- batch_ptr = __gem_mmap__cpu(fd, handle_new, 0, BATCH_SIZE,
+ batch_ptr = gem_mmap__cpu(fd, handle_new, 0, BATCH_SIZE,
PROT_READ | PROT_WRITE);
- igt_assert(batch_ptr);
batch_ptr[split*2] = MI_BATCH_BUFFER_END;
for (i = split*2 + 2; i < BATCH_SIZE/8; i++)
@@ -132,9 +132,8 @@ copy(int fd, uint32_t dst, uint32_t src, uint32_t *all_bo, int n_bo)
static void clear(int fd, uint32_t handle, int size)
{
- void *base = __gem_mmap__cpu(fd, handle, 0, size, PROT_READ | PROT_WRITE);
+ void *base = gem_mmap__cpu(fd, handle, 0, size, PROT_READ | PROT_WRITE);
- igt_assert(base != NULL);
memset(base, 0, size);
munmap(base, size);
}
@@ -200,9 +200,8 @@ static void run(int object_size)
handle_relocs = gem_create(fd, 4096);
gem_write(fd, handle_relocs, 0, reloc, sizeof(reloc));
- gtt_relocs = __gem_mmap__gtt(fd, handle_relocs, 4096,
- PROT_READ | PROT_WRITE);
- igt_assert(gtt_relocs);
+ gtt_relocs = gem_mmap__gtt(fd, handle_relocs, 4096,
+ PROT_READ | PROT_WRITE);
exec[2].handle = handle;
if (intel_gen(devid) >= 8)
@@ -123,8 +123,7 @@ igt_simple_main
size = ALIGN(sizeof(mem_reloc), 4096);
reloc_handle = gem_create(fd, size);
- reloc = __gem_mmap__cpu(fd, reloc_handle, 0, size, PROT_READ | PROT_WRITE);
- igt_assert(reloc);
+ reloc = gem_mmap__cpu(fd, reloc_handle, 0, size, PROT_READ | PROT_WRITE);
for (n = 0; n < MAX_NUM_RELOC; n++) {
reloc[n].offset = 1024;
reloc[n].read_domains = I915_GEM_DOMAIN_RENDER;
@@ -67,15 +67,14 @@ bo_create (int fd, int tiling)
handle = gem_create(fd, OBJECT_SIZE);
/* dirty cpu caches a bit ... */
- ptr = __gem_mmap__cpu(fd, handle, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
- igt_assert(ptr);
+ ptr = gem_mmap__cpu(fd, handle, 0, OBJECT_SIZE,
+ PROT_READ | PROT_WRITE);
memset(ptr, 0, OBJECT_SIZE);
munmap(ptr, OBJECT_SIZE);
gem_set_tiling(fd, handle, tiling, 1024);
- ptr = __gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
- igt_assert(ptr);
+ ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
gem_set_domain(fd, handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
gem_close(fd, handle);
@@ -68,8 +68,7 @@ static void performance(void)
for (n = 0; n < count; n++) {
handle[n] = gem_create(fd, OBJECT_SIZE);
- ptr[n] = __gem_mmap__gtt(fd, handle[n], OBJECT_SIZE, PROT_READ | PROT_WRITE);
- igt_assert(ptr[n]);
+ ptr[n] = gem_mmap__gtt(fd, handle[n], OBJECT_SIZE, PROT_READ | PROT_WRITE);
}
gettimeofday(&start, NULL);
@@ -176,8 +175,7 @@ static void thread_performance(unsigned mask)
for (n = 0; n < count; n++) {
handle[n] = gem_create(fd, OBJECT_SIZE);
- ptr[n] = __gem_mmap__gtt(fd, handle[n], OBJECT_SIZE, PROT_READ | PROT_WRITE);
- igt_assert(ptr[n]);
+ ptr[n] = gem_mmap__gtt(fd, handle[n], OBJECT_SIZE, PROT_READ | PROT_WRITE);
if (mask & READ) {
readers[n].id = n;
@@ -257,8 +255,7 @@ static void *no_contention(void *closure)
int n;
for (n = 0; n < t->loops; n++) {
- uint32_t *ptr = __gem_mmap__gtt(t->fd, t->handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
- igt_assert(ptr);
+ uint32_t *ptr = gem_mmap__gtt(t->fd, t->handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
memset(ptr + (rand() % 256) * 4096 / 4, 0, 4096);
munmap(ptr, OBJECT_SIZE);
}
@@ -272,8 +269,7 @@ static void *wc_mmap(void *closure)
int n;
for (n = 0; n < t->loops; n++) {
- uint32_t *ptr = __gem_mmap__wc(t->fd, t->handle, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
- igt_assert(ptr);
+ uint32_t *ptr = gem_mmap__wc(t->fd, t->handle, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
memset(ptr + (rand() % 256) * 4096 / 4, 0, 4096);
munmap(ptr, OBJECT_SIZE);
}
@@ -59,8 +59,7 @@ create_bo(int fd)
handle = gem_create(fd, OBJ_SIZE);
/* Fill the BO with dwords starting at start_val */
- data = __gem_mmap__gtt(fd, handle, OBJ_SIZE, PROT_READ | PROT_WRITE);
- igt_assert(data);
+ data = gem_mmap__gtt(fd, handle, OBJ_SIZE, PROT_READ | PROT_WRITE);
for (i = 0; i < OBJ_SIZE/4; i++)
data[i] = i;
munmap(data, OBJ_SIZE);
@@ -83,8 +82,7 @@ igt_simple_main
handle = gem_create(fd, OBJ_SIZE);
/* touch one page */
- ptr = __gem_mmap__gtt(fd, handle, OBJ_SIZE, PROT_READ | PROT_WRITE);
- igt_assert(ptr);
+ ptr = gem_mmap__gtt(fd, handle, OBJ_SIZE, PROT_READ | PROT_WRITE);
*ptr = 0xdeadbeef;
munmap(ptr, OBJ_SIZE);
@@ -140,12 +140,11 @@ static void run(data_t *data, int child)
* set-to-gtt-domain within the fault handler.
*/
if (write) {
- ptr = __gem_mmap__gtt(data->fd, handle, size, PROT_READ | PROT_WRITE);
- igt_assert(ptr);
+ ptr = gem_mmap__gtt(data->fd, handle, size,
+ PROT_READ | PROT_WRITE);
ptr[rand() % (size / 4)] = canary;
} else {
- ptr = __gem_mmap__gtt(data->fd, handle, size, PROT_READ);
- igt_assert(ptr);
+ ptr = gem_mmap__gtt(data->fd, handle, size, PROT_READ);
}
x = ptr[rand() % (size / 4)];
munmap(ptr, size);
@@ -89,12 +89,10 @@ int main(int argc, char **argv)
I915_GEM_DOMAIN_CPU);
{
- uint32_t *base = __gem_mmap__cpu(fd, handle, 0, size, PROT_READ | PROT_WRITE);
+ uint32_t *base = gem_mmap__cpu(fd, handle, 0, size, PROT_READ | PROT_WRITE);
volatile uint32_t *ptr = base;
int x = 0;
- igt_assert(base);
for (i = 0; i < size/sizeof(*ptr); i++)
x += ptr[i];
@@ -106,12 +104,12 @@ int main(int argc, char **argv)
/* mmap read */
gettimeofday(&start, NULL);
for (loop = 0; loop < 1000; loop++) {
- base = __gem_mmap__cpu(fd, handle, 0, size, PROT_READ | PROT_WRITE);
+ base = gem_mmap__cpu(fd, handle, 0,
+ size,
+ PROT_READ | PROT_WRITE);
ptr = base;
x = 0;
- igt_assert(base);
for (i = 0; i < size/sizeof(*ptr); i++)
x += ptr[i];
@@ -127,11 +125,11 @@ int main(int argc, char **argv)
/* mmap write */
gettimeofday(&start, NULL);
for (loop = 0; loop < 1000; loop++) {
- base = __gem_mmap__cpu(fd, handle, 0, size, PROT_READ | PROT_WRITE);
+ base = gem_mmap__cpu(fd, handle, 0,
+ size,
+ PROT_READ | PROT_WRITE);
ptr = base;
- igt_assert(base);
for (i = 0; i < size/sizeof(*ptr); i++)
ptr[i] = i;
@@ -143,8 +141,9 @@ int main(int argc, char **argv)
gettimeofday(&start, NULL);
for (loop = 0; loop < 1000; loop++) {
- base = __gem_mmap__cpu(fd, handle, 0, size, PROT_READ | PROT_WRITE);
- igt_assert(base);
+ base = gem_mmap__cpu(fd, handle, 0,
+ size,
+ PROT_READ | PROT_WRITE);
memset(base, 0, size);
munmap(base, size);
}
@@ -153,8 +152,8 @@ int main(int argc, char **argv)
size/1024, elapsed(&start, &end, loop));
gettimeofday(&start, NULL);
- base = __gem_mmap__cpu(fd, handle, 0, size, PROT_READ | PROT_WRITE);
- igt_assert(base);
+ base = gem_mmap__cpu(fd, handle, 0, size,
+ PROT_READ | PROT_WRITE);
for (loop = 0; loop < 1000; loop++)
memset(base, 0, size);
munmap(base, size);
@@ -182,12 +181,10 @@ int main(int argc, char **argv)
/* prefault into gtt */
{
- uint32_t *base = __gem_mmap__gtt(fd, handle, size, PROT_READ | PROT_WRITE);
+ uint32_t *base = gem_mmap__gtt(fd, handle, size, PROT_READ | PROT_WRITE);
volatile uint32_t *ptr = base;
int x = 0;
- igt_assert(base);
for (i = 0; i < size/sizeof(*ptr); i++)
x += ptr[i];
@@ -199,12 +196,10 @@ int main(int argc, char **argv)
/* mmap read */
gettimeofday(&start, NULL);
for (loop = 0; loop < 1000; loop++) {
- uint32_t *base = __gem_mmap__gtt(fd, handle, size, PROT_READ | PROT_WRITE);
+ uint32_t *base = gem_mmap__gtt(fd, handle, size, PROT_READ | PROT_WRITE);
volatile uint32_t *ptr = base;
int x = 0;
- igt_assert(base);
for (i = 0; i < size/sizeof(*ptr); i++)
x += ptr[i];
@@ -220,12 +215,10 @@ int main(int argc, char **argv)
if (gem_mmap__has_wc(fd)) {
gettimeofday(&start, NULL);
for (loop = 0; loop < 1000; loop++) {
- uint32_t *base = __gem_mmap__wc(fd, handle, 0, size, PROT_READ | PROT_WRITE);
+ uint32_t *base = gem_mmap__wc(fd, handle, 0, size, PROT_READ | PROT_WRITE);
volatile uint32_t *ptr = base;
int x = 0;
- igt_assert(base);
for (i = 0; i < size/sizeof(*ptr); i++)
x += ptr[i];
@@ -243,11 +236,9 @@ int main(int argc, char **argv)
/* mmap write */
gettimeofday(&start, NULL);
for (loop = 0; loop < 1000; loop++) {
- uint32_t *base = __gem_mmap__gtt(fd, handle, size, PROT_READ | PROT_WRITE);
+ uint32_t *base = gem_mmap__gtt(fd, handle, size, PROT_READ | PROT_WRITE);
volatile uint32_t *ptr = base;
- igt_assert(base);
for (i = 0; i < size/sizeof(*ptr); i++)
ptr[i] = i;
@@ -261,11 +252,9 @@ int main(int argc, char **argv)
/* mmap write */
gettimeofday(&start, NULL);
for (loop = 0; loop < 1000; loop++) {
- uint32_t *base = __gem_mmap__wc(fd, handle, 0, size, PROT_READ | PROT_WRITE);
+ uint32_t *base = gem_mmap__wc(fd, handle, 0, size, PROT_READ | PROT_WRITE);
volatile uint32_t *ptr = base;
- igt_assert(base);
for (i = 0; i < size/sizeof(*ptr); i++)
ptr[i] = i;
@@ -279,7 +268,7 @@ int main(int argc, char **argv)
/* mmap clear */
gettimeofday(&start, NULL);
for (loop = 0; loop < 1000; loop++) {
- uint32_t *base = __gem_mmap__gtt(fd, handle, size, PROT_READ | PROT_WRITE);
+ uint32_t *base = gem_mmap__gtt(fd, handle, size, PROT_READ | PROT_WRITE);
memset(base, 0, size);
munmap(base, size);
}
@@ -291,7 +280,7 @@ int main(int argc, char **argv)
/* mmap clear */
gettimeofday(&start, NULL);
for (loop = 0; loop < 1000; loop++) {
- uint32_t *base = __gem_mmap__wc(fd, handle, 0, size, PROT_READ | PROT_WRITE);
+ uint32_t *base = gem_mmap__wc(fd, handle, 0, size, PROT_READ | PROT_WRITE);
memset(base, 0, size);
munmap(base, size);
}
@@ -301,7 +290,7 @@ int main(int argc, char **argv)
}
gettimeofday(&start, NULL);{
- uint32_t *base = __gem_mmap__gtt(fd, handle, size, PROT_READ | PROT_WRITE);
+ uint32_t *base = gem_mmap__gtt(fd, handle, size, PROT_READ | PROT_WRITE);
for (loop = 0; loop < 1000; loop++)
memset(base, 0, size);
munmap(base, size);
@@ -311,7 +300,7 @@ int main(int argc, char **argv)
if (gem_mmap__has_wc(fd)) {
gettimeofday(&start, NULL);{
- uint32_t *base = __gem_mmap__wc(fd, handle, 0, size, PROT_READ | PROT_WRITE);
+ uint32_t *base = gem_mmap__wc(fd, handle, 0, size, PROT_READ | PROT_WRITE);
for (loop = 0; loop < 1000; loop++)
memset(base, 0, size);
munmap(base, size);
@@ -323,7 +312,7 @@ int main(int argc, char **argv)
/* mmap read */
gettimeofday(&start, NULL);
for (loop = 0; loop < 1000; loop++) {
- uint32_t *base = __gem_mmap__gtt(fd, handle, size, PROT_READ | PROT_WRITE);
+ uint32_t *base = gem_mmap__gtt(fd, handle, size, PROT_READ | PROT_WRITE);
volatile uint32_t *ptr = base;
int x = 0;
@@ -64,8 +64,8 @@ test_large_object(int fd)
igt_assert(ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0);
/* prefault */
- ptr = __gem_mmap__gtt(fd, create.handle, obj_size, PROT_WRITE | PROT_READ);
- igt_assert(ptr);
+ ptr = gem_mmap__gtt(fd, create.handle, obj_size,
+ PROT_WRITE | PROT_READ);
*ptr = 0;
gem_write(fd, create.handle, 0, data, obj_size);
@@ -166,8 +166,7 @@ igt_main
igt_subtest("short-mmap") {
igt_assert(OBJECT_SIZE > 4096);
arg.handle = gem_create(fd, OBJECT_SIZE);
- addr = __gem_mmap__cpu(fd, arg.handle, 0, 4096, PROT_WRITE);
- igt_assert(addr);
+ addr = gem_mmap__cpu(fd, arg.handle, 0, 4096, PROT_WRITE);
memset(addr, 0, 4096);
munmap(addr, 4096);
gem_close(fd, arg.handle);
@@ -56,8 +56,7 @@ mmap_bo(int fd, uint32_t handle)
{
void *ptr;
- ptr = __gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
- igt_assert(ptr);
+ ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
return ptr;
}
@@ -179,8 +178,7 @@ test_read_write(int fd, enum test_read_write order)
handle = gem_create(fd, OBJECT_SIZE);
- ptr = __gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
- igt_assert(ptr);
+ ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
if (order == READ_BEFORE_WRITE) {
val = *(uint32_t *)ptr;
@@ -203,11 +201,9 @@ test_read_write2(int fd, enum test_read_write order)
handle = gem_create(fd, OBJECT_SIZE);
- r = __gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ);
- igt_assert(r);
+ r = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ);
- w = __gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
- igt_assert(w);
+ w = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
if (order == READ_BEFORE_WRITE) {
val = *(uint32_t *)r;
@@ -291,13 +287,11 @@ test_huge_bo(int fd, int huge, int tiling)
bo = gem_create(fd, PAGE_SIZE);
if (tiling)
gem_set_tiling(fd, bo, tiling, pitch);
- linear_pattern = __gem_mmap__gtt(fd, bo, PAGE_SIZE,
+ linear_pattern = gem_mmap__gtt(fd, bo, PAGE_SIZE,
PROT_READ | PROT_WRITE);
- igt_assert(linear_pattern);
for (i = 0; i < PAGE_SIZE; i++)
linear_pattern[i] = i;
- tiled_pattern = __gem_mmap__cpu(fd, bo, 0, PAGE_SIZE, PROT_READ);
- igt_assert(tiled_pattern);
+ tiled_pattern = gem_mmap__cpu(fd, bo, 0, PAGE_SIZE, PROT_READ);
gem_set_domain(fd, bo, I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT, 0);
gem_close(fd, bo);
@@ -307,7 +301,7 @@ test_huge_bo(int fd, int huge, int tiling)
gem_set_tiling(fd, bo, tiling, pitch);
/* Initialise first/last page through CPU mmap */
- ptr = __gem_mmap__cpu(fd, bo, 0, size, PROT_READ | PROT_WRITE);
+ ptr = gem_mmap__cpu(fd, bo, 0, size, PROT_READ | PROT_WRITE);
memcpy(ptr, tiled_pattern, PAGE_SIZE);
memcpy(ptr + last_offset, tiled_pattern, PAGE_SIZE);
munmap(ptr, size);
@@ -440,11 +434,9 @@ test_write_cpu_read_gtt(int fd)
handle = gem_create(fd, OBJECT_SIZE);
- dst = __gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ);
- igt_assert(dst);
+ dst = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ);
- src = __gem_mmap__cpu(fd, handle, 0, OBJECT_SIZE, PROT_WRITE);
- igt_assert(src);
+ src = gem_mmap__cpu(fd, handle, 0, OBJECT_SIZE, PROT_WRITE);
gem_close(fd, handle);
@@ -60,8 +60,7 @@ create_and_map_bo(int fd)
handle = gem_create(fd, OBJECT_SIZE);
- ptr = __gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
- igt_assert(ptr);
+ ptr = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_READ | PROT_WRITE);
/* touch it to force it into the gtt */
*ptr = 0;
@@ -62,8 +62,7 @@ mmap_bo(int fd, uint32_t handle)
{
void *ptr;
- ptr = __gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
- igt_assert(ptr);
+ ptr = gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
return ptr;
}
@@ -183,11 +182,9 @@ test_read_write2(int fd, enum test_read_write order)
handle = gem_create(fd, OBJECT_SIZE);
set_domain(fd, handle);
- r = __gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_READ);
- igt_assert(r);
+ r = gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_READ);
- w = __gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
- igt_assert(w);
+ w = gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_READ | PROT_WRITE);
if (order == READ_BEFORE_WRITE) {
val = *(uint32_t *)r;
@@ -288,11 +285,9 @@ test_write_cpu_read_wc(int fd, int force_domain)
handle = gem_create(fd, OBJECT_SIZE);
- dst = __gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_READ);
- igt_assert(dst);
+ dst = gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_READ);
- src = __gem_mmap__cpu(fd, handle, 0, OBJECT_SIZE, PROT_WRITE);
- igt_assert(src);
+ src = gem_mmap__cpu(fd, handle, 0, OBJECT_SIZE, PROT_WRITE);
memset(src, 0xaa, OBJECT_SIZE);
if (force_domain)
@@ -315,11 +310,9 @@ test_write_gtt_read_wc(int fd)
handle = gem_create(fd, OBJECT_SIZE);
set_domain(fd, handle);
- dst = __gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_READ);
- igt_assert(dst);
+ dst = gem_mmap__wc(fd, handle, 0, OBJECT_SIZE, PROT_READ);
- src = __gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_WRITE);
- igt_assert(src);
+ src = gem_mmap__gtt(fd, handle, OBJECT_SIZE, PROT_WRITE);
memset(src, 0xaa, OBJECT_SIZE);
igt_assert(memcmp(dst, src, OBJECT_SIZE) == 0);
@@ -225,10 +225,8 @@ static void do_test(int fd, bool faulting_reloc)
relocs_bo_handle[i] = gem_create(fd, 4096);
gem_write(fd, relocs_bo_handle[i], 0, reloc, sizeof(reloc));
- gtt_relocs_ptr[i] = __gem_mmap__gtt(fd, relocs_bo_handle[i], 4096,
- PROT_READ | PROT_WRITE);
- igt_assert(gtt_relocs_ptr[i]);
+ gtt_relocs_ptr[i] = gem_mmap__gtt(fd, relocs_bo_handle[i], 4096,
+ PROT_READ | PROT_WRITE);
}
/* repeat must be smaller than 4096/small_pitch */
@@ -118,8 +118,7 @@ static void test_big_gtt(int fd, int scale)
handle = gem_create(fd, size);
gem_set_domain(fd, handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
- ptr = __gem_mmap__wc(fd, handle, 0, size, PROT_READ);
- igt_assert(ptr);
+ ptr = gem_mmap__wc(fd, handle, 0, size, PROT_READ);
for (offset = 0; offset < size; offset += 4096) {
int suboffset = (offset >> 12) % (4096 / sizeof(offset) - 1) * sizeof(offset);
@@ -112,10 +112,8 @@ static void as_gtt_mmap(int fd, uint32_t src, uint32_t dst, void *buf, int len,
uint32_t *src_ptr, *dst_ptr;
BUILD_EXEC;
- src_ptr = __gem_mmap__gtt(fd, src, OBJECT_SIZE, PROT_WRITE);
- igt_assert(src_ptr);
- dst_ptr = __gem_mmap__gtt(fd, dst, OBJECT_SIZE, PROT_READ);
- igt_assert(dst_ptr);
+ src_ptr = gem_mmap__gtt(fd, src, OBJECT_SIZE, PROT_WRITE);
+ dst_ptr = gem_mmap__gtt(fd, dst, OBJECT_SIZE, PROT_READ);
while (loops--) {
gem_set_domain(fd, src,
@@ -139,10 +137,8 @@ static void as_cpu_mmap(int fd, uint32_t src, uint32_t dst, void *buf, int len,
uint32_t *src_ptr, *dst_ptr;
BUILD_EXEC;
- src_ptr = __gem_mmap__cpu(fd, src, 0, OBJECT_SIZE, PROT_WRITE);
- igt_assert(src_ptr);
- dst_ptr = __gem_mmap__cpu(fd, dst, 0, OBJECT_SIZE, PROT_READ);
- igt_assert(dst_ptr);
+ src_ptr = gem_mmap__cpu(fd, src, 0, OBJECT_SIZE, PROT_WRITE);
+ dst_ptr = gem_mmap__cpu(fd, dst, 0, OBJECT_SIZE, PROT_READ);
while (loops--) {
gem_set_domain(fd, src,
@@ -186,10 +182,8 @@ static void test_as_gtt_mmap(int fd, uint32_t src, uint32_t dst, int len)
int i;
BUILD_EXEC;
- src_ptr = __gem_mmap__gtt(fd, src, OBJECT_SIZE, PROT_WRITE);
- igt_assert(src_ptr);
- dst_ptr = __gem_mmap__gtt(fd, dst, OBJECT_SIZE, PROT_READ);
- igt_assert(dst_ptr);
+ src_ptr = gem_mmap__gtt(fd, src, OBJECT_SIZE, PROT_WRITE);
+ dst_ptr = gem_mmap__gtt(fd, dst, OBJECT_SIZE, PROT_READ);
gem_set_domain(fd, src, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
for (i = 0; i < len/4; i++)
@@ -212,10 +206,8 @@ static void test_as_cpu_mmap(int fd, uint32_t src, uint32_t dst, int len)
int i;
BUILD_EXEC;
- src_ptr = __gem_mmap__cpu(fd, src, 0, OBJECT_SIZE, PROT_WRITE);
- igt_assert(src_ptr);
- dst_ptr = __gem_mmap__cpu(fd, dst, 0, OBJECT_SIZE, PROT_READ);
- igt_assert(dst_ptr);
+ src_ptr = gem_mmap__cpu(fd, src, 0, OBJECT_SIZE, PROT_WRITE);