/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "igt.h"

#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <errno.h>

#include <drm.h>

#include "igt_device.h"
#ifndef I915_PARAM_CMD_PARSER_VERSION
#define I915_PARAM_CMD_PARSER_VERSION       28
#endif

/* Register addresses exercised by the parser tests (gen7 MMIO offsets) */
#define DERRMR 0x44050
#define OASTATUS2 0x2368
#define OACONTROL 0x2360
#define SO_WRITE_OFFSET_0 0x5280

/* Haswell command-streamer general-purpose registers (8 bytes apart) */
#define HSW_CS_GPR(n) (0x2600 + 8*(n))
#define HSW_CS_GPR0 HSW_CS_GPR(0)
#define HSW_CS_GPR1 HSW_CS_GPR(1)

/* To help craft commands known to be invalid across all engines */
#define INSTR_CLIENT_SHIFT	29
#define   INSTR_INVALID_CLIENT  0x7

/* MI command opcodes used below */
#define MI_LOAD_REGISTER_REG (0x2a << 23)
#define MI_STORE_REGISTER_MEM (0x24 << 23)
#define MI_ARB_ON_OFF (0x8 << 23)
#define MI_DISPLAY_FLIP ((0x14 << 23) | 1)

#define GFX_OP_PIPE_CONTROL	((0x3<<29)|(0x3<<27)|(0x2<<24)|2)
#define   PIPE_CONTROL_QW_WRITE	(1<<14)
#define   PIPE_CONTROL_LRI_POST_OP (1<<23)

/* Command parser version reported by the kernel; set once in the fixture. */
static int parser_version;

63 64 65 66 67 68 69 70 71 72 73 74 75 76
static int command_parser_version(int fd)
{
	int version = -1;
	drm_i915_getparam_t gp;

	gp.param = I915_PARAM_CMD_PARSER_VERSION;
	gp.value = &version;

	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		return version;

	return -1;
}

77 78
static uint64_t __exec_batch_patched(int fd, uint32_t cmd_bo, uint32_t *cmds,
				     int size, int patch_offset)
79 80
{
	struct drm_i915_gem_execbuffer2 execbuf;
81
	struct drm_i915_gem_exec_object2 obj[2];
82 83 84 85 86 87 88
	struct drm_i915_gem_relocation_entry reloc[1];

	uint32_t target_bo = gem_create(fd, 4096);
	uint64_t actual_value = 0;

	gem_write(fd, cmd_bo, 0, cmds, size);

89 90 91 92 93
	memset(obj, 0, sizeof(obj));
	obj[0].handle = target_bo;
	obj[1].handle = cmd_bo;

	memset(reloc, 0, sizeof(reloc));
94
	reloc[0].offset = patch_offset;
95
	reloc[0].target_handle = obj[0].handle;
96
	reloc[0].delta = 0;
97 98
	reloc[0].read_domains = I915_GEM_DOMAIN_COMMAND;
	reloc[0].write_domain = I915_GEM_DOMAIN_COMMAND;
99
	obj[1].relocs_ptr = to_user_pointer(reloc);
100 101 102
	obj[1].relocation_count = 1;

	memset(&execbuf, 0, sizeof(execbuf));
103
	execbuf.buffers_ptr = to_user_pointer(obj);
104 105 106 107 108 109 110 111 112 113
	execbuf.buffer_count = 2;
	execbuf.batch_len = size;
	execbuf.flags = I915_EXEC_RENDER;

	gem_execbuf(fd, &execbuf);
	gem_sync(fd, cmd_bo);

	gem_read(fd,target_bo, 0, &actual_value, sizeof(actual_value));

	gem_close(fd, target_bo);
114 115 116 117 118 119 120 121 122 123 124

	return actual_value;
}

/* Run a patched batch and assert the value written equals @expected_value. */
static void exec_batch_patched(int fd, uint32_t cmd_bo, uint32_t *cmds,
			       int size, int patch_offset,
			       uint64_t expected_value)
{
	igt_assert_eq(__exec_batch_patched(fd, cmd_bo, cmds,
					   size, patch_offset),
		      expected_value);
}

127 128
static int __exec_batch(int fd, uint32_t cmd_bo, uint32_t *cmds,
			int size, int ring)
129 130
{
	struct drm_i915_gem_execbuffer2 execbuf;
131
	struct drm_i915_gem_exec_object2 obj[1];
132 133 134

	gem_write(fd, cmd_bo, 0, cmds, size);

135 136
	memset(obj, 0, sizeof(obj));
	obj[0].handle = cmd_bo;
137

138
	memset(&execbuf, 0, sizeof(execbuf));
139
	execbuf.buffers_ptr = to_user_pointer(obj);
140 141 142 143
	execbuf.buffer_count = 1;
	execbuf.batch_len = size;
	execbuf.flags = ring;

144
	return __gem_execbuf(fd, &execbuf);
145
}
/* Submit @cmds on @ring and assert execbuf returned @expected (0 or -errno). */
#define exec_batch(fd, bo, cmds, sz, ring, expected) \
	igt_assert_eq(__exec_batch(fd, bo, cmds, sz, ring), expected)

149 150
static void exec_split_batch(int fd, uint32_t *cmds,
			     int size, int ring, int expected_ret)
151 152
{
	struct drm_i915_gem_execbuffer2 execbuf;
153
	struct drm_i915_gem_exec_object2 obj[1];
154 155
	uint32_t cmd_bo;
	uint32_t noop[1024] = { 0 };
156 157
	const int alloc_size = 4096 * 2;
	const int actual_start_offset = 4096-sizeof(uint32_t);
158

159
	/* Allocate and fill a 2-page batch with noops */
160
	cmd_bo = gem_create(fd, alloc_size);
161 162 163
	gem_write(fd, cmd_bo, 0, noop, sizeof(noop));
	gem_write(fd, cmd_bo, 4096, noop, sizeof(noop));

164 165 166 167
	/* Write the provided commands such that the first dword
	 * of the command buffer is the last dword of the first
	 * page (i.e. the command is split across the two pages).
	 */
168
	gem_write(fd, cmd_bo, actual_start_offset, cmds, size);
169

170 171
	memset(obj, 0, sizeof(obj));
	obj[0].handle = cmd_bo;
172

173
	memset(&execbuf, 0, sizeof(execbuf));
174
	execbuf.buffers_ptr = to_user_pointer(obj);
175
	execbuf.buffer_count = 1;
176 177 178 179 180
	/* NB: We want batch_start_offset and batch_len to point to the block
	 * of the actual commands (i.e. at the last dword of the first page),
	 * but have to adjust both the start offset and length to meet the
	 * kernel driver's requirements on the alignment of those fields.
	 */
181 182 183 184
	execbuf.batch_start_offset = actual_start_offset & ~0x7;
	execbuf.batch_len =
		ALIGN(size + actual_start_offset - execbuf.batch_start_offset,
		      0x8);
185 186
	execbuf.flags = ring;

187
	igt_assert_eq(__gem_execbuf(fd, &execbuf), expected_ret);
188 189 190 191 192

	gem_sync(fd, cmd_bo);
	gem_close(fd, cmd_bo);
}

193 194 195 196 197
static void exec_batch_chained(int fd, uint32_t cmd_bo, uint32_t *cmds,
			       int size, int patch_offset,
			       uint64_t expected_value)
{
	struct drm_i915_gem_execbuffer2 execbuf;
198 199
	struct drm_i915_gem_exec_object2 obj[3];
	struct drm_i915_gem_relocation_entry reloc[1];
200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219
	struct drm_i915_gem_relocation_entry first_level_reloc;

	uint32_t target_bo = gem_create(fd, 4096);
	uint32_t first_level_bo = gem_create(fd, 4096);
	uint64_t actual_value = 0;

	static uint32_t first_level_cmds[] = {
		MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965,
		0,
		MI_BATCH_BUFFER_END,
		0,
	};

	if (IS_HASWELL(intel_get_drm_devid(fd)))
		first_level_cmds[0] |= MI_BATCH_NON_SECURE_HSW;

	gem_write(fd, first_level_bo, 0,
		  first_level_cmds, sizeof(first_level_cmds));
	gem_write(fd, cmd_bo, 0, cmds, size);

220 221 222 223 224 225 226 227 228 229 230 231
	memset(obj, 0, sizeof(obj));
	obj[0].handle = target_bo;
	obj[1].handle = cmd_bo;
	obj[2].handle = first_level_bo;

	memset(reloc, 0, sizeof(reloc));
	reloc[0].offset = patch_offset;
	reloc[0].delta = 0;
	reloc[0].target_handle = target_bo;
	reloc[0].read_domains = I915_GEM_DOMAIN_COMMAND;
	reloc[0].write_domain = I915_GEM_DOMAIN_COMMAND;
	obj[1].relocation_count = 1;
232
	obj[1].relocs_ptr = to_user_pointer(&reloc);
233

234
	memset(&first_level_reloc, 0, sizeof(first_level_reloc));
235 236 237
	first_level_reloc.offset = 4;
	first_level_reloc.delta = 0;
	first_level_reloc.target_handle = cmd_bo;
238
	first_level_reloc.read_domains = I915_GEM_DOMAIN_COMMAND;
239
	first_level_reloc.write_domain = 0;
240
	obj[2].relocation_count = 1;
241
	obj[2].relocs_ptr = to_user_pointer(&first_level_reloc);
242 243

	memset(&execbuf, 0, sizeof(execbuf));
244
	execbuf.buffers_ptr = to_user_pointer(obj);
245 246 247 248 249 250 251 252 253 254 255 256 257 258
	execbuf.buffer_count = 3;
	execbuf.batch_len = sizeof(first_level_cmds);
	execbuf.flags = I915_EXEC_RENDER;

	gem_execbuf(fd, &execbuf);
	gem_sync(fd, cmd_bo);

	gem_read(fd,target_bo, 0, &actual_value, sizeof(actual_value));
	igt_assert_eq(expected_value, actual_value);

	gem_close(fd, first_level_bo);
	gem_close(fd, target_bo);
}

/* Be careful to take into account what register bits we can store and read
 * from...
 */
struct test_lri {
	const char *name; /* register name for debug info */
	uint32_t reg; /* address to test */
	uint32_t read_mask; /* ignore things like HW status bits */
	uint32_t init_val; /* initial identifiable value to set without LRI */
	uint32_t test_val; /* value to attempt loading via LRI command */
	bool whitelisted; /* expect to become NOOP / fail if not whitelisted */
	int min_ver; /* required command parser version to test */
};

static void
273
test_lri(int fd, uint32_t handle, struct test_lri *test)
274 275 276
{
	uint32_t lri[] = {
		MI_LOAD_REGISTER_IMM,
277 278
		test->reg,
		test->test_val,
279 280
		MI_BATCH_BUFFER_END,
	};
281 282 283 284 285 286 287
	int bad_lri_errno = parser_version >= 8 ? 0 : -EINVAL;
	int expected_errno = test->whitelisted ? 0 : bad_lri_errno;
	uint32_t expect = test->whitelisted ? test->test_val : test->init_val;

	igt_debug("Testing %s LRI: addr=%x, val=%x, expected errno=%d, expected val=%x\n",
		  test->name, test->reg, test->test_val,
		  expected_errno, expect);
288

289
	intel_register_write(test->reg, test->init_val);
290

291 292 293 294
	igt_assert_eq_u32((intel_register_read(test->reg) &
			   test->read_mask),
			  test->init_val);

295 296 297 298
	exec_batch(fd, handle,
		   lri, sizeof(lri),
		   I915_EXEC_RENDER,
		   expected_errno);
299 300
	gem_sync(fd, handle);

301 302 303
	igt_assert_eq_u32((intel_register_read(test->reg) &
			   test->read_mask),
			  expect);
304 305
}

306 307
static void test_allocations(int fd)
{
308
	const uint32_t bbe = MI_BATCH_BUFFER_END;
309 310
	struct drm_i915_gem_execbuffer2 execbuf;
	struct drm_i915_gem_exec_object2 obj[17];
311
	unsigned long count;
312 313 314 315

	intel_require_memory(2, 1ull<<(12 + ARRAY_SIZE(obj)), CHECK_RAM);

	memset(obj, 0, sizeof(obj));
316
	for (int i = 0; i < ARRAY_SIZE(obj); i++) {
317 318 319 320 321 322
		uint64_t size = 1ull << (12 + i);

		obj[i].handle = gem_create(fd, size);
		for (uint64_t page = 4096; page <= size; page += 4096)
			gem_write(fd, obj[i].handle,
				  page - sizeof(bbe), &bbe, sizeof(bbe));
323 324 325 326
	}

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffer_count = 1;
327 328 329 330

	count = 0;
	igt_until_timeout(20) {
		int i = rand() % ARRAY_SIZE(obj);
331
		execbuf.buffers_ptr = to_user_pointer(&obj[i]);
332
		execbuf.batch_start_offset = (rand() % (1ull<<i)) << 12;
333 334
		execbuf.batch_start_offset += 64 * (rand() % 64);
		execbuf.batch_len = (1ull<<(12+i)) - execbuf.batch_start_offset;
335
		gem_execbuf(fd, &execbuf);
336
		count++;
337
	}
338 339
	igt_info("Submitted %lu execbufs\n", count);
	igt_drop_caches_set(fd, DROP_RESET_ACTIVE); /* Cancel the queued work */
340

341
	for (int i = 0; i < ARRAY_SIZE(obj); i++) {
342 343 344 345 346
		gem_sync(fd, obj[i].handle);
		gem_close(fd, obj[i].handle);
	}
}

347 348
static void hsw_load_register_reg(void)
{
349 350
	uint32_t init_gpr0[16] = {
		MI_LOAD_REGISTER_IMM | (3 - 2),
351
		HSW_CS_GPR0,
352 353 354 355
		0xabcdabc0, /* leave [1:0] zero */
		MI_BATCH_BUFFER_END,
	};
	uint32_t store_gpr0[16] = {
356 357
		MI_STORE_REGISTER_MEM | (3 - 2),
		HSW_CS_GPR0,
358
		0, /* reloc*/
359 360
		MI_BATCH_BUFFER_END,
	};
361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376
	uint32_t do_lrr[16] = {
		MI_LOAD_REGISTER_REG | (3 - 2),
		0, /* [1] = src */
		HSW_CS_GPR0, /* dst */
		MI_BATCH_BUFFER_END,
	};
	uint32_t allowed_regs[] = {
		HSW_CS_GPR1,
		SO_WRITE_OFFSET_0,
	};
	uint32_t disallowed_regs[] = {
		0,
		OACONTROL, /* filtered */
		DERRMR, /* master only */
		0x2038, /* RING_START: invalid */
	};
377
	int fd;
378
	uint32_t handle;
379
	int bad_lrr_errno = parser_version >= 8 ? 0 : -EINVAL;
380 381 382 383 384

	/* Open again to get a non-master file descriptor */
	fd = drm_open_driver(DRIVER_INTEL);

	igt_require(IS_HASWELL(intel_get_drm_devid(fd)));
385
	igt_require(parser_version >= 7);
386

387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407
	handle = gem_create(fd, 4096);

	for (int i = 0 ; i < ARRAY_SIZE(allowed_regs); i++) {
		uint32_t var;

		exec_batch(fd, handle, init_gpr0, sizeof(init_gpr0),
			   I915_EXEC_RENDER,
			   0);
		exec_batch_patched(fd, handle,
				   store_gpr0, sizeof(store_gpr0),
				   2 * sizeof(uint32_t), /* reloc */
				   0xabcdabc0);
		do_lrr[1] = allowed_regs[i];
		exec_batch(fd, handle, do_lrr, sizeof(do_lrr),
			   I915_EXEC_RENDER,
			   0);
		var = __exec_batch_patched(fd, handle,
					   store_gpr0, sizeof(store_gpr0),
					   2 * sizeof(uint32_t)); /* reloc */
		igt_assert_neq(var, 0xabcdabc0);
	}
408

409
	for (int i = 0 ; i < ARRAY_SIZE(disallowed_regs); i++) {
410 411 412 413 414 415 416
		exec_batch(fd, handle, init_gpr0, sizeof(init_gpr0),
			   I915_EXEC_RENDER,
			   0);
		exec_batch_patched(fd, handle,
				   store_gpr0, sizeof(store_gpr0),
				   2 * sizeof(uint32_t), /* reloc */
				   0xabcdabc0);
417 418 419
		do_lrr[1] = disallowed_regs[i];
		exec_batch(fd, handle, do_lrr, sizeof(do_lrr),
			   I915_EXEC_RENDER,
420 421 422 423 424
			   bad_lrr_errno);
		exec_batch_patched(fd, handle,
				   store_gpr0, sizeof(store_gpr0),
				   2 * sizeof(uint32_t), /* reloc */
				   0xabcdabc0);
425
	}
426 427 428 429

	close(fd);
}

430 431
igt_main
{
432 433 434
	uint32_t handle;
	int fd;

435
	igt_fixture {
436
		fd = drm_open_driver(DRIVER_INTEL);
437
		igt_require_gem(fd);
438

439 440
		parser_version = command_parser_version(fd);
		igt_require(parser_version != -1);
441

442
		igt_require(gem_uses_ppgtt(fd));
443

444
		handle = gem_create(fd, 4096);
445 446 447

		/* ATM cmd parser only exists on gen7. */
		igt_require(intel_gen(intel_get_drm_devid(fd)) == 7);
448
		igt_fork_hang_detector(fd);
449 450 451 452 453 454
	}

	igt_subtest("basic-allowed") {
		uint32_t pc[] = {
			GFX_OP_PIPE_CONTROL,
			PIPE_CONTROL_QW_WRITE,
455
			0, /* To be patched */
456 457 458 459
			0x12000000,
			0,
			MI_BATCH_BUFFER_END,
		};
460 461
		exec_batch_patched(fd, handle,
				   pc, sizeof(pc),
462
				   8, /* patch offset, */
463
				   0x12000000);
464 465
	}

466
	igt_subtest("basic-rejected") {
467 468
		uint32_t invalid_cmd[] = {
			INSTR_INVALID_CLIENT << INSTR_CLIENT_SHIFT,
469 470
			MI_BATCH_BUFFER_END,
		};
471 472
		uint32_t invalid_set_context[] = {
			MI_SET_CONTEXT | 32, /* invalid length */
473 474
			MI_BATCH_BUFFER_END,
		};
475
		exec_batch(fd, handle,
476
			   invalid_cmd, sizeof(invalid_cmd),
477 478 479
			   I915_EXEC_RENDER,
			   -EINVAL);
		exec_batch(fd, handle,
480
			   invalid_cmd, sizeof(invalid_cmd),
481 482
			   I915_EXEC_BSD,
			   -EINVAL);
483 484 485 486 487 488
		if (gem_has_blt(fd)) {
			exec_batch(fd, handle,
				   invalid_cmd, sizeof(invalid_cmd),
				   I915_EXEC_BLT,
				   -EINVAL);
		}
489
		if (gem_has_vebox(fd)) {
490
			exec_batch(fd, handle,
491
				   invalid_cmd, sizeof(invalid_cmd),
492 493
				   I915_EXEC_VEBOX,
				   -EINVAL);
494
		}
495

496
		exec_batch(fd, handle,
497 498
			   invalid_set_context, sizeof(invalid_set_context),
			   I915_EXEC_RENDER,
499
			   -EINVAL);
500 501
	}

502 503 504 505
	igt_subtest("basic-allocation") {
		test_allocations(fd);
	}

506
	igt_subtest_group {
507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533
#define REG(R, MSK, INI, V, OK, MIN_V) { #R, R, MSK, INI, V, OK, MIN_V }
		struct test_lri lris[] = {
			/* dummy head pointer */
			REG(OASTATUS2,
			    0xffffff80, 0xdeadf000, 0xbeeff000, false, 0),
			/* NB: [1:0] MBZ */
			REG(SO_WRITE_OFFSET_0,
			    0xfffffffc, 0xabcdabc0, 0xbeefbee0, true, 0),

			/* It's really important for us to check that
			 * an LRI to OACONTROL doesn't result in an
			 * EINVAL error because Mesa attempts writing
			 * to OACONTROL to determine what extensions to
			 * expose and will abort() for execbuffer()
			 * errors.
			 *
			 * Mesa can gracefully recognise and handle the
			 * LRI becoming a NOOP.
			 *
			 * The test values represent dummy context IDs
			 * while leaving the OA unit disabled
			 */
			REG(OACONTROL,
			    0xfffff000, 0xfeed0000, 0x31337000, false, 9)
		};
#undef REG

534
		igt_fixture {
535
			intel_register_access_init(igt_device_get_pci_device(fd), 0, fd);
536 537
		}

538 539 540 541 542 543
		for (int i = 0; i < ARRAY_SIZE(lris); i++) {
			igt_subtest_f("test-lri-%s", lris[i].name) {
				igt_require_f(parser_version >= lris[i].min_ver,
					      "minimum required parser version for test = %d\n",
					      lris[i].min_ver);
				test_lri(fd, handle, lris + i);
544
			}
545
		}
546 547 548 549 550 551

		igt_fixture {
			intel_register_access_fini();
		}
	}

552 553 554 555 556
	igt_subtest("bitmasks") {
		uint32_t pc[] = {
			GFX_OP_PIPE_CONTROL,
			(PIPE_CONTROL_QW_WRITE |
			 PIPE_CONTROL_LRI_POST_OP),
557
			0, /* To be patched */
558 559 560 561
			0x12000000,
			0,
			MI_BATCH_BUFFER_END,
		};
562 563 564 565 566 567 568 569 570 571 572 573 574 575
		if (parser_version >= 8) {
			/* Expect to read back zero since the command should be
			 * squashed to a NOOP
			 */
			exec_batch_patched(fd, handle,
					   pc, sizeof(pc),
					   8, /* patch offset, */
					   0x0);
		} else {
			exec_batch(fd, handle,
				   pc, sizeof(pc),
				   I915_EXEC_RENDER,
				   -EINVAL);
		}
576 577
	}

578 579
	igt_subtest("batch-without-end") {
		uint32_t noop[1024] = { 0 };
580 581 582 583
		exec_batch(fd, handle,
			   noop, sizeof(noop),
			   I915_EXEC_RENDER,
			   -EINVAL);
584 585
	}

586 587 588
	igt_subtest("cmd-crossing-page") {
		uint32_t lri_ok[] = {
			MI_LOAD_REGISTER_IMM,
589 590 591 592 593 594 595 596
			SO_WRITE_OFFSET_0, /* allowed register address */
			0xdcbaabc0, /* [1:0] MBZ */
			MI_BATCH_BUFFER_END,
		};
		uint32_t store_reg[] = {
			MI_STORE_REGISTER_MEM | (3 - 2),
			SO_WRITE_OFFSET_0,
			0, /* reloc */
597 598
			MI_BATCH_BUFFER_END,
		};
599 600 601 602
		exec_split_batch(fd,
				 lri_ok, sizeof(lri_ok),
				 I915_EXEC_RENDER,
				 0);
603 604 605 606 607
		exec_batch_patched(fd, handle,
				   store_reg,
				   sizeof(store_reg),
				   2 * sizeof(uint32_t), /* reloc */
				   0xdcbaabc0);
608 609
	}

610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638
	igt_subtest("oacontrol-tracking") {
		uint32_t lri_ok[] = {
			MI_LOAD_REGISTER_IMM,
			OACONTROL,
			0x31337000,
			MI_LOAD_REGISTER_IMM,
			OACONTROL,
			0x0,
			MI_BATCH_BUFFER_END,
			0
		};
		uint32_t lri_bad[] = {
			MI_LOAD_REGISTER_IMM,
			OACONTROL,
			0x31337000,
			MI_BATCH_BUFFER_END,
		};
		uint32_t lri_extra_bad[] = {
			MI_LOAD_REGISTER_IMM,
			OACONTROL,
			0x31337000,
			MI_LOAD_REGISTER_IMM,
			OACONTROL,
			0x0,
			MI_LOAD_REGISTER_IMM,
			OACONTROL,
			0x31337000,
			MI_BATCH_BUFFER_END,
		};
639 640 641

		igt_require(parser_version < 9);

642 643 644 645 646 647 648 649 650 651 652 653
		exec_batch(fd, handle,
			   lri_ok, sizeof(lri_ok),
			   I915_EXEC_RENDER,
			   0);
		exec_batch(fd, handle,
			   lri_bad, sizeof(lri_bad),
			   I915_EXEC_RENDER,
			   -EINVAL);
		exec_batch(fd, handle,
			   lri_extra_bad, sizeof(lri_extra_bad),
			   I915_EXEC_RENDER,
			   -EINVAL);
654 655
	}

656 657 658 659
	igt_subtest("chained-batch") {
		uint32_t pc[] = {
			GFX_OP_PIPE_CONTROL,
			PIPE_CONTROL_QW_WRITE,
660
			0, /* To be patched */
661 662 663 664 665 666
			0x12000000,
			0,
			MI_BATCH_BUFFER_END,
		};
		exec_batch_chained(fd, handle,
				   pc, sizeof(pc),
667
				   8, /* patch offset, */
668 669 670
				   0x12000000);
	}

671 672 673
	igt_subtest("load-register-reg")
		hsw_load_register_reg();

674
	igt_fixture {
675
		igt_stop_hang_detector();
676 677 678 679 680
		gem_close(fd, handle);

		close(fd);
	}
}