/*
 * rendercopy_gen7.c - render-engine copy for Gen7 (Ivybridge / Haswell).
 */
#include <assert.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <fcntl.h>
#include <inttypes.h>
#include <errno.h>
#include <sys/stat.h>
#include <sys/time.h>
#include "drm.h"
#include "i915_drm.h"
#include "drmtest.h"
#include "intel_bufmgr.h"
#include "intel_batchbuffer.h"
#include "intel_io.h"
#include "intel_chipset.h"

#include "rendercopy.h"
#include "gen7_render.h"

#include "intel_reg.h"

static const uint32_t ps_kernel[][4] = {
25 26 27 28 29 30 31 32
	{ 0x0080005a, 0x2e2077bd, 0x000000c0, 0x008d0040 },
	{ 0x0080005a, 0x2e6077bd, 0x000000d0, 0x008d0040 },
	{ 0x02800031, 0x21801fa9, 0x008d0e20, 0x08840001 },
	{ 0x00800001, 0x2e2003bd, 0x008d0180, 0x00000000 },
	{ 0x00800001, 0x2e6003bd, 0x008d01c0, 0x00000000 },
	{ 0x00800001, 0x2ea003bd, 0x008d0200, 0x00000000 },
	{ 0x00800001, 0x2ee003bd, 0x008d0240, 0x00000000 },
	{ 0x05800031, 0x20001fa8, 0x008d0e20, 0x90031000 },
33 34 35
};

static void
36 37
gen7_render_flush(struct intel_batchbuffer *batch,
		  drm_intel_context *context, uint32_t batch_end)
38 39 40 41 42
{
	int ret;

	ret = drm_intel_bo_subdata(batch->bo, 0, 4096, batch->buffer);
	if (ret == 0)
43 44
		ret = drm_intel_gem_bo_context_exec(batch->bo, context,
						    batch_end, 0);
45
	igt_assert(ret == 0);
46 47 48
}

static uint32_t
49 50 51
gen7_tiling_bits(uint32_t tiling)
{
	switch (tiling) {
52
	default: igt_assert(0);
53 54 55 56 57 58 59 60
	case I915_TILING_NONE: return 0;
	case I915_TILING_X: return GEN7_SURFACE_TILED;
	case I915_TILING_Y: return GEN7_SURFACE_TILED | GEN7_SURFACE_TILED_Y;
	}
}

static uint32_t
gen7_bind_buf(struct intel_batchbuffer *batch,
Ville Syrjälä's avatar
Ville Syrjälä committed
61
	      const struct igt_buf *buf,
62 63
	      int is_dst)
{
64
	uint32_t format, *ss;
65 66 67
	uint32_t write_domain, read_domain;
	int ret;

68 69 70 71 72 73 74
	switch (buf->bpp) {
		case 8: format = SURFACEFORMAT_R8_UNORM; break;
		case 16: format = SURFACEFORMAT_R8G8_UNORM; break;
		case 32: format = SURFACEFORMAT_B8G8R8A8_UNORM; break;
		default: igt_assert(0);
	}

75 76 77 78 79 80 81
	if (is_dst) {
		write_domain = read_domain = I915_GEM_DOMAIN_RENDER;
	} else {
		write_domain = 0;
		read_domain = I915_GEM_DOMAIN_SAMPLER;
	}

82
	ss = intel_batchbuffer_subdata_alloc(batch, 8 * sizeof(*ss), 32);
83

84
	ss[0] = (SURFACE_2D << GEN7_SURFACE_TYPE_SHIFT |
85 86 87
		 gen7_tiling_bits(buf->tiling) |
		format << GEN7_SURFACE_FORMAT_SHIFT);
	ss[1] = buf->bo->offset;
88 89
	ss[2] = ((igt_buf_width(buf) - 1)  << GEN7_SURFACE_WIDTH_SHIFT |
		 (igt_buf_height(buf) - 1) << GEN7_SURFACE_HEIGHT_SHIFT);
90 91 92 93 94 95 96
	ss[3] = (buf->stride - 1) << GEN7_SURFACE_PITCH_SHIFT;
	ss[4] = 0;
	ss[5] = 0;
	ss[6] = 0;
	ss[7] = 0;
	if (IS_HASWELL(batch->devid))
		ss[7] |= HSW_SURFACE_SWIZZLE(RED, GREEN, BLUE, ALPHA);
97 98

	ret = drm_intel_bo_emit_reloc(batch->bo,
99
				      intel_batchbuffer_subdata_offset(batch, &ss[1]),
100 101
				      buf->bo, 0,
				      read_domain, write_domain);
102
	igt_assert(ret == 0);
103

104
	return intel_batchbuffer_subdata_offset(batch, ss);
105 106
}

107 108 109
static void
gen7_emit_vertex_elements(struct intel_batchbuffer *batch)
{
110
	OUT_BATCH(GEN4_3DSTATE_VERTEX_ELEMENTS |
111 112
		  ((2 * (1 + 2)) + 1 - 2));

113
	OUT_BATCH(0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT | GEN6_VE0_VALID |
114 115
		  SURFACEFORMAT_R32G32B32A32_FLOAT << VE0_FORMAT_SHIFT |
		  0 << VE0_OFFSET_SHIFT);
116

117 118 119 120
	OUT_BATCH(GEN4_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_0_SHIFT |
		  GEN4_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_1_SHIFT |
		  GEN4_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
		  GEN4_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_3_SHIFT);
121 122

	/* x,y */
123
	OUT_BATCH(0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT | GEN6_VE0_VALID |
124 125
		  SURFACEFORMAT_R16G16_SSCALED << VE0_FORMAT_SHIFT |
		  0 << VE0_OFFSET_SHIFT); /* offsets vb in bytes */
126 127 128 129
	OUT_BATCH(GEN4_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT |
		  GEN4_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT |
		  GEN4_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
		  GEN4_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT);
130 131

	/* s,t */
132
	OUT_BATCH(0 << GEN6_VE0_VERTEX_BUFFER_INDEX_SHIFT | GEN6_VE0_VALID |
133 134
		  SURFACEFORMAT_R16G16_SSCALED << VE0_FORMAT_SHIFT |
		  4 << VE0_OFFSET_SHIFT);  /* offset vb in bytes */
135 136 137 138
	OUT_BATCH(GEN4_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT |
		  GEN4_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT |
		  GEN4_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT |
		  GEN4_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT);
139 140 141
}

static uint32_t
142 143 144 145 146 147
gen7_create_vertex_buffer(struct intel_batchbuffer *batch,
			  uint32_t src_x, uint32_t src_y,
			  uint32_t dst_x, uint32_t dst_y,
			  uint32_t width, uint32_t height)
{
	uint16_t *v;
148

149
	v = intel_batchbuffer_subdata_alloc(batch, 12 * sizeof(*v), 8);
150

151 152 153 154
	v[0] = dst_x + width;
	v[1] = dst_y + height;
	v[2] = src_x + width;
	v[3] = src_y + height;
155

156 157 158 159
	v[4] = dst_x;
	v[5] = dst_y + height;
	v[6] = src_x;
	v[7] = src_y + height;
160

161 162 163 164 165
	v[8] = dst_x;
	v[9] = dst_y;
	v[10] = src_x;
	v[11] = src_y;

166
	return intel_batchbuffer_subdata_offset(batch, v);
167 168 169
}

static void gen7_emit_vertex_buffer(struct intel_batchbuffer *batch,
170 171
				    int src_x, int src_y,
				    int dst_x, int dst_y,
172 173
				    int width, int height,
				    uint32_t offset)
174
{
175 176 177
	OUT_BATCH(GEN4_3DSTATE_VERTEX_BUFFERS | (5 - 2));
	OUT_BATCH(0 << GEN6_VB0_BUFFER_INDEX_SHIFT |
		  GEN6_VB0_VERTEXDATA |
178
		  GEN7_VB0_ADDRESS_MODIFY_ENABLE |
179
		  4 * 2 << VB0_BUFFER_PITCH_SHIFT);
180

181
	OUT_RELOC(batch->bo, I915_GEM_DOMAIN_VERTEX, 0, offset);
182
	OUT_BATCH(~0);
183 184 185 186
	OUT_BATCH(0);
}

static uint32_t
187
gen7_bind_surfaces(struct intel_batchbuffer *batch,
Ville Syrjälä's avatar
Ville Syrjälä committed
188 189
		   const struct igt_buf *src,
		   const struct igt_buf *dst)
190 191 192
{
	uint32_t *binding_table;

193
	binding_table = intel_batchbuffer_subdata_alloc(batch, 8, 32);
194

195 196
	binding_table[0] = gen7_bind_buf(batch, dst, 1);
	binding_table[1] = gen7_bind_buf(batch, src, 0);
197

198
	return intel_batchbuffer_subdata_offset(batch, binding_table);
199 200 201 202
}

static void
gen7_emit_binding_table(struct intel_batchbuffer *batch,
Ville Syrjälä's avatar
Ville Syrjälä committed
203 204
			const struct igt_buf *src,
			const struct igt_buf *dst,
205
			uint32_t bind_surf_off)
206
{
207
	OUT_BATCH(GEN7_3DSTATE_BINDING_TABLE_POINTERS_PS | (2 - 2));
208
	OUT_BATCH(bind_surf_off);
209 210
}

211
static void
Ville Syrjälä's avatar
Ville Syrjälä committed
212
gen7_emit_drawing_rectangle(struct intel_batchbuffer *batch, const struct igt_buf *dst)
213
{
214
	OUT_BATCH(GEN4_3DSTATE_DRAWING_RECTANGLE | (4 - 2));
215
	OUT_BATCH(0);
216
	OUT_BATCH((igt_buf_height(dst) - 1) << 16 | (igt_buf_width(dst) - 1));
217
	OUT_BATCH(0);
218 219 220
}

static uint32_t
221
gen7_create_blend_state(struct intel_batchbuffer *batch)
222
{
223
	struct gen6_blend_state *blend;
224

225
	blend = intel_batchbuffer_subdata_alloc(batch, sizeof(*blend), 64);
226

227 228 229
	blend->blend0.dest_blend_factor = GEN6_BLENDFACTOR_ZERO;
	blend->blend0.source_blend_factor = GEN6_BLENDFACTOR_ONE;
	blend->blend0.blend_func = GEN6_BLENDFUNCTION_ADD;
230
	blend->blend1.post_blend_clamp_enable = 1;
231
	blend->blend1.pre_blend_clamp_enable = 1;
232

233
	return intel_batchbuffer_subdata_offset(batch, blend);
234 235
}

236 237 238
static void
gen7_emit_state_base_address(struct intel_batchbuffer *batch)
{
239
	OUT_BATCH(GEN4_STATE_BASE_ADDRESS | (10 - 2));
240 241 242 243 244 245 246 247 248 249 250 251
	OUT_BATCH(0);
	OUT_RELOC(batch->bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
	OUT_RELOC(batch->bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);
	OUT_BATCH(0);
	OUT_RELOC(batch->bo, I915_GEM_DOMAIN_INSTRUCTION, 0, BASE_ADDRESS_MODIFY);

	OUT_BATCH(0);
	OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
	OUT_BATCH(0);
	OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
}

252
static uint32_t
253
gen7_create_cc_viewport(struct intel_batchbuffer *batch)
254
{
255
	struct gen4_cc_viewport *vp;
256

257
	vp = intel_batchbuffer_subdata_alloc(batch, sizeof(*vp), 32);
258 259
	vp->min_depth = -1.e35;
	vp->max_depth = 1.e35;
260

261
	return intel_batchbuffer_subdata_offset(batch, vp);
262 263
}

264
static void
265 266
gen7_emit_cc(struct intel_batchbuffer *batch, uint32_t blend_state,
	     uint32_t cc_viewport)
267
{
268 269
	OUT_BATCH(GEN7_3DSTATE_BLEND_STATE_POINTERS | (2 - 2));
	OUT_BATCH(blend_state);
270

271 272
	OUT_BATCH(GEN7_3DSTATE_VIEWPORT_STATE_POINTERS_CC | (2 - 2));
	OUT_BATCH(cc_viewport);
273 274 275
}

static uint32_t
276
gen7_create_sampler(struct intel_batchbuffer *batch)
277
{
278
	struct gen7_sampler_state *ss;
279

280
	ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 32);
281

282 283
	ss->ss0.min_filter = GEN4_MAPFILTER_NEAREST;
	ss->ss0.mag_filter = GEN4_MAPFILTER_NEAREST;
284

285 286 287
	ss->ss3.r_wrap_mode = GEN4_TEXCOORDMODE_CLAMP;
	ss->ss3.s_wrap_mode = GEN4_TEXCOORDMODE_CLAMP;
	ss->ss3.t_wrap_mode = GEN4_TEXCOORDMODE_CLAMP;
288

289
	ss->ss3.non_normalized_coord = 1;
290

291
	return intel_batchbuffer_subdata_offset(batch, ss);
292 293 294
}

static void
295
gen7_emit_sampler(struct intel_batchbuffer *batch, uint32_t sampler_off)
296
{
297 298
	OUT_BATCH(GEN7_3DSTATE_SAMPLER_STATE_POINTERS_PS | (2 - 2));
	OUT_BATCH(sampler_off);
299 300 301
}

static void
302 303
gen7_emit_multisample(struct intel_batchbuffer *batch)
{
304 305 306
	OUT_BATCH(GEN6_3DSTATE_MULTISAMPLE | (4 - 2));
	OUT_BATCH(GEN6_3DSTATE_MULTISAMPLE_PIXEL_LOCATION_CENTER |
		  GEN6_3DSTATE_MULTISAMPLE_NUMSAMPLES_1); /* 1 sample/pixel */
307 308
	OUT_BATCH(0);
	OUT_BATCH(0);
309

310
	OUT_BATCH(GEN6_3DSTATE_SAMPLE_MASK | (2 - 2));
311
	OUT_BATCH(1);
312 313 314
}

static void
315 316 317 318
gen7_emit_urb(struct intel_batchbuffer *batch)
{
	OUT_BATCH(GEN7_3DSTATE_PUSH_CONSTANT_ALLOC_PS | (2 - 2));
	OUT_BATCH(8); /* in 1KBs */
319

320 321 322 323 324
	/* num of VS entries must be divisible by 8 if size < 9 */
	OUT_BATCH(GEN7_3DSTATE_URB_VS | (2 - 2));
	OUT_BATCH((64 << GEN7_URB_ENTRY_NUMBER_SHIFT) |
		  (2 - 1) << GEN7_URB_ENTRY_SIZE_SHIFT |
		  (1 << GEN7_URB_STARTING_ADDRESS_SHIFT));
325

326 327 328
	OUT_BATCH(GEN7_3DSTATE_URB_HS | (2 - 2));
	OUT_BATCH((0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
		  (2 << GEN7_URB_STARTING_ADDRESS_SHIFT));
329

330 331 332 333 334 335 336
	OUT_BATCH(GEN7_3DSTATE_URB_DS | (2 - 2));
	OUT_BATCH((0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
		  (2 << GEN7_URB_STARTING_ADDRESS_SHIFT));

	OUT_BATCH(GEN7_3DSTATE_URB_GS | (2 - 2));
	OUT_BATCH((0 << GEN7_URB_ENTRY_SIZE_SHIFT) |
		  (1 << GEN7_URB_STARTING_ADDRESS_SHIFT));
337 338 339
}

static void
340 341
gen7_emit_vs(struct intel_batchbuffer *batch)
{
342
	OUT_BATCH(GEN6_3DSTATE_VS | (6 - 2));
343
	OUT_BATCH(0); /* no VS kernel */
344 345 346
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
347
	OUT_BATCH(0); /* pass-through */
348 349 350
}

static void
351 352
gen7_emit_hs(struct intel_batchbuffer *batch)
{
353 354 355 356 357 358 359
	OUT_BATCH(GEN7_3DSTATE_HS | (7 - 2));
	OUT_BATCH(0); /* no HS kernel */
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0); /* pass-through */
360 361 362
}

static void
363 364
gen7_emit_te(struct intel_batchbuffer *batch)
{
365 366 367 368
	OUT_BATCH(GEN7_3DSTATE_TE | (4 - 2));
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
369 370 371
}

static void
372 373
gen7_emit_ds(struct intel_batchbuffer *batch)
{
374 375 376 377 378 379
	OUT_BATCH(GEN7_3DSTATE_DS | (6 - 2));
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
380 381 382
}

static void
383 384
gen7_emit_gs(struct intel_batchbuffer *batch)
{
385
	OUT_BATCH(GEN6_3DSTATE_GS | (7 - 2));
386 387 388 389 390 391
	OUT_BATCH(0); /* no GS kernel */
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0); /* pass-through  */
392 393 394
}

static void
395 396
gen7_emit_streamout(struct intel_batchbuffer *batch)
{
397 398 399
	OUT_BATCH(GEN7_3DSTATE_STREAMOUT | (3 - 2));
	OUT_BATCH(0);
	OUT_BATCH(0);
400 401 402
}

static void
403 404
gen7_emit_sf(struct intel_batchbuffer *batch)
{
405
	OUT_BATCH(GEN6_3DSTATE_SF | (7 - 2));
406
	OUT_BATCH(0);
407 408
	OUT_BATCH(GEN6_3DSTATE_SF_CULL_NONE);
	OUT_BATCH(2 << GEN6_3DSTATE_SF_TRIFAN_PROVOKE_SHIFT);
409 410 411
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
412 413 414
}

static void
415 416
gen7_emit_sbe(struct intel_batchbuffer *batch)
{
417
	OUT_BATCH(GEN7_3DSTATE_SBE | (14 - 2));
418 419 420
	OUT_BATCH(1 << GEN7_SBE_NUM_OUTPUTS_SHIFT |
		  1 << GEN7_SBE_URB_ENTRY_READ_LENGTH_SHIFT |
		  1 << GEN7_SBE_URB_ENTRY_READ_OFFSET_SHIFT);
421
	OUT_BATCH(0);
422
	OUT_BATCH(0); /* dw4 */
423 424 425
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
426
	OUT_BATCH(0); /* dw8 */
427 428 429
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
430
	OUT_BATCH(0); /* dw12 */
431 432 433 434 435
	OUT_BATCH(0);
	OUT_BATCH(0);
}

static void
436
gen7_emit_ps(struct intel_batchbuffer *batch, uint32_t kernel_off)
437 438
{
	int threads;
439

440 441 442 443 444 445
	if (IS_HASWELL(batch->devid))
		threads = 40 << HSW_PS_MAX_THREADS_SHIFT | 1 << HSW_PS_SAMPLE_MASK_SHIFT;
	else
		threads = 40 << IVB_PS_MAX_THREADS_SHIFT;

	OUT_BATCH(GEN7_3DSTATE_PS | (8 - 2));
446
	OUT_BATCH(kernel_off);
447 448 449 450 451 452 453
	OUT_BATCH(1 << GEN7_PS_SAMPLER_COUNT_SHIFT |
		  2 << GEN7_PS_BINDING_TABLE_ENTRY_COUNT_SHIFT);
	OUT_BATCH(0); /* scratch address */
	OUT_BATCH(threads |
		  GEN7_PS_16_DISPATCH_ENABLE |
		  GEN7_PS_ATTRIBUTE_ENABLE);
	OUT_BATCH(6 << GEN7_PS_DISPATCH_START_GRF_SHIFT_0);
454 455 456 457 458
	OUT_BATCH(0);
	OUT_BATCH(0);
}

static void
459 460
gen7_emit_clip(struct intel_batchbuffer *batch)
{
461
	OUT_BATCH(GEN6_3DSTATE_CLIP | (4 - 2));
462 463 464
	OUT_BATCH(0);
	OUT_BATCH(0); /* pass-through */
	OUT_BATCH(0);
465

466 467
	OUT_BATCH(GEN7_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CL | (2 - 2));
	OUT_BATCH(0);
468 469 470
}

static void
471 472
gen7_emit_wm(struct intel_batchbuffer *batch)
{
473
	OUT_BATCH(GEN6_3DSTATE_WM | (3 - 2));
474 475 476
	OUT_BATCH(GEN7_WM_DISPATCH_ENABLE |
		GEN7_WM_PERSPECTIVE_PIXEL_BARYCENTRIC);
	OUT_BATCH(0);
477 478 479
}

static void
480
gen7_emit_null_depth_buffer(struct intel_batchbuffer *batch)
481
{
482
	OUT_BATCH(GEN7_3DSTATE_DEPTH_BUFFER | (7 - 2));
483 484
	OUT_BATCH(SURFACE_NULL << GEN4_3DSTATE_DEPTH_BUFFER_TYPE_SHIFT |
		  GEN4_DEPTHFORMAT_D32_FLOAT << GEN4_3DSTATE_DEPTH_BUFFER_FORMAT_SHIFT);
485 486 487 488 489
	OUT_BATCH(0); /* disable depth, stencil and hiz */
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
	OUT_BATCH(0);
490

491 492 493
	OUT_BATCH(GEN7_3DSTATE_CLEAR_PARAMS | (3 - 2));
	OUT_BATCH(0);
	OUT_BATCH(0);
494
}
495 496 497

#define BATCH_STATE_SPLIT 2048
void gen7_render_copyfunc(struct intel_batchbuffer *batch,
498
			  drm_intel_context *context,
Ville Syrjälä's avatar
Ville Syrjälä committed
499
			  const struct igt_buf *src, unsigned src_x, unsigned src_y,
500
			  unsigned width, unsigned height,
Ville Syrjälä's avatar
Ville Syrjälä committed
501
			  const struct igt_buf *dst, unsigned dst_x, unsigned dst_y)
502
{
503 504 505
	uint32_t ps_binding_table, ps_sampler_off, ps_kernel_off;
	uint32_t blend_state, cc_viewport;
	uint32_t vertex_buffer;
506 507
	uint32_t batch_end;

508
	igt_assert(src->bpp == dst->bpp);
509
	intel_batchbuffer_flush_with_context(batch, context);
510

511 512
	batch->ptr = &batch->buffer[BATCH_STATE_SPLIT];

513

514 515 516
	blend_state = gen7_create_blend_state(batch);
	cc_viewport = gen7_create_cc_viewport(batch);
	ps_sampler_off = gen7_create_sampler(batch);
517 518
	ps_kernel_off = intel_batchbuffer_copy_data(batch, ps_kernel,
						    sizeof(ps_kernel), 64);
519 520 521 522 523 524 525 526 527
	vertex_buffer = gen7_create_vertex_buffer(batch,
						  src_x, src_y,
						  dst_x, dst_y,
						  width, height);
	ps_binding_table = gen7_bind_surfaces(batch, src, dst);

	igt_assert(batch->ptr < &batch->buffer[4095]);

	batch->ptr = batch->buffer;
528
	OUT_BATCH(G4X_PIPELINE_SELECT | PIPELINE_SELECT_3D);
529 530 531

	gen7_emit_state_base_address(batch);
	gen7_emit_multisample(batch);
532 533 534 535 536 537
	gen7_emit_urb(batch);
	gen7_emit_vs(batch);
	gen7_emit_hs(batch);
	gen7_emit_te(batch);
	gen7_emit_ds(batch);
	gen7_emit_gs(batch);
538 539
	gen7_emit_clip(batch);
	gen7_emit_sf(batch);
540 541 542
	gen7_emit_wm(batch);
	gen7_emit_streamout(batch);
	gen7_emit_null_depth_buffer(batch);
543 544
	gen7_emit_cc(batch, blend_state, cc_viewport);
	gen7_emit_sampler(batch, ps_sampler_off);
545
	gen7_emit_sbe(batch);
546
	gen7_emit_ps(batch, ps_kernel_off);
547
	gen7_emit_vertex_elements(batch);
548 549 550 551
	gen7_emit_vertex_buffer(batch, src_x, src_y,
				dst_x, dst_y, width,
				height, vertex_buffer);
	gen7_emit_binding_table(batch, src, dst, ps_binding_table);
552 553
	gen7_emit_drawing_rectangle(batch, dst);

554 555
	OUT_BATCH(GEN4_3DPRIMITIVE | (7 - 2));
	OUT_BATCH(GEN4_3DPRIMITIVE_VERTEX_SEQUENTIAL | _3DPRIM_RECTLIST);
556 557 558 559 560
	OUT_BATCH(3);
	OUT_BATCH(0);
	OUT_BATCH(1);   /* single instance */
	OUT_BATCH(0);   /* start instance location */
	OUT_BATCH(0);   /* index buffer offset, ignored */
561 562 563

	OUT_BATCH(MI_BATCH_BUFFER_END);

564 565
	batch_end = batch->ptr - batch->buffer;
	batch_end = ALIGN(batch_end, 8);
566
	igt_assert(batch_end < BATCH_STATE_SPLIT);
567

568
	gen7_render_flush(batch, context, batch_end);
569 570
	intel_batchbuffer_reset(batch);
}