/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "radv_private.h"
#include "radv_shader.h"
#include "radv_shader_helper.h"
#include "radv_shader_args.h"
#include "radv_debug.h"
#include "nir/nir.h"

#include "sid.h"
#include "ac_binary.h"
#include "ac_llvm_util.h"
#include "ac_llvm_build.h"
#include "ac_shader_abi.h"
#include "ac_shader_util.h"
#include "ac_exp_param.h"

#define RADEON_LLVM_MAX_INPUTS (VARYING_SLOT_VAR31 + 1)

struct radv_shader_context {
	struct ac_llvm_context ac;
	const struct nir_shader *shader;
	struct ac_shader_abi abi;
	const struct radv_shader_args *args;

	gl_shader_stage stage;

	unsigned max_workgroup_size;
	LLVMContextRef context;
	LLVMValueRef main_function;

	LLVMValueRef descriptor_sets[MAX_SETS];

	LLVMValueRef ring_offsets;

	LLVMValueRef rel_auto_id;

	LLVMValueRef gs_wave_id;
	LLVMValueRef gs_vtx_offset[6];

	LLVMValueRef esgs_ring;
	LLVMValueRef gsvs_ring[4];
	LLVMValueRef hs_ring_tess_offchip;
	LLVMValueRef hs_ring_tess_factor;

	LLVMValueRef inputs[RADEON_LLVM_MAX_INPUTS * 4];

	uint64_t output_mask;

	LLVMValueRef gs_next_vertex[4];
	LLVMValueRef gs_curprim_verts[4];
	LLVMValueRef gs_generated_prims[4];
	LLVMValueRef gs_ngg_emit;
	LLVMValueRef gs_ngg_scratch;

	uint32_t tcs_num_inputs;
	uint32_t tcs_num_patches;

	LLVMValueRef vertexptr; /* GFX10 only */
};

struct radv_shader_output_values {
	LLVMValueRef values[4];
	unsigned slot_name;
	unsigned slot_index;
	unsigned usage_mask;
};

static inline struct radv_shader_context *
radv_shader_context_from_abi(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = NULL;
	return container_of(abi, ctx, abi);
}

static LLVMValueRef get_rel_patch_id(struct radv_shader_context *ctx)
{
	switch (ctx->stage) {
	case MESA_SHADER_TESS_CTRL:
		return ac_unpack_param(&ctx->ac,
				       ac_get_arg(&ctx->ac, ctx->args->ac.tcs_rel_ids),
				       0, 8);
	case MESA_SHADER_TESS_EVAL:
		return ac_get_arg(&ctx->ac, ctx->args->tes_rel_patch_id);
		break;
	default:
		unreachable("Illegal stage");
	}
}

/* Tessellation shaders pass outputs to the next shader using LDS.
 *
 * LS outputs = TCS inputs
 * TCS outputs = TES inputs
 *
 * The LDS layout is:
 * - TCS inputs for patch 0
 * - TCS inputs for patch 1
 * - TCS inputs for patch 2		= get_tcs_in_current_patch_offset (if RelPatchID==2)
 * - ...
 * - TCS outputs for patch 0            = get_tcs_out_patch0_offset
 * - Per-patch TCS outputs for patch 0  = get_tcs_out_patch0_patch_data_offset
 * - TCS outputs for patch 1
 * - Per-patch TCS outputs for patch 1
 * - TCS outputs for patch 2            = get_tcs_out_current_patch_offset (if RelPatchID==2)
 * - Per-patch TCS outputs for patch 2  = get_tcs_out_current_patch_data_offset (if RelPatchID==2)
 * - ...
 *
 * All three shaders VS(LS), TCS, TES share the same LDS space.
 */
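/* The stride/offset helpers below return values in dwords (note the final
 * divisions by 4). Illustrative example (made-up numbers, not from any real
 * pipeline key): with 8 TCS inputs of 16 bytes each and 3 input vertices per
 * patch, input_vertex_size = 8 * 16 = 128 bytes and the per-patch input
 * stride is 3 * 128 = 384 bytes = 96 dwords.
 */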
static LLVMValueRef
get_tcs_in_patch_stride(struct radv_shader_context *ctx)
{
	assert(ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->args->options->key.tcs.input_vertices * input_vertex_size;

	input_patch_size /= 4;
	return LLVMConstInt(ctx->ac.i32, input_patch_size, false);
}

static LLVMValueRef
get_tcs_out_patch_stride(struct radv_shader_context *ctx)
{
	uint32_t num_tcs_outputs = util_last_bit64(ctx->args->shader_info->tcs.outputs_written);
	uint32_t num_tcs_patch_outputs = util_last_bit64(ctx->args->shader_info->tcs.patch_outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->shader->info.tess.tcs_vertices_out * output_vertex_size;
	uint32_t output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
	output_patch_size /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch_size, false);
}

static LLVMValueRef
get_tcs_out_vertex_stride(struct radv_shader_context *ctx)
{
	uint32_t num_tcs_outputs = util_last_bit64(ctx->args->shader_info->tcs.outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	output_vertex_size /= 4;
	return LLVMConstInt(ctx->ac.i32, output_vertex_size, false);
}

static LLVMValueRef
get_tcs_out_patch0_offset(struct radv_shader_context *ctx)
{
	assert (ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->args->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t output_patch0_offset = input_patch_size;
	unsigned num_patches = ctx->tcs_num_patches;

	output_patch0_offset *= num_patches;
	output_patch0_offset /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch0_offset, false);
}

static LLVMValueRef
get_tcs_out_patch0_patch_data_offset(struct radv_shader_context *ctx)
{
	assert (ctx->stage == MESA_SHADER_TESS_CTRL);
	uint32_t input_vertex_size = ctx->tcs_num_inputs * 16;
	uint32_t input_patch_size = ctx->args->options->key.tcs.input_vertices * input_vertex_size;
	uint32_t output_patch0_offset = input_patch_size;

	uint32_t num_tcs_outputs = util_last_bit64(ctx->args->shader_info->tcs.outputs_written);
	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->shader->info.tess.tcs_vertices_out * output_vertex_size;
	unsigned num_patches = ctx->tcs_num_patches;

	output_patch0_offset *= num_patches;
	output_patch0_offset += pervertex_output_patch_size;
	output_patch0_offset /= 4;
	return LLVMConstInt(ctx->ac.i32, output_patch0_offset, false);
}

static LLVMValueRef
get_tcs_in_current_patch_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch_stride = get_tcs_in_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return LLVMBuildMul(ctx->ac.builder, patch_stride, rel_patch_id, "");
}

static LLVMValueRef
get_tcs_out_current_patch_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch0_offset = get_tcs_out_patch0_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id,
			     patch0_offset);
}

static LLVMValueRef
get_tcs_out_current_patch_data_offset(struct radv_shader_context *ctx)
{
	LLVMValueRef patch0_patch_data_offset =
		get_tcs_out_patch0_patch_data_offset(ctx);
	LLVMValueRef patch_stride = get_tcs_out_patch_stride(ctx);
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);

	return ac_build_imad(&ctx->ac, patch_stride, rel_patch_id,
			     patch0_patch_data_offset);
}

static LLVMValueRef
create_llvm_function(struct ac_llvm_context *ctx, LLVMModuleRef module,
                     LLVMBuilderRef builder,
		     const struct ac_shader_args *args,
		     enum ac_llvm_calling_convention convention,
		     unsigned max_workgroup_size,
		     const struct radv_nir_compiler_options *options)
{
	LLVMValueRef main_function =
		ac_build_main(args, ctx, convention, "main", ctx->voidt, module);

	if (options->address32_hi) {
		ac_llvm_add_target_dep_function_attr(main_function,
						     "amdgpu-32bit-address-high-bits",
						     options->address32_hi);
	}

	ac_llvm_set_workgroup_size(main_function, max_workgroup_size);

	return main_function;
}

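/* Resolve the descriptor set pointers used by the shader. With indirect
 * descriptor sets (typically when the set pointers do not all fit in user
 * SGPRs), a single pointer to an array of set addresses is passed and each
 * used set is loaded from that array; otherwise every used set pointer
 * arrives directly as its own shader argument.
 */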
static void
load_descriptor_sets(struct radv_shader_context *ctx)
{
	uint32_t mask = ctx->args->shader_info->desc_set_used_mask;
	if (ctx->args->shader_info->need_indirect_descriptor_sets) {
		LLVMValueRef desc_sets =
			ac_get_arg(&ctx->ac, ctx->args->descriptor_sets[0]);
		while (mask) {
			int i = u_bit_scan(&mask);

			ctx->descriptor_sets[i] =
				ac_build_load_to_sgpr(&ctx->ac, desc_sets,
						      LLVMConstInt(ctx->ac.i32, i, false));

		}
	} else {
		while (mask) {
			int i = u_bit_scan(&mask);

			ctx->descriptor_sets[i] =
				ac_get_arg(&ctx->ac, ctx->args->descriptor_sets[i]);
		}
	}
}

static enum ac_llvm_calling_convention
get_llvm_calling_convention(LLVMValueRef func, gl_shader_stage stage)
{
	switch (stage) {
	case MESA_SHADER_VERTEX:
	case MESA_SHADER_TESS_EVAL:
		return AC_LLVM_AMDGPU_VS;
		break;
	case MESA_SHADER_GEOMETRY:
		return AC_LLVM_AMDGPU_GS;
		break;
	case MESA_SHADER_TESS_CTRL:
		return AC_LLVM_AMDGPU_HS;
		break;
	case MESA_SHADER_FRAGMENT:
		return AC_LLVM_AMDGPU_PS;
		break;
	case MESA_SHADER_COMPUTE:
		return AC_LLVM_AMDGPU_CS;
		break;
	default:
		unreachable("Unhandled shader type");
	}
}

/* Returns whether the stage is a stage that can be directly before the GS */
static bool is_pre_gs_stage(gl_shader_stage stage)
{
	return stage == MESA_SHADER_VERTEX || stage == MESA_SHADER_TESS_EVAL;
}

static void create_function(struct radv_shader_context *ctx,
                            gl_shader_stage stage,
                            bool has_previous_stage)
{
	if (ctx->ac.chip_class >= GFX10) {
		if (is_pre_gs_stage(stage) && ctx->args->options->key.vs_common_out.as_ngg) {
			/* On GFX10, VS is merged into GS for NGG. */
			stage = MESA_SHADER_GEOMETRY;
			has_previous_stage = true;
		}
	}

	ctx->main_function = create_llvm_function(
	    &ctx->ac, ctx->ac.module, ctx->ac.builder, &ctx->args->ac,
	    get_llvm_calling_convention(ctx->main_function, stage),
	    ctx->max_workgroup_size,
	    ctx->args->options);

	ctx->ring_offsets = ac_build_intrinsic(&ctx->ac, "llvm.amdgcn.implicit.buffer.ptr",
					       LLVMPointerType(ctx->ac.i8, AC_ADDR_SPACE_CONST),
					       NULL, 0, AC_FUNC_ATTR_READNONE);
	ctx->ring_offsets = LLVMBuildBitCast(ctx->ac.builder, ctx->ring_offsets,
					     ac_array_in_const_addr_space(ctx->ac.v4i32), "");

	load_descriptor_sets(ctx);

	if (stage == MESA_SHADER_TESS_CTRL ||
	    (stage == MESA_SHADER_VERTEX && ctx->args->options->key.vs_common_out.as_ls) ||
	    /* GFX9 has the ESGS ring buffer in LDS. */
	    (stage == MESA_SHADER_GEOMETRY && has_previous_stage)) {
		ac_declare_lds_as_pointer(&ctx->ac);
	}

}


static LLVMValueRef
radv_load_resource(struct ac_shader_abi *abi, LLVMValueRef index,
		   unsigned desc_set, unsigned binding)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef desc_ptr = ctx->descriptor_sets[desc_set];
	struct radv_pipeline_layout *pipeline_layout = ctx->args->options->layout;
	struct radv_descriptor_set_layout *layout = pipeline_layout->set[desc_set].layout;
	unsigned base_offset = layout->binding[binding].offset;
	LLVMValueRef offset, stride;

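	/* Dynamic uniform/storage buffer descriptors do not live in the
	 * descriptor set itself: they are stored right after the push
	 * constants, 16 bytes per dynamic binding (hence the fixed stride
	 * of 16 below).
	 */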
	if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
	    layout->binding[binding].type == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
		unsigned idx = pipeline_layout->set[desc_set].dynamic_offset_start +
			layout->binding[binding].dynamic_offset_offset;
		desc_ptr = ac_get_arg(&ctx->ac, ctx->args->ac.push_constants);
		base_offset = pipeline_layout->push_constant_size + 16 * idx;
		stride = LLVMConstInt(ctx->ac.i32, 16, false);
	} else
		stride = LLVMConstInt(ctx->ac.i32, layout->binding[binding].size, false);

	offset = LLVMConstInt(ctx->ac.i32, base_offset, false);

	if (layout->binding[binding].type != VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
		offset = ac_build_imad(&ctx->ac, index, stride, offset);
	}

	desc_ptr = LLVMBuildGEP(ctx->ac.builder, desc_ptr, &offset, 1, "");
	desc_ptr = ac_cast_ptr(&ctx->ac, desc_ptr, ctx->ac.v4i32);
	LLVMSetMetadata(desc_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	if (layout->binding[binding].type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT) {
		uint32_t desc_type = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);

		if (ctx->ac.chip_class >= GFX10) {
			desc_type |= S_008F0C_FORMAT(V_008F0C_IMG_FORMAT_32_FLOAT) |
				     S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) |
				     S_008F0C_RESOURCE_LEVEL(1);
		} else {
			desc_type |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
				     S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
		}

		LLVMValueRef desc_components[4] = {
			LLVMBuildPtrToInt(ctx->ac.builder, desc_ptr, ctx->ac.intptr, ""),
			LLVMConstInt(ctx->ac.i32, S_008F04_BASE_ADDRESS_HI(ctx->args->options->address32_hi), false),
			/* High limit to support variable sizes. */
			LLVMConstInt(ctx->ac.i32, 0xffffffff, false),
			LLVMConstInt(ctx->ac.i32, desc_type, false),
		};

		return ac_build_gather_values(&ctx->ac, desc_components, 4);
	}

	return desc_ptr;
}


/* The offchip buffer layout for TCS->TES is
 *
 * - attribute 0 of patch 0 vertex 0
 * - attribute 0 of patch 0 vertex 1
 * - attribute 0 of patch 0 vertex 2
 *   ...
 * - attribute 0 of patch 1 vertex 0
 * - attribute 0 of patch 1 vertex 1
 *   ...
 * - attribute 1 of patch 0 vertex 0
 * - attribute 1 of patch 0 vertex 1
 *   ...
 * - per patch attribute 0 of patch 0
 * - per patch attribute 0 of patch 1
 *   ...
 *
 * Note that every attribute has 4 components.
 */
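/* Worked example for the layout above (illustrative numbers only): with
 * 3 output vertices per patch and 16 patches, get_tcs_tes_buffer_address()
 * places vertex 1 of patch 2, attribute 3 at
 * ((2 * 3 + 1) + 3 * (3 * 16)) * 16 = 2416 bytes, and the per-patch
 * attributes start at pervertex_output_patch_size * num_patches bytes
 * (see get_non_vertex_index_offset()).
 */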
static LLVMValueRef get_non_vertex_index_offset(struct radv_shader_context *ctx)
{
	uint32_t num_patches = ctx->tcs_num_patches;
	uint32_t num_tcs_outputs;
	if (ctx->stage == MESA_SHADER_TESS_CTRL)
		num_tcs_outputs = util_last_bit64(ctx->args->shader_info->tcs.outputs_written);
	else
		num_tcs_outputs = ctx->args->options->key.tes.tcs_num_outputs;

	uint32_t output_vertex_size = num_tcs_outputs * 16;
	uint32_t pervertex_output_patch_size = ctx->shader->info.tess.tcs_vertices_out * output_vertex_size;

	return LLVMConstInt(ctx->ac.i32, pervertex_output_patch_size * num_patches, false);
}

static LLVMValueRef calc_param_stride(struct radv_shader_context *ctx,
				      LLVMValueRef vertex_index)
{
	LLVMValueRef param_stride;
	if (vertex_index)
		param_stride = LLVMConstInt(ctx->ac.i32, ctx->shader->info.tess.tcs_vertices_out * ctx->tcs_num_patches, false);
	else
		param_stride = LLVMConstInt(ctx->ac.i32, ctx->tcs_num_patches, false);
	return param_stride;
}

static LLVMValueRef get_tcs_tes_buffer_address(struct radv_shader_context *ctx,
                                               LLVMValueRef vertex_index,
                                               LLVMValueRef param_index)
{
	LLVMValueRef base_addr;
	LLVMValueRef param_stride, constant16;
	LLVMValueRef rel_patch_id = get_rel_patch_id(ctx);
	LLVMValueRef vertices_per_patch = LLVMConstInt(ctx->ac.i32, ctx->shader->info.tess.tcs_vertices_out, false);
	constant16 = LLVMConstInt(ctx->ac.i32, 16, false);
	param_stride = calc_param_stride(ctx, vertex_index);
	if (vertex_index) {
		base_addr = ac_build_imad(&ctx->ac, rel_patch_id,
					  vertices_per_patch, vertex_index);
	} else {
		base_addr = rel_patch_id;
	}

	base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
	                         LLVMBuildMul(ctx->ac.builder, param_index,
	                                      param_stride, ""), "");

	base_addr = LLVMBuildMul(ctx->ac.builder, base_addr, constant16, "");

	if (!vertex_index) {
		LLVMValueRef patch_data_offset = get_non_vertex_index_offset(ctx);

		base_addr = LLVMBuildAdd(ctx->ac.builder, base_addr,
		                         patch_data_offset, "");
	}
	return base_addr;
}

static LLVMValueRef get_tcs_tes_buffer_address_params(struct radv_shader_context *ctx,
						      unsigned param,
						      unsigned const_index,
						      bool is_compact,
						      LLVMValueRef vertex_index,
						      LLVMValueRef indir_index)
{
	LLVMValueRef param_index;

	if (indir_index)
		param_index = LLVMBuildAdd(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, param, false),
					   indir_index, "");
	else {
		if (const_index && !is_compact)
			param += const_index;
		param_index = LLVMConstInt(ctx->ac.i32, param, false);
	}
	return get_tcs_tes_buffer_address(ctx, vertex_index, param_index);
}

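/* Compute a dword address in LDS for one TCS input/output slot: start from
 * the patch base, add vertex_index * stride for per-vertex slots, add
 * param * 4 for the vec4 slot, then add the indirect/constant index
 * (scaled by 4 dwords, or unscaled for compact slots such as clip distances).
 */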
static LLVMValueRef
get_dw_address(struct radv_shader_context *ctx,
	       LLVMValueRef dw_addr,
	       unsigned param,
	       unsigned const_index,
	       bool compact_const_index,
	       LLVMValueRef vertex_index,
	       LLVMValueRef stride,
	       LLVMValueRef indir_index)

{

	if (vertex_index) {
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMBuildMul(ctx->ac.builder,
						    vertex_index,
						    stride, ""), "");
	}

	if (indir_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMBuildMul(ctx->ac.builder, indir_index,
						    LLVMConstInt(ctx->ac.i32, 4, false), ""), "");
	else if (const_index && !compact_const_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMConstInt(ctx->ac.i32, const_index * 4, false), "");

	dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
			       LLVMConstInt(ctx->ac.i32, param * 4, false), "");

	if (const_index && compact_const_index)
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       LLVMConstInt(ctx->ac.i32, const_index, false), "");
	return dw_addr;
}

static LLVMValueRef
load_tcs_varyings(struct ac_shader_abi *abi,
		  LLVMTypeRef type,
		  LLVMValueRef vertex_index,
		  LLVMValueRef indir_index,
		  unsigned const_index,
		  unsigned location,
		  unsigned driver_location,
		  unsigned component,
		  unsigned num_components,
		  bool is_patch,
		  bool is_compact,
		  bool load_input)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef dw_addr, stride;
	LLVMValueRef value[4], result;
	unsigned param = shader_io_get_unique_index(location);

	if (load_input) {
		uint32_t input_vertex_size = (ctx->tcs_num_inputs * 16) / 4;
		stride = LLVMConstInt(ctx->ac.i32, input_vertex_size, false);
		dw_addr = get_tcs_in_current_patch_offset(ctx);
	} else {
		if (!is_patch) {
			stride = get_tcs_out_vertex_stride(ctx);
			dw_addr = get_tcs_out_current_patch_offset(ctx);
		} else {
			dw_addr = get_tcs_out_current_patch_data_offset(ctx);
			stride = NULL;
		}
	}

	dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
				 indir_index);

	for (unsigned i = 0; i < num_components + component; i++) {
		value[i] = ac_lds_load(&ctx->ac, dw_addr);
		dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
				       ctx->ac.i32_1, "");
	}
	result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
	return result;
}

static void
store_tcs_output(struct ac_shader_abi *abi,
		 const nir_variable *var,
		 LLVMValueRef vertex_index,
		 LLVMValueRef param_index,
		 unsigned const_index,
		 LLVMValueRef src,
		 unsigned writemask)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	const unsigned location = var->data.location;
	unsigned component = var->data.location_frac;
	const bool is_patch = var->data.patch;
	const bool is_compact = var->data.compact;
	LLVMValueRef dw_addr;
	LLVMValueRef stride = NULL;
	LLVMValueRef buf_addr = NULL;
	LLVMValueRef oc_lds = ac_get_arg(&ctx->ac, ctx->args->oc_lds);
	unsigned param;
	bool store_lds = true;

	if (is_patch) {
		if (!(ctx->shader->info.patch_outputs_read & (1U << (location - VARYING_SLOT_PATCH0))))
			store_lds = false;
	} else {
		if (!(ctx->shader->info.outputs_read & (1ULL << location)))
			store_lds = false;
	}

	param = shader_io_get_unique_index(location);
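	/* Compact clip/cull distance arrays are addressed per scalar: fold the
	 * component into the constant index and step to the next vec4 slot
	 * when it crosses the 4-component boundary.
	 */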
	if ((location == VARYING_SLOT_CLIP_DIST0 || location == VARYING_SLOT_CLIP_DIST1) && is_compact) {
		const_index += component;
		component = 0;

		if (const_index >= 4) {
			const_index -= 4;
			param++;
		}
	}

	if (!is_patch) {
		stride = get_tcs_out_vertex_stride(ctx);
		dw_addr = get_tcs_out_current_patch_offset(ctx);
	} else {
		dw_addr = get_tcs_out_current_patch_data_offset(ctx);
	}

	dw_addr = get_dw_address(ctx, dw_addr, param, const_index, is_compact, vertex_index, stride,
				 param_index);
	buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index, is_compact,
						     vertex_index, param_index);

	bool is_tess_factor = false;
	if (location == VARYING_SLOT_TESS_LEVEL_INNER ||
	    location == VARYING_SLOT_TESS_LEVEL_OUTER)
		is_tess_factor = true;

	unsigned base = is_compact ? const_index : 0;
	for (unsigned chan = 0; chan < 8; chan++) {
		if (!(writemask & (1 << chan)))
			continue;
		LLVMValueRef value = ac_llvm_extract_elem(&ctx->ac, src, chan - component);
		value = ac_to_integer(&ctx->ac, value);
		value = LLVMBuildZExtOrBitCast(ctx->ac.builder, value, ctx->ac.i32, "");

		if (store_lds || is_tess_factor) {
			LLVMValueRef dw_addr_chan =
				LLVMBuildAdd(ctx->ac.builder, dw_addr,
				                           LLVMConstInt(ctx->ac.i32, chan, false), "");
			ac_lds_store(&ctx->ac, dw_addr_chan, value);
		}

		if (!is_tess_factor && writemask != 0xF)
			ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, value, 1,
						    buf_addr, oc_lds,
						    4 * (base + chan), ac_glc);
	}

	if (writemask == 0xF) {
		ac_build_buffer_store_dword(&ctx->ac, ctx->hs_ring_tess_offchip, src, 4,
					    buf_addr, oc_lds,
					    (base * 4), ac_glc);
	}
}

static LLVMValueRef
load_tes_input(struct ac_shader_abi *abi,
	       LLVMTypeRef type,
	       LLVMValueRef vertex_index,
	       LLVMValueRef param_index,
	       unsigned const_index,
	       unsigned location,
	       unsigned driver_location,
	       unsigned component,
	       unsigned num_components,
	       bool is_patch,
	       bool is_compact,
	       bool load_input)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef buf_addr;
	LLVMValueRef result;
	LLVMValueRef oc_lds = ac_get_arg(&ctx->ac, ctx->args->oc_lds);
	unsigned param = shader_io_get_unique_index(location);

	if ((location == VARYING_SLOT_CLIP_DIST0 || location == VARYING_SLOT_CLIP_DIST1) && is_compact) {
		const_index += component;
		component = 0;
		if (const_index >= 4) {
			const_index -= 4;
			param++;
		}
	}

	buf_addr = get_tcs_tes_buffer_address_params(ctx, param, const_index,
						     is_compact, vertex_index, param_index);

	LLVMValueRef comp_offset = LLVMConstInt(ctx->ac.i32, component * 4, false);
	buf_addr = LLVMBuildAdd(ctx->ac.builder, buf_addr, comp_offset, "");

	result = ac_build_buffer_load(&ctx->ac, ctx->hs_ring_tess_offchip, num_components, NULL,
				      buf_addr, oc_lds, is_compact ? (4 * const_index) : 0, ac_glc, true, false);
	result = ac_trim_vector(&ctx->ac, result, num_components);
	return result;
}

static LLVMValueRef
radv_emit_fetch_64bit(struct radv_shader_context *ctx,
		      LLVMTypeRef type, LLVMValueRef a, LLVMValueRef b)
{
	LLVMValueRef values[2] = {
		ac_to_integer(&ctx->ac, a),
		ac_to_integer(&ctx->ac, b),
	};
	LLVMValueRef result = ac_build_gather_values(&ctx->ac, values, 2);
	return LLVMBuildBitCast(ctx->ac.builder, result, type, "");
}

static LLVMValueRef
load_gs_input(struct ac_shader_abi *abi,
	      unsigned location,
	      unsigned driver_location,
	      unsigned component,
	      unsigned num_components,
	      unsigned vertex_index,
	      unsigned const_index,
	      LLVMTypeRef type)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef vtx_offset;
	unsigned param, vtx_offset_param;
	LLVMValueRef value[4], result;

	vtx_offset_param = vertex_index;
	assert(vtx_offset_param < 6);
	vtx_offset = LLVMBuildMul(ctx->ac.builder, ctx->gs_vtx_offset[vtx_offset_param],
				  LLVMConstInt(ctx->ac.i32, 4, false), "");

	param = shader_io_get_unique_index(location);

	for (unsigned i = component; i < num_components + component; i++) {
		if (ctx->ac.chip_class >= GFX9) {
			LLVMValueRef dw_addr = ctx->gs_vtx_offset[vtx_offset_param];
			dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
			                       LLVMConstInt(ctx->ac.i32, param * 4 + i + const_index, 0), "");
			value[i] = ac_lds_load(&ctx->ac, dw_addr);

			if (ac_get_type_size(type) == 8) {
				dw_addr = LLVMBuildAdd(ctx->ac.builder, dw_addr,
					               LLVMConstInt(ctx->ac.i32, param * 4 + i + const_index + 1, 0), "");
				LLVMValueRef tmp = ac_lds_load(&ctx->ac, dw_addr);

				value[i] = radv_emit_fetch_64bit(ctx, type, value[i], tmp);
			}
		} else {
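			/* Pre-GFX9: read the ES output back from the ESGS ring
			 * buffer. The 256-byte scale below presumably reflects
			 * the ring being swizzled per 64-lane wave (64 * 4 bytes
			 * per output dword); this is an assumption based on the
			 * addressing, not something stated in this file.
			 */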
			LLVMValueRef soffset =
				LLVMConstInt(ctx->ac.i32,
					     (param * 4 + i + const_index) * 256,
					     false);

			value[i] = ac_build_buffer_load(&ctx->ac,
							ctx->esgs_ring, 1,
							ctx->ac.i32_0,
							vtx_offset, soffset,
							0, ac_glc, true, false);

			if (ac_get_type_size(type) == 8) {
				soffset = LLVMConstInt(ctx->ac.i32,
						       (param * 4 + i + const_index + 1) * 256,
						       false);

				LLVMValueRef tmp =
					ac_build_buffer_load(&ctx->ac,
							     ctx->esgs_ring, 1,
							     ctx->ac.i32_0,
							     vtx_offset, soffset,
							     0, ac_glc, true, false);

				value[i] = radv_emit_fetch_64bit(ctx, type, value[i], tmp);
			}
		}

		if (ac_get_type_size(type) == 2) {
			value[i] = LLVMBuildBitCast(ctx->ac.builder, value[i], ctx->ac.i32, "");
			value[i] = LLVMBuildTrunc(ctx->ac.builder, value[i], ctx->ac.i16, "");
		}
		value[i] = LLVMBuildBitCast(ctx->ac.builder, value[i], type, "");
	}
	result = ac_build_varying_gather_values(&ctx->ac, value, num_components, component);
	result = ac_to_integer(&ctx->ac, result);
	return result;
}

static uint32_t
radv_get_sample_pos_offset(uint32_t num_samples)
{
	uint32_t sample_pos_offset = 0;

	switch (num_samples) {
	case 2:
		sample_pos_offset = 1;
		break;
	case 4:
		sample_pos_offset = 3;
		break;
	case 8:
		sample_pos_offset = 7;
		break;
	default:
		break;
	}
	return sample_pos_offset;
}
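/* These offsets index the RING_PS_SAMPLE_POSITIONS buffer, which appears to
 * pack the 1x, 2x, 4x and 8x sample position tables back to back, so the
 * table for N samples starts at entry N - 1 (0, 1, 3, 7).
 */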

static LLVMValueRef load_sample_position(struct ac_shader_abi *abi,
					 LLVMValueRef sample_id)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	LLVMValueRef result;
	LLVMValueRef index = LLVMConstInt(ctx->ac.i32, RING_PS_SAMPLE_POSITIONS, false);
	LLVMValueRef ptr = LLVMBuildGEP(ctx->ac.builder, ctx->ring_offsets, &index, 1, "");

	ptr = LLVMBuildBitCast(ctx->ac.builder, ptr,
			       ac_array_in_const_addr_space(ctx->ac.v2f32), "");

	uint32_t sample_pos_offset =
		radv_get_sample_pos_offset(ctx->args->options->key.fs.num_samples);

	sample_id =
		LLVMBuildAdd(ctx->ac.builder, sample_id,
			     LLVMConstInt(ctx->ac.i32, sample_pos_offset, false), "");
	result = ac_build_load_invariant(&ctx->ac, ptr, sample_id);

	return result;
}


static LLVMValueRef load_sample_mask_in(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	uint8_t log2_ps_iter_samples;

	if (ctx->args->shader_info->ps.force_persample) {
		log2_ps_iter_samples =
			util_logbase2(ctx->args->options->key.fs.num_samples);
	} else {
		log2_ps_iter_samples = ctx->args->options->key.fs.log2_ps_iter_samples;
	}

	/* The bit pattern matches that used by fixed function fragment
	 * processing. */
	static const uint16_t ps_iter_masks[] = {
		0xffff, /* not used */
		0x5555,
		0x1111,
		0x0101,
		0x0001,
	};
	assert(log2_ps_iter_samples < ARRAY_SIZE(ps_iter_masks));

	uint32_t ps_iter_mask = ps_iter_masks[log2_ps_iter_samples];

	LLVMValueRef result, sample_id;
	sample_id = ac_unpack_param(&ctx->ac, ac_get_arg(&ctx->ac, ctx->args->ac.ancillary), 8, 4);
	sample_id = LLVMBuildShl(ctx->ac.builder, LLVMConstInt(ctx->ac.i32, ps_iter_mask, false), sample_id, "");
	result = LLVMBuildAnd(ctx->ac.builder, sample_id,
			      ac_get_arg(&ctx->ac, ctx->args->ac.sample_coverage), "");
	return result;
}


static void gfx10_ngg_gs_emit_vertex(struct radv_shader_context *ctx,
				     unsigned stream,
				     LLVMValueRef *addrs);

static void
visit_emit_vertex(struct ac_shader_abi *abi, unsigned stream, LLVMValueRef *addrs)
{
	LLVMValueRef gs_next_vertex;
	LLVMValueRef can_emit;
	unsigned offset = 0;
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	if (ctx->args->options->key.vs_common_out.as_ngg) {
		gfx10_ngg_gs_emit_vertex(ctx, stream, addrs);
		return;
	}

	/* Write vertex attribute values to GSVS ring */
	gs_next_vertex = LLVMBuildLoad(ctx->ac.builder,
				       ctx->gs_next_vertex[stream],
				       "");

	/* If this thread has already emitted the declared maximum number of
	 * vertices, don't emit any more: excessive vertex emissions are not
	 * supposed to have any effect.
	 */
	can_emit = LLVMBuildICmp(ctx->ac.builder, LLVMIntULT, gs_next_vertex,
				 LLVMConstInt(ctx->ac.i32, ctx->shader->info.gs.vertices_out, false), "");

	bool use_kill = !ctx->args->shader_info->gs.writes_memory;
	if (use_kill)
		ac_build_kill_if_false(&ctx->ac, can_emit);
	else
		ac_build_ifcc(&ctx->ac, can_emit, 6505);

	for (unsigned i = 0; i < AC_LLVM_MAX_OUTPUTS; ++i) {
		unsigned output_usage_mask =
			ctx->args->shader_info->gs.output_usage_mask[i];
		uint8_t output_stream =
			ctx->args->shader_info->gs.output_streams[i];
		LLVMValueRef *out_ptr = &addrs[i * 4];
		int length = util_last_bit(output_usage_mask);

		if (!(ctx->output_mask & (1ull << i)) ||
		    output_stream != stream)
			continue;

		for (unsigned j = 0; j < length; j++) {
			if (!(output_usage_mask & (1 << j)))
				continue;

			LLVMValueRef out_val = LLVMBuildLoad(ctx->ac.builder,
							     out_ptr[j], "");
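			/* GSVS ring addressing: per stream, the ring is laid out
			 * component-major, i.e. one output component for all
			 * vertices_out vertices is stored contiguously, so the
			 * dword slot is offset * vertices_out + vertex index.
			 */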
			LLVMValueRef voffset =
				LLVMConstInt(ctx->ac.i32, offset *
					     ctx->shader->info.gs.vertices_out, false);

			offset++;

			voffset = LLVMBuildAdd(ctx->ac.builder, voffset, gs_next_vertex, "");
			voffset = LLVMBuildMul(ctx->ac.builder, voffset, LLVMConstInt(ctx->ac.i32, 4, false), "");

			out_val = ac_to_integer(&ctx->ac, out_val);
			out_val = LLVMBuildZExtOrBitCast(ctx->ac.builder, out_val, ctx->ac.i32, "");

			ac_build_buffer_store_dword(&ctx->ac,
						    ctx->gsvs_ring[stream],
						    out_val, 1,
						    voffset,
						    ac_get_arg(&ctx->ac,
							       ctx->args->gs2vs_offset),
						    0, ac_glc | ac_slc | ac_swizzled);
		}
	}

	gs_next_vertex = LLVMBuildAdd(ctx->ac.builder, gs_next_vertex,
				      ctx->ac.i32_1, "");
	LLVMBuildStore(ctx->ac.builder, gs_next_vertex, ctx->gs_next_vertex[stream]);

	ac_build_sendmsg(&ctx->ac,
			 AC_SENDMSG_GS_OP_EMIT | AC_SENDMSG_GS | (stream << 8),
			 ctx->gs_wave_id);

	if (!use_kill)
		ac_build_endif(&ctx->ac, 6505);
}

static void
visit_end_primitive(struct ac_shader_abi *abi, unsigned stream)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	if (ctx->args->options->key.vs_common_out.as_ngg) {
		LLVMBuildStore(ctx->ac.builder, ctx->ac.i32_0, ctx->gs_curprim_verts[stream]);
		return;
	}

	ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_CUT | AC_SENDMSG_GS | (stream << 8), ctx->gs_wave_id);
}

static LLVMValueRef
load_tess_coord(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);

	LLVMValueRef coord[4] = {
		ac_get_arg(&ctx->ac, ctx->args->tes_u),
		ac_get_arg(&ctx->ac, ctx->args->tes_v),
		ctx->ac.f32_0,
		ctx->ac.f32_0,
	};

	if (ctx->shader->info.tess.primitive_mode == GL_TRIANGLES)
		coord[2] = LLVMBuildFSub(ctx->ac.builder, ctx->ac.f32_1,
					LLVMBuildFAdd(ctx->ac.builder, coord[0], coord[1], ""), "");

	return ac_build_gather_values(&ctx->ac, coord, 3);
}

static LLVMValueRef
load_patch_vertices_in(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	return LLVMConstInt(ctx->ac.i32, ctx->args->options->key.tcs.input_vertices, false);
}


static LLVMValueRef radv_load_base_vertex(struct ac_shader_abi *abi)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	return ac_get_arg(&ctx->ac, ctx->args->ac.base_vertex);
}

static LLVMValueRef radv_load_ssbo(struct ac_shader_abi *abi,
				   LLVMValueRef buffer_ptr, bool write)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef result;

	LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
	LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);

	return result;
}

static LLVMValueRef radv_load_ubo(struct ac_shader_abi *abi, LLVMValueRef buffer_ptr)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef result;

	if (LLVMGetTypeKind(LLVMTypeOf(buffer_ptr)) != LLVMPointerTypeKind) {
		/* Do not load the descriptor for inlined uniform blocks. */
		return buffer_ptr;
	}

	LLVMSetMetadata(buffer_ptr, ctx->ac.uniform_md_kind, ctx->ac.empty_md);

	result = LLVMBuildLoad(ctx->ac.builder, buffer_ptr, "");
	LLVMSetMetadata(result, ctx->ac.invariant_load_md_kind, ctx->ac.empty_md);

	return result;
}

static LLVMValueRef radv_get_sampler_desc(struct ac_shader_abi *abi,
					  unsigned descriptor_set,
					  unsigned base_index,
					  unsigned constant_index,
					  LLVMValueRef index,
					  enum ac_descriptor_type desc_type,
					  bool image, bool write,
					  bool bindless)
{
	struct radv_shader_context *ctx = radv_shader_context_from_abi(abi);
	LLVMValueRef list = ctx->descriptor_sets[descriptor_set];
	struct radv_descriptor_set_layout *layout = ctx->args->options->layout->set[descriptor_set].layout;
	struct radv_descriptor_set_binding_layout *binding = layout->binding + base_index;
	unsigned offset = binding->offset;
	unsigned stride = binding->size;
	unsigned type_size;
	LLVMBuilderRef builder = ctx->ac.builder;
	LLVMTypeRef type;

	assert(base_index < layout->binding_count);

	switch (desc_type) {
	case AC_DESC_IMAGE:
		type = ctx->ac.v8i32;
		type_size = 32;
		break;
	case AC_DESC_FMASK:
		type = ctx->ac.v8i32;
		offset += 32;
		type_size = 32;
		break;
	case AC_DESC_SAMPLER:
		type = ctx->ac.v4i32;
		if (binding->type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
			offset += radv_combined_image_descriptor_sampler_offset(binding);
		}

		type_size = 16;
		break;
	case AC_DESC_BUFFER:
		type = ctx->ac.v4i32;
		type_size = 16;
		break;
	case AC_DESC_PLANE_0:
	case AC_DESC_PLANE_1:
	case AC_DESC_PLANE_2:
		type = ctx->ac.v8i32;
		type_size = 32;
		offset += 32 * (desc_type - AC_DESC_PLANE_0);
		break;
	default:
		unreachable("invalid desc_type\n");
	}

	offset += constant_index * stride;

	if (desc_type == AC_DESC_SAMPLER && binding->immutable_samplers_offset &&
	    (!index || binding->immutable_samplers_equal)) {
		if (binding->immutable_samplers_equal)
			constant_index = 0;

		const uint32_t *samplers = radv_immutable_samplers(layout, binding);

		LLVMValueRef constants[] = {
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 0], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 1], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 2], 0),
			LLVMConstInt(ctx->ac.i32, samplers[constant_index * 4 + 3], 0),
		};
		return ac_build_gather_values(&ctx->ac, constants, 4);
	}

	assert(stride % type_size == 0);

	LLVMValueRef adjusted_index = index;
	if (!adjusted_index)
		adjusted_index = ctx->ac.i32_0;

	adjusted_index = LLVMBuildMul(builder, adjusted_index, LLVMConstInt(ctx->ac.i32, stride / type_size, 0), "");

	LLVMValueRef val_offset = LLVMConstInt(ctx->ac.i32, offset, 0);
	list = LLVMBuildGEP(builder, list, &val_offset, 1, "");
	list = LLVMBuildPointerCast(builder, list,
				    ac_array_in_const32_addr_space(type), "");

	LLVMValueRef descriptor = ac_build_load_to_sgpr(&ctx->ac, list, adjusted_index);

	/* 3 plane formats always have same size and format for plane 1 & 2, so
	 * use the tail from plane 1 so that we can store only the first 16 bytes
	 * of the last plane. */
	if (desc_type == AC_DESC_PLANE_2) {
		LLVMValueRef descriptor2 = radv_get_sampler_desc(abi, descriptor_set, base_index, constant_index, index, AC_DESC_PLANE_1,image, write, bindless);

		LLVMValueRef components[8];
		for (unsigned i = 0; i < 4; ++i)
			components[i] = ac_llvm_extract_elem(&ctx->ac, descriptor, i);

		for (unsigned i = 4; i < 8; ++i)
			components[i] = ac_llvm_extract_elem(&ctx->ac, descriptor2, i);
		descriptor = ac_build_gather_values(&ctx->ac, components, 8);
	}

	return descriptor;
}

/* For 2_10_10_10 formats the alpha is handled as unsigned by pre-vega HW,
 * so we may need to fix it up. */
static LLVMValueRef
adjust_vertex_fetch_alpha(struct radv_shader_context *ctx,
                          unsigned adjustment,
                          LLVMValueRef alpha)
{
	if (adjustment == RADV_ALPHA_ADJUST_NONE)
		return alpha;

	LLVMValueRef c30 = LLVMConstInt(ctx->ac.i32, 30, 0);

	alpha = LLVMBuildBitCast(ctx->ac.builder, alpha, ctx->ac.f32, "");

	if (adjustment == RADV_ALPHA_ADJUST_SSCALED)
		alpha = LLVMBuildFPToUI(ctx->ac.builder, alpha, ctx->ac.i32, "");
	else
		alpha = ac_to_integer(&ctx->ac, alpha);

	/* For the integer-like cases, do a natural sign extension.
	 *
	 * For the SNORM case, the values are 0.0, 0.333, 0.666, 1.0
	 * and happen to contain 0, 1, 2, 3 as the two LSBs of the
	 * exponent.
	 */
	alpha = LLVMBuildShl(ctx->ac.builder, alpha,
	                     adjustment == RADV_ALPHA_ADJUST_SNORM ?
	                     LLVMConstInt(ctx->ac.i32, 7, 0) : c30, "");
	alpha = LLVMBuildAShr(ctx->ac.builder, alpha, c30, "");

	/* Convert back to the right type. */
	if (adjustment == RADV_ALPHA_ADJUST_SNORM) {
		LLVMValueRef clamp;
		LLVMValueRef neg_one = LLVMConstReal(ctx->ac.f32, -1.0);
		alpha = LLVMBuildSIToFP(ctx->ac.builder, alpha, ctx->ac.f32, "");
		clamp = LLVMBuildFCmp(ctx->ac.builder, LLVMRealULT, alpha, neg_one, "");
		alpha = LLVMBuildSelect(ctx->ac.builder, clamp, neg_one, alpha, "");
	} else if (adjustment == RADV_ALPHA_ADJUST_SSCALED) {
		alpha = LLVMBuildSIToFP(ctx->ac.builder, alpha, ctx->ac.f32, "");
	}

	return LLVMBuildBitCast(ctx->ac.builder, alpha, ctx->ac.i32, "");
}

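/* Pad a fetched vertex attribute out to a full vec4: missing components are
 * filled with 0 and the w component defaults to 1 (integer or float according
 * to the attribute's number format).
 */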
static LLVMValueRef
radv_fixup_vertex_input_fetches(struct radv_shader_context *ctx,
				LLVMValueRef value,
				unsigned num_channels,
				bool is_float)
{
	LLVMValueRef zero = is_float ? ctx->ac.f32_0 : ctx->ac.i32_0;
	LLVMValueRef one = is_float ? ctx->ac.f32_1 : ctx->ac.i32_1;
	LLVMValueRef chan[4];

	if (LLVMGetTypeKind(LLVMTypeOf(value)) == LLVMVectorTypeKind) {
		unsigned vec_size = LLVMGetVectorSize(LLVMTypeOf(value));

		if (num_channels == 4 && num_channels == vec_size)
			return value;

		num_channels = MIN2(num_channels, vec_size);

		for (unsigned i = 0; i < num_channels; i++)
			chan[i] = ac_llvm_extract_elem(&ctx->ac, value, i);
	} else {
		assert(num_channels == 1);
		chan[0] = value;
	}

	for (unsigned i = num_channels; i < 4; i++) {
		chan[i] = i == 3 ? one : zero;
		chan[i] = ac_to_integer(&ctx->ac, chan[i]);
	}

	return ac_build_gather_values(&ctx->ac, chan, 4);
}

static void
handle_vs_input_decl(struct radv_shader_context *ctx,
		     struct nir_variable *variable)
{
	LLVMValueRef t_list_ptr = ac_get_arg(&ctx->ac, ctx->args->vertex_buffers);
	LLVMValueRef t_offset;
	LLVMValueRef t_list;
	LLVMValueRef input;
	LLVMValueRef buffer_index;
	unsigned attrib_count = glsl_count_attribute_slots(variable->type, true);
	uint8_t input_usage_mask =
		ctx->args->shader_info->vs.input_usage_mask[variable->data.location];
	unsigned num_input_channels = util_last_bit(input_usage_mask);

	variable->data.driver_location = variable->data.location * 4;

	enum glsl_base_type type = glsl_get_base_type(variable->type);
	for (unsigned i = 0; i < attrib_count; ++i) {
		LLVMValueRef output[4];
		unsigned attrib_index = variable->data.location + i - VERT_ATTRIB_GENERIC0;
		unsigned attrib_format = ctx->args->options->key.vs.vertex_attribute_formats[attrib_index];
		unsigned data_format = attrib_format & 0x0f;
		unsigned num_format = (attrib_format >> 4) & 0x07;
		bool is_float = num_format != V_008F0C_BUF_NUM_FORMAT_UINT &&
		                num_format != V_008F0C_BUF_NUM_FORMAT_SINT;

		if (ctx->args->options->key.vs.instance_rate_inputs & (1u << attrib_index)) {
			uint32_t divisor = ctx->args->options->key.vs.instance_rate_divisors[attrib_index];

			if (divisor) {
				buffer_index = ctx->abi.instance_id;

				if (divisor != 1) {
					buffer_index = LLVMBuildUDiv(ctx->ac.builder, buffer_index,
					                             LLVMConstInt(ctx->ac.i32, divisor, 0), "");
				}
			} else {
				buffer_index = ctx->ac.i32_0;
			}

			buffer_index = LLVMBuildAdd(ctx->ac.builder,
						    ac_get_arg(&ctx->ac,
							       ctx->args->ac.start_instance),\
						    buffer_index, "");
		} else {
			buffer_index = LLVMBuildAdd(ctx->ac.builder,
						    ctx->abi.vertex_id,
			                            ac_get_arg(&ctx->ac,
							       ctx->args->ac.base_vertex), "");
		}

		const struct ac_data_format_info *vtx_info = ac_get_data_format_info(data_format);

		/* Adjust the number of channels to load based on the vertex
		 * attribute format.
		 */
		unsigned num_channels = MIN2(num_input_channels, vtx_info->num_channels);
		unsigned attrib_binding = ctx->args->options->key.vs.vertex_attribute_bindings[attrib_index];
		unsigned attrib_offset = ctx->args->options->key.vs.vertex_attribute_offsets[attrib_index];
		unsigned attrib_stride = ctx->args->options->key.vs.vertex_attribute_strides[attrib_index];

		if (ctx->args->options->key.vs.post_shuffle & (1 << attrib_index)) {
			/* Always load at least 3 channels for formats that
			 * need to be shuffled because X<->Z.
			 */
			num_channels = MAX2(num_channels, 3);
		}

		t_offset = LLVMConstInt(ctx->ac.i32, attrib_binding, false);
		t_list = ac_build_load_to_sgpr(&ctx->ac, t_list_ptr, t_offset);

		/* Perform per-channel vertex fetch operations if unaligned
		 * accesses are detected. Only GFX6 and GFX10 are affected.
		 */
		bool unaligned_vertex_fetches = false;
		if ((ctx->ac.chip_class == GFX6 || ctx->ac.chip_class == GFX10) &&
		    vtx_info->chan_format != data_format &&
		    ((attrib_offset % vtx_info->element_size) ||
		     (attrib_stride % vtx_info->element_size)))
			unaligned_vertex_fetches = true;

		if (unaligned_vertex_fetches) {
			unsigned chan_format = vtx_info->chan_format;
			LLVMValueRef values[4];

			assert(ctx->ac.chip_class == GFX6 ||
			       ctx->ac.chip_class == GFX10);

			for (unsigned chan  = 0; chan < num_channels; chan++) {
				unsigned chan_offset = attrib_offset + chan * vtx_info->chan_byte_size;
				LLVMValueRef chan_index = buffer_index;

				if (attrib_stride != 0 && chan_offset > attrib_stride) {
					LLVMValueRef buffer_offset =
						LLVMConstInt(ctx->ac.i32,
							     chan_offset / attrib_stride, false);

					chan_index = LLVMBuildAdd(ctx->ac.builder,
								  buffer_index,
								  buffer_offset, "");

					chan_offset = chan_offset % attrib_stride;
				}

				values[chan] = ac_build_struct_tbuffer_load(&ctx->ac, t_list,
									   chan_index,
									   LLVMConstInt(ctx->ac.i32, chan_offset, false),
									   ctx->ac.i32_0, ctx->ac.i32_0, 1,
									   chan_format, num_format, 0, true);