/*
 * Copyright (C) 2020 Collabora Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors (Collabora):
 *      Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */

#include "compiler/glsl/glsl_to_nir.h"
#include "compiler/nir_types.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_debug.h"

#include "disassemble.h"
#include "bifrost_compile.h"
#include "compiler.h"
#include "bi_quirks.h"
#include "bi_builder.h"
#include "bifrost_nir.h"

static const struct debug_named_value bifrost_debug_options[] = {
        {"msgs",      BIFROST_DBG_MSGS,		"Print debug messages"},
        {"shaders",   BIFROST_DBG_SHADERS,	"Dump shaders in NIR and MIR"},
        {"shaderdb",  BIFROST_DBG_SHADERDB,	"Print statistics"},
        {"verbose",   BIFROST_DBG_VERBOSE,	"Disassemble verbosely"},
        {"internal",  BIFROST_DBG_INTERNAL,	"Dump even internal shaders"},
        {"nosched",   BIFROST_DBG_NOSCHED, 	"Force trivial bundling"},
        {"inorder",   BIFROST_DBG_INORDER, 	"Force in-order bundling"},
        {"novalidate",BIFROST_DBG_NOVALIDATE,   "Skip IR validation"},
        {"noopt",     BIFROST_DBG_NOOPT,        "Skip optimization passes"},
        {"noidvs",    BIFROST_DBG_NOIDVS,       "Disable IDVS"},
        DEBUG_NAMED_VALUE_END
};

DEBUG_GET_ONCE_FLAGS_OPTION(bifrost_debug, "BIFROST_MESA_DEBUG", bifrost_debug_options, 0)

/* How many bytes are prefetched by the Bifrost shader core. From the final
 * clause of the shader, this range must be valid instructions or zero. */
#define BIFROST_SHADER_PREFETCH 128

int bifrost_debug = 0;

#define DBG(fmt, ...) \
		do { if (bifrost_debug & BIFROST_DBG_MSGS) \
			fprintf(stderr, "%s:%d: "fmt, \
				__FUNCTION__, __LINE__, ##__VA_ARGS__); } while (0)

static bi_block *emit_cf_list(bi_context *ctx, struct exec_list *list);

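/* Add a CFG edge from block to successor, deduplicating repeat edges and
 * culling edges out of blocks that end in unconditional jumps (which can
 * never fall through). The successor's predecessor set is kept in sync. */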
static void
bi_block_add_successor(bi_block *block, bi_block *successor)
{
        assert(block != NULL && successor != NULL);

        /* Cull impossible edges */
        if (block->unconditional_jumps)
                return;

        for (unsigned i = 0; i < ARRAY_SIZE(block->successors); ++i) {
                if (block->successors[i]) {
                        if (block->successors[i] == successor)
                                return;
                        else
                                continue;
                }

                block->successors[i] = successor;
                _mesa_set_add(successor->predecessors, block);
                return;
        }

        unreachable("Too many successors");
}

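/* Lower a NIR break/continue to a branch whose target is the break/continue
 * block recorded by the enclosing loop. Since this bypasses the normal block
 * emission path, the CFG edge is added by hand. */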
static void
bi_emit_jump(bi_builder *b, nir_jump_instr *instr)
{
        bi_instr *branch = bi_jump(b, bi_zero());

        switch (instr->type) {
        case nir_jump_break:
                branch->branch_target = b->shader->break_block;
                break;
        case nir_jump_continue:
                branch->branch_target = b->shader->continue_block;
                break;
        default:
                unreachable("Unhandled jump type");
        }

        bi_block_add_successor(b->shader->current_block, branch->branch_target);
        b->shader->current_block->unconditional_jumps = true;
}

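/* Compute src0 of LD_VAR for a given barycentric intrinsic: r61 for
 * centroid/sample interpolation, the sample ID packed into the top half for
 * at_sample, and 8:8 fixed-point pixel offsets for at_offset (see the
 * conversion comment below). */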
static bi_index
bi_varying_src0_for_barycentric(bi_builder *b, nir_intrinsic_instr *intr)
{
        switch (intr->intrinsic) {
        case nir_intrinsic_load_barycentric_centroid:
        case nir_intrinsic_load_barycentric_sample:
                return bi_register(61);

        /* Need to put the sample ID in the top 16-bits */
        case nir_intrinsic_load_barycentric_at_sample:
                return bi_mkvec_v2i16(b, bi_half(bi_dontcare(), false),
                                bi_half(bi_src_index(&intr->src[0]), false));

126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
        /* Interpret as 8:8 signed fixed point positions in pixels along X and
         * Y axes respectively, relative to top-left of pixel. In NIR, (0, 0)
         * is the center of the pixel so we first fixup and then convert. For
         * fp16 input:
         *
         * f2i16(((x, y) + (0.5, 0.5)) * 2**8) =
         * f2i16((256 * (x, y)) + (128, 128)) =
         * V2F16_TO_V2S16(FMA.v2f16((x, y), #256, #128))
         *
         * For fp32 input, that lacks enough precision for MSAA 16x, but the
         * idea is the same. FIXME: still doesn't pass
         */
        case nir_intrinsic_load_barycentric_at_offset: {
                bi_index offset = bi_src_index(&intr->src[0]);
                bi_index f16 = bi_null();
                unsigned sz = nir_src_bit_size(intr->src[0]);

                if (sz == 16) {
                        f16 = bi_fma_v2f16(b, offset, bi_imm_f16(256.0),
                                        bi_imm_f16(128.0), BI_ROUND_NONE);
                } else {
                        assert(sz == 32);
                        bi_index f[2];
                        for (unsigned i = 0; i < 2; ++i) {
                                f[i] = bi_fadd_rscale_f32(b,
                                                bi_word(offset, i),
                                                bi_imm_f32(0.5), bi_imm_u32(8),
                                                BI_ROUND_NONE, BI_SPECIAL_NONE);
                        }

                        f16 = bi_v2f32_to_v2f16(b, f[0], f[1], BI_ROUND_NONE);
                }

                return bi_v2f16_to_v2s16(b, f16, BI_ROUND_RTZ);
        }

162
163
164
165
166
167
        case nir_intrinsic_load_barycentric_pixel:
        default:
                return bi_dontcare();
        }
}

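/* Map a NIR barycentric intrinsic onto the sample position mode encoded in
 * LD_VAR. Intrinsics carrying an explicit argument (at_sample, at_offset)
 * additionally pass that argument through src0, built above. */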
static enum bi_sample
bi_interp_for_intrinsic(nir_intrinsic_op op)
{
        switch (op) {
        case nir_intrinsic_load_barycentric_centroid:
                return BI_SAMPLE_CENTROID;
        case nir_intrinsic_load_barycentric_sample:
        case nir_intrinsic_load_barycentric_at_sample:
                return BI_SAMPLE_SAMPLE;

        case nir_intrinsic_load_barycentric_at_offset:
                return BI_SAMPLE_EXPLICIT;

        case nir_intrinsic_load_barycentric_pixel:
        default:
                return BI_SAMPLE_CENTER;
        }
}

/* auto, 64-bit omitted */
static enum bi_register_format
bi_reg_fmt_for_nir(nir_alu_type T)
{
        switch (T) {
        case nir_type_float16: return BI_REGISTER_FORMAT_F16;
        case nir_type_float32: return BI_REGISTER_FORMAT_F32;
        case nir_type_int16:   return BI_REGISTER_FORMAT_S16;
        case nir_type_uint16:  return BI_REGISTER_FORMAT_U16;
        case nir_type_int32:   return BI_REGISTER_FORMAT_S32;
        case nir_type_uint32:  return BI_REGISTER_FORMAT_U32;
        default: unreachable("Invalid type for register format");
        }
}

/* Checks if the _IMM variant of an intrinsic can be used, returning in imm the
 * immediate to be used (which applies even if _IMM can't be used) */

static bool
bi_is_intr_immediate(nir_intrinsic_instr *instr, unsigned *immediate, unsigned max)
{
        nir_src *offset = nir_get_io_offset_src(instr);

        if (!nir_src_is_const(*offset))
                return false;

        *immediate = nir_intrinsic_base(instr) + nir_src_as_uint(*offset);
        return (*immediate) < max;
}
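
/* Example: a load with .base = 4 and a constant offset source of 2 yields
 * immediate 6. Callers pass the limit of the _IMM encoding they target
 * (16 for LD_ATTR below, 20 for LD_VAR). */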

static void
bi_make_vec_to(bi_builder *b, bi_index final_dst,
                bi_index *src,
                unsigned *channel,
                unsigned count,
                unsigned bitsize);

/* Bifrost's load instructions lack a component offset despite operating in
 * terms of vec4 slots. Usually I/O vectorization avoids nonzero components,
 * but they may be unavoidable with separate shaders in use. To solve this, we
 * lower to a larger load and an explicit copy of the desired components. */

static void
bi_copy_component(bi_builder *b, nir_intrinsic_instr *instr, bi_index tmp)
{
        unsigned component = nir_intrinsic_component(instr);

        if (component == 0)
                return;

        bi_index srcs[] = { tmp, tmp, tmp, tmp };
        unsigned channels[] = { component, component + 1, component + 2 };

        bi_make_vec_to(b,
                        bi_dest_index(&instr->dest),
                        srcs, channels, instr->num_components,
                        nir_dest_bit_size(instr->dest));
}

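/* Emit a vertex attribute load via LD_ATTR, using the immediate variant when
 * the attribute index is a small constant. r61/r62 are assumed to hold the
 * vertex and instance IDs consumed by LD_ATTR, per the register convention
 * used throughout this file. */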
static void
bi_emit_load_attr(bi_builder *b, nir_intrinsic_instr *instr)
{
        nir_alu_type T = nir_intrinsic_dest_type(instr);
        enum bi_register_format regfmt = bi_reg_fmt_for_nir(T);
        nir_src *offset = nir_get_io_offset_src(instr);
        unsigned component = nir_intrinsic_component(instr);
        enum bi_vecsize vecsize = (instr->num_components + component - 1);
        unsigned imm_index = 0;
        unsigned base = nir_intrinsic_base(instr);
        bool constant = nir_src_is_const(*offset);
        bool immediate = bi_is_intr_immediate(instr, &imm_index, 16);
        bi_index dest = (component == 0) ? bi_dest_index(&instr->dest) : bi_temp(b->shader);

        if (immediate) {
                bi_ld_attr_imm_to(b, dest, bi_register(61), bi_register(62),
                                regfmt, vecsize, imm_index);
        } else {
                bi_index idx = bi_src_index(&instr->src[0]);

                if (constant)
                        idx = bi_imm_u32(imm_index);
                else if (base != 0)
                        idx = bi_iadd_u32(b, idx, bi_imm_u32(base), false);

                bi_ld_attr_to(b, dest, bi_register(61), bi_register(62),
                                idx, regfmt, vecsize);
        }

        bi_copy_component(b, instr, dest);
}

static void
bi_emit_load_vary(bi_builder *b, nir_intrinsic_instr *instr)
{
        enum bi_sample sample = BI_SAMPLE_CENTER;
        enum bi_update update = BI_UPDATE_STORE;
        enum bi_register_format regfmt = BI_REGISTER_FORMAT_AUTO;
        bool smooth = instr->intrinsic == nir_intrinsic_load_interpolated_input;
        bi_index src0 = bi_null();

        unsigned component = nir_intrinsic_component(instr);
        enum bi_vecsize vecsize = (instr->num_components + component - 1);
        bi_index dest = (component == 0) ? bi_dest_index(&instr->dest) : bi_temp(b->shader);

        unsigned sz = nir_dest_bit_size(instr->dest);

        if (smooth) {
                nir_intrinsic_instr *parent = nir_src_as_intrinsic(instr->src[0]);
                assert(parent);

                sample = bi_interp_for_intrinsic(parent->intrinsic);
                src0 = bi_varying_src0_for_barycentric(b, parent);

                assert(sz == 16 || sz == 32);
                regfmt = (sz == 16) ? BI_REGISTER_FORMAT_F16
                        : BI_REGISTER_FORMAT_F32;
        } else {
                assert(sz == 32);
                regfmt = BI_REGISTER_FORMAT_U32;
        }

        nir_src *offset = nir_get_io_offset_src(instr);
        unsigned imm_index = 0;
        bool immediate = bi_is_intr_immediate(instr, &imm_index, 20);

        if (immediate && smooth) {
                bi_ld_var_imm_to(b, dest, src0, regfmt, sample, update,
                                vecsize, imm_index);
        } else if (immediate && !smooth) {
                bi_ld_var_flat_imm_to(b, dest, BI_FUNCTION_NONE, regfmt,
                                vecsize, imm_index);
        } else {
                bi_index idx = bi_src_index(offset);
                unsigned base = nir_intrinsic_base(instr);

                if (base != 0)
                        idx = bi_iadd_u32(b, idx, bi_imm_u32(base), false);

                if (smooth) {
                        bi_ld_var_to(b, dest, src0, idx, regfmt, sample,
                                        update, vecsize);
                } else {
                        bi_ld_var_flat_to(b, dest, idx, BI_FUNCTION_NONE,
                                        regfmt, vecsize);
                }
        }

        bi_copy_component(b, instr, dest);
}

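/* Assemble 16-bit elements pairwise into 32-bit words of the destination.
 * When both halves of a pair come from the same source word, a single MOV
 * (halves already in order) or SWZ.v2i16 suffices instead of MKVEC.v2i16. */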
static void
bi_make_vec16_to(bi_builder *b, bi_index dst, bi_index *src,
                unsigned *channel, unsigned count)
{
        for (unsigned i = 0; i < count; i += 2) {
                bool next = (i + 1) < count;

                unsigned chan = channel ? channel[i] : 0;
                unsigned nextc = next && channel ? channel[i + 1] : 0;

                bi_index w0 = bi_word(src[i], chan >> 1);
                bi_index w1 = next ? bi_word(src[i + 1], nextc >> 1) : bi_zero();

                bi_index h0 = bi_half(w0, chan & 1);
                bi_index h1 = bi_half(w1, nextc & 1);

                bi_index to = bi_word(dst, i >> 1);

                if (bi_is_word_equiv(w0, w1) && (chan & 1) == 0 && ((nextc & 1) == 1))
                        bi_mov_i32_to(b, to, w0);
                else if (bi_is_word_equiv(w0, w1))
                        bi_swz_v2i16_to(b, to, bi_swz_16(w0, chan & 1, nextc & 1));
                else
                        bi_mkvec_v2i16_to(b, to, h0, h1);
        }
}

static void
bi_make_vec_to(bi_builder *b, bi_index final_dst,
                bi_index *src,
                unsigned *channel,
                unsigned count,
                unsigned bitsize)
{
        /* If we read our own output, we need a temporary move to allow for
         * swapping. TODO: Could do a bit better for pairwise swaps of 16-bit
         * vectors */
        bool reads_self = false;

        for (unsigned i = 0; i < count; ++i)
                reads_self |= bi_is_equiv(final_dst, src[i]);

        /* SSA can't read itself */
        assert(!reads_self || final_dst.reg);

        bi_index dst = reads_self ? bi_temp(b->shader) : final_dst;

        if (bitsize == 32) {
                for (unsigned i = 0; i < count; ++i) {
                        bi_mov_i32_to(b, bi_word(dst, i),
                                        bi_word(src[i], channel ? channel[i] : 0));
                }
        } else if (bitsize == 16) {
                bi_make_vec16_to(b, dst, src, channel, count);
        } else if (bitsize == 8 && count == 1) {
                bi_swz_v4i8_to(b, dst, bi_byte(
                                        bi_word(src[0], channel[0] >> 2),
                                        channel[0] & 3));
        } else {
                unreachable("8-bit mkvec not yet supported");
        }

        /* Emit an explicit copy if needed */
        if (!bi_is_equiv(dst, final_dst)) {
                unsigned shift = (bitsize == 8) ? 2 : (bitsize == 16) ? 1 : 0;
                unsigned vec = (1 << shift);

                for (unsigned i = 0; i < count; i += vec) {
                        bi_mov_i32_to(b, bi_word(final_dst, i >> shift),
                                        bi_word(dst, i >> shift));
                }
        }
}

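/* Sysvals are passed in a driver-internal UBO, one vec4 (16-byte) slot per
 * sysval, hence the uniform index is scaled by 16 before adding the byte
 * offset into the slot. */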
static bi_instr *
bi_load_sysval_to(bi_builder *b, bi_index dest, int sysval,
                unsigned nr_components, unsigned offset)
{
        unsigned sysval_ubo =
                MAX2(b->shader->inputs->sysval_ubo, b->shader->nir->info.num_ubos);
        unsigned uniform =
                pan_lookup_sysval(b->shader->sysval_to_id,
                                  b->shader->info.sysvals,
                                  sysval);
        unsigned idx = (uniform * 16) + offset;

        return bi_load_to(b, nr_components * 32, dest,
                        bi_imm_u32(idx),
                        bi_imm_u32(sysval_ubo), BI_SEG_UBO);
}

static void
bi_load_sysval_nir(bi_builder *b, nir_intrinsic_instr *intr,
                unsigned nr_components, unsigned offset)
{
        bi_load_sysval_to(b, bi_dest_index(&intr->dest),
                        panfrost_sysval_for_instr(&intr->instr, NULL),
                        nr_components, offset);
}

static bi_index
bi_load_sysval(bi_builder *b, int sysval,
                unsigned nr_components, unsigned offset)
{
        bi_index tmp = bi_temp(b->shader);
        bi_load_sysval_to(b, tmp, sysval, nr_components, offset);
        return tmp;
}

static void
bi_load_sample_id_to(bi_builder *b, bi_index dst)
{
        /* r61[16:23] contains the sampleID, mask it out. Upper bits
         * seem to read garbage (despite being architecturally defined
         * as zero), so use a 5-bit mask instead of 8-bits */

        bi_rshift_and_i32_to(b, dst, bi_register(61), bi_imm_u32(0x1f),
                                bi_imm_u8(16));
}

static bi_index
bi_load_sample_id(bi_builder *b)
{
        bi_index sample_id = bi_temp(b->shader);
        bi_load_sample_id_to(b, sample_id);
        return sample_id;
}

static bi_index
bi_pixel_indices(bi_builder *b, unsigned rt)
{
        /* We want to load the current pixel. */
        struct bifrost_pixel_indices pix = {
                .y = BIFROST_CURRENT_PIXEL,
                .rt = rt
        };

        uint32_t indices_u32 = 0;
        memcpy(&indices_u32, &pix, sizeof(indices_u32));
        bi_index indices = bi_imm_u32(indices_u32);

        /* Sample index above is left as zero. For multisampling, we need to
         * fill in the actual sample ID in the lower byte */

        if (b->shader->inputs->blend.nr_samples > 1)
                indices = bi_iadd_u32(b, indices, bi_load_sample_id(b), false);

        return indices;
}

static void
bi_emit_load_blend_input(bi_builder *b, nir_intrinsic_instr *instr)
{
        ASSERTED nir_io_semantics sem = nir_intrinsic_io_semantics(instr);

        /* Source color is passed through r0-r3, or r4-r7 for the second
         * source when dual-source blending.  TODO: Precolour instead */
        bi_index srcs[] = {
                bi_register(0), bi_register(1), bi_register(2), bi_register(3)
        };
        bi_index srcs2[] = {
                bi_register(4), bi_register(5), bi_register(6), bi_register(7)
        };

        bool second_source = (sem.location == VARYING_SLOT_VAR0);

        bi_make_vec_to(b, bi_dest_index(&instr->dest),
                       second_source ? srcs2 : srcs,
                       NULL, 4, 32);
}

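/* Emit the blend operation for a render target. There are three cases:
 * multisampled blend shaders store through ST_TILE with a conversion
 * descriptor from the compile inputs, single-sampled blend shaders use BLEND
 * with the descriptor inlined as immediates, and ordinary fragment shaders
 * use BLEND with the descriptor fetched from FAU RAM. */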
static void
bi_emit_blend_op(bi_builder *b, bi_index rgba, nir_alu_type T,
                 bi_index rgba2, nir_alu_type T2, unsigned rt)
{
        /* Reads 2 or 4 staging registers to cover the input */
        unsigned size = nir_alu_type_get_type_size(T);
        unsigned size_2 = nir_alu_type_get_type_size(T2);
        unsigned sr_count = (size <= 16) ? 2 : 4;
        unsigned sr_count_2 = (size_2 <= 16) ? 2 : 4;
        const struct panfrost_compile_inputs *inputs = b->shader->inputs;
        uint64_t blend_desc = inputs->blend.bifrost_blend_desc;

        if (inputs->is_blend && inputs->blend.nr_samples > 1) {
                /* Conversion descriptor comes from the compile inputs, pixel
                 * indices derived at run time based on sample ID */
                bi_st_tile(b, rgba, bi_pixel_indices(b, rt), bi_register(60),
                                bi_imm_u32(blend_desc >> 32), BI_VECSIZE_V4);
        } else if (b->shader->inputs->is_blend) {
                /* Blend descriptor comes from the compile inputs */
                /* Put the result in r0 */
                bi_blend_to(b, bi_register(0), rgba,
                                bi_register(60),
                                bi_imm_u32(blend_desc & 0xffffffff),
                                bi_imm_u32(blend_desc >> 32),
                                bi_null(), sr_count, 0);
        } else {
                /* Blend descriptor comes from the FAU RAM. By convention, the
                 * return address is stored in r48 and will be used by the
                 * blend shader to jump back to the fragment shader after */
                bi_blend_to(b, bi_register(48), rgba,
                                bi_register(60),
                                bi_fau(BIR_FAU_BLEND_0 + rt, false),
                                bi_fau(BIR_FAU_BLEND_0 + rt, true),
                                rgba2, sr_count, sr_count_2);
        }

        assert(rt < 8);
        b->shader->info.bifrost->blend[rt].type = T;

        if (T2)
                b->shader->info.bifrost->blend_src1_type = T2;
}

/* Blend shaders do not need to run ATEST since they are dependent on a
 * fragment shader that runs it. Blit shaders may not need to run ATEST, since
 * ATEST is not needed if early-z is forced, alpha-to-coverage is disabled, and
 * there are no writes to the coverage mask. The latter two are satisfied for
 * all blit shaders, so we just care about early-z, which blit shaders force
 * iff they do not write depth or stencil */

static bool
bi_skip_atest(bi_context *ctx, bool emit_zs)
{
        return (ctx->inputs->is_blit && !emit_zs) || ctx->inputs->is_blend;
}

static void
bi_emit_atest(bi_builder *b, bi_index alpha)
{
        bi_index coverage = bi_register(60);
        bi_instr *atest = bi_atest_to(b, coverage, coverage, alpha);
        b->shader->emitted_atest = true;

        /* Pseudo-source to encode in the tuple */
        atest->src[2] = bi_fau(BIR_FAU_ATEST_PARAM, false);
}

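/* Fragment writeout consists of up to three steps, in order: ATEST to update
 * the coverage mask (unless it may be skipped), ZS_EMIT for depth/stencil
 * writes, and BLEND for each colour render target. */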
static void
bi_emit_fragment_out(bi_builder *b, nir_intrinsic_instr *instr)
{
        bool combined = instr->intrinsic ==
                nir_intrinsic_store_combined_output_pan;

        unsigned writeout = combined ? nir_intrinsic_component(instr) :
                PAN_WRITEOUT_C;

        bool emit_blend = writeout & (PAN_WRITEOUT_C);
        bool emit_zs = writeout & (PAN_WRITEOUT_Z | PAN_WRITEOUT_S);

        unsigned loc = ~0;

        if (!combined) {
                const nir_variable *var =
                        nir_find_variable_with_driver_location(b->shader->nir,
                                        nir_var_shader_out, nir_intrinsic_base(instr));
                assert(var);

                loc = var->data.location;
        }

        bi_index src0 = bi_src_index(&instr->src[0]);

        /* By ISA convention, the coverage mask is stored in R60. The store
         * itself will be handled by a subsequent ATEST instruction */
        if (loc == FRAG_RESULT_SAMPLE_MASK) {
                bi_index orig = bi_register(60);
                bi_index msaa = bi_load_sysval(b, PAN_SYSVAL_MULTISAMPLED, 1, 0);
                bi_index new = bi_lshift_and_i32(b, orig, src0, bi_imm_u8(0));
                bi_mux_i32_to(b, orig, orig, new, msaa, BI_MUX_INT_ZERO);
                return;
        }

        /* Emit ATEST if we have to, note ATEST requires a floating-point alpha
         * value, but render target #0 might not be floating point. However the
         * alpha value is only used for alpha-to-coverage, a stage which is
         * skipped for pure integer framebuffers, so the issue is moot. */

        if (!b->shader->emitted_atest && !bi_skip_atest(b->shader, emit_zs)) {
                nir_alu_type T = nir_intrinsic_src_type(instr);

                bi_index rgba = bi_src_index(&instr->src[0]);
                bi_index alpha =
                        (T == nir_type_float16) ? bi_half(bi_word(rgba, 1), true) :
                        (T == nir_type_float32) ? bi_word(rgba, 3) :
                        bi_dontcare();

                /* Don't read out-of-bounds */
                if (nir_src_num_components(instr->src[0]) < 4)
                        alpha = bi_imm_f32(1.0);

                bi_emit_atest(b, alpha);
        }

        if (emit_zs) {
                bi_index z = { 0 }, s = { 0 };

                if (writeout & PAN_WRITEOUT_Z)
                        z = bi_src_index(&instr->src[2]);

                if (writeout & PAN_WRITEOUT_S)
                        s = bi_src_index(&instr->src[3]);

                bi_zs_emit_to(b, bi_register(60), z, s, bi_register(60),
                                writeout & PAN_WRITEOUT_S,
                                writeout & PAN_WRITEOUT_Z);
        }

        if (emit_blend) {
                unsigned rt = combined ? 0 : (loc - FRAG_RESULT_DATA0);
                bool dual = (writeout & PAN_WRITEOUT_2);
                bi_index color = bi_src_index(&instr->src[0]);
                bi_index color2 = dual ? bi_src_index(&instr->src[4]) : bi_null();
                nir_alu_type T2 = dual ? nir_intrinsic_dest_type(instr) : 0;

                /* Explicit copy since BLEND inputs are precoloured to R0-R3,
                 * TODO: maybe schedule around this or implement in RA as a
                 * spill */
                bool has_mrt = false;

                nir_foreach_shader_out_variable(var, b->shader->nir)
                        has_mrt |= (var->data.location > FRAG_RESULT_DATA0);

                if (has_mrt) {
                        bi_index srcs[4] = { color, color, color, color };
                        unsigned channels[4] = { 0, 1, 2, 3 };
                        color = bi_temp(b->shader);
                        bi_make_vec_to(b, color, srcs, channels,
                                       nir_src_num_components(instr->src[0]),
                                       nir_alu_type_get_type_size(nir_intrinsic_src_type(instr)));
                }

                bi_emit_blend_op(b, color, nir_intrinsic_src_type(instr),
                                    color2, T2, rt);
        }

        if (b->shader->inputs->is_blend) {
                /* Jump back to the fragment shader, return address is stored
                 * in r48 (see above).
                 */
                bi_jump(b, bi_register(48));
        }
}

/**
 * In a vertex shader, is the specified variable a position output? These kinds
 * of outputs are written from position shaders when IDVS is enabled. All other
 * outputs are written from the varying shader.
 */
static bool
bi_should_remove_store(nir_intrinsic_instr *intr, enum bi_idvs_mode idvs)
{
        nir_io_semantics sem = nir_intrinsic_io_semantics(intr);

        switch (sem.location) {
        case VARYING_SLOT_POS:
        case VARYING_SLOT_PSIZ:
                return idvs == BI_IDVS_VARYING;
        default:
                return idvs == BI_IDVS_POSITION;
        }
}

static void
bi_emit_store_vary(bi_builder *b, nir_intrinsic_instr *instr)
{
        /* In principle we can do better for 16-bit. At the moment we require
         * 32-bit to permit the use of .auto, in order to force .u32 for flat
         * varyings, to handle internal TGSI shaders that set flat in the VS
         * but smooth in the FS */

        ASSERTED nir_alu_type T = nir_intrinsic_src_type(instr);
        assert(nir_alu_type_get_type_size(T) == 32);
        enum bi_register_format regfmt = BI_REGISTER_FORMAT_AUTO;

        unsigned imm_index = 0;
        bool immediate = bi_is_intr_immediate(instr, &imm_index, 16);

        /* Skip stores to the wrong kind of variable in a specialized IDVS
         * shader. Backend dead code elimination will clean up the mess.
         */
        if (bi_should_remove_store(instr, b->shader->idvs))
                return;

        /* Only look at the total components needed. In effect, we fill in all
         * the intermediate "holes" in the write mask, since we can't mask off
         * stores. Since nir_lower_io_to_temporaries ensures each varying is
         * written at most once, anything that's masked out is undefined, so it
         * doesn't matter what we write there. So we may as well do the
         * simplest thing possible. */
        unsigned nr = util_last_bit(nir_intrinsic_write_mask(instr));
        assert(nr > 0 && nr <= nir_intrinsic_src_components(instr, 0));

        bi_index data = bi_src_index(&instr->src[0]);

        if (b->shader->arch <= 8 && b->shader->idvs == BI_IDVS_POSITION) {
                /* Bifrost position shaders have a fast path */
                assert(T == nir_type_float16 || T == nir_type_float32);
                unsigned regfmt = (T == nir_type_float16) ? 0 : 1;
                unsigned identity = (b->shader->arch == 6) ? 0x688 : 0;
                unsigned snap4 = 0x5E;
                uint32_t format = identity | (snap4 << 12) | (regfmt << 24);

                bi_st_cvt(b, data, bi_register(58), bi_register(59),
                          bi_imm_u32(format), regfmt, nr - 1);
        } else if (immediate) {
                bi_index address = bi_lea_attr_imm(b,
                                          bi_register(61), bi_register(62),
                                          regfmt, imm_index);

                bi_st_cvt(b, data, address, bi_word(address, 1),
                          bi_word(address, 2), regfmt, nr - 1);
        } else {
                bi_index idx =
                        bi_iadd_u32(b,
                                    bi_src_index(nir_get_io_offset_src(instr)),
                                    bi_imm_u32(nir_intrinsic_base(instr)),
                                    false);
                bi_index address = bi_lea_attr(b,
                                      bi_register(61), bi_register(62),
                                      idx, regfmt);

                bi_st_cvt(b, data, address, bi_word(address, 1),
                          bi_word(address, 2), regfmt, nr - 1);
        }
}

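/* UBO loads (and OpenCL kernel inputs, which reuse the UBO path with a zero
 * buffer index) lower to plain LOADs from the UBO segment, with constant
 * byte offsets folded into immediates. */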
static void
bi_emit_load_ubo(bi_builder *b, nir_intrinsic_instr *instr)
{
        nir_src *offset = nir_get_io_offset_src(instr);

        bool offset_is_const = nir_src_is_const(*offset);
        bi_index dyn_offset = bi_src_index(offset);
        uint32_t const_offset = offset_is_const ? nir_src_as_uint(*offset) : 0;
        bool kernel_input = (instr->intrinsic == nir_intrinsic_load_kernel_input);

        bi_load_to(b, instr->num_components * nir_dest_bit_size(instr->dest),
                        bi_dest_index(&instr->dest), offset_is_const ?
                        bi_imm_u32(const_offset) : dyn_offset,
                        kernel_input ? bi_zero() : bi_src_index(&instr->src[0]),
                        BI_SEG_UBO);
}

static bi_index
bi_addr_high(nir_src *src)
{
        return (nir_src_bit_size(*src) == 64) ?
                bi_word(bi_src_index(src), 1) : bi_zero();
}

static void
bi_emit_load(bi_builder *b, nir_intrinsic_instr *instr, enum bi_seg seg)
{
        bi_load_to(b, instr->num_components * nir_dest_bit_size(instr->dest),
                   bi_dest_index(&instr->dest),
                   bi_src_index(&instr->src[0]), bi_addr_high(&instr->src[0]),
                   seg);
}

static void
bi_emit_store(bi_builder *b, nir_intrinsic_instr *instr, enum bi_seg seg)
{
        /* Require contiguous masks, guaranteed by nir_lower_wrmasks */
        assert(nir_intrinsic_write_mask(instr) ==
                        BITFIELD_MASK(instr->num_components));

        bi_store(b, instr->num_components * nir_src_bit_size(instr->src[0]),
                    bi_src_index(&instr->src[0]),
                    bi_src_index(&instr->src[1]), bi_addr_high(&instr->src[1]),
                    seg);
}

/* Exchanges the staging register with memory */

static void
bi_emit_axchg_to(bi_builder *b, bi_index dst, bi_index addr, nir_src *arg, enum bi_seg seg)
{
        assert(seg == BI_SEG_NONE || seg == BI_SEG_WLS);

        unsigned sz = nir_src_bit_size(*arg);
        assert(sz == 32 || sz == 64);

        bi_index data = bi_src_index(arg);

        bi_index data_words[] = {
                bi_word(data, 0),
                bi_word(data, 1),
        };

        bi_index inout = bi_temp_reg(b->shader);
        bi_make_vec_to(b, inout, data_words, NULL, sz / 32, 32);

        bi_axchg_to(b, sz, inout, inout,
                        bi_word(addr, 0),
                        (seg == BI_SEG_NONE) ? bi_word(addr, 1) : bi_zero(),
                        seg);

        bi_index inout_words[] = {
                bi_word(inout, 0),
                bi_word(inout, 1),
        };

        bi_make_vec_to(b, dst, inout_words, NULL, sz / 32, 32);
}

/* Exchanges the second staging register with memory if comparison with first
 * staging register passes */

static void
bi_emit_acmpxchg_to(bi_builder *b, bi_index dst, bi_index addr, nir_src *arg_1, nir_src *arg_2, enum bi_seg seg)
{
        assert(seg == BI_SEG_NONE || seg == BI_SEG_WLS);

        /* hardware is swapped from NIR */
        bi_index src0 = bi_src_index(arg_2);
        bi_index src1 = bi_src_index(arg_1);

        unsigned sz = nir_src_bit_size(*arg_1);
        assert(sz == 32 || sz == 64);

        bi_index data_words[] = {
                bi_word(src0, 0),
                sz == 32 ? bi_word(src1, 0) : bi_word(src0, 1),

                /* 64-bit */
                bi_word(src1, 0),
                bi_word(src1, 1),
        };

        bi_index inout = bi_temp_reg(b->shader);
        bi_make_vec_to(b, inout, data_words, NULL, 2 * (sz / 32), 32);

        bi_acmpxchg_to(b, sz, inout, inout,
                        bi_word(addr, 0),
                        (seg == BI_SEG_NONE) ? bi_word(addr, 1) : bi_zero(),
                        seg);

        bi_index inout_words[] = {
                bi_word(inout, 0),
                bi_word(inout, 1),
        };

        bi_make_vec_to(b, dst, inout_words, NULL, sz / 32, 32);
}

/* Extracts an atomic opcode */

static enum bi_atom_opc
bi_atom_opc_for_nir(nir_intrinsic_op op)
{
        switch (op) {
        case nir_intrinsic_global_atomic_add:
        case nir_intrinsic_shared_atomic_add:
        case nir_intrinsic_image_atomic_add:
                return BI_ATOM_OPC_AADD;

        case nir_intrinsic_global_atomic_imin:
        case nir_intrinsic_shared_atomic_imin:
        case nir_intrinsic_image_atomic_imin:
                return BI_ATOM_OPC_ASMIN;

        case nir_intrinsic_global_atomic_umin:
        case nir_intrinsic_shared_atomic_umin:
        case nir_intrinsic_image_atomic_umin:
                return BI_ATOM_OPC_AUMIN;

        case nir_intrinsic_global_atomic_imax:
        case nir_intrinsic_shared_atomic_imax:
        case nir_intrinsic_image_atomic_imax:
                return BI_ATOM_OPC_ASMAX;

        case nir_intrinsic_global_atomic_umax:
        case nir_intrinsic_shared_atomic_umax:
        case nir_intrinsic_image_atomic_umax:
                return BI_ATOM_OPC_AUMAX;

        case nir_intrinsic_global_atomic_and:
        case nir_intrinsic_shared_atomic_and:
        case nir_intrinsic_image_atomic_and:
                return BI_ATOM_OPC_AAND;

        case nir_intrinsic_global_atomic_or:
        case nir_intrinsic_shared_atomic_or:
        case nir_intrinsic_image_atomic_or:
                return BI_ATOM_OPC_AOR;

        case nir_intrinsic_global_atomic_xor:
        case nir_intrinsic_shared_atomic_xor:
        case nir_intrinsic_image_atomic_xor:
                return BI_ATOM_OPC_AXOR;

        default:
                unreachable("Unexpected computational atomic");
        }
}

/* Optimized unary atomics are available with an implied #1 argument */

static bool
bi_promote_atom_c1(enum bi_atom_opc op, bi_index arg, enum bi_atom_opc *out)
{
        /* Check we have a compatible constant */
        if (arg.type != BI_INDEX_CONSTANT)
                return false;

        if (!(arg.value == 1 || (arg.value == -1 && op == BI_ATOM_OPC_AADD)))
                return false;

        /* Check for a compatible operation */
        switch (op) {
        case BI_ATOM_OPC_AADD:
                *out = (arg.value == 1) ? BI_ATOM_OPC_AINC : BI_ATOM_OPC_ADEC;
                return true;
        case BI_ATOM_OPC_ASMAX:
                *out = BI_ATOM_OPC_ASMAX1;
                return true;
        case BI_ATOM_OPC_AUMAX:
                *out = BI_ATOM_OPC_AUMAX1;
                return true;
        case BI_ATOM_OPC_AOR:
                *out = BI_ATOM_OPC_AOR1;
                return true;
        default:
                return false;
        }
}
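
/* So atom_add with constant #1 becomes AINC and with #-1 becomes ADEC, with
 * the data argument implied by the opcode rather than passed explicitly. */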

/* Coordinates are 16-bit integers in Bifrost but 32-bit in NIR */

static bi_index
bi_emit_image_coord(bi_builder *b, bi_index coord, unsigned src_idx,
                    unsigned coord_comps, bool is_array)
{
        assert(coord_comps > 0 && coord_comps <= 3);

        if (src_idx == 0) {
                if (coord_comps == 1 || (coord_comps == 2 && is_array))
                        return bi_word(coord, 0);
                else
                        return bi_mkvec_v2i16(b,
                                              bi_half(bi_word(coord, 0), false),
                                              bi_half(bi_word(coord, 1), false));
        } else {
                if (coord_comps == 3)
                        return bi_word(coord, 2);
                else if (coord_comps == 2 && is_array)
                        return bi_word(coord, 1);
                else
                        return bi_zero();
        }
}

static bi_index
bi_emit_image_index(bi_builder *b, nir_intrinsic_instr *instr)
{
        nir_src src = instr->src[0];
        bi_index index = bi_src_index(&src);
        bi_context *ctx = b->shader;

        /* Images come after vertex attributes, so handle an explicit offset */
        unsigned offset = (ctx->stage == MESA_SHADER_VERTEX) ?
                util_bitcount64(ctx->nir->info.inputs_read) : 0;

        if (offset == 0)
                return index;