Commit ade416d0 authored by Eric Anholt

broadcom: Add VC5 NIR compiler.

This is a pretty straightforward fork of VC4's NIR compiler to VC5.  The
condition codes, registers, and I/O have all changed, making the backend
hard to share, though their heritage is still recognizable.

v2: Move to src/broadcom/compiler to match intel's layout, rename more
    "vc5" to "v3d", rename QIR to VIR ("V3D IR") to avoid symbol conflicts
    with vc4, use new v3d_debug header, add compiler init/free functions,
    do texture swizzling in NIR to allow optimization.
parent f71364f2
@@ -26,6 +26,8 @@ AM_CPPFLAGS = \
 	-I$(top_srcdir)/src \
 	-I$(top_srcdir)/src/broadcom/ \
 	-I$(top_srcdir)/src/broadcom/include \
+	-I$(top_srcdir)/src/gallium/auxiliary \
+	-I$(top_srcdir)/src/gallium/include \
 	$(VALGRIND_CFLAGS) \
 	$(DEFINES)
@@ -16,6 +16,19 @@ BROADCOM_FILES = \
 	clif/clif_dump.c \
 	clif/clif_dump.h \
 	common/v3d_device_info.h \
+	compiler/nir_to_vir.c \
+	compiler/vir.c \
+	compiler/vir_dump.c \
+	compiler/vir_live_variables.c \
+	compiler/vir_lower_uniforms.c \
+	compiler/vir_opt_copy_propagate.c \
+	compiler/vir_opt_dead_code.c \
+	compiler/vir_register_allocate.c \
+	compiler/vir_to_qpu.c \
+	compiler/qpu_schedule.c \
+	compiler/qpu_validate.c \
+	compiler/v3d_compiler.h \
+	compiler/v3d_nir_lower_io.c \
 	qpu/qpu_disasm.c \
 	qpu/qpu_disasm.h \
 	qpu/qpu_instr.c \
@@ -13,6 +13,7 @@ check_PROGRAMS += \
 LDADD = \
 	libbroadcom.la \
 	$(top_builddir)/src/compiler/nir/libnir.la \
+	$(top_builddir)/src/util/libmesautil.la \
 	$(NULL)
This diff is collapsed.
This diff is collapsed.
/*
* Copyright © 2014 Broadcom
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/**
* @file
*
* Validates the QPU instruction sequence after register allocation and
* scheduling.
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include "v3d_compiler.h"
#include "qpu/qpu_disasm.h"
struct v3d_qpu_validate_state {
struct v3d_compile *c;
const struct v3d_qpu_instr *last;
int ip;
int last_sfu_write;
};
static void
fail_instr(struct v3d_qpu_validate_state *state, const char *msg)
{
struct v3d_compile *c = state->c;
fprintf(stderr, "v3d_qpu_validate at ip %d: %s:\n", state->ip, msg);
int dump_ip = 0;
vir_for_each_inst_inorder(inst, c) {
v3d_qpu_dump(c->devinfo, &inst->qpu);
if (dump_ip++ == state->ip)
fprintf(stderr, " *** ERROR ***");
fprintf(stderr, "\n");
}
fprintf(stderr, "\n");
abort();
}
static bool
qpu_magic_waddr_matches(const struct v3d_qpu_instr *inst,
bool (*predicate)(enum v3d_qpu_waddr waddr))
{
        if (inst->type != V3D_QPU_INSTR_TYPE_ALU)
                return false;
if (inst->alu.add.op != V3D_QPU_A_NOP &&
inst->alu.add.magic_write &&
predicate(inst->alu.add.waddr))
return true;
if (inst->alu.mul.op != V3D_QPU_M_NOP &&
inst->alu.mul.magic_write &&
predicate(inst->alu.mul.waddr))
return true;
return false;
}
static void
qpu_validate_inst(struct v3d_qpu_validate_state *state, struct qinst *qinst)
{
const struct v3d_qpu_instr *inst = &qinst->qpu;
if (inst->type != V3D_QPU_INSTR_TYPE_ALU)
return;
/* LDVARY writes r5 two instructions later and LDUNIF writes
* r5 one instruction later, which is illegal to have
* together.
*/
if (state->last && state->last->sig.ldvary && inst->sig.ldunif) {
fail_instr(state, "LDUNIF after a LDVARY");
}
int tmu_writes = 0;
int sfu_writes = 0;
int vpm_writes = 0;
int tlb_writes = 0;
int tsy_writes = 0;
if (inst->alu.add.op != V3D_QPU_A_NOP) {
if (inst->alu.add.magic_write) {
if (v3d_qpu_magic_waddr_is_tmu(inst->alu.add.waddr))
tmu_writes++;
if (v3d_qpu_magic_waddr_is_sfu(inst->alu.add.waddr))
sfu_writes++;
if (v3d_qpu_magic_waddr_is_vpm(inst->alu.add.waddr))
vpm_writes++;
if (v3d_qpu_magic_waddr_is_tlb(inst->alu.add.waddr))
tlb_writes++;
if (v3d_qpu_magic_waddr_is_tsy(inst->alu.add.waddr))
tsy_writes++;
}
}
if (inst->alu.mul.op != V3D_QPU_M_NOP) {
if (inst->alu.mul.magic_write) {
if (v3d_qpu_magic_waddr_is_tmu(inst->alu.mul.waddr))
tmu_writes++;
if (v3d_qpu_magic_waddr_is_sfu(inst->alu.mul.waddr))
sfu_writes++;
if (v3d_qpu_magic_waddr_is_vpm(inst->alu.mul.waddr))
vpm_writes++;
if (v3d_qpu_magic_waddr_is_tlb(inst->alu.mul.waddr))
tlb_writes++;
if (v3d_qpu_magic_waddr_is_tsy(inst->alu.mul.waddr))
tsy_writes++;
}
}
(void)qpu_magic_waddr_matches; /* XXX */
        /* SFU r4 results come back two instructions later.  No r4
         * reads/writes or other SFU lookups are allowed until the result
         * has landed.
         */
if (state->ip - state->last_sfu_write < 2) {
if (v3d_qpu_uses_mux(inst, V3D_QPU_MUX_R4))
fail_instr(state, "R4 read too soon after SFU");
if (v3d_qpu_writes_r4(inst))
fail_instr(state, "R4 write too soon after SFU");
if (sfu_writes)
fail_instr(state, "SFU write too soon after SFU");
}
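        /* Illustrative timing for the window above (hypothetical ips): an
         * SFU write at ip 10 sets last_sfu_write = 10, so the instruction
         * at ip 11 may neither touch r4 nor start another SFU lookup, and
         * ip 12 is the first instruction allowed to consume the r4 result.
         */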
/* XXX: The docs say VPM can happen with the others, but the simulator
* disagrees.
*/
if (tmu_writes +
sfu_writes +
vpm_writes +
tlb_writes +
tsy_writes +
inst->sig.ldtmu +
inst->sig.ldtlb +
inst->sig.ldvpm +
inst->sig.ldtlbu > 1) {
fail_instr(state,
"Only one of [TMU, SFU, TSY, TLB read, VPM] allowed");
}
if (sfu_writes)
state->last_sfu_write = state->ip;
}
static void
qpu_validate_block(struct v3d_qpu_validate_state *state, struct qblock *block)
{
vir_for_each_inst(qinst, block) {
qpu_validate_inst(state, qinst);
state->last = &qinst->qpu;
state->ip++;
}
}
/**
* Checks for the instruction restrictions from page 37 ("Summary of
* Instruction Restrictions").
*/
void
qpu_validate(struct v3d_compile *c)
{
/* We don't want to do validation in release builds, but we want to
* keep compiling the validation code to make sure it doesn't get
* broken.
*/
#ifndef DEBUG
return;
#endif
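        /* last_sfu_write starts well in the past so the two-instruction
         * SFU hazard window can't trigger before any SFU write has
         * actually been seen.
         */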
struct v3d_qpu_validate_state state = {
.c = c,
.last_sfu_write = -10,
.ip = 0,
};
vir_for_each_block(block, c) {
qpu_validate_block(&state, block);
}
}
/*
* Copyright © 2016 Broadcom
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
struct v3d_compiler *
v3d_compiler_init(void)
{
        struct v3d_compiler *compiler = rzalloc(NULL, struct v3d_compiler);

        return compiler;
}
void
v3d_add_qpu_inst(struct v3d_compiler *c, uint64_t inst)
{
if (c->qpu_inst_count >= c->qpu_inst_size) {
c->qpu_inst_size = MAX2(c->qpu_inst_size * 2, 16);
                c->qpu_insts = reralloc(c, c->qpu_insts, uint64_t,
                                        c->qpu_inst_size);
}
c->qpu_insts[c->qpu_inst_count++] = inst;
}
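/* The doubling growth in v3d_add_qpu_inst() gives amortized-constant
 * appends: emitting, say, 1000 instructions reallocates only seven times
 * (16, 32, ..., 1024 slots) rather than once per instruction.
 */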
This diff is collapsed.
/*
* Copyright © 2015 Broadcom
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "compiler/v3d_compiler.h"
#include "compiler/nir/nir_builder.h"
/**
 * Walks the NIR generated by TGSI-to-NIR or GLSL-to-NIR to lower its I/O
 * intrinsics into something amenable to the V3D architecture.
*
* Currently, it splits VS inputs and uniforms into scalars, drops any
* non-position outputs in coordinate shaders, and fixes up the addressing on
* indirect uniform loads. FS input and VS output scalarization is handled by
* nir_lower_io_to_scalar().
*/
static void
replace_intrinsic_with_vec(nir_builder *b, nir_intrinsic_instr *intr,
nir_ssa_def **comps)
{
/* Batch things back together into a vector. This will get split by
* the later ALU scalarization pass.
*/
nir_ssa_def *vec = nir_vec(b, comps, intr->num_components);
/* Replace the old intrinsic with a reference to our reconstructed
* vector.
*/
nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(vec));
nir_instr_remove(&intr->instr);
}
static void
v3d_nir_lower_output(struct v3d_compile *c, nir_builder *b,
nir_intrinsic_instr *intr)
{
nir_variable *output_var = NULL;
nir_foreach_variable(var, &c->s->outputs) {
if (var->data.driver_location == nir_intrinsic_base(intr)) {
output_var = var;
break;
}
}
assert(output_var);
if (c->vs_key) {
int slot = output_var->data.location;
bool used = false;
switch (slot) {
case VARYING_SLOT_PSIZ:
case VARYING_SLOT_POS:
used = true;
break;
default:
for (int i = 0; i < c->vs_key->num_fs_inputs; i++) {
if (v3d_slot_get_slot(c->vs_key->fs_inputs[i]) == slot) {
used = true;
break;
}
}
break;
}
if (!used)
nir_instr_remove(&intr->instr);
}
}
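/* Hypothetical example of the elimination above: in a coordinate shader
 * whose consuming fragment shader only reads VARYING_SLOT_POS, a
 * store_output to VARYING_SLOT_COL0 matches no fs_inputs entry and is
 * removed, while POS and PSIZ stores are always kept.
 */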
static void
v3d_nir_lower_uniform(struct v3d_compile *c, nir_builder *b,
nir_intrinsic_instr *intr)
{
b->cursor = nir_before_instr(&intr->instr);
/* Generate scalar loads equivalent to the original vector. */
nir_ssa_def *dests[4];
for (unsigned i = 0; i < intr->num_components; i++) {
nir_intrinsic_instr *intr_comp =
nir_intrinsic_instr_create(c->s, intr->intrinsic);
intr_comp->num_components = 1;
nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, 32, NULL);
/* Convert the uniform offset to bytes. If it happens
* to be a constant, constant-folding will clean up
* the shift for us.
*/
nir_intrinsic_set_base(intr_comp,
nir_intrinsic_base(intr) * 16 +
i * 4);
intr_comp->src[0] =
nir_src_for_ssa(nir_ishl(b, intr->src[0].ssa,
nir_imm_int(b, 4)));
dests[i] = &intr_comp->dest.ssa;
nir_builder_instr_insert(b, &intr_comp->instr);
}
replace_intrinsic_with_vec(b, intr, dests);
}
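/* Worked example of the lowering above (hypothetical values): a
 * 4-component load_uniform with base 2 and indirect offset ssa_0 becomes
 * four 1-component loads with byte bases 32, 36, 40, and 44, each adding
 * (ssa_0 << 4) bytes of indirect offset, and an nir_vec() repacks the
 * four scalar results.
 */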
static void
v3d_nir_lower_io_instr(struct v3d_compile *c, nir_builder *b,
struct nir_instr *instr)
{
if (instr->type != nir_instr_type_intrinsic)
return;
nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
switch (intr->intrinsic) {
case nir_intrinsic_load_input:
break;
case nir_intrinsic_store_output:
v3d_nir_lower_output(c, b, intr);
break;
case nir_intrinsic_load_uniform:
v3d_nir_lower_uniform(c, b, intr);
break;
case nir_intrinsic_load_user_clip_plane:
default:
break;
}
}
static bool
v3d_nir_lower_io_impl(struct v3d_compile *c, nir_function_impl *impl)
{
nir_builder b;
nir_builder_init(&b, impl);
nir_foreach_block(block, impl) {
nir_foreach_instr_safe(instr, block)
v3d_nir_lower_io_instr(c, &b, instr);
}
nir_metadata_preserve(impl, nir_metadata_block_index |
nir_metadata_dominance);
return true;
}
void
v3d_nir_lower_io(nir_shader *s, struct v3d_compile *c)
{
nir_foreach_function(function, s) {
if (function->impl)
v3d_nir_lower_io_impl(c, function->impl);
}
}
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
/*
* Copyright © 2014 Broadcom
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
/**
 * @file vir_lower_uniforms.c
 *
 * This is the pre-code-generation pass for fixing up instructions that try
 * to read from more than one uniform value.
 */
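/* Hypothetical example: "add t0, unif[0], unif[1]" would read two uniform
 * stream values in one instruction, while only one uniform can be read per
 * instruction; the pass rewrites it as "mov t1, unif[1]" followed by
 * "add t0, unif[0], t1".
 */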
#include "v3d_compiler.h"
#include "util/hash_table.h"
#include "util/u_math.h"
static inline uint32_t
index_hash(const void *key)
{
return (uintptr_t)key;
}
static inline bool
index_compare(const void *a, const void *b)
{
return a == b;
}
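/* The table maps (uniform index + 1) to a use count, both stored directly
 * in the pointer-sized key/data fields.  The +1 keeps uniform index 0 from
 * becoming a NULL key, which the hash table can't represent.
 */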
static void
add_uniform(struct hash_table *ht, struct qreg reg)
{
struct hash_entry *entry;
void *key = (void *)(uintptr_t)(reg.index + 1);
entry = _mesa_hash_table_search(ht, key);
if (entry) {
entry->data++;
} else {
_mesa_hash_table_insert(ht, key, (void *)(uintptr_t)1);
}
}
static void
remove_uniform(struct hash_table *ht, struct qreg reg)
{
struct hash_entry *entry;
void *key = (void *)(uintptr_t)(reg.index + 1);
entry = _mesa_hash_table_search(ht, key);
assert(entry);
entry->data--;
if (entry->data == NULL)
_mesa_hash_table_remove(ht, entry);
}
static bool
is_lowerable_uniform(struct qinst *inst, int i)
{
if (inst->src[i].file != QFILE_UNIF)
return false;
if (vir_has_implicit_uniform(inst))
return i != vir_get_implicit_uniform_src(inst);
return true;
}
/* Returns the number of different uniform values referenced by the
* instruction.
*/
static uint32_t
vir_get_instruction_uniform_count(struct qinst *inst)
{
uint32_t count = 0;
for (int i = 0; i < vir_get_nsrc(inst); i++) {
if (inst->src[i].file != QFILE_UNIF)
continue;
bool is_duplicate = false;
for (int j = 0; j < i; j++) {
if (inst->src[j].file == QFILE_UNIF &&
inst->src[j].index == inst->src[i].index) {
is_duplicate = true;
break;
}
}
if (!is_duplicate)
count++;
}
return count;
}
void
vir_lower_uniforms(struct v3d_compile *c)
{
struct hash_table *ht =
_mesa_hash_table_create(c, index_hash, index_compare);
/* Walk the instruction list, finding which instructions have more
* than one uniform referenced, and add those uniform values to the
* ht.
*/
vir_for_each_inst_inorder(inst, c) {
uint32_t nsrc = vir_get_nsrc(inst);
if (vir_get_instruction_uniform_count(inst) <= 1)
continue;
for (int i = 0; i < nsrc; i++) {
if (is_lowerable_uniform(inst, i))
add_uniform(ht, inst->src[i]);
}
}
while (ht->entries) {
/* Find the most commonly used uniform in instructions that
* need a uniform lowered.
*/
uint32_t max_count = 0;
uint32_t max_index = 0;
struct hash_entry *entry;
hash_table_foreach(ht, entry) {
uint32_t count = (uintptr_t)entry->data;
uint32_t index = (uintptr_t)entry->key - 1;
if (count > max_count) {
max_count = count;
max_index = index;
}
}
struct qreg unif = vir_reg(QFILE_UNIF, max_index);
/* Now, find the instructions using this uniform and make them
* reference a temp instead.
*/
vir_for_each_block(block, c) {
struct qinst *mov = NULL;
vir_for_each_inst(inst, block) {
uint32_t nsrc = vir_get_nsrc(inst);
uint32_t count = vir_get_instruction_uniform_count(inst);
if (count <= 1)
continue;
                        /* If the block doesn't have a load of the
                         * uniform yet, add it.  We could potentially
                         * do better and CSE MOVs from multiple blocks
                         * into dominating blocks, except that may
                         * cause trouble for register allocation.
                         */
if (!mov) {
mov = vir_mul_inst(V3D_QPU_M_MOV,
vir_get_temp(c),
unif, c->undef);
list_add(&mov->link,
&block->instructions);
c->defs[mov->dst.index] = mov;
}
bool removed = false;
for (int i = 0; i < nsrc; i++) {
if (is_lowerable_uniform(inst, i) &&
inst->src[i].index == max_index) {
inst->src[i].file =
mov->dst.file;
inst->src[i].index =
mov->dst.index;
remove_uniform(ht, unif);
removed = true;
}
}
if (removed)
count--;
/* If the instruction doesn't need lowering any more,
* then drop it from the list.
*/
if (count <= 1) {
for (int i = 0; i < nsrc; i++) {
if (is_lowerable_uniform(inst, i))
remove_uniform(ht, inst->src[i]);
}
}
}
}
}
_mesa_hash_table_destroy(ht, NULL);
}
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.