Commit 232ae150 authored by Caio Marcelo de Oliveira Filho, committed by Marge Bot
Browse files

nir: Rename nir_is_per_vertex_io to nir_is_arrayed_io



VS outputs are "per vertex" but not the kind of I/O we want to match
with this helper.  Change to a name that covers the "arrayness"
required by the type.

Name inspired by the GLSL spec definition of arrayed I/O.
Reviewed-by: Timur Kristóf <timur.kristof@gmail.com>
Reviewed-by: Matt Turner <mattst88@gmail.com>
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Part-of: <!10493>
parent df5b1496
Pipeline #319151 passed with stages
in 33 minutes and 50 seconds
......@@ -4575,7 +4575,7 @@ nir_src *nir_get_io_offset_src(nir_intrinsic_instr *instr);
nir_src *nir_get_io_vertex_index_src(nir_intrinsic_instr *instr);
nir_src *nir_get_shader_call_payload_src(nir_intrinsic_instr *call);
bool nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage);
bool nir_is_arrayed_io(const nir_variable *var, gl_shader_stage stage);
bool nir_lower_regs_to_ssa_impl(nir_function_impl *impl);
bool nir_lower_regs_to_ssa(nir_shader *shader);
......
......@@ -43,7 +43,7 @@ get_deref_info(nir_shader *shader, nir_variable *var, nir_deref_instr *deref,
*cross_invocation = false;
*indirect = false;
const bool per_vertex = nir_is_per_vertex_io(var, shader->info.stage);
const bool per_vertex = nir_is_arrayed_io(var, shader->info.stage);
nir_deref_path path;
nir_deref_path_init(&path, deref, NULL);
......@@ -170,7 +170,7 @@ mark_whole_variable(nir_shader *shader, nir_variable *var,
{
const struct glsl_type *type = var->type;
if (nir_is_per_vertex_io(var, shader->info.stage)) {
if (nir_is_arrayed_io(var, shader->info.stage)) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
......@@ -181,7 +181,7 @@ mark_whole_variable(nir_shader *shader, nir_variable *var,
* on Intel), verify that "peeling" the type twice is correct. This
* assert ensures we remember it.
*/
assert(!nir_is_per_vertex_io(var, shader->info.stage));
assert(!nir_is_arrayed_io(var, shader->info.stage));
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
......@@ -239,7 +239,7 @@ try_mask_partial_io(nir_shader *shader, nir_variable *var,
nir_deref_instr *deref, bool is_output_read)
{
const struct glsl_type *type = var->type;
bool per_vertex = nir_is_per_vertex_io(var, shader->info.stage);
bool per_vertex = nir_is_arrayed_io(var, shader->info.stage);
if (per_vertex) {
assert(glsl_type_is_array(type));
......
......@@ -49,7 +49,7 @@ get_variable_io_mask(nir_variable *var, gl_shader_stage stage)
assert(var->data.location >= 0);
const struct glsl_type *type = var->type;
if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
if (nir_is_arrayed_io(var, stage) || var->data.per_view) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
......@@ -279,7 +279,7 @@ get_unmoveable_components_masks(nir_shader *shader,
var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {
const struct glsl_type *type = var->type;
if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
if (nir_is_arrayed_io(var, stage) || var->data.per_view) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
......@@ -380,7 +380,7 @@ remap_slots_and_components(nir_shader *shader, nir_variable_mode mode,
var->data.location - VARYING_SLOT_VAR0 < MAX_VARYINGS_INCL_PATCH) {
const struct glsl_type *type = var->type;
if (nir_is_per_vertex_io(var, stage) || var->data.per_view) {
if (nir_is_arrayed_io(var, stage) || var->data.per_view) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
......@@ -513,7 +513,7 @@ gather_varying_component_info(nir_shader *producer, nir_shader *consumer,
continue;
const struct glsl_type *type = var->type;
if (nir_is_per_vertex_io(var, producer->info.stage) || var->data.per_view) {
if (nir_is_arrayed_io(var, producer->info.stage) || var->data.per_view) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
......@@ -570,7 +570,7 @@ gather_varying_component_info(nir_shader *producer, nir_shader *consumer,
if (!vc_info->initialised) {
const struct glsl_type *type = in_var->type;
if (nir_is_per_vertex_io(in_var, consumer->info.stage) ||
if (nir_is_arrayed_io(in_var, consumer->info.stage) ||
in_var->data.per_view) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
......@@ -636,7 +636,7 @@ gather_varying_component_info(nir_shader *producer, nir_shader *consumer,
if (!vc_info->initialised) {
const struct glsl_type *type = out_var->type;
if (nir_is_per_vertex_io(out_var, producer->info.stage)) {
if (nir_is_arrayed_io(out_var, producer->info.stage)) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
......@@ -1187,7 +1187,7 @@ nir_assign_io_var_locations(nir_shader *shader, nir_variable_mode mode,
bool last_partial = false;
nir_foreach_variable_in_list(var, &io_vars) {
const struct glsl_type *type = var->type;
if (nir_is_per_vertex_io(var, stage)) {
if (nir_is_arrayed_io(var, stage)) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
......@@ -1332,7 +1332,7 @@ get_linked_variable_io_mask(nir_variable *variable, gl_shader_stage stage)
{
const struct glsl_type *type = variable->type;
if (nir_is_per_vertex_io(variable, stage)) {
if (nir_is_arrayed_io(variable, stage)) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
......
......@@ -48,7 +48,7 @@ get_unwrapped_array_length(nir_shader *nir, nir_variable *var)
* array length.
*/
const struct glsl_type *type = var->type;
if (nir_is_per_vertex_io(var, nir->info.stage))
if (nir_is_arrayed_io(var, nir->info.stage))
type = glsl_get_array_element(type);
assert(glsl_type_is_array(type));
......
......@@ -139,11 +139,15 @@ nir_assign_var_locations(nir_shader *shader, nir_variable_mode mode,
}
/**
* Return true if the given variable is a per-vertex input/output array.
* (such as geometry shader inputs).
* Some inputs and outputs are arrayed, meaning that there is an extra level
* of array indexing to handle mismatches between the shader interface and the
* dispatch pattern of the shader. For instance, geometry shaders are
* executed per-primitive while their inputs and outputs are specified
* per-vertex so all inputs and outputs have to be additionally indexed with
* the vertex index within the primitive.
*/
bool
nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage)
nir_is_arrayed_io(const nir_variable *var, gl_shader_stage stage)
{
if (var->data.patch || !glsl_type_is_array(var->type))
return false;
......@@ -164,7 +168,7 @@ static unsigned get_number_of_slots(struct lower_io_state *state,
{
const struct glsl_type *type = var->type;
if (nir_is_per_vertex_io(var, state->builder.shader->info.stage)) {
if (nir_is_arrayed_io(var, state->builder.shader->info.stage)) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
......@@ -612,7 +616,7 @@ nir_lower_io_block(nir_block *block,
b->cursor = nir_before_instr(instr);
const bool per_vertex = nir_is_per_vertex_io(var, b->shader->info.stage);
const bool per_vertex = nir_is_arrayed_io(var, b->shader->info.stage);
nir_ssa_def *offset;
nir_ssa_def *vertex_index = NULL;
......
......@@ -46,7 +46,7 @@ get_io_offset(nir_builder *b, nir_deref_instr *deref, nir_variable *var,
/* For per-vertex input arrays (i.e. geometry shader inputs), skip the
* outermost array index. Process the rest normally.
*/
if (nir_is_per_vertex_io(var, b->shader->info.stage)) {
if (nir_is_arrayed_io(var, b->shader->info.stage)) {
*vertex_index = nir_ssa_for_src(b, (*p)->arr.index, 1);
p++;
}
......@@ -89,7 +89,7 @@ get_array_elements(struct hash_table *ht, nir_variable *var,
struct hash_entry *entry = _mesa_hash_table_search(ht, var);
if (!entry) {
const struct glsl_type *type = var->type;
if (nir_is_per_vertex_io(var, stage)) {
if (nir_is_arrayed_io(var, stage)) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
......@@ -151,7 +151,7 @@ lower_array(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *var,
if (glsl_type_is_matrix(type))
type = glsl_get_column_type(type);
if (nir_is_per_vertex_io(var, b->shader->info.stage)) {
if (nir_is_arrayed_io(var, b->shader->info.stage)) {
type = glsl_array_type(type, glsl_get_length(element->type),
glsl_get_explicit_stride(element->type));
}
......@@ -164,7 +164,7 @@ lower_array(nir_builder *b, nir_intrinsic_instr *intr, nir_variable *var,
nir_deref_instr *element_deref = nir_build_deref_var(b, element);
if (nir_is_per_vertex_io(var, b->shader->info.stage)) {
if (nir_is_arrayed_io(var, b->shader->info.stage)) {
assert(vertex_index);
element_deref = nir_build_deref_array(b, element_deref, vertex_index);
}
......@@ -206,7 +206,7 @@ deref_has_indirect(nir_builder *b, nir_variable *var, nir_deref_path *path)
assert(path->path[0]->deref_type == nir_deref_type_var);
nir_deref_instr **p = &path->path[1];
if (nir_is_per_vertex_io(var, b->shader->info.stage)) {
if (nir_is_arrayed_io(var, b->shader->info.stage)) {
p++;
}
......@@ -317,7 +317,7 @@ lower_io_arrays_to_elements(nir_shader *shader, nir_variable_mode mask,
nir_variable_mode mode = var->data.mode;
const struct glsl_type *type = var->type;
if (nir_is_per_vertex_io(var, b.shader->info.stage)) {
if (nir_is_arrayed_io(var, b.shader->info.stage)) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
......
......@@ -51,7 +51,7 @@ static const struct glsl_type *
get_per_vertex_type(const nir_shader *shader, const nir_variable *var,
unsigned *num_vertices)
{
if (nir_is_per_vertex_io(var, shader->info.stage)) {
if (nir_is_arrayed_io(var, shader->info.stage)) {
assert(glsl_type_is_array(var->type));
if (num_vertices)
*num_vertices = glsl_get_length(var->type);
......@@ -90,8 +90,8 @@ variables_can_merge(const nir_shader *shader,
const struct glsl_type *a_type_tail = a->type;
const struct glsl_type *b_type_tail = b->type;
if (nir_is_per_vertex_io(a, shader->info.stage) !=
nir_is_per_vertex_io(b, shader->info.stage))
if (nir_is_arrayed_io(a, shader->info.stage) !=
nir_is_arrayed_io(b, shader->info.stage))
return false;
/* They must have the same array structure */
......@@ -353,7 +353,7 @@ build_array_deref_of_new_var_flat(nir_shader *shader,
{
nir_deref_instr *deref = nir_build_deref_var(b, new_var);
if (nir_is_per_vertex_io(new_var, shader->info.stage)) {
if (nir_is_arrayed_io(new_var, shader->info.stage)) {
assert(leader->deref_type == nir_deref_type_array);
nir_ssa_def *index = leader->arr.index.ssa;
leader = nir_deref_instr_parent(leader);
......
......@@ -1409,7 +1409,7 @@ validate_var_decl(nir_variable *var, nir_variable_mode valid_modes,
assert(glsl_type_is_array(var->type));
const struct glsl_type *type = glsl_get_array_element(var->type);
if (nir_is_per_vertex_io(var, state->shader->info.stage)) {
if (nir_is_arrayed_io(var, state->shader->info.stage)) {
assert(glsl_type_is_array(type));
assert(glsl_type_is_scalar(glsl_get_array_element(type)));
} else {
......
......@@ -1884,7 +1884,7 @@ vtn_create_variable(struct vtn_builder *b, struct vtn_value *val,
}
struct vtn_type *per_vertex_type = var->type;
if (nir_is_per_vertex_io(var->var, b->shader->info.stage))
if (nir_is_arrayed_io(var->var, b->shader->info.stage))
per_vertex_type = var->type->array_element;
/* Figure out the interface block type. */
......
......@@ -498,7 +498,7 @@ void nir_tgsi_scan_shader(const struct nir_shader *nir,
unsigned semantic_name, semantic_index;
const struct glsl_type *type = variable->type;
if (nir_is_per_vertex_io(variable, nir->info.stage)) {
if (nir_is_arrayed_io(variable, nir->info.stage)) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
......@@ -598,7 +598,7 @@ void nir_tgsi_scan_shader(const struct nir_shader *nir,
i = variable->data.driver_location;
const struct glsl_type *type = variable->type;
if (nir_is_per_vertex_io(variable, nir->info.stage)) {
if (nir_is_arrayed_io(variable, nir->info.stage)) {
assert(glsl_type_is_array(type));
type = glsl_get_array_element(type);
}
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment