Commit f11bef6f authored by Jesse Natalie, committed by Erik Faye-Lund

Revert "nir/vtn: Propagate SPIRV alignment to load/store deref intrinsics"

This reverts commit db131082.
parent fa7c3237
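
In short, this revert drops the _and_align variants of the deref load/store builders and removes the alignment/align_offset parameters that db131082 had threaded through the SPIR-V-to-NIR load/store paths; the access-only helpers are restored as the sole entry points. A minimal sketch of the restored call shape (the caller and its names are hypothetical; only the helper signatures and the ~0 writemask idiom come from the diff below):

/* Hypothetical caller, for illustration only: after the revert, deref
 * loads and stores carry an access qualifier but no explicit alignment. */
static nir_ssa_def *
copy_one_value(nir_builder *b, nir_deref_instr *dst, nir_deref_instr *src)
{
   nir_ssa_def *val = nir_load_deref_with_access(b, src, ACCESS_NON_WRITEABLE);
   nir_store_deref_with_access(b, dst, val, ~0, ACCESS_NON_READABLE);
   return val;
}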
@@ -1178,9 +1178,8 @@ nir_load_reg(nir_builder *build, nir_register *reg)
 }
 
 static inline nir_ssa_def *
-nir_load_deref_with_access_and_align(nir_builder *build, nir_deref_instr *deref,
-                                     enum gl_access_qualifier access, unsigned alignment,
-                                     unsigned align_offset)
+nir_load_deref_with_access(nir_builder *build, nir_deref_instr *deref,
+                           enum gl_access_qualifier access)
 {
    nir_intrinsic_instr *load =
       nir_intrinsic_instr_create(build->shader, nir_intrinsic_load_deref);
@@ -1189,18 +1188,10 @@ nir_load_deref_with_access_and_align(nir_builder *build, nir_deref_instr *deref,
    nir_ssa_dest_init(&load->instr, &load->dest, load->num_components,
                      glsl_get_bit_size(deref->type), NULL);
    nir_intrinsic_set_access(load, access);
-   nir_intrinsic_set_align(load, alignment, align_offset);
    nir_builder_instr_insert(build, &load->instr);
    return &load->dest.ssa;
 }
 
-static inline nir_ssa_def *
-nir_load_deref_with_access(nir_builder *build, nir_deref_instr *deref,
-                           enum gl_access_qualifier access)
-{
-   return nir_load_deref_with_access_and_align(build, deref, access, 0, 0);
-}
-
 static inline nir_ssa_def *
 nir_load_deref(nir_builder *build, nir_deref_instr *deref)
 {
@@ -1208,10 +1199,9 @@ nir_load_deref(nir_builder *build, nir_deref_instr *deref)
 }
 
 static inline void
-nir_store_deref_with_access_and_align(nir_builder *build, nir_deref_instr *deref,
-                                      nir_ssa_def *value, unsigned writemask,
-                                      enum gl_access_qualifier access, unsigned alignment,
-                                      unsigned align_offset)
+nir_store_deref_with_access(nir_builder *build, nir_deref_instr *deref,
+                            nir_ssa_def *value, unsigned writemask,
+                            enum gl_access_qualifier access)
 {
    nir_intrinsic_instr *store =
       nir_intrinsic_instr_create(build->shader, nir_intrinsic_store_deref);
@@ -1221,18 +1211,9 @@ nir_store_deref_with_access_and_align(nir_builder *build, nir_deref_instr *deref
    nir_intrinsic_set_write_mask(store,
                                 writemask & ((1 << store->num_components) - 1));
    nir_intrinsic_set_access(store, access);
-   nir_intrinsic_set_align(store, alignment, align_offset);
    nir_builder_instr_insert(build, &store->instr);
 }
 
-static inline void
-nir_store_deref_with_access(nir_builder *build, nir_deref_instr *deref,
-                            nir_ssa_def *value, unsigned writemask,
-                            enum gl_access_qualifier access)
-{
-   nir_store_deref_with_access_and_align(build, deref, value, writemask, access, 0, 0);
-}
-
 static inline void
 nir_store_deref(nir_builder *build, nir_deref_instr *deref,
                 nir_ssa_def *value, unsigned writemask)
@@ -191,7 +191,7 @@ vtn_handle_function_call(struct vtn_builder *b, SpvOp opcode,
    if (ret_type->base_type == vtn_base_type_void) {
       vtn_push_value(b, w[2], vtn_value_type_undef);
    } else {
-      vtn_push_ssa_value(b, w[2], vtn_local_load(b, ret_deref, 0, 0));
+      vtn_push_ssa_value(b, w[2], vtn_local_load(b, ret_deref, 0));
    }
 }
@@ -931,7 +931,7 @@ vtn_handle_phis_first_pass(struct vtn_builder *b, SpvOp opcode,
    _mesa_hash_table_insert(b->phi_table, w, phi_var);
 
    vtn_push_ssa_value(b, w[2],
-                      vtn_local_load(b, nir_build_deref_var(&b->nb, phi_var), 0, 0));
+                      vtn_local_load(b, nir_build_deref_var(&b->nb, phi_var), 0));
 
    return true;
 }
@@ -966,7 +966,7 @@ vtn_handle_phi_second_pass(struct vtn_builder *b, SpvOp opcode,
       struct vtn_ssa_value *src = vtn_ssa_value(b, w[i]);
-      vtn_local_store(b, src, nir_build_deref_var(&b->nb, phi_var), 0, 0);
+      vtn_local_store(b, src, nir_build_deref_var(&b->nb, phi_var), 0);
    }
 
    return true;
@@ -1082,7 +1082,7 @@ vtn_emit_ret_store(struct vtn_builder *b, struct vtn_block *block)
    nir_deref_instr *ret_deref =
       nir_build_deref_cast(&b->nb, nir_load_param(&b->nb, 0),
                            nir_var_function_temp, ret_type, 0);
-   vtn_local_store(b, src, ret_deref, 0, 0);
+   vtn_local_store(b, src, ret_deref, 0);
 }
 
 static void
@@ -663,7 +663,7 @@ _handle_v_load_store(struct vtn_builder *b, enum OpenCLstd_Entrypoints opcode,
       nir_deref_instr *arr_deref = nir_build_deref_ptr_as_array(&b->nb, deref, coffset);
       if (load) {
-         comps[i] = vtn_local_load(b, arr_deref, p->type->access, 0);
+         comps[i] = vtn_local_load(b, arr_deref, p->type->access);
          ncomps[i] = comps[i]->def;
         if (glsl_get_base_type(comps[i]->type) != glsl_get_base_type(dest_type)) {
             assert(comps[i]->type == glsl_float16_t_type() && glsl_get_base_type(dest_type) == GLSL_TYPE_FLOAT);
@@ -680,7 +680,7 @@ _handle_v_load_store(struct vtn_builder *b, enum OpenCLstd_Entrypoints opcode,
         }
         struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, glsl_scalar_type(glsl_get_base_type(dest_type)));
         ssa->def = nir_channel(&b->nb, def, i);
-        vtn_local_store(b, ssa, arr_deref, p->type->access, 0);
+        vtn_local_store(b, ssa, arr_deref, p->type->access);
      }
   }
   if (load) {
@@ -832,17 +832,17 @@ vtn_pointer_to_offset(struct vtn_builder *b, struct vtn_pointer *ptr,
 struct vtn_ssa_value *
 vtn_local_load(struct vtn_builder *b, nir_deref_instr *src,
-               enum gl_access_qualifier access, unsigned alignment);
+               enum gl_access_qualifier access);
 
 void vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
                      nir_deref_instr *dest,
-                     enum gl_access_qualifier access, unsigned alignment);
+                     enum gl_access_qualifier access);
 
 struct vtn_ssa_value *
-vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src, unsigned alignment);
+vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src);
 
 void vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
-                        struct vtn_pointer *dest, unsigned alignment);
+                        struct vtn_pointer *dest);
 
 void vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, unsigned count);
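With the prototypes above reverted, every vtn load/store entry point takes an access qualifier and nothing else; the callers in the hunks above simply pass 0 or p->type->access. A small sketch of the restored calling convention (the helper and its name are hypothetical; the signatures match the declarations above):

/* Hypothetical helper matching the restored prototypes: spill a value to
 * a variable deref and read it back, with no alignment to thread through. */
static struct vtn_ssa_value *
round_trip(struct vtn_builder *b, struct vtn_ssa_value *src, nir_variable *var)
{
   nir_deref_instr *deref = nir_build_deref_var(&b->nb, var);
   vtn_local_store(b, src, deref, 0);   /* access = 0 */
   return vtn_local_load(b, deref, 0);
}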
@@ -620,13 +620,13 @@ vtn_pointer_to_deref(struct vtn_builder *b, struct vtn_pointer *ptr)
 static void
 _vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_instr *deref,
                       struct vtn_ssa_value *inout,
-                      enum gl_access_qualifier access, unsigned alignment)
+                      enum gl_access_qualifier access)
 {
    if (glsl_type_is_vector_or_scalar(deref->type)) {
       if (load) {
-         inout->def = nir_load_deref_with_access_and_align(&b->nb, deref, access, alignment, 0);
+         inout->def = nir_load_deref_with_access(&b->nb, deref, access);
       } else {
-         nir_store_deref_with_access_and_align(&b->nb, deref, inout->def, ~0, access, alignment, 0);
+         nir_store_deref_with_access(&b->nb, deref, inout->def, ~0, access);
       }
    } else if (glsl_type_is_array(deref->type) ||
               glsl_type_is_matrix(deref->type)) {
@@ -634,14 +634,14 @@ _vtn_local_load_store(struct vtn_builder *b, bool load, nir_deref_instr *deref,
       for (unsigned i = 0; i < elems; i++) {
          nir_deref_instr *child =
             nir_build_deref_array_imm(&b->nb, deref, i);
-         _vtn_local_load_store(b, load, child, inout->elems[i], access, alignment);
+         _vtn_local_load_store(b, load, child, inout->elems[i], access);
       }
    } else {
       vtn_assert(glsl_type_is_struct_or_ifc(deref->type));
       unsigned elems = glsl_get_length(deref->type);
       for (unsigned i = 0; i < elems; i++) {
          nir_deref_instr *child = nir_build_deref_struct(&b->nb, deref, i);
-         _vtn_local_load_store(b, load, child, inout->elems[i], access, alignment);
+         _vtn_local_load_store(b, load, child, inout->elems[i], access);
       }
    }
 }
@@ -680,11 +680,11 @@ get_deref_tail(struct vtn_builder *b, nir_deref_instr *deref)
 struct vtn_ssa_value *
 vtn_local_load(struct vtn_builder *b, nir_deref_instr *src,
-               enum gl_access_qualifier access, unsigned alignment)
+               enum gl_access_qualifier access)
 {
    nir_deref_instr *src_tail = get_deref_tail(b, src);
    struct vtn_ssa_value *val = vtn_create_ssa_value(b, src_tail->type);
-   _vtn_local_load_store(b, true, src_tail, val, access, alignment);
+   _vtn_local_load_store(b, true, src_tail, val, access);
 
    if (src_tail != src) {
       val->type = src->type;
@@ -696,20 +696,19 @@ vtn_local_load(struct vtn_builder *b, nir_deref_instr *src,
 void
 vtn_local_store(struct vtn_builder *b, struct vtn_ssa_value *src,
-                nir_deref_instr *dest, enum gl_access_qualifier access,
-                unsigned alignment)
+                nir_deref_instr *dest, enum gl_access_qualifier access)
 {
    nir_deref_instr *dest_tail = get_deref_tail(b, dest);
 
    if (dest_tail != dest) {
       struct vtn_ssa_value *val = vtn_create_ssa_value(b, dest_tail->type);
-      _vtn_local_load_store(b, true, dest_tail, val, access, alignment);
+      _vtn_local_load_store(b, true, dest_tail, val, access);
 
       val->def = nir_vector_insert(&b->nb, val->def, src->def,
                                    dest->arr.index.ssa);
-      _vtn_local_load_store(b, false, dest_tail, val, access, alignment);
+      _vtn_local_load_store(b, false, dest_tail, val, access);
    } else {
-      _vtn_local_load_store(b, false, dest_tail, src, access, alignment);
+      _vtn_local_load_store(b, false, dest_tail, src, access);
    }
 }
@@ -1042,7 +1041,6 @@ static void
 _vtn_variable_load_store(struct vtn_builder *b, bool load,
                          struct vtn_pointer *ptr,
                          enum gl_access_qualifier access,
-                         unsigned alignment,
                          struct vtn_ssa_value **inout)
 {
    if (ptr->mode == vtn_variable_mode_uniform) {
@@ -1092,17 +1090,17 @@ _vtn_variable_load_store(struct vtn_builder *b, bool load,
        * deref.
        */
       if (load) {
-         (*inout)->def = nir_load_deref_with_access_and_align(&b->nb, deref,
-                                                              ptr->type->access | access, alignment, 0);
+         (*inout)->def = nir_load_deref_with_access(&b->nb, deref,
+                                                    ptr->type->access | access);
       } else {
-         nir_store_deref_with_access_and_align(&b->nb, deref, (*inout)->def, ~0,
-                                               ptr->type->access | access, alignment, 0);
+         nir_store_deref_with_access(&b->nb, deref, (*inout)->def, ~0,
+                                     ptr->type->access | access);
       }
    } else {
       if (load) {
-         *inout = vtn_local_load(b, deref, ptr->type->access | access, alignment);
+         *inout = vtn_local_load(b, deref, ptr->type->access | access);
       } else {
-         vtn_local_store(b, *inout, deref, ptr->type->access | access, alignment);
+         vtn_local_store(b, *inout, deref, ptr->type->access | access);
       }
    }
    return;
@@ -1122,7 +1120,7 @@ _vtn_variable_load_store(struct vtn_builder *b, bool load,
    for (unsigned i = 0; i < elems; i++) {
       chain.link[0].id = i;
       struct vtn_pointer *elem = vtn_pointer_dereference(b, ptr, &chain);
-      _vtn_variable_load_store(b, load, elem, ptr->type->access | access, alignment,
+      _vtn_variable_load_store(b, load, elem, ptr->type->access | access,
                                &(*inout)->elems[i]);
    }
    return;
@@ -1134,27 +1132,27 @@ _vtn_variable_load_store(struct vtn_builder *b, bool load,
 }
 
 struct vtn_ssa_value *
-vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src, unsigned alignment)
+vtn_variable_load(struct vtn_builder *b, struct vtn_pointer *src)
 {
    if (vtn_pointer_uses_ssa_offset(b, src)) {
       return vtn_block_load(b, src);
    } else {
       struct vtn_ssa_value *val = vtn_create_ssa_value(b, src->type->type);
-      _vtn_variable_load_store(b, true, src, src->access, alignment, &val);
+      _vtn_variable_load_store(b, true, src, src->access, &val);
       return val;
    }
 }
 
 void
 vtn_variable_store(struct vtn_builder *b, struct vtn_ssa_value *src,
-                   struct vtn_pointer *dest, unsigned alignment)
+                   struct vtn_pointer *dest)
 {
    if (vtn_pointer_uses_ssa_offset(b, dest)) {
       vtn_assert(dest->mode == vtn_variable_mode_ssbo ||
                  dest->mode == vtn_variable_mode_workgroup);
       vtn_block_store(b, src, dest);
    } else {
-      _vtn_variable_load_store(b, false, dest, dest->access, alignment, &src);
+      _vtn_variable_load_store(b, false, dest, dest->access, &src);
    }
 }
@@ -1183,7 +1181,7 @@ _vtn_variable_copy(struct vtn_builder *b, struct vtn_pointer *dest,
        * ensure that matrices get loaded in the optimal way even if they
        * are stored row-major in a UBO.
        */
-      vtn_variable_store(b, vtn_variable_load(b, src, 0), dest, 0);
+      vtn_variable_store(b, vtn_variable_load(b, src), dest);
       return;
 
    case GLSL_TYPE_INTERFACE:
@@ -2543,13 +2541,11 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
       vtn_assert_types_equal(b, opcode, res_type, src_val->type->deref);
 
-      unsigned alignment = 0;
       if (count > 4) {
          unsigned idx = 5;
          SpvMemoryAccessMask access = w[4];
         if (access & SpvMemoryAccessAlignedMask)
-            alignment = w[idx++];
+            idx++;
 
         if (access & SpvMemoryAccessMakePointerVisibleMask) {
            SpvMemorySemanticsMask semantics =
@@ -2561,7 +2557,7 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
         }
      }
 
-      vtn_push_ssa_value(b, w[2], vtn_variable_load(b, src, alignment));
+      vtn_push_ssa_value(b, w[2], vtn_variable_load(b, src));
      break;
   }
@@ -2589,32 +2585,29 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
         struct vtn_ssa_value *bool_ssa =
            vtn_create_ssa_value(b, dest->type->type);
         bool_ssa->def = nir_i2b(&b->nb, vtn_ssa_value(b, w[2])->def);
-         vtn_variable_store(b, bool_ssa, dest, 0);
+         vtn_variable_store(b, bool_ssa, dest);
         break;
      }
 
      vtn_assert_types_equal(b, opcode, dest_val->type->deref, src_val->type);
 
-      unsigned alignment = 0;
-      SpvMemoryAccessMask access = 0;
-      unsigned idx = 4;
+      struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
+      vtn_variable_store(b, src, dest);
 
      if (count > 3) {
-         access = w[3];
+         unsigned idx = 4;
+         SpvMemoryAccessMask access = w[3];
         if (access & SpvMemoryAccessAlignedMask)
-            alignment = w[idx++];
-      }
-
-      struct vtn_ssa_value *src = vtn_ssa_value(b, w[2]);
-      vtn_variable_store(b, src, dest, alignment);
-
-      if (access & SpvMemoryAccessMakePointerAvailableMask) {
-         SpvMemorySemanticsMask semantics =
-            SpvMemorySemanticsMakeAvailableMask |
-            vtn_storage_class_to_memory_semantics(dest->ptr_type->storage_class);
-         SpvScope scope = vtn_constant_uint(b, w[idx]);
-         vtn_emit_memory_barrier(b, scope, semantics);
+            idx++;
+
+         if (access & SpvMemoryAccessMakePointerAvailableMask) {
+            SpvMemorySemanticsMask semantics =
+               SpvMemorySemanticsMakeAvailableMask |
+               vtn_storage_class_to_memory_semantics(dest->ptr_type->storage_class);
+            SpvScope scope = vtn_constant_uint(b, w[idx]);
+            vtn_emit_memory_barrier(b, scope, semantics);
+         }
      }
      break;
   }
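One detail worth noting in the vtn_handle_variables hunks: the revert stops capturing the Aligned literal, but the operand cursor still steps over it (the restored idx++), otherwise the scope <id> consumed by MakePointerAvailable/Visible would be read from the wrong word. A standalone sketch of that operand walk for OpStore (the function is hypothetical; the mask names are real SPIR-V and the word layout follows the diff):

/* Hypothetical sketch of the OpStore memory-operand walk restored above:
 * w[3] holds the memory-access mask, w[4] the optional Aligned literal,
 * and the following word the optional availability scope <id>. */
static unsigned
store_scope_operand_index(const uint32_t *w, unsigned count)
{
   unsigned idx = 4;
   SpvMemoryAccessMask access = count > 3 ? w[3] : 0;
   if (access & SpvMemoryAccessAlignedMask)
      idx++;      /* skip the alignment literal instead of recording it */
   return idx;    /* w[idx] is the scope <id> when MakePointerAvailable is set */
}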