Commit 03912fb6 authored by Jason Ekstrand

spirv: Delete the legacy offset/index UBO/SSBO lowering

parent 3fc3ee4d
Pipeline #203991 waiting for manual action
......@@ -56,9 +56,6 @@ enum nir_spirv_execution_environment {
struct spirv_to_nir_options {
enum nir_spirv_execution_environment environment;
/* Whether or not to lower all UBO/SSBO access to offsets up-front. */
bool lower_ubo_ssbo_access_to_offsets;
/* Whether to make FragCoord to a system value, the same as
* GLSLFragCoordIsSysVal in GLSL.
*/
......
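With lower_ubo_ssbo_access_to_offsets gone, consumers that still want index/offset addressing for UBO/SSBO access are expected to get it from NIR's common lowering rather than from spirv_to_nir itself. A minimal sketch of that driver-side call, assuming the usual Mesa setup (the wrapper function is illustrative and not code from this commit; nir_lower_explicit_io and nir_address_format_32bit_index_offset are the real NIR entry points):

#include "nir.h"

/* Sketch: after spirv_to_nir has produced deref-based UBO/SSBO access,
 * lower it to explicit (block index, byte offset) form with the common
 * pass instead of the removed up-front lowering. */
static void
lower_buffer_derefs_to_index_offset(nir_shader *nir)
{
   NIR_PASS_V(nir, nir_lower_explicit_io,
              nir_var_mem_ubo | nir_var_mem_ssbo,
              nir_address_format_32bit_index_offset);
}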
......@@ -3315,34 +3315,6 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
vtn_emit_memory_barrier(b, scope, after_semantics);
}
static nir_intrinsic_op
get_ssbo_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
switch (opcode) {
case SpvOpAtomicLoad: return nir_intrinsic_load_ssbo;
case SpvOpAtomicStore: return nir_intrinsic_store_ssbo;
#define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
OP(AtomicExchange, atomic_exchange)
OP(AtomicCompareExchange, atomic_comp_swap)
OP(AtomicCompareExchangeWeak, atomic_comp_swap)
OP(AtomicIIncrement, atomic_add)
OP(AtomicIDecrement, atomic_add)
OP(AtomicIAdd, atomic_add)
OP(AtomicISub, atomic_add)
OP(AtomicSMin, atomic_imin)
OP(AtomicUMin, atomic_umin)
OP(AtomicSMax, atomic_imax)
OP(AtomicUMax, atomic_umax)
OP(AtomicAnd, atomic_and)
OP(AtomicOr, atomic_or)
OP(AtomicXor, atomic_xor)
OP(AtomicFAddEXT, atomic_fadd)
#undef OP
default:
vtn_fail_with_opcode("Invalid SSBO atomic", opcode);
}
}
static nir_intrinsic_op
get_uniform_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
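The deleted get_ssbo_nir_atomic_op table is no longer needed because SSBO atomics now stay in deref form inside spirv_to_nir and only become ssbo_atomic_* intrinsics when nir_lower_explicit_io picks an address format. For orientation, a hedged sketch of the kind of deref-oriented mapping that remains (abbreviated and illustrative; the real function and its full case list live elsewhere in this file):

static nir_intrinsic_op
sketch_deref_atomic_op(struct vtn_builder *b, SpvOp opcode)
{
   /* Illustrative subset only: same shape as the deleted SSBO table,
    * but targeting the deref_atomic_* intrinsics that survive until
    * the deref is lowered to an explicit address. */
   switch (opcode) {
   case SpvOpAtomicIAdd:            return nir_intrinsic_deref_atomic_add;
   case SpvOpAtomicExchange:        return nir_intrinsic_deref_atomic_exchange;
   case SpvOpAtomicCompareExchange: return nir_intrinsic_deref_atomic_comp_swap;
   case SpvOpAtomicAnd:             return nir_intrinsic_deref_atomic_and;
   default:
      vtn_fail_with_opcode("Invalid atomic", opcode);
   }
}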
......@@ -3487,61 +3459,6 @@ vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode,
unreachable("Invalid SPIR-V atomic");
}
} else if (vtn_pointer_uses_ssa_offset(b, ptr)) {
nir_ssa_def *offset, *index;
offset = vtn_pointer_to_offset(b, ptr, &index);
assert(ptr->mode == vtn_variable_mode_ssbo);
nir_intrinsic_op op = get_ssbo_nir_atomic_op(b, opcode);
atomic = nir_intrinsic_instr_create(b->nb.shader, op);
nir_intrinsic_set_access(atomic, access | ACCESS_COHERENT);
int src = 0;
switch (opcode) {
case SpvOpAtomicLoad:
atomic->num_components = glsl_get_vector_elements(ptr->type->type);
nir_intrinsic_set_align(atomic, 4, 0);
if (ptr->mode == vtn_variable_mode_ssbo)
atomic->src[src++] = nir_src_for_ssa(index);
atomic->src[src++] = nir_src_for_ssa(offset);
break;
case SpvOpAtomicStore:
atomic->num_components = glsl_get_vector_elements(ptr->type->type);
nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
nir_intrinsic_set_align(atomic, 4, 0);
atomic->src[src++] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[4]));
if (ptr->mode == vtn_variable_mode_ssbo)
atomic->src[src++] = nir_src_for_ssa(index);
atomic->src[src++] = nir_src_for_ssa(offset);
break;
case SpvOpAtomicExchange:
case SpvOpAtomicCompareExchange:
case SpvOpAtomicCompareExchangeWeak:
case SpvOpAtomicIIncrement:
case SpvOpAtomicIDecrement:
case SpvOpAtomicIAdd:
case SpvOpAtomicISub:
case SpvOpAtomicSMin:
case SpvOpAtomicUMin:
case SpvOpAtomicSMax:
case SpvOpAtomicUMax:
case SpvOpAtomicAnd:
case SpvOpAtomicOr:
case SpvOpAtomicXor:
case SpvOpAtomicFAddEXT:
if (ptr->mode == vtn_variable_mode_ssbo)
atomic->src[src++] = nir_src_for_ssa(index);
atomic->src[src++] = nir_src_for_ssa(offset);
fill_common_atomic_sources(b, opcode, w, &atomic->src[src]);
break;
default:
vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
}
} else {
nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
const struct glsl_type *deref_type = deref->type;
......
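What replaces the deleted offset/index plumbing in vtn_handle_atomics is the deref path already visible in the surrounding context lines: the pointer becomes a deref, the deref itself is the first intrinsic source, and no index/offset bookkeeping happens in spirv_to_nir at all. A simplified sketch of that shape (assumed, not the verbatim remaining code; sketch_deref_atomic_op refers to the illustrative mapping above):

/* Sketch of the surviving deref-based emission for a read-modify-write
 * atomic; load and store opcodes need their own source layout, as before. */
nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
nir_intrinsic_op op = sketch_deref_atomic_op(b, opcode);
atomic = nir_intrinsic_instr_create(b->nb.shader, op);
atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);
fill_common_atomic_sources(b, opcode, w, &atomic->src[1]);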
......@@ -523,16 +523,6 @@ struct vtn_pointer {
enum gl_access_qualifier access;
};
bool vtn_mode_uses_ssa_offset(struct vtn_builder *b,
enum vtn_variable_mode mode);
static inline bool vtn_pointer_uses_ssa_offset(struct vtn_builder *b,
struct vtn_pointer *ptr)
{
return vtn_mode_uses_ssa_offset(b, ptr->mode);
}
struct vtn_variable {
enum vtn_variable_mode mode;
......