Commit d41cdef2 authored by Ian Romanick

nir: Use the flrp lowering pass instead of nir_opt_algebraic

I tried to be very careful while updating all the various drivers, but I
don't have any of that hardware for testing. :(

i965 is the only platform that sets always_precise = true, and it is
only set true for fragment shaders.  Gen4 and Gen5 both set lower_flrp32
only for vertex shaders.  For fragment shaders, nir_op_flrp is lowered
during code generation as a(1-c)+bc.  On all other platforms 64-bit
nir_op_flrp and on Gen11 32-bit nir_op_flrp are lowered using the old
nir_opt_algebraic method.
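For context, nir_op_flrp(a, b, c) is linear interpolation. The two lowerings in play trade precision against instruction count; a minimal illustrative sketch in C (not the actual nir_lower_flrp code):

/* Imprecise form, matching the old nir_opt_algebraic rules: one fewer
 * multiply, but flrp(a, b, 1.0) may not yield exactly b because of
 * rounding in (b - a).
 */
static float
flrp_fast(float a, float b, float c)
{
   return a + c * (b - a);
}

/* The precise a(1-c) + bc form described above: exact at c == 0.0 and
 * c == 1.0, at the cost of an extra multiply.  My reading of the flag
 * name is that always_precise asks nir_lower_flrp to use this kind of
 * exact expansion even for instructions not marked precise.
 */
static float
flrp_precise(float a, float b, float c)
{
   return a * (1.0f - c) + b * c;
}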

No changes on any other Intel platforms.

v2: Add panfrost changes.

Iron Lake and GM45 had similar results. (Iron Lake shown)
total cycles in shared programs: 188647754 -> 188647748 (<.01%)
cycles in affected programs: 5096 -> 5090 (-0.12%)
helped: 3
HURT: 0
helped stats (abs) min: 2 max: 2 x̄: 2.00 x̃: 2
helped stats (rel) min: 0.12% max: 0.12% x̄: 0.12% x̃: 0.12%
Reviewed-by: Matt Turner <mattst88@gmail.com>
parent 158370ed
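Every optimization loop in the diff below gains the same pattern; condensed here into a single annotated sketch (surrounding passes elided, not verbatim from any one file):

/* Build a bit-size mask once, before the optimization loop: 16, 32,
 * and 64 are the flrp bit sizes nir_lower_flrp can lower.
 */
unsigned lower_flrp =
   (s->options->lower_flrp16 ? 16 : 0) |
   (s->options->lower_flrp32 ? 32 : 0) |
   (s->options->lower_flrp64 ? 64 : 0);

do {
   progress = false;

   /* ... the driver's usual optimization passes ... */

   if (lower_flrp != 0) {
      bool lower_flrp_progress = false;

      NIR_PASS(lower_flrp_progress, s, nir_lower_flrp,
               lower_flrp,
               false /* always_precise */,
               s->options->lower_ffma);
      if (lower_flrp_progress) {
         /* The lowering produces constant subexpressions; fold them
          * before the next loop iteration sees them.
          */
         NIR_PASS(progress, s, nir_opt_constant_folding);
         progress = true;
      }

      /* Nothing should rematerialize any flrps, so the lowering only
       * needs to run once; clearing the mask makes later iterations
       * skip it.
       */
      lower_flrp = 0;
   }
} while (progress);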
@@ -124,6 +124,10 @@ radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively,
                   bool allow_copies)
{
   bool progress;
   unsigned lower_flrp =
      (shader->options->lower_flrp16 ? 16 : 0) |
      (shader->options->lower_flrp32 ? 32 : 0) |
      (shader->options->lower_flrp64 ? 64 : 0);
   do {
      progress = false;
@@ -164,6 +168,27 @@ radv_optimize_nir(struct nir_shader *shader, bool optimize_conservatively,
      NIR_PASS(progress, shader, nir_opt_peephole_select, 8, true, true);
      NIR_PASS(progress, shader, nir_opt_algebraic);
      NIR_PASS(progress, shader, nir_opt_constant_folding);

      if (lower_flrp != 0) {
         bool lower_flrp_progress = false;

         NIR_PASS(lower_flrp_progress,
                  shader,
                  nir_lower_flrp,
                  lower_flrp,
                  false /* always_precise */,
                  shader->options->lower_ffma);
         if (lower_flrp_progress) {
            NIR_PASS(progress, shader,
                     nir_opt_constant_folding);
            progress = true;
         }

         /* Nothing should rematerialize any flrps, so we only
          * need to do this lowering once.
          */
         lower_flrp = 0;
      }

      NIR_PASS(progress, shader, nir_opt_undef);
      NIR_PASS(progress, shader, nir_opt_conditional_discard);
      if (shader->options->max_unroll_iterations) {
@@ -1301,6 +1301,10 @@ void
v3d_optimize_nir(struct nir_shader *s)
{
   bool progress;
   unsigned lower_flrp =
      (s->options->lower_flrp16 ? 16 : 0) |
      (s->options->lower_flrp32 ? 32 : 0) |
      (s->options->lower_flrp64 ? 64 : 0);
   do {
      progress = false;
@@ -1316,6 +1320,25 @@ v3d_optimize_nir(struct nir_shader *s)
      NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true);
      NIR_PASS(progress, s, nir_opt_algebraic);
      NIR_PASS(progress, s, nir_opt_constant_folding);

      if (lower_flrp != 0) {
         bool lower_flrp_progress = false;

         NIR_PASS(lower_flrp_progress, s, nir_lower_flrp,
                  lower_flrp,
                  false /* always_precise */,
                  s->options->lower_ffma);
         if (lower_flrp_progress) {
            NIR_PASS(progress, s, nir_opt_constant_folding);
            progress = true;
         }

         /* Nothing should rematerialize any flrps, so we only
          * need to do this lowering once.
          */
         lower_flrp = 0;
      }

      NIR_PASS(progress, s, nir_opt_undef);
   } while (progress);
@@ -143,9 +143,6 @@ optimizations = [
   (('~flrp', a, b, ('b2f', 'c@1')), ('bcsel', c, b, a), 'options->lower_flrp32'),
   (('~flrp', a, 0.0, c), ('fadd', ('fmul', ('fneg', a), c), a)),
   (('flrp@16', a, b, c), ('fadd', ('fmul', c, ('fsub', b, a)), a), 'options->lower_flrp16'),
   (('flrp@32', a, b, c), ('fadd', ('fmul', c, ('fsub', b, a)), a), 'options->lower_flrp32'),
   (('flrp@64', a, b, c), ('fadd', ('fmul', c, ('fsub', b, a)), a), 'options->lower_flrp64'),
   (('ftrunc', a), ('bcsel', ('flt', a, 0.0), ('fneg', ('ffloor', ('fabs', a))), ('ffloor', ('fabs', a))), 'options->lower_ftrunc'),
   (('ffloor', a), ('fsub', a, ('ffract', a)), 'options->lower_ffloor'),
   (('fadd', a, ('fneg', ('ffract', a))), ('ffloor', a), '!options->lower_ffloor'),
@@ -112,6 +112,11 @@ static void
ir3_optimize_loop(nir_shader *s)
{
   bool progress;
   unsigned lower_flrp =
      (s->options->lower_flrp16 ? 16 : 0) |
      (s->options->lower_flrp32 ? 32 : 0) |
      (s->options->lower_flrp64 ? 64 : 0);

   do {
      progress = false;
@@ -135,6 +140,22 @@ ir3_optimize_loop(nir_shader *s)
      progress |= OPT(s, nir_opt_intrinsics);
      progress |= OPT(s, nir_opt_algebraic);
      progress |= OPT(s, nir_opt_constant_folding);

      if (lower_flrp != 0) {
         if (OPT(s, nir_lower_flrp,
                 lower_flrp,
                 false /* always_precise */,
                 s->options->lower_ffma)) {
            OPT(s, nir_opt_constant_folding);
            progress = true;
         }

         /* Nothing should rematerialize any flrps, so we only
          * need to do this lowering once.
          */
         lower_flrp = 0;
      }

      progress |= OPT(s, nir_opt_dead_cf);
      if (OPT(s, nir_opt_trivial_continues)) {
         progress |= true;
@@ -885,6 +885,10 @@ static void
optimise_nir(nir_shader *nir)
{
   bool progress;
   unsigned lower_flrp =
      (nir->options->lower_flrp16 ? 16 : 0) |
      (nir->options->lower_flrp32 ? 32 : 0) |
      (nir->options->lower_flrp64 ? 64 : 0);

   NIR_PASS(progress, nir, nir_lower_regs_to_ssa);
   NIR_PASS(progress, nir, midgard_nir_lower_fdot2);
@@ -909,6 +913,27 @@ optimise_nir(nir_shader *nir)
      NIR_PASS(progress, nir, nir_opt_peephole_select, 64, false, true);
      NIR_PASS(progress, nir, nir_opt_algebraic);
      NIR_PASS(progress, nir, nir_opt_constant_folding);

      if (lower_flrp != 0) {
         bool lower_flrp_progress = false;

         NIR_PASS(lower_flrp_progress,
                  nir,
                  nir_lower_flrp,
                  lower_flrp,
                  false /* always_precise */,
                  nir->options->lower_ffma);
         if (lower_flrp_progress) {
            NIR_PASS(progress, nir,
                     nir_opt_constant_folding);
            progress = true;
         }

         /* Nothing should rematerialize any flrps, so we only
          * need to do this lowering once.
          */
         lower_flrp = 0;
      }

      NIR_PASS(progress, nir, nir_opt_undef);
      NIR_PASS(progress, nir, nir_opt_loop_unroll,
               nir_var_shader_in |
@@ -815,6 +815,11 @@ void
si_nir_opts(struct nir_shader *nir)
{
   bool progress;
   unsigned lower_flrp =
      (nir->options->lower_flrp16 ? 16 : 0) |
      (nir->options->lower_flrp32 ? 32 : 0) |
      (nir->options->lower_flrp64 ? 64 : 0);

   do {
      progress = false;
@@ -844,6 +849,25 @@ si_nir_opts(struct nir_shader *nir)
      NIR_PASS(progress, nir, nir_opt_algebraic);
      NIR_PASS(progress, nir, nir_opt_constant_folding);

      if (lower_flrp != 0) {
         bool lower_flrp_progress = false;

         NIR_PASS(lower_flrp_progress, nir, nir_lower_flrp,
                  lower_flrp,
                  false /* always_precise */,
                  nir->options->lower_ffma);
         if (lower_flrp_progress) {
            NIR_PASS(progress, nir,
                     nir_opt_constant_folding);
            progress = true;
         }

         /* Nothing should rematerialize any flrps, so we only
          * need to do this lowering once.
          */
         lower_flrp = 0;
      }

      NIR_PASS(progress, nir, nir_opt_undef);
      NIR_PASS(progress, nir, nir_opt_conditional_discard);
      if (nir->options->max_unroll_iterations) {
@@ -1527,6 +1527,10 @@ static void
vc4_optimize_nir(struct nir_shader *s)
{
   bool progress;
   unsigned lower_flrp =
      (s->options->lower_flrp16 ? 16 : 0) |
      (s->options->lower_flrp32 ? 32 : 0) |
      (s->options->lower_flrp64 ? 64 : 0);
   do {
      progress = false;
@@ -1542,6 +1546,24 @@ vc4_optimize_nir(struct nir_shader *s)
      NIR_PASS(progress, s, nir_opt_peephole_select, 8, true, true);
      NIR_PASS(progress, s, nir_opt_algebraic);
      NIR_PASS(progress, s, nir_opt_constant_folding);

      if (lower_flrp != 0) {
         bool lower_flrp_progress = false;

         NIR_PASS(lower_flrp_progress, s, nir_lower_flrp,
                  lower_flrp,
                  false /* always_precise */,
                  s->options->lower_ffma);
         if (lower_flrp_progress) {
            NIR_PASS(progress, s, nir_opt_constant_folding);
            progress = true;
         }

         /* Nothing should rematerialize any flrps, so we only
          * need to do this lowering once.
          */
         lower_flrp = 0;
      }

      NIR_PASS(progress, s, nir_opt_undef);
      NIR_PASS(progress, s, nir_opt_loop_unroll,
               nir_var_shader_in |
@@ -538,6 +538,11 @@ brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
      brw_nir_no_indirect_mask(compiler, nir->info.stage);
   bool progress;
   unsigned lower_flrp =
      (nir->options->lower_flrp16 ? 16 : 0) |
      (nir->options->lower_flrp32 ? 32 : 0) |
      (nir->options->lower_flrp64 ? 64 : 0);

   do {
      progress = false;
      OPT(nir_split_array_vars, nir_var_function_temp);
@@ -598,6 +603,24 @@ brw_nir_optimize(nir_shader *nir, const struct brw_compiler *compiler,
      OPT(nir_opt_idiv_const, 32);
      OPT(nir_opt_algebraic);
      OPT(nir_opt_constant_folding);

      if (lower_flrp != 0) {
         /* To match the old behavior, set always_precise only for scalar
          * shader stages.
          */
         if (OPT(nir_lower_flrp,
                 lower_flrp,
                 is_scalar /* always_precise */,
                 compiler->devinfo->gen >= 6)) {
            OPT(nir_opt_constant_folding);
         }

         /* Nothing should rematerialize any flrps, so we only need to do
          * this lowering once.
          */
         lower_flrp = 0;
      }

      OPT(nir_opt_dead_cf);
      if (OPT(nir_opt_trivial_continues)) {
         /* If nir_opt_trivial_continues makes progress, then we need to clean
@@ -304,6 +304,11 @@ void
st_nir_opts(nir_shader *nir, bool scalar)
{
   bool progress;
   unsigned lower_flrp =
      (nir->options->lower_flrp16 ? 16 : 0) |
      (nir->options->lower_flrp32 ? 32 : 0) |
      (nir->options->lower_flrp64 ? 64 : 0);

   do {
      progress = false;
@@ -332,6 +337,25 @@ st_nir_opts(nir_shader *nir, bool scalar)
      NIR_PASS(progress, nir, nir_opt_algebraic);
      NIR_PASS(progress, nir, nir_opt_constant_folding);

      if (lower_flrp != 0) {
         bool lower_flrp_progress = false;

         NIR_PASS(lower_flrp_progress, nir, nir_lower_flrp,
                  lower_flrp,
                  false /* always_precise */,
                  nir->options->lower_ffma);
         if (lower_flrp_progress) {
            NIR_PASS(progress, nir,
                     nir_opt_constant_folding);
            progress = true;
         }

         /* Nothing should rematerialize any flrps, so we only need to do
          * this lowering once.
          */
         lower_flrp = 0;
      }

      NIR_PASS(progress, nir, nir_opt_undef);
      NIR_PASS(progress, nir, nir_opt_conditional_discard);
      if (nir->options->max_unroll_iterations) {