Commit 65f2e335 authored by Marek Olšák

radeonsi: import r600_streamout from drivers/radeon

The streamout code moves into si_state_streamout.c: struct r600_so_target and
r600_streamout are renamed to si_streamout_target and si_streamout, the
streamout state moves from r600_common_context into si_context, and the
suspend/resume around a gfx flush is now handled in si_context_gfx_flush()
and si_begin_new_cs() instead of the common pre/postflush helpers.

Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
parent ed7f27de
......@@ -8,7 +8,6 @@ C_SOURCES := \
r600_pipe_common.h \
r600_query.c \
r600_query.h \
r600_streamout.c \
r600_test_dma.c \
r600_texture.c \
radeon_uvd.c \
......
......@@ -296,21 +296,10 @@ void si_preflush_suspend_features(struct r600_common_context *ctx)
/* suspend queries */
if (!LIST_IS_EMPTY(&ctx->active_queries))
si_suspend_queries(ctx);
ctx->streamout.suspended = false;
if (ctx->streamout.begin_emitted) {
si_emit_streamout_end(ctx);
ctx->streamout.suspended = true;
}
}
void si_postflush_resume_features(struct r600_common_context *ctx)
{
if (ctx->streamout.suspended) {
ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
si_streamout_buffers_dirty(ctx);
}
/* resume queries */
if (!LIST_IS_EMPTY(&ctx->active_queries))
si_resume_queries(ctx);
......@@ -647,7 +636,6 @@ bool si_common_context_init(struct r600_common_context *rctx,
rctx->b.set_device_reset_callback = r600_set_device_reset_callback;
si_init_context_texture_functions(rctx);
si_streamout_init(rctx);
si_init_query_functions(rctx);
si_init_msaa(&rctx->b);
......
......@@ -497,43 +497,6 @@ struct r600_atom {
unsigned short id;
};
struct r600_so_target {
struct pipe_stream_output_target b;
/* The buffer where BUFFER_FILLED_SIZE is stored. */
struct r600_resource *buf_filled_size;
unsigned buf_filled_size_offset;
bool buf_filled_size_valid;
unsigned stride_in_dw;
};
struct r600_streamout {
struct r600_atom begin_atom;
bool begin_emitted;
unsigned enabled_mask;
unsigned num_targets;
struct r600_so_target *targets[PIPE_MAX_SO_BUFFERS];
unsigned append_bitmask;
bool suspended;
/* External state which comes from the vertex shader,
* it must be set explicitly when binding a shader. */
uint16_t *stride_in_dw;
unsigned enabled_stream_buffers_mask; /* stream0 buffers0-3 in 4 LSB */
/* The state of VGT_STRMOUT_BUFFER_(CONFIG|EN). */
unsigned hw_enabled_mask;
/* The state of VGT_STRMOUT_(CONFIG|EN). */
struct r600_atom enable_atom;
bool streamout_enabled;
bool prims_gen_query_enabled;
int num_prims_gen_queries;
};
struct r600_ring {
struct radeon_winsys_cs *cs;
void (*flush)(void *ctx, unsigned flags,
......@@ -578,9 +541,6 @@ struct r600_common_context {
uint64_t vram;
uint64_t gtt;
/* States. */
struct r600_streamout streamout;
/* Additional context states. */
unsigned flags; /* flush flags */
......@@ -790,17 +750,6 @@ void si_init_query_functions(struct r600_common_context *rctx);
void si_suspend_queries(struct r600_common_context *ctx);
void si_resume_queries(struct r600_common_context *ctx);
/* r600_streamout.c */
void si_streamout_buffers_dirty(struct r600_common_context *rctx);
void si_common_set_streamout_targets(struct pipe_context *ctx,
unsigned num_targets,
struct pipe_stream_output_target **targets,
const unsigned *offset);
void si_emit_streamout_end(struct r600_common_context *rctx);
void si_update_prims_generated_query_state(struct r600_common_context *rctx,
unsigned type, int diff);
void si_streamout_init(struct r600_common_context *rctx);
/* r600_test_dma.c */
void si_test_dma(struct r600_common_screen *rscreen);
......@@ -900,12 +849,6 @@ r600_context_add_resource_size(struct pipe_context *ctx, struct pipe_resource *r
}
}
static inline bool r600_get_strmout_en(struct r600_common_context *rctx)
{
return rctx->streamout.streamout_enabled ||
rctx->streamout.prims_gen_query_enabled;
}
#define SQ_TEX_XY_FILTER_POINT 0x00
#define SQ_TEX_XY_FILTER_BILINEAR 0x01
#define SQ_TEX_XY_FILTER_ANISO_POINT 0x02
......
......@@ -29,6 +29,10 @@
#include "os/os_time.h"
#include "tgsi/tgsi_text.h"
/* TODO: remove this: */
void si_update_prims_generated_query_state(struct r600_common_context *rctx,
unsigned type, int diff);
#define R600_MAX_STREAMS 4
struct r600_hw_query_params {
......
......@@ -30,6 +30,7 @@ C_SOURCES := \
si_state_binning.c \
si_state_draw.c \
si_state_shaders.c \
si_state_streamout.c \
si_state_viewport.c \
si_state.h \
si_uvd.c
......@@ -58,8 +58,8 @@ static void si_blitter_begin(struct pipe_context *ctx, enum si_blitter_op op)
util_blitter_save_tessctrl_shader(sctx->blitter, sctx->tcs_shader.cso);
util_blitter_save_tesseval_shader(sctx->blitter, sctx->tes_shader.cso);
util_blitter_save_geometry_shader(sctx->blitter, sctx->gs_shader.cso);
util_blitter_save_so_targets(sctx->blitter, sctx->b.streamout.num_targets,
(struct pipe_stream_output_target**)sctx->b.streamout.targets);
util_blitter_save_so_targets(sctx->blitter, sctx->streamout.num_targets,
(struct pipe_stream_output_target**)sctx->streamout.targets);
util_blitter_save_rasterizer(sctx->blitter, sctx->queued.named.rasterizer);
if (op & SI_SAVE_FRAGMENT_STATE) {
......
......@@ -1373,11 +1373,11 @@ static void si_set_streamout_targets(struct pipe_context *ctx,
struct si_context *sctx = (struct si_context *)ctx;
struct si_buffer_resources *buffers = &sctx->rw_buffers;
struct si_descriptors *descs = &sctx->descriptors[SI_DESCS_RW_BUFFERS];
unsigned old_num_targets = sctx->b.streamout.num_targets;
unsigned old_num_targets = sctx->streamout.num_targets;
unsigned i, bufidx;
/* We are going to unbind the buffers. Mark which caches need to be flushed. */
if (sctx->b.streamout.num_targets && sctx->b.streamout.begin_emitted) {
if (sctx->streamout.num_targets && sctx->streamout.begin_emitted) {
/* Since streamout uses vector writes which go through TC L2
* and most other clients can use TC L2 as well, we don't need
* to flush it.
......@@ -1387,9 +1387,9 @@ static void si_set_streamout_targets(struct pipe_context *ctx,
* cases. Thus, flag the TC L2 dirtiness in the resource and
* handle it at draw call time.
*/
for (i = 0; i < sctx->b.streamout.num_targets; i++)
if (sctx->b.streamout.targets[i])
r600_resource(sctx->b.streamout.targets[i]->b.buffer)->TC_L2_dirty = true;
for (i = 0; i < sctx->streamout.num_targets; i++)
if (sctx->streamout.targets[i])
r600_resource(sctx->streamout.targets[i]->b.buffer)->TC_L2_dirty = true;
/* Invalidate the scalar cache in case a streamout buffer is
* going to be used as a constant buffer.
......@@ -1650,11 +1650,11 @@ static void si_rebind_buffer(struct pipe_context *ctx, struct pipe_resource *buf
true);
/* Update the streamout state. */
if (sctx->b.streamout.begin_emitted)
si_emit_streamout_end(&sctx->b);
sctx->b.streamout.append_bitmask =
sctx->b.streamout.enabled_mask;
si_streamout_buffers_dirty(&sctx->b);
if (sctx->streamout.begin_emitted)
si_emit_streamout_end(sctx);
sctx->streamout.append_bitmask =
sctx->streamout.enabled_mask;
si_streamout_buffers_dirty(sctx);
}
}
......
......@@ -100,6 +100,12 @@ void si_context_gfx_flush(void *context, unsigned flags,
si_preflush_suspend_features(&ctx->b);
ctx->streamout.suspended = false;
if (ctx->streamout.begin_emitted) {
si_emit_streamout_end(ctx);
ctx->streamout.suspended = true;
}
ctx->b.flags |= SI_CONTEXT_CS_PARTIAL_FLUSH |
SI_CONTEXT_PS_PARTIAL_FLUSH;
......@@ -243,7 +249,7 @@ void si_begin_new_cs(struct si_context *ctx)
si_mark_atom_dirty(ctx, &ctx->dpbb_state);
si_mark_atom_dirty(ctx, &ctx->stencil_ref.atom);
si_mark_atom_dirty(ctx, &ctx->spi_map);
si_mark_atom_dirty(ctx, &ctx->b.streamout.enable_atom);
si_mark_atom_dirty(ctx, &ctx->streamout.enable_atom);
si_mark_atom_dirty(ctx, &ctx->b.render_cond_atom);
si_all_descriptors_begin_new_cs(ctx);
si_all_resident_buffers_begin_new_cs(ctx);
......@@ -260,6 +266,11 @@ void si_begin_new_cs(struct si_context *ctx)
&ctx->scratch_buffer->b.b);
}
if (ctx->streamout.suspended) {
ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
si_streamout_buffers_dirty(ctx);
}
si_postflush_resume_features(&ctx->b);
assert(!ctx->b.gfx.cs->prev_dw);
......
......@@ -205,6 +205,7 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen,
si_init_compute_functions(sctx);
si_init_cp_dma_functions(sctx);
si_init_debug_functions(sctx);
si_init_streamout_functions(sctx);
if (sscreen->b.info.has_hw_decode) {
sctx->b.b.create_video_codec = si_uvd_create_decoder;
......
......@@ -255,6 +255,43 @@ struct si_sample_mask {
uint16_t sample_mask;
};
struct si_streamout_target {
struct pipe_stream_output_target b;
/* The buffer where BUFFER_FILLED_SIZE is stored. */
struct r600_resource *buf_filled_size;
unsigned buf_filled_size_offset;
bool buf_filled_size_valid;
unsigned stride_in_dw;
};
struct si_streamout {
struct r600_atom begin_atom;
bool begin_emitted;
unsigned enabled_mask;
unsigned num_targets;
struct si_streamout_target *targets[PIPE_MAX_SO_BUFFERS];
unsigned append_bitmask;
bool suspended;
/* External state which comes from the vertex shader,
* it must be set explicitly when binding a shader. */
uint16_t *stride_in_dw;
unsigned enabled_stream_buffers_mask; /* stream0 buffers0-3 in 4 LSB */
/* The state of VGT_STRMOUT_BUFFER_(CONFIG|EN). */
unsigned hw_enabled_mask;
/* The state of VGT_STRMOUT_(CONFIG|EN). */
struct r600_atom enable_atom;
bool streamout_enabled;
bool prims_gen_query_enabled;
int num_prims_gen_queries;
};
/* A shader state consists of the shader selector, which is a constant state
* object shared by multiple contexts and shouldn't be modified, and
* the current shader variant selected for this context.
......@@ -359,6 +396,7 @@ struct si_context {
struct si_stencil_ref stencil_ref;
struct r600_atom spi_map;
struct si_scissors scissors;
struct si_streamout streamout;
struct si_viewports viewports;
/* Precomputed states. */
......@@ -644,6 +682,12 @@ static inline struct si_shader* si_get_vs_state(struct si_context *sctx)
return vs->current ? vs->current : NULL;
}
static inline bool si_get_strmout_en(struct si_context *sctx)
{
return sctx->streamout.streamout_enabled ||
sctx->streamout.prims_gen_query_enabled;
}
static inline unsigned
si_optimal_tcc_alignment(struct si_context *sctx, unsigned upload_size)
{
......
......@@ -4407,8 +4407,8 @@ static void si_init_config(struct si_context *sctx);
void si_init_state_functions(struct si_context *sctx)
{
si_init_external_atom(sctx, &sctx->b.render_cond_atom, &sctx->atoms.s.render_cond);
si_init_external_atom(sctx, &sctx->b.streamout.begin_atom, &sctx->atoms.s.streamout_begin);
si_init_external_atom(sctx, &sctx->b.streamout.enable_atom, &sctx->atoms.s.streamout_enable);
si_init_external_atom(sctx, &sctx->streamout.begin_atom, &sctx->atoms.s.streamout_begin);
si_init_external_atom(sctx, &sctx->streamout.enable_atom, &sctx->atoms.s.streamout_enable);
si_init_external_atom(sctx, &sctx->scissors.atom, &sctx->atoms.s.scissors);
si_init_external_atom(sctx, &sctx->viewports.atom, &sctx->atoms.s.viewports);
......
......@@ -423,6 +423,17 @@ void si_draw_rectangle(struct blitter_context *blitter,
const union blitter_attrib *attrib);
void si_trace_emit(struct si_context *sctx);
/* si_state_streamout.c */
void si_streamout_buffers_dirty(struct si_context *sctx);
void si_common_set_streamout_targets(struct pipe_context *ctx,
unsigned num_targets,
struct pipe_stream_output_target **targets,
const unsigned *offset);
void si_emit_streamout_end(struct si_context *sctx);
void si_update_prims_generated_query_state(struct si_context *sctx,
unsigned type, int diff);
void si_init_streamout_functions(struct si_context *sctx);
static inline unsigned
si_tile_mode_index(struct r600_texture *rtex, unsigned level, bool stencil)
......
......@@ -652,8 +652,8 @@ static void si_emit_draw_packets(struct si_context *sctx,
uint64_t index_va = 0;
if (info->count_from_stream_output) {
struct r600_so_target *t =
(struct r600_so_target*)info->count_from_stream_output;
struct si_streamout_target *t =
(struct si_streamout_target*)info->count_from_stream_output;
uint64_t va = t->buf_filled_size->gpu_address +
t->buf_filled_size_offset;
......@@ -1486,7 +1486,7 @@ void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
if ((sctx->b.family == CHIP_HAWAII ||
sctx->b.family == CHIP_TONGA ||
sctx->b.family == CHIP_FIJI) &&
r600_get_strmout_en(&sctx->b)) {
si_get_strmout_en(sctx)) {
sctx->b.flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;
}
......
......@@ -2252,9 +2252,9 @@ static void si_update_streamout_state(struct si_context *sctx)
if (!shader_with_so)
return;
sctx->b.streamout.enabled_stream_buffers_mask =
sctx->streamout.enabled_stream_buffers_mask =
shader_with_so->enabled_streamout_buffer_mask;
sctx->b.streamout.stride_in_dw = shader_with_so->so.stride;
sctx->streamout.stride_in_dw = shader_with_so->so.stride;
}
static void si_update_clip_regs(struct si_context *sctx,
......