Commit 239bca3c authored by Maarten Lankhorst

lib/rendercopy: Implement support for 8/16 bpp

To handle drawing 16 bpp formats correctly with odd x/w, we need to
pass the correct bpp to rendercopy. Now that everything sets bpp in
igt_buf, fix the rendercopy support to use it and select the correct
surface format.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
[mlankhorst: Add assert(src->bpp == dst->bpp)]
parent 10c98307
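
The bpp field of igt_buf is what now drives format selection in the per-generation bind/emit helpers below, and src and dst must agree on it. A minimal caller-side sketch, assuming a buffer object and stride set up elsewhere; the names src_bo and width are illustrative only, not part of this commit:

	struct igt_buf src = {}, dst = {};

	src.bo = src_bo;              /* drm_intel_bo created by the caller */
	src.stride = width * 2;       /* 16 bpp -> 2 bytes per pixel */
	src.tiling = I915_TILING_NONE;
	src.bpp = 16;                 /* consumed by gen*_bind_buf() and the gen2/gen3 emit paths */
	/* ... fill dst the same way; mismatched depths now trip the new
	 * igt_assert(src->bpp == dst->bpp) in every render copyfunc. */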
@@ -136,7 +136,7 @@ gen4_render_flush(struct intel_batchbuffer *batch,
static uint32_t
gen4_bind_buf(struct intel_batchbuffer *batch,
const struct igt_buf *buf,
-uint32_t format, int is_dst)
+int is_dst)
{
struct gen4_surface_state *ss;
uint32_t write_domain, read_domain;
@@ -152,7 +152,12 @@ gen4_bind_buf(struct intel_batchbuffer *batch,
ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 32);
ss->ss0.surface_type = SURFACE_2D;
-ss->ss0.surface_format = format;
+switch (buf->bpp) {
+case 8: ss->ss0.surface_format = SURFACEFORMAT_R8_UNORM; break;
+case 16: ss->ss0.surface_format = SURFACEFORMAT_R8G8_UNORM; break;
+case 32: ss->ss0.surface_format = SURFACEFORMAT_B8G8R8A8_UNORM; break;
+default: igt_assert(0);
+}
ss->ss0.data_return_format = SURFACERETURNFORMAT_FLOAT32;
ss->ss0.color_blend = 1;
@@ -182,10 +187,8 @@ gen4_bind_surfaces(struct intel_batchbuffer *batch,
binding_table = intel_batchbuffer_subdata_alloc(batch, 32, 32);
-binding_table[0] =
-gen4_bind_buf(batch, dst, SURFACEFORMAT_B8G8R8A8_UNORM, 1);
-binding_table[1] =
-gen4_bind_buf(batch, src, SURFACEFORMAT_B8G8R8A8_UNORM, 0);
+binding_table[0] = gen4_bind_buf(batch, dst, 1);
+binding_table[1] = gen4_bind_buf(batch, src, 0);
return intel_batchbuffer_subdata_offset(batch, binding_table);
}
@@ -650,6 +653,7 @@ void gen4_render_copyfunc(struct intel_batchbuffer *batch,
uint32_t vs;
uint32_t offset, batch_end;
+igt_assert(src->bpp == dst->bpp);
intel_batchbuffer_flush_with_context(batch, context);
batch->ptr = batch->buffer + 1024;
@@ -73,7 +73,7 @@ gen6_render_flush(struct intel_batchbuffer *batch,
static uint32_t
gen6_bind_buf(struct intel_batchbuffer *batch, const struct igt_buf *buf,
-uint32_t format, int is_dst)
+int is_dst)
{
struct gen6_surface_state *ss;
uint32_t write_domain, read_domain;
@@ -88,7 +88,13 @@ gen6_bind_buf(struct intel_batchbuffer *batch, const struct igt_buf *buf,
ss = intel_batchbuffer_subdata_alloc(batch, sizeof(*ss), 32);
ss->ss0.surface_type = SURFACE_2D;
-ss->ss0.surface_format = format;
+switch (buf->bpp) {
+case 8: ss->ss0.surface_format = SURFACEFORMAT_R8_UNORM; break;
+case 16: ss->ss0.surface_format = SURFACEFORMAT_R8G8_UNORM; break;
+case 32: ss->ss0.surface_format = SURFACEFORMAT_B8G8R8A8_UNORM; break;
+default: igt_assert(0);
+}
ss->ss0.data_return_format = SURFACERETURNFORMAT_FLOAT32;
ss->ss0.color_blend = 1;
@@ -118,10 +124,8 @@ gen6_bind_surfaces(struct intel_batchbuffer *batch,
binding_table = intel_batchbuffer_subdata_alloc(batch, 32, 32);
-binding_table[0] =
-gen6_bind_buf(batch, dst, SURFACEFORMAT_B8G8R8A8_UNORM, 1);
-binding_table[1] =
-gen6_bind_buf(batch, src, SURFACEFORMAT_B8G8R8A8_UNORM, 0);
+binding_table[0] = gen6_bind_buf(batch, dst, 1);
+binding_table[1] = gen6_bind_buf(batch, src, 0);
return intel_batchbuffer_subdata_offset(batch, binding_table);
}
@@ -520,6 +524,7 @@ void gen6_render_copyfunc(struct intel_batchbuffer *batch,
uint32_t cc_vp, cc_blend, offset;
uint32_t batch_end;
+igt_assert(src->bpp == dst->bpp);
intel_batchbuffer_flush_with_context(batch, context);
batch->ptr = batch->buffer + 1024;
@@ -59,13 +59,19 @@ gen7_tiling_bits(uint32_t tiling)
static uint32_t
gen7_bind_buf(struct intel_batchbuffer *batch,
const struct igt_buf *buf,
-uint32_t format,
int is_dst)
{
-uint32_t *ss;
+uint32_t format, *ss;
uint32_t write_domain, read_domain;
int ret;
+switch (buf->bpp) {
+case 8: format = SURFACEFORMAT_R8_UNORM; break;
+case 16: format = SURFACEFORMAT_R8G8_UNORM; break;
+case 32: format = SURFACEFORMAT_B8G8R8A8_UNORM; break;
+default: igt_assert(0);
+}
if (is_dst) {
write_domain = read_domain = I915_GEM_DOMAIN_RENDER;
} else {
@@ -186,10 +192,8 @@ gen7_bind_surfaces(struct intel_batchbuffer *batch,
binding_table = intel_batchbuffer_subdata_alloc(batch, 8, 32);
-binding_table[0] =
-gen7_bind_buf(batch, dst, SURFACEFORMAT_B8G8R8A8_UNORM, 1);
-binding_table[1] =
-gen7_bind_buf(batch, src, SURFACEFORMAT_B8G8R8A8_UNORM, 0);
+binding_table[0] = gen7_bind_buf(batch, dst, 1);
+binding_table[1] = gen7_bind_buf(batch, src, 0);
return intel_batchbuffer_subdata_offset(batch, binding_table);
}
@@ -501,6 +505,7 @@ void gen7_render_copyfunc(struct intel_batchbuffer *batch,
uint32_t vertex_buffer;
uint32_t batch_end;
+igt_assert(src->bpp == dst->bpp);
intel_batchbuffer_flush_with_context(batch, context);
batch->ptr = &batch->buffer[BATCH_STATE_SPLIT];
@@ -144,8 +144,7 @@ gen6_render_flush(struct intel_batchbuffer *batch,
static uint32_t
gen8_bind_buf(struct intel_batchbuffer *batch,
struct annotations_context *aub,
-const struct igt_buf *buf,
-uint32_t format, int is_dst)
+const struct igt_buf *buf, int is_dst)
{
struct gen8_surface_state *ss;
uint32_t write_domain, read_domain, offset;
@@ -163,7 +162,12 @@ gen8_bind_buf(struct intel_batchbuffer *batch,
annotation_add_state(aub, AUB_TRACE_SURFACE_STATE, offset, sizeof(*ss));
ss->ss0.surface_type = SURFACE_2D;
-ss->ss0.surface_format = format;
+switch (buf->bpp) {
+case 8: ss->ss0.surface_format = SURFACEFORMAT_R8_UNORM; break;
+case 16: ss->ss0.surface_format = SURFACEFORMAT_R8G8_UNORM; break;
+case 32: ss->ss0.surface_format = SURFACEFORMAT_B8G8R8A8_UNORM; break;
+default: igt_assert(0);
+}
ss->ss0.render_cache_read_write = 1;
ss->ss0.vertical_alignment = 1; /* align 4 */
ss->ss0.horizontal_alignment = 1; /* align 4 */
@@ -205,12 +209,8 @@ gen8_bind_surfaces(struct intel_batchbuffer *batch,
offset = intel_batchbuffer_subdata_offset(batch, binding_table);
annotation_add_state(aub, AUB_TRACE_BINDING_TABLE, offset, 8);
-binding_table[0] =
-gen8_bind_buf(batch, aub,
-dst, SURFACEFORMAT_B8G8R8A8_UNORM, 1);
-binding_table[1] =
-gen8_bind_buf(batch, aub,
-src, SURFACEFORMAT_B8G8R8A8_UNORM, 0);
+binding_table[0] = gen8_bind_buf(batch, aub, dst, 1);
+binding_table[1] = gen8_bind_buf(batch, aub, src, 0);
return offset;
}
@@ -898,6 +898,7 @@ void gen8_render_copyfunc(struct intel_batchbuffer *batch,
uint32_t vertex_buffer;
uint32_t batch_end;
+igt_assert(src->bpp == dst->bpp);
intel_batchbuffer_flush_with_context(batch, context);
intel_batchbuffer_align(batch, 8);
@@ -175,7 +175,7 @@ gen6_render_flush(struct intel_batchbuffer *batch,
/* Mostly copy+paste from gen6, except height, width, pitch moved */
static uint32_t
gen8_bind_buf(struct intel_batchbuffer *batch, const struct igt_buf *buf,
-uint32_t format, int is_dst) {
+int is_dst) {
struct gen8_surface_state *ss;
uint32_t write_domain, read_domain, offset;
int ret;
@@ -193,7 +193,12 @@ gen8_bind_buf(struct intel_batchbuffer *batch, const struct igt_buf *buf,
offset, sizeof(*ss));
ss->ss0.surface_type = SURFACE_2D;
-ss->ss0.surface_format = format;
+switch (buf->bpp) {
+case 8: ss->ss0.surface_format = SURFACEFORMAT_R8_UNORM; break;
+case 16: ss->ss0.surface_format = SURFACEFORMAT_R8G8_UNORM; break;
+case 32: ss->ss0.surface_format = SURFACEFORMAT_B8G8R8A8_UNORM; break;
+default: igt_assert(0);
+}
ss->ss0.render_cache_read_write = 1;
ss->ss0.vertical_alignment = 1; /* align 4 */
ss->ss0.horizontal_alignment = 1; /* align 4 */
@@ -249,10 +254,8 @@ gen8_bind_surfaces(struct intel_batchbuffer *batch,
annotation_add_state(&aub_annotations, AUB_TRACE_BINDING_TABLE,
offset, 8);
-binding_table[0] =
-gen8_bind_buf(batch, dst, SURFACEFORMAT_B8G8R8A8_UNORM, 1);
-binding_table[1] =
-gen8_bind_buf(batch, src, SURFACEFORMAT_B8G8R8A8_UNORM, 0);
+binding_table[0] = gen8_bind_buf(batch, dst, 1);
+binding_table[1] = gen8_bind_buf(batch, src, 0);
return offset;
}
@@ -952,6 +955,7 @@ void _gen9_render_copyfunc(struct intel_batchbuffer *batch,
uint32_t vertex_buffer;
uint32_t batch_end;
+igt_assert(src->bpp == dst->bpp);
intel_batchbuffer_flush_with_context(batch, context);
intel_batchbuffer_align(batch, 8);
@@ -136,6 +136,14 @@ static void gen2_emit_target(struct intel_batchbuffer *batch,
const struct igt_buf *dst)
{
uint32_t tiling;
+uint32_t format;
+switch (dst->bpp) {
+case 8: format = COLR_BUF_8BIT; break;
+case 16: format = COLR_BUF_RGB565; break;
+case 32: format = COLR_BUF_ARGB8888; break;
+default: igt_assert(0);
+}
tiling = 0;
if (dst->tiling != I915_TILING_NONE)
@@ -148,7 +156,7 @@ static void gen2_emit_target(struct intel_batchbuffer *batch,
OUT_RELOC(dst->bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
OUT_BATCH(_3DSTATE_DST_BUF_VARS_CMD);
-OUT_BATCH(COLR_BUF_ARGB8888 |
+OUT_BATCH(format |
DSTORG_HORT_BIAS(0x8) |
DSTORG_VERT_BIAS(0x8));
@@ -165,6 +173,14 @@ static void gen2_emit_texture(struct intel_batchbuffer *batch,
int unit)
{
uint32_t tiling;
+uint32_t format;
+switch (src->bpp) {
+case 8: format = MAPSURF_8BIT | MT_8BIT_L8; break;
+case 16: format = MAPSURF_16BIT | MT_16BIT_RGB565; break;
+case 32: format = MAPSURF_32BIT | MT_32BIT_ARGB8888; break;
+default: igt_assert(0);
+}
tiling = 0;
if (src->tiling != I915_TILING_NONE)
@@ -176,7 +192,7 @@ static void gen2_emit_texture(struct intel_batchbuffer *batch,
OUT_RELOC(src->bo, I915_GEM_DOMAIN_SAMPLER, 0, 0);
OUT_BATCH((igt_buf_height(src) - 1) << TM0S1_HEIGHT_SHIFT |
(igt_buf_width(src) - 1) << TM0S1_WIDTH_SHIFT |
-MAPSURF_32BIT | MT_32BIT_ARGB8888 | tiling);
+format | tiling);
OUT_BATCH((src->stride / 4 - 1) << TM0S2_PITCH_SHIFT | TM0S2_MAP_2D);
OUT_BATCH(FILTER_NEAREST << TM0S3_MAG_FILTER_SHIFT |
FILTER_NEAREST << TM0S3_MIN_FILTER_SHIFT |
@@ -213,6 +229,8 @@ void gen2_render_copyfunc(struct intel_batchbuffer *batch,
unsigned width, unsigned height,
const struct igt_buf *dst, unsigned dst_x, unsigned dst_y)
{
+igt_assert(src->bpp == dst->bpp);
gen2_emit_invariant(batch);
gen2_emit_copy_pipeline(batch);
@@ -25,6 +25,8 @@ void gen3_render_copyfunc(struct intel_batchbuffer *batch,
unsigned width, unsigned height,
const struct igt_buf *dst, unsigned dst_x, unsigned dst_y)
{
+igt_assert(src->bpp == dst->bpp);
/* invariant state */
{
OUT_BATCH(_3DSTATE_AA_CMD |
@@ -84,17 +86,23 @@ void gen3_render_copyfunc(struct intel_batchbuffer *batch,
/* samler state */
{
#define TEX_COUNT 1
-uint32_t tiling_bits = 0;
+uint32_t format_bits, tiling_bits = 0;
if (src->tiling != I915_TILING_NONE)
tiling_bits = MS3_TILED_SURFACE;
if (src->tiling == I915_TILING_Y)
tiling_bits |= MS3_TILE_WALK;
+switch (src->bpp) {
+case 8: format_bits = MAPSURF_8BIT | MT_8BIT_L8; break;
+case 16: format_bits = MAPSURF_16BIT | MT_16BIT_RGB565; break;
+case 32: format_bits = MAPSURF_32BIT | MT_32BIT_ARGB8888; break;
+default: igt_assert(0);
+}
OUT_BATCH(_3DSTATE_MAP_STATE | (3 * TEX_COUNT));
OUT_BATCH((1 << TEX_COUNT) - 1);
OUT_RELOC(src->bo, I915_GEM_DOMAIN_SAMPLER, 0, 0);
-OUT_BATCH(MAPSURF_32BIT | MT_32BIT_ARGB8888 |
-tiling_bits |
+OUT_BATCH(format_bits | tiling_bits |
(igt_buf_height(src) - 1) << MS3_HEIGHT_SHIFT |
(igt_buf_width(src) - 1) << MS3_WIDTH_SHIFT);
OUT_BATCH((src->stride/4-1) << MS4_PITCH_SHIFT);
@@ -113,6 +121,15 @@ void gen3_render_copyfunc(struct intel_batchbuffer *batch,
/* render target state */
{
uint32_t tiling_bits = 0;
+uint32_t format_bits;
+switch (dst->bpp) {
+case 8: format_bits = COLR_BUF_8BIT; break;
+case 16: format_bits = COLR_BUF_RGB565; break;
+case 32: format_bits = COLR_BUF_ARGB8888; break;
+default: igt_assert(0);
+}
if (dst->tiling != I915_TILING_NONE)
tiling_bits = BUF_3D_TILED_SURFACE;
if (dst->tiling == I915_TILING_Y)
@@ -124,7 +141,7 @@ void gen3_render_copyfunc(struct intel_batchbuffer *batch,
OUT_RELOC(dst->bo, I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
OUT_BATCH(_3DSTATE_DST_BUF_VARS_CMD);
-OUT_BATCH(COLR_BUF_ARGB8888 |
+OUT_BATCH(format_bits |
DSTORG_HORT_BIAS(0x8) |
DSTORG_VERT_BIAS(0x8));