/**********************************************************
 * Copyright 2009-2011 VMware, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 *********************************************************
 * Authors:
 * Zack Rusin <zackr-at-vmware-dot-com>
 * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "xa_composite.h"
#include "xa_context.h"
#include "xa_priv.h"
#include "cso_cache/cso_context.h"
#include "util/u_sampler.h"
#include "util/u_inlines.h"


/*XXX also in Xrender.h, but including it here breaks compilation */
#define XFixedToDouble(f)    (((double) (f)) / 65536.)

/*
 * One entry of the composite-op -> gallium blend factor lookup table.
 * The bitfield layout is compact so the table stays small.
 */
struct xa_composite_blend {
    unsigned op : 8;         /**< enum xa_composite_op this entry maps */

    /* Nonzero if the blend factors reference dst / src alpha; used by
     * blend_for_op() to adjust factors when the dst has no alpha channel.
     * (src flag usage not visible in this file — presumably symmetric.) */
    unsigned alpha_dst : 4;
    unsigned alpha_src : 4;

    unsigned rgb_src : 8;    /**< PIPE_BLENDFACTOR_x */
    unsigned rgb_dst : 8;    /**< PIPE_BLENDFACTOR_x */
};

/* Index of the xa_op_over entry below; used as the fallback blend. */
#define XA_BLEND_OP_OVER 3
/* Porter/Duff compositing operators expressed as gallium blend factors.
 * NOTE: XA_BLEND_OP_OVER must stay in sync with the position of the
 * xa_op_over entry in this table. */
static const struct xa_composite_blend xa_blends[] = {
    { xa_op_clear,
      0, 0, PIPE_BLENDFACTOR_ZERO, PIPE_BLENDFACTOR_ZERO},
    { xa_op_src,
      0, 0, PIPE_BLENDFACTOR_ONE, PIPE_BLENDFACTOR_ZERO},
    { xa_op_dst,
      0, 0, PIPE_BLENDFACTOR_ZERO, PIPE_BLENDFACTOR_ONE},
    { xa_op_over,
      0, 1, PIPE_BLENDFACTOR_ONE, PIPE_BLENDFACTOR_INV_SRC_ALPHA},
    { xa_op_over_reverse,
      1, 0, PIPE_BLENDFACTOR_INV_DST_ALPHA, PIPE_BLENDFACTOR_ONE},
    { xa_op_in,
      1, 0, PIPE_BLENDFACTOR_DST_ALPHA, PIPE_BLENDFACTOR_ZERO},
    { xa_op_in_reverse,
      0, 1, PIPE_BLENDFACTOR_ZERO, PIPE_BLENDFACTOR_SRC_ALPHA},
    { xa_op_out,
      1, 0, PIPE_BLENDFACTOR_INV_DST_ALPHA, PIPE_BLENDFACTOR_ZERO},
    { xa_op_out_reverse,
      0, 1, PIPE_BLENDFACTOR_ZERO, PIPE_BLENDFACTOR_INV_SRC_ALPHA},
    { xa_op_atop,
      1, 1, PIPE_BLENDFACTOR_DST_ALPHA, PIPE_BLENDFACTOR_INV_SRC_ALPHA},
    { xa_op_atop_reverse,
      1, 1, PIPE_BLENDFACTOR_INV_DST_ALPHA, PIPE_BLENDFACTOR_SRC_ALPHA},
    { xa_op_xor,
      1, 1, PIPE_BLENDFACTOR_INV_DST_ALPHA, PIPE_BLENDFACTOR_INV_SRC_ALPHA},
    { xa_op_add,
      0, 0, PIPE_BLENDFACTOR_ONE, PIPE_BLENDFACTOR_ONE},
};

81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101
/*
 * The alpha value stored in a L8 texture is read by the
 * hardware as color, and R8 is read as red. The source alpha value
 * at the end of the fragment shader is stored in all color channels,
 * so the correct approach is to blend using DST_COLOR instead of
 * DST_ALPHA and then output any color channel (L8) or the red channel (R8).
 */
static unsigned
xa_convert_blend_for_luminance(unsigned factor)
{
    switch(factor) {
    case PIPE_BLENDFACTOR_DST_ALPHA:
	return PIPE_BLENDFACTOR_DST_COLOR;
    case PIPE_BLENDFACTOR_INV_DST_ALPHA:
	return PIPE_BLENDFACTOR_INV_DST_COLOR;
    default:
	break;
    }
    return factor;
}

102 103 104 105 106 107 108 109 110 111 112 113 114
static boolean
blend_for_op(struct xa_composite_blend *blend,
	     enum xa_composite_op op,
	     struct xa_picture *src_pic,
	     struct xa_picture *mask_pic,
	     struct xa_picture *dst_pic)
{
    const int num_blends =
	sizeof(xa_blends)/sizeof(struct xa_composite_blend);
    int i;
    boolean supported = FALSE;

    /*
115
     * No component alpha yet.
116
     */
117
    if (mask_pic && mask_pic->component_alpha)
118
	return FALSE;
119

120 121 122
    /*
     * our default in case something goes wrong
     */
123 124 125 126 127 128
    *blend = xa_blends[XA_BLEND_OP_OVER];

    for (i = 0; i < num_blends; ++i) {
	if (xa_blends[i].op == op) {
	    *blend = xa_blends[i];
	    supported = TRUE;
129
            break;
130 131 132
	}
    }

133 134 135
    if (!dst_pic->srf)
	return supported;

136
    if ((dst_pic->srf->tex->format == PIPE_FORMAT_L8_UNORM ||
137 138 139 140
         dst_pic->srf->tex->format == PIPE_FORMAT_R8_UNORM)) {
        blend->rgb_src = xa_convert_blend_for_luminance(blend->rgb_src);
        blend->rgb_dst = xa_convert_blend_for_luminance(blend->rgb_dst);
    }
141 142 143 144 145 146

    /*
     * If there's no dst alpha channel, adjust the blend op so that we'll treat
     * it as always 1.
     */

147
    if (xa_format_a(dst_pic->pict_format) == 0 && blend->alpha_dst) {
148 149 150 151 152 153 154 155 156 157
	if (blend->rgb_src == PIPE_BLENDFACTOR_DST_ALPHA)
	    blend->rgb_src = PIPE_BLENDFACTOR_ONE;
	else if (blend->rgb_src == PIPE_BLENDFACTOR_INV_DST_ALPHA)
	    blend->rgb_src = PIPE_BLENDFACTOR_ZERO;
    }

    return supported;
}


158
static inline int
159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175
xa_repeat_to_gallium(int mode)
{
    switch(mode) {
    case xa_wrap_clamp_to_border:
	return PIPE_TEX_WRAP_CLAMP_TO_BORDER;
    case xa_wrap_repeat:
	return PIPE_TEX_WRAP_REPEAT;
    case xa_wrap_mirror_repeat:
	return PIPE_TEX_WRAP_MIRROR_REPEAT;
    case xa_wrap_clamp_to_edge:
	return PIPE_TEX_WRAP_CLAMP_TO_EDGE;
    default:
	break;
    }
    return PIPE_TEX_WRAP_REPEAT;
}

176
static inline boolean
177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202
xa_filter_to_gallium(int xrender_filter, int *out_filter)
{

    switch (xrender_filter) {
    case xa_filter_nearest:
	*out_filter = PIPE_TEX_FILTER_NEAREST;
	break;
    case xa_filter_linear:
	*out_filter = PIPE_TEX_FILTER_LINEAR;
	break;
    default:
	*out_filter = PIPE_TEX_FILTER_NEAREST;
	return FALSE;
    }
    return TRUE;
}

/* Nonzero if the picture's filter (or a NULL picture) can be handled. */
static int
xa_is_filter_accelerated(struct xa_picture *pic)
{
    int dummy;

    if (!pic)
	return 1;

    return xa_filter_to_gallium(pic->filter, &dummy) ? 1 : 0;
}

203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223
/**
 * xa_src_pict_is_accelerated - Check whether we support acceleration
 * of the given src_pict type
 *
 * \param src_pic[in]: Pointer to a union xa_source_pict to check.
 *
 * \returns TRUE if accelerated, FALSE otherwise.
 */
static boolean
xa_src_pict_is_accelerated(const union xa_source_pict *src_pic)
{
    if (!src_pic)
        return TRUE;

    if (src_pic->type == xa_src_pict_solid_fill ||
        src_pic->type == xa_src_pict_float_solid_fill)
        return TRUE;

    return FALSE;
}

224
XA_EXPORT int
225
xa_composite_check_accelerated(const struct xa_composite *comp)
226 227 228
{
    struct xa_composite_blend blend;
    struct xa_picture *src_pic = comp->src;
229 230 231 232 233 234 235
    struct xa_picture *mask_pic = comp->mask;

    /*
     * No component alpha yet.
     */
    if (mask_pic && mask_pic->component_alpha)
	return -XA_ERR_INVAL;
236 237 238

    if (!xa_is_filter_accelerated(src_pic) ||
	!xa_is_filter_accelerated(comp->mask)) {
239
	return -XA_ERR_INVAL;
240 241
    }

242 243
    if (!xa_src_pict_is_accelerated(src_pic->src_pict) ||
        (mask_pic && !xa_src_pict_is_accelerated(mask_pic->src_pict)))
244
        return -XA_ERR_INVAL;
245

246 247
    if (!blend_for_op(&blend, comp->op, comp->src, comp->mask, comp->dst))
	return -XA_ERR_INVAL;
248

249
    return XA_ERR_NONE;
250 251
}

252
static int
253 254 255 256 257 258
bind_composite_blend_state(struct xa_context *ctx,
			   const struct xa_composite *comp)
{
    struct xa_composite_blend blend_opt;
    struct pipe_blend_state blend;

259 260
    if (!blend_for_op(&blend_opt, comp->op, comp->src, comp->mask, comp->dst))
	return -XA_ERR_INVAL;
261 262 263 264 265 266 267 268 269 270 271

    memset(&blend, 0, sizeof(struct pipe_blend_state));
    blend.rt[0].blend_enable = 1;
    blend.rt[0].colormask = PIPE_MASK_RGBA;

    blend.rt[0].rgb_src_factor   = blend_opt.rgb_src;
    blend.rt[0].alpha_src_factor = blend_opt.rgb_src;
    blend.rt[0].rgb_dst_factor   = blend_opt.rgb_dst;
    blend.rt[0].alpha_dst_factor = blend_opt.rgb_dst;

    cso_set_blend(ctx->cso, &blend);
272
    return XA_ERR_NONE;
273 274 275 276 277 278 279 280 281 282 283 284 285
}

static unsigned int
picture_format_fixups(struct xa_picture *src_pic,
		      int mask)
{
    boolean set_alpha = FALSE;
    boolean swizzle = FALSE;
    unsigned ret = 0;
    struct xa_surface *src = src_pic->srf;
    enum xa_formats src_hw_format, src_pic_format;
    enum xa_surface_type src_hw_type, src_pic_type;

286 287 288
    if (!src)
	return 0;

289 290 291
    src_hw_format = xa_surface_format(src);
    src_pic_format = src_pic->pict_format;

292
    set_alpha = (xa_format_type_is_color(src_hw_format) &&
293 294 295 296 297 298
		 xa_format_a(src_pic_format) == 0);

    if (set_alpha)
	ret |= mask ? FS_MASK_SET_ALPHA : FS_SRC_SET_ALPHA;

    if (src_hw_format == src_pic_format) {
299 300
	if (src->tex->format == PIPE_FORMAT_L8_UNORM ||
            src->tex->format == PIPE_FORMAT_R8_UNORM)
301 302 303
	    return ((mask) ? FS_MASK_LUMINANCE : FS_SRC_LUMINANCE);

	return ret;
304 305 306 307 308 309 310 311 312 313 314
    }

    src_hw_type = xa_format_type(src_hw_format);
    src_pic_type = xa_format_type(src_pic_format);

    swizzle = ((src_hw_type == xa_type_argb &&
		src_pic_type == xa_type_abgr) ||
	       ((src_hw_type == xa_type_abgr &&
		 src_pic_type == xa_type_argb)));

    if (!swizzle && (src_hw_type != src_pic_type))
315
      return ret;
316 317 318 319 320 321 322

    if (swizzle)
	ret |= mask ? FS_MASK_SWIZZLE_RGB : FS_SRC_SWIZZLE_RGB;

    return ret;
}

/* Multiply a solid source color by the mask's alpha (IN operator). */
static void
xa_src_in_mask(float src[4], const float mask[4])
{
	const float mask_alpha = mask[3];
	int i;

	for (i = 0; i < 4; i++)
		src[i] *= mask_alpha;
}

332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377
/**
 * xa_handle_src_pict - Set up xa_context state and fragment shader
 * input based on scr_pict type
 *
 * \param ctx[in, out]: Pointer to the xa context.
 * \param src_pict[in]: Pointer to the union xa_source_pict to consider.
 * \param is_mask[in]: Whether we're considering a mask picture.
 *
 * \returns TRUE if succesful, FALSE otherwise.
 *
 * This function computes some xa_context state used to determine whether
 * to upload the solid color and also the solid color itself used as an input
 * to the fragment shader.
 */
static boolean
xa_handle_src_pict(struct xa_context *ctx,
                   const union xa_source_pict *src_pict,
                   boolean is_mask)
{
    float solid_color[4];

    switch(src_pict->type) {
    case xa_src_pict_solid_fill:
        xa_pixel_to_float4(src_pict->solid_fill.color, solid_color);
        break;
    case xa_src_pict_float_solid_fill:
        memcpy(solid_color, src_pict->float_solid_fill.color,
               sizeof(solid_color));
        break;
    default:
        return FALSE;
    }

    if (is_mask && ctx->has_solid_src)
        xa_src_in_mask(ctx->solid_color, solid_color);
    else
        memcpy(ctx->solid_color, solid_color, sizeof(solid_color));

    if (is_mask)
        ctx->has_solid_mask = TRUE;
    else
        ctx->has_solid_src = TRUE;

    return TRUE;
}

/*
 * Accumulate vertex/fragment shader trait flags for the composite
 * operation, then fetch and bind the matching shader pair.
 * Returns -XA_ERR_INVAL if an unsupported source picture is found.
 */
static int
bind_shaders(struct xa_context *ctx, const struct xa_composite *comp)
{
    unsigned vs_traits = 0, fs_traits = 0;
    struct xa_shader shader;
    struct xa_picture *src_pic = comp->src;
    struct xa_picture *mask_pic = comp->mask;

    /* Reset solid-fill state; xa_handle_src_pict() sets it below. */
    ctx->has_solid_src = FALSE;
    ctx->has_solid_mask = FALSE;

    if (src_pic) {
	/* Border-clamp with a transform needs explicit repeat-none
	 * handling in the fragment shader. */
	if (src_pic->wrap == xa_wrap_clamp_to_border && src_pic->has_transform)
	    fs_traits |= FS_SRC_REPEAT_NONE;

        fs_traits |= FS_COMPOSITE;
        vs_traits |= VS_COMPOSITE;

	if (src_pic->src_pict) {
            if (!xa_handle_src_pict(ctx, src_pic->src_pict, false))
                return -XA_ERR_INVAL;
            fs_traits |= FS_SRC_SRC;
            vs_traits |= VS_SRC_SRC;
	} else
           fs_traits |= picture_format_fixups(src_pic, 0);
    }

    if (mask_pic) {
	vs_traits |= VS_MASK;
	fs_traits |= FS_MASK;
        if (mask_pic->src_pict) {
            if (!xa_handle_src_pict(ctx, mask_pic->src_pict, true))
                return -XA_ERR_INVAL;

            /* A solid mask over a solid src was already folded into one
             * color by xa_handle_src_pict(); no mask stage needed. */
            if (ctx->has_solid_src) {
                vs_traits &= ~VS_MASK;
                fs_traits &= ~FS_MASK;
            } else {
                vs_traits |= VS_MASK_SRC;
                fs_traits |= FS_MASK_SRC;
            }
        } else {
            if (mask_pic->wrap == xa_wrap_clamp_to_border &&
                mask_pic->has_transform)
                fs_traits |= FS_MASK_REPEAT_NONE;

            fs_traits |= picture_format_fixups(mask_pic, 1);
        }
    }

    /* L8/R8 destinations store alpha in a color channel; let the
     * fragment shader output accordingly. */
    if (ctx->srf->format == PIPE_FORMAT_L8_UNORM ||
        ctx->srf->format == PIPE_FORMAT_R8_UNORM)
	fs_traits |= FS_DST_LUMINANCE;

    shader = xa_shaders_get(ctx->shaders, vs_traits, fs_traits);
    cso_set_vertex_shader_handle(ctx->cso, shader.vs);
    cso_set_fragment_shader_handle(ctx->cso, shader.fs);
    return XA_ERR_NONE;
}

static void
bind_samplers(struct xa_context *ctx,
	      const struct xa_composite *comp)
{
    struct pipe_sampler_state *samplers[PIPE_MAX_SAMPLERS];
    struct pipe_sampler_state src_sampler, mask_sampler;
    struct pipe_sampler_view view_templ;
    struct pipe_sampler_view *src_view;
    struct pipe_context *pipe = ctx->pipe;
    struct xa_picture *src_pic = comp->src;
    struct xa_picture *mask_pic = comp->mask;
449
    int num_samplers = 0;
450

451
    xa_ctx_sampler_views_destroy(ctx);
452 453 454
    memset(&src_sampler, 0, sizeof(struct pipe_sampler_state));
    memset(&mask_sampler, 0, sizeof(struct pipe_sampler_state));

455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473
    if (src_pic && !ctx->has_solid_src) {
	unsigned src_wrap = xa_repeat_to_gallium(src_pic->wrap);
	int filter;

	(void) xa_filter_to_gallium(src_pic->filter, &filter);

	src_sampler.wrap_s = src_wrap;
	src_sampler.wrap_t = src_wrap;
	src_sampler.min_img_filter = filter;
	src_sampler.mag_img_filter = filter;
	src_sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NEAREST;
	src_sampler.normalized_coords = 1;
	samplers[0] = &src_sampler;
	u_sampler_view_default_template(&view_templ,
					src_pic->srf->tex,+					src_pic->srf->tex->format);
	src_view = pipe->create_sampler_view(pipe, src_pic->srf->tex,
					     &view_templ);
	ctx->bound_sampler_views[0] = src_view;
	num_samplers++;
474 475
    }

476 477
    if (mask_pic && !ctx->has_solid_mask) {
        unsigned mask_wrap = xa_repeat_to_gallium(mask_pic->wrap);
478 479 480 481 482 483 484 485 486 487
	int filter;

	(void) xa_filter_to_gallium(mask_pic->filter, &filter);

	mask_sampler.wrap_s = mask_wrap;
	mask_sampler.wrap_t = mask_wrap;
	mask_sampler.min_img_filter = filter;
	mask_sampler.mag_img_filter = filter;
	src_sampler.min_mip_filter = PIPE_TEX_MIPFILTER_NEAREST;
	mask_sampler.normalized_coords = 1;
488
        samplers[num_samplers] = &mask_sampler;
489 490 491 492 493
	u_sampler_view_default_template(&view_templ,
					mask_pic->srf->tex,
					mask_pic->srf->tex->format);
	src_view = pipe->create_sampler_view(pipe, mask_pic->srf->tex,
					     &view_templ);
494 495
        ctx->bound_sampler_views[num_samplers] = src_view;
        num_samplers++;
496 497
    }

498
    cso_set_samplers(ctx->cso, PIPE_SHADER_FRAGMENT, num_samplers,
499
		     (const struct pipe_sampler_state **)samplers);
500
    cso_set_sampler_views(ctx->cso, PIPE_SHADER_FRAGMENT, num_samplers,
501
				   ctx->bound_sampler_views);
502
    ctx->num_bound_samplers = num_samplers;
503 504
}

505
XA_EXPORT int
506 507 508 509 510 511
xa_composite_prepare(struct xa_context *ctx,
		     const struct xa_composite *comp)
{
    struct xa_surface *dst_srf = comp->dst->srf;
    int ret;

512
    ret = xa_ctx_srf_create(ctx, dst_srf);
513 514 515
    if (ret != XA_ERR_NONE)
	return ret;

516
    ctx->dst = dst_srf;
Rob Clark's avatar
Rob Clark committed
517
    renderer_bind_destination(ctx, ctx->srf);
518

519 520 521 522 523 524
    ret = bind_composite_blend_state(ctx, comp);
    if (ret != XA_ERR_NONE)
	return ret;
    ret = bind_shaders(ctx, comp);
    if (ret != XA_ERR_NONE)
	return ret;
525 526 527 528 529 530
    bind_samplers(ctx, comp);

    if (ctx->num_bound_samplers == 0 ) { /* solid fill */
	renderer_begin_solid(ctx);
    } else {
	renderer_begin_textures(ctx);
531
	ctx->comp = comp;
532 533
    }

534
    xa_ctx_srf_destroy(ctx);
535 536 537
    return XA_ERR_NONE;
}

538 539 540 541
XA_EXPORT void
xa_composite_rect(struct xa_context *ctx,
		  int srcX, int srcY, int maskX, int maskY,
		  int dstX, int dstY, int width, int height)
542 543
{
    if (ctx->num_bound_samplers == 0 ) { /* solid fill */
544 545
	xa_scissor_update(ctx, dstX, dstY, dstX + width, dstY + height);
	renderer_solid(ctx, dstX, dstY, dstX + width, dstY + height);
546 547 548 549 550 551
    } else {
	const struct xa_composite *comp = ctx->comp;
	int pos[6] = {srcX, srcY, maskX, maskY, dstX, dstY};
	const float *src_matrix = NULL;
	const float *mask_matrix = NULL;

Rob Clark's avatar
Rob Clark committed
552 553
	xa_scissor_update(ctx, dstX, dstY, dstX + width, dstY + height);

554 555
	if (comp->src->has_transform)
	    src_matrix = comp->src->transform;
556
	if (comp->mask && comp->mask->has_transform)
557 558 559 560 561 562 563
	    mask_matrix = comp->mask->transform;

	renderer_texture(ctx, pos, width, height,
			 src_matrix, mask_matrix);
    }
}

564
XA_EXPORT void
565 566 567 568 569
xa_composite_done(struct xa_context *ctx)
{
    renderer_draw_flush(ctx);

    ctx->comp = NULL;
570 571
    ctx->has_solid_src = FALSE;
    ctx->has_solid_mask = FALSE;
572
    xa_ctx_sampler_views_destroy(ctx);
573 574 575 576 577 578 579 580
}

/* Structure sizes exported to clients so they can allocate composite
 * descriptors compatibly with this library build. */
static const struct xa_composite_allocation a = {
    .xa_composite_size = sizeof(struct xa_composite),
    .xa_picture_size = sizeof(struct xa_picture),
    .xa_source_pict_size = sizeof(union xa_source_pict),
};

581
XA_EXPORT const struct xa_composite_allocation *
582 583 584 585
xa_composite_allocation(void)
{
    return &a;
}