gstavviddec.c 66.8 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
/* GStreamer
 * Copyright (C) <1999> Erik Walthinsen <omega@cse.ogi.edu>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <assert.h>
#include <string.h>

#include <libavcodec/avcodec.h>

#include <gst/gst.h>
#include <gst/video/video.h>
31
#include <gst/video/gstvideodecoder.h>
32 33
#include <gst/video/gstvideometa.h>
#include <gst/video/gstvideopool.h>
34

35 36 37
#include "gstav.h"
#include "gstavcodecmap.h"
#include "gstavutils.h"
38
#include "gstavviddec.h"
39 40 41 42 43 44 45 46

#define MAX_TS_MASK 0xff

#define DEFAULT_LOWRES			0
#define DEFAULT_SKIPFRAME		0
#define DEFAULT_DIRECT_RENDERING	TRUE
#define DEFAULT_DEBUG_MV		FALSE
#define DEFAULT_MAX_THREADS		0
47
#define DEFAULT_OUTPUT_CORRUPT		TRUE
48
#define REQUIRED_POOL_MAX_BUFFERS       32
49
#define DEFAULT_STRIDE_ALIGN            31
50
#define DEFAULT_ALLOC_PARAM             { 0, DEFAULT_STRIDE_ALIGN, 0, 0, }
51 52 53 54 55 56 57 58 59

enum
{
  PROP_0,
  PROP_LOWRES,
  PROP_SKIPFRAME,
  PROP_DIRECT_RENDERING,
  PROP_DEBUG_MV,
  PROP_MAX_THREADS,
60
  PROP_OUTPUT_CORRUPT,
61 62 63 64
  PROP_LAST
};

/* A number of function prototypes are given so we can refer to them later. */
65 66 67 68
static void gst_ffmpegviddec_base_init (GstFFMpegVidDecClass * klass);
static void gst_ffmpegviddec_class_init (GstFFMpegVidDecClass * klass);
static void gst_ffmpegviddec_init (GstFFMpegVidDec * ffmpegdec);
static void gst_ffmpegviddec_finalize (GObject * object);
69

70 71 72 73
static gboolean gst_ffmpegviddec_set_format (GstVideoDecoder * decoder,
    GstVideoCodecState * state);
static GstFlowReturn gst_ffmpegviddec_handle_frame (GstVideoDecoder * decoder,
    GstVideoCodecFrame * frame);
74
static gboolean gst_ffmpegviddec_start (GstVideoDecoder * decoder);
75
static gboolean gst_ffmpegviddec_stop (GstVideoDecoder * decoder);
76
static gboolean gst_ffmpegviddec_flush (GstVideoDecoder * decoder);
77 78 79 80
static gboolean gst_ffmpegviddec_decide_allocation (GstVideoDecoder * decoder,
    GstQuery * query);
static gboolean gst_ffmpegviddec_propose_allocation (GstVideoDecoder * decoder,
    GstQuery * query);
81

82
static void gst_ffmpegviddec_set_property (GObject * object,
83
    guint prop_id, const GValue * value, GParamSpec * pspec);
84
static void gst_ffmpegviddec_get_property (GObject * object,
85 86
    guint prop_id, GValue * value, GParamSpec * pspec);

87
static gboolean gst_ffmpegviddec_negotiate (GstFFMpegVidDec * ffmpegdec,
88
    AVCodecContext * context, AVFrame * picture);
89 90

/* some sort of bufferpool handling, but different */
91 92
static int gst_ffmpegviddec_get_buffer2 (AVCodecContext * context,
    AVFrame * picture, int flags);
93

94
static GstFlowReturn gst_ffmpegviddec_finish (GstVideoDecoder * decoder);
95
static GstFlowReturn gst_ffmpegviddec_drain (GstVideoDecoder * decoder);
96

97 98 99 100 101
static gboolean picture_changed (GstFFMpegVidDec * ffmpegdec,
    AVFrame * picture);
static gboolean context_changed (GstFFMpegVidDec * ffmpegdec,
    AVCodecContext * context);

102
#define GST_FFDEC_PARAMS_QDATA g_quark_from_static_string("avdec-params")
103 104 105

static GstElementClass *parent_class = NULL;

106
#define GST_FFMPEGVIDDEC_TYPE_LOWRES (gst_ffmpegviddec_lowres_get_type())
107
static GType
108
gst_ffmpegviddec_lowres_get_type (void)
109 110 111 112 113 114 115 116 117 118 119 120
{
  static GType ffmpegdec_lowres_type = 0;

  if (!ffmpegdec_lowres_type) {
    static const GEnumValue ffmpegdec_lowres[] = {
      {0, "0", "full"},
      {1, "1", "1/2-size"},
      {2, "2", "1/4-size"},
      {0, NULL, NULL},
    };

    ffmpegdec_lowres_type =
121
        g_enum_register_static ("GstLibAVVidDecLowres", ffmpegdec_lowres);
122 123 124 125 126
  }

  return ffmpegdec_lowres_type;
}

127
#define GST_FFMPEGVIDDEC_TYPE_SKIPFRAME (gst_ffmpegviddec_skipframe_get_type())
128
static GType
129
gst_ffmpegviddec_skipframe_get_type (void)
130 131 132 133 134 135 136 137 138 139 140 141 142
{
  static GType ffmpegdec_skipframe_type = 0;

  if (!ffmpegdec_skipframe_type) {
    static const GEnumValue ffmpegdec_skipframe[] = {
      {0, "0", "Skip nothing"},
      {1, "1", "Skip B-frames"},
      {2, "2", "Skip IDCT/Dequantization"},
      {5, "5", "Skip everything"},
      {0, NULL, NULL},
    };

    ffmpegdec_skipframe_type =
143
        g_enum_register_static ("GstLibAVVidDecSkipFrame", ffmpegdec_skipframe);
144 145 146 147 148 149
  }

  return ffmpegdec_skipframe_type;
}

static void
150
gst_ffmpegviddec_base_init (GstFFMpegVidDecClass * klass)
151 152 153 154 155
{
  GstElementClass *element_class = GST_ELEMENT_CLASS (klass);
  GstPadTemplate *sinktempl, *srctempl;
  GstCaps *sinkcaps, *srccaps;
  AVCodec *in_plugin;
156
  gchar *longname, *description;
157 158 159 160 161 162 163

  in_plugin =
      (AVCodec *) g_type_get_qdata (G_OBJECT_CLASS_TYPE (klass),
      GST_FFDEC_PARAMS_QDATA);
  g_assert (in_plugin != NULL);

  /* construct the element details struct */
164 165 166
  longname = g_strdup_printf ("libav %s decoder", in_plugin->long_name);
  description = g_strdup_printf ("libav %s decoder", in_plugin->name);
  gst_element_class_set_metadata (element_class, longname,
167
      "Codec/Decoder/Video", description,
168 169 170 171 172 173 174 175 176 177
      "Wim Taymans <wim.taymans@gmail.com>, "
      "Ronald Bultje <rbultje@ronald.bitfreak.net>, "
      "Edward Hervey <bilboed@bilboed.com>");
  g_free (longname);
  g_free (description);

  /* get the caps */
  sinkcaps = gst_ffmpeg_codecid_to_caps (in_plugin->id, NULL, FALSE);
  if (!sinkcaps) {
    GST_DEBUG ("Couldn't get sink caps for decoder '%s'", in_plugin->name);
178
    sinkcaps = gst_caps_new_empty_simple ("unknown/unknown");
179
  }
180 181 182 183 184 185
  srccaps = gst_ffmpeg_codectype_to_video_caps (NULL,
      in_plugin->id, FALSE, in_plugin);
  if (!srccaps) {
    GST_DEBUG ("Couldn't get source caps for decoder '%s'", in_plugin->name);
    srccaps = gst_caps_from_string ("video/x-raw");
  }
186 187 188 189 190 191 192 193 194 195 196 197 198

  /* pad templates */
  sinktempl = gst_pad_template_new ("sink", GST_PAD_SINK,
      GST_PAD_ALWAYS, sinkcaps);
  srctempl = gst_pad_template_new ("src", GST_PAD_SRC, GST_PAD_ALWAYS, srccaps);

  gst_element_class_add_pad_template (element_class, srctempl);
  gst_element_class_add_pad_template (element_class, sinktempl);

  klass->in_plugin = in_plugin;
}

/* Class initializer: wires up GObject property handling, installs the
 * element properties and fills in the GstVideoDecoder virtual methods. */
static void
gst_ffmpegviddec_class_init (GstFFMpegVidDecClass * klass)
{
  GObjectClass *gobject_class = G_OBJECT_CLASS (klass);
  GstVideoDecoderClass *viddec_class = GST_VIDEO_DECODER_CLASS (klass);
  int caps;

  parent_class = g_type_class_peek_parent (klass);

  gobject_class->finalize = gst_ffmpegviddec_finalize;

  gobject_class->set_property = gst_ffmpegviddec_set_property;
  gobject_class->get_property = gst_ffmpegviddec_get_property;

  g_object_class_install_property (gobject_class, PROP_SKIPFRAME,
      g_param_spec_enum ("skip-frame", "Skip frames",
          "Which types of frames to skip during decoding",
          GST_FFMPEGVIDDEC_TYPE_SKIPFRAME, 0,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
  g_object_class_install_property (gobject_class, PROP_LOWRES,
      g_param_spec_enum ("lowres", "Low resolution",
          "At which resolution to decode images",
          GST_FFMPEGVIDDEC_TYPE_LOWRES, 0,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
  g_object_class_install_property (gobject_class, PROP_DIRECT_RENDERING,
      g_param_spec_boolean ("direct-rendering", "Direct Rendering",
          "Enable direct rendering", DEFAULT_DIRECT_RENDERING,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
  g_object_class_install_property (gobject_class, PROP_DEBUG_MV,
      g_param_spec_boolean ("debug-mv", "Debug motion vectors",
          "Whether libav should print motion vectors on top of the image",
          DEFAULT_DEBUG_MV, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
  g_object_class_install_property (gobject_class, PROP_OUTPUT_CORRUPT,
      g_param_spec_boolean ("output-corrupt", "Output corrupt buffers",
          "Whether libav should output frames even if corrupted",
          DEFAULT_OUTPUT_CORRUPT, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /* "max-threads" only makes sense for codecs that can thread at all */
  caps = klass->in_plugin->capabilities;
  if (caps & (CODEC_CAP_FRAME_THREADS | CODEC_CAP_SLICE_THREADS)) {
    g_object_class_install_property (G_OBJECT_CLASS (klass), PROP_MAX_THREADS,
        g_param_spec_int ("max-threads", "Maximum decode threads",
            "Maximum number of worker threads to spawn. (0 = auto)",
            0, G_MAXINT, DEFAULT_MAX_THREADS,
            G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
  }

  /* GstVideoDecoder virtual methods */
  viddec_class->set_format = gst_ffmpegviddec_set_format;
  viddec_class->handle_frame = gst_ffmpegviddec_handle_frame;
  viddec_class->start = gst_ffmpegviddec_start;
  viddec_class->stop = gst_ffmpegviddec_stop;
  viddec_class->flush = gst_ffmpegviddec_flush;
  viddec_class->finish = gst_ffmpegviddec_finish;
  viddec_class->drain = gst_ffmpegviddec_drain;
  viddec_class->decide_allocation = gst_ffmpegviddec_decide_allocation;
  viddec_class->propose_allocation = gst_ffmpegviddec_propose_allocation;
}

static void
256
gst_ffmpegviddec_init (GstFFMpegVidDec * ffmpegdec)
257
{
258 259 260
  GstFFMpegVidDecClass *klass =
      (GstFFMpegVidDecClass *) G_OBJECT_GET_CLASS (ffmpegdec);

261
  /* some ffmpeg data */
262
  ffmpegdec->context = avcodec_alloc_context3 (klass->in_plugin);
263
  ffmpegdec->context->opaque = ffmpegdec;
264
  ffmpegdec->picture = av_frame_alloc ();
265 266 267 268 269
  ffmpegdec->opened = FALSE;
  ffmpegdec->skip_frame = ffmpegdec->lowres = 0;
  ffmpegdec->direct_rendering = DEFAULT_DIRECT_RENDERING;
  ffmpegdec->debug_mv = DEFAULT_DEBUG_MV;
  ffmpegdec->max_threads = DEFAULT_MAX_THREADS;
270
  ffmpegdec->output_corrupt = DEFAULT_OUTPUT_CORRUPT;
271

272
  GST_PAD_SET_ACCEPT_TEMPLATE (GST_VIDEO_DECODER_SINK_PAD (ffmpegdec));
273 274
  gst_video_decoder_set_use_default_pad_acceptcaps (GST_VIDEO_DECODER_CAST
      (ffmpegdec), TRUE);
275

276
  gst_video_decoder_set_needs_format (GST_VIDEO_DECODER (ffmpegdec), TRUE);
277 278 279
}

static void
280
gst_ffmpegviddec_finalize (GObject * object)
281
{
282
  GstFFMpegVidDec *ffmpegdec = (GstFFMpegVidDec *) object;
283

284 285
  av_frame_free (&ffmpegdec->picture);

286
  if (ffmpegdec->context != NULL) {
287
    gst_ffmpeg_avcodec_close (ffmpegdec->context);
288 289 290 291 292 293 294
    av_free (ffmpegdec->context);
    ffmpegdec->context = NULL;
  }

  G_OBJECT_CLASS (parent_class)->finalize (object);
}

295 296 297 298 299 300 301 302 303 304 305
/* Set (@enable == TRUE) or clear (@enable == FALSE) @flags in
 * @context->flags. */
static void
gst_ffmpegviddec_context_set_flags (AVCodecContext * context, guint flags,
    gboolean enable)
{
  g_return_if_fail (context != NULL);

  context->flags =
      enable ? (context->flags | flags) : (context->flags & ~flags);
}
306 307

/* with LOCK */
308 309
static gboolean
gst_ffmpegviddec_close (GstFFMpegVidDec * ffmpegdec, gboolean reset)
{
  GstFFMpegVidDecClass *oclass =
      (GstFFMpegVidDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
  guint n;

  GST_LOG_OBJECT (ffmpegdec, "closing ffmpeg codec");

  gst_caps_replace (&ffmpegdec->last_caps, NULL);

  gst_ffmpeg_avcodec_close (ffmpegdec->context);
  ffmpegdec->opened = FALSE;

  /* forget any strides observed during the previous session */
  for (n = 0; n < G_N_ELEMENTS (ffmpegdec->stride); n++)
    ffmpegdec->stride[n] = -1;

  gst_buffer_replace (&ffmpegdec->palette, NULL);

  if (ffmpegdec->context->extradata) {
    av_free (ffmpegdec->context->extradata);
    ffmpegdec->context->extradata = NULL;
  }

  /* optionally bring the context back to pristine defaults so it can be
   * reopened; re-establish the opaque backpointer afterwards */
  if (reset) {
    if (avcodec_get_context_defaults3 (ffmpegdec->context,
            oclass->in_plugin) < 0) {
      GST_DEBUG_OBJECT (ffmpegdec, "Failed to set context defaults");
      return FALSE;
    }
    ffmpegdec->context->opaque = ffmpegdec;
  }

  return TRUE;
}

/* with LOCK */
static gboolean
345
gst_ffmpegviddec_open (GstFFMpegVidDec * ffmpegdec)
346
{
347
  GstFFMpegVidDecClass *oclass;
348
  gint i;
349

350
  oclass = (GstFFMpegVidDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
351 352 353 354

  if (gst_ffmpeg_avcodec_open (ffmpegdec->context, oclass->in_plugin) < 0)
    goto could_not_open;

355 356 357
  for (i = 0; i < G_N_ELEMENTS (ffmpegdec->stride); i++)
    ffmpegdec->stride[i] = -1;

358 359
  ffmpegdec->opened = TRUE;

360
  GST_LOG_OBJECT (ffmpegdec, "Opened libav codec %s, id %d",
361 362
      oclass->in_plugin->name, oclass->in_plugin->id);

363 364 365
  gst_ffmpegviddec_context_set_flags (ffmpegdec->context,
      CODEC_FLAG_OUTPUT_CORRUPT, ffmpegdec->output_corrupt);

366 367 368 369 370
  return TRUE;

  /* ERRORS */
could_not_open:
  {
371
    gst_ffmpegviddec_close (ffmpegdec, TRUE);
372
    GST_DEBUG_OBJECT (ffmpegdec, "avdec_%s: Failed to open libav codec",
373 374 375 376 377
        oclass->in_plugin->name);
    return FALSE;
  }
}

378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396
/* Stash the palette from the caps in @state, if a usable one is present. */
static void
gst_ffmpegviddec_get_palette (GstFFMpegVidDec * ffmpegdec,
    GstVideoCodecState * state)
{
  GstStructure *str;
  const GValue *palette_v;

  str = gst_caps_get_structure (state->caps, 0);

  palette_v = gst_structure_get_value (str, "palette_data");
  if (palette_v != NULL) {
    GstBuffer *palette = gst_value_get_buffer (palette_v);

    GST_DEBUG ("got palette data %p", palette);
    /* only keep palettes large enough for libav's fixed-size table */
    if (gst_buffer_get_size (palette) >= AVPALETTE_SIZE)
      gst_buffer_replace (&ffmpegdec->palette, palette);
  }
}


397
/* GstVideoDecoder::set_format implementation.
 *
 * Configures the libav context from the new input @state: closes any
 * previously opened session, translates the caps into context fields,
 * chooses the threading setup and (re)opens the codec. Returns FALSE when
 * closing the old session or opening the codec fails.
 *
 * Locking: runs mostly under the object lock; it is temporarily dropped
 * around gst_ffmpegviddec_finish() (which may push buffers downstream) and
 * for the final gst_video_decoder_set_latency() call. */
static gboolean
gst_ffmpegviddec_set_format (GstVideoDecoder * decoder,
    GstVideoCodecState * state)
{
  GstFFMpegVidDec *ffmpegdec;
  GstFFMpegVidDecClass *oclass;
  GstClockTime latency = GST_CLOCK_TIME_NONE;
  gboolean ret = FALSE;

  ffmpegdec = (GstFFMpegVidDec *) decoder;
  oclass = (GstFFMpegVidDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));

  /* nothing to do when the caps did not actually change */
  if (ffmpegdec->last_caps != NULL &&
      gst_caps_is_equal (ffmpegdec->last_caps, state->caps)) {
    return TRUE;
  }

  GST_DEBUG_OBJECT (ffmpegdec, "setcaps called");

  GST_OBJECT_LOCK (ffmpegdec);
  /* stupid check for VC1 */
  if ((oclass->in_plugin->id == AV_CODEC_ID_WMV3) ||
      (oclass->in_plugin->id == AV_CODEC_ID_VC1))
    oclass->in_plugin->id = gst_ffmpeg_caps_to_codecid (state->caps, NULL);

  /* close old session */
  if (ffmpegdec->opened) {
    /* drop the lock: finish() can push pending frames downstream */
    GST_OBJECT_UNLOCK (ffmpegdec);
    gst_ffmpegviddec_finish (decoder);
    GST_OBJECT_LOCK (ffmpegdec);
    if (!gst_ffmpegviddec_close (ffmpegdec, TRUE)) {
      GST_OBJECT_UNLOCK (ffmpegdec);
      return FALSE;
    }
    /* clear the cached picture/context parameters so the next decoded
     * picture forces a renegotiation */
    ffmpegdec->pic_pix_fmt = 0;
    ffmpegdec->pic_width = 0;
    ffmpegdec->pic_height = 0;
    ffmpegdec->pic_par_n = 0;
    ffmpegdec->pic_par_d = 0;
    ffmpegdec->ctx_ticks = 0;
    ffmpegdec->ctx_time_n = 0;
    ffmpegdec->ctx_time_d = 0;
  }

  gst_caps_replace (&ffmpegdec->last_caps, state->caps);

  /* set buffer functions */
  ffmpegdec->context->get_buffer2 = gst_ffmpegviddec_get_buffer2;
  ffmpegdec->context->draw_horiz_band = NULL;

  /* reset coded_width/_height to prevent it being reused from last time when
   * the codec is opened again, causing a mismatch and possible
   * segfault/corruption. (Common scenario when renegotiating caps) */
  ffmpegdec->context->coded_width = 0;
  ffmpegdec->context->coded_height = 0;

  GST_LOG_OBJECT (ffmpegdec, "size %dx%d", ffmpegdec->context->width,
      ffmpegdec->context->height);

  /* FIXME : Create a method that takes GstVideoCodecState instead */
  /* get size and so */
  gst_ffmpeg_caps_with_codecid (oclass->in_plugin->id,
      oclass->in_plugin->type, state->caps, ffmpegdec->context);

  GST_LOG_OBJECT (ffmpegdec, "size after %dx%d", ffmpegdec->context->width,
      ffmpegdec->context->height);

  gst_ffmpegviddec_get_palette (ffmpegdec, state);

  /* libav needs a valid time_base; fall back to 25 fps */
  if (!ffmpegdec->context->time_base.den || !ffmpegdec->context->time_base.num) {
    GST_DEBUG_OBJECT (ffmpegdec, "forcing 25/1 framerate");
    ffmpegdec->context->time_base.num = 1;
    ffmpegdec->context->time_base.den = 25;
  }

  /* workaround encoder bugs */
  ffmpegdec->context->workaround_bugs |= FF_BUG_AUTODETECT;
  ffmpegdec->context->err_recognition = 1;

  /* for slow cpus */
  ffmpegdec->context->lowres = ffmpegdec->lowres;
  ffmpegdec->context->skip_frame = ffmpegdec->skip_frame;

  /* ffmpeg can draw motion vectors on top of the image (not every decoder
   * supports it) */
  ffmpegdec->context->debug_mv = ffmpegdec->debug_mv;

  {
    GstQuery *query;
    gboolean is_live;

    /* max-threads == 0 means "auto": ask the helper unless libav can pick
     * the thread count itself */
    if (ffmpegdec->max_threads == 0) {
      if (!(oclass->in_plugin->capabilities & CODEC_CAP_AUTO_THREADS))
        ffmpegdec->context->thread_count = gst_ffmpeg_auto_max_threads ();
      else
        ffmpegdec->context->thread_count = 0;
    } else
      ffmpegdec->context->thread_count = ffmpegdec->max_threads;

    query = gst_query_new_latency ();
    is_live = FALSE;
    /* Check if upstream is live. If it isn't we can enable frame based
     * threading, which is adding latency */
    if (gst_pad_peer_query (GST_VIDEO_DECODER_SINK_PAD (ffmpegdec), query)) {
      gst_query_parse_latency (query, &is_live, NULL, NULL);
    }
    gst_query_unref (query);

    if (is_live)
      ffmpegdec->context->thread_type = FF_THREAD_SLICE;
    else
      ffmpegdec->context->thread_type = FF_THREAD_SLICE | FF_THREAD_FRAME;
  }

  /* open codec - we don't select an output pix_fmt yet,
   * simply because we don't know! We only get it
   * during playback... */
  if (!gst_ffmpegviddec_open (ffmpegdec))
    goto open_failed;

  if (ffmpegdec->input_state)
    gst_video_codec_state_unref (ffmpegdec->input_state);
  ffmpegdec->input_state = gst_video_codec_state_ref (state);

  /* report the reordering delay caused by B-frames as latency */
  if (ffmpegdec->input_state->info.fps_n) {
    GstVideoInfo *info = &ffmpegdec->input_state->info;
    latency = gst_util_uint64_scale_ceil (
        (ffmpegdec->context->has_b_frames) * GST_SECOND, info->fps_d,
        info->fps_n);
  }

  ret = TRUE;

done:
  GST_OBJECT_UNLOCK (ffmpegdec);

  if (GST_CLOCK_TIME_IS_VALID (latency))
    gst_video_decoder_set_latency (decoder, latency, latency);

  return ret;

  /* ERRORS */
open_failed:
  {
    GST_DEBUG_OBJECT (ffmpegdec, "Failed to open");
    goto done;
  }
}

546 547
/* Per-picture bookkeeping attached to an AVFrame via its opaque pointer,
 * freed from dummy_free_buffer() when libav releases the picture. */
typedef struct
{
  GstFFMpegVidDec *ffmpegdec;   /* decoder that created this frame */
  GstVideoCodecFrame *frame;    /* base-class frame; ref owned here, released
                                 * in gst_ffmpegviddec_video_frame_free() */
  gboolean mapped;              /* TRUE when vframe holds a mapping of buffer */
  GstVideoFrame vframe;         /* mapping exposing buffer's planes to libav */
  GstBuffer *buffer;            /* pooled output buffer (direct rendering) */
  AVBufferRef *avbuffer;        /* libav's own buffer, kept alive when the
                                 * default allocator was used instead */
} GstFFMpegVidDecVideoFrame;

static GstFFMpegVidDecVideoFrame *
557 558
gst_ffmpegviddec_video_frame_new (GstFFMpegVidDec * ffmpegdec,
    GstVideoCodecFrame * frame)
559 560 561 562
{
  GstFFMpegVidDecVideoFrame *dframe;

  dframe = g_slice_new0 (GstFFMpegVidDecVideoFrame);
563
  dframe->ffmpegdec = ffmpegdec;
Wim Taymans's avatar
Wim Taymans committed
564
  dframe->frame = frame;
565

566 567
  GST_DEBUG_OBJECT (ffmpegdec, "new video frame %p", dframe);

568 569 570 571
  return dframe;
}

static void
572 573
gst_ffmpegviddec_video_frame_free (GstFFMpegVidDec * ffmpegdec,
    GstFFMpegVidDecVideoFrame * frame)
574
{
575 576
  GST_DEBUG_OBJECT (ffmpegdec, "free video frame %p", frame);

577 578
  if (frame->mapped)
    gst_video_frame_unmap (&frame->vframe);
579
  gst_video_decoder_release_frame (GST_VIDEO_DECODER (ffmpegdec), frame->frame);
580
  gst_buffer_replace (&frame->buffer, NULL);
581 582 583
  if (frame->avbuffer) {
    av_buffer_unref (&frame->avbuffer);
  }
584 585 586
  g_slice_free (GstFFMpegVidDecVideoFrame, frame);
}

587 588 589 590 591 592 593 594
/* AVBufferRef free callback: tears down the GstFFMpegVidDecVideoFrame that
 * was stored in @opaque when the picture was handed to libav. */
static void
dummy_free_buffer (void *opaque, uint8_t * data)
{
  GstFFMpegVidDecVideoFrame *dframe = opaque;

  gst_ffmpegviddec_video_frame_free (dframe->ffmpegdec, dframe);
}

595 596
/* This function prepares the pool configuration for direct rendering. To use
 * this method, the codec should support direct rendering and the pool should
 * support video meta and video alignment */
static void
gst_ffmpegvideodec_prepare_dr_pool (GstFFMpegVidDec * ffmpegdec,
    GstBufferPool * pool, GstVideoInfo * info, GstStructure * config)
{
  GstAllocationParams params;
  GstVideoAlignment align;
  GstAllocator *allocator = NULL;
  gint width, height;
  gint linesize_align[4];
  gint i;
  guint edge;
  gsize max_align;

  width = GST_VIDEO_INFO_WIDTH (info);
  height = GST_VIDEO_INFO_HEIGHT (info);

  /* let ffmpeg find the alignment and padding */
  avcodec_align_dimensions2 (ffmpegdec->context, &width, &height,
      linesize_align);

  /* no edge padding needed when the codec can emulate edges itself */
  if (ffmpegdec->context->flags & CODEC_FLAG_EMU_EDGE)
    edge = 0;
  else
    edge = avcodec_get_edge_width ();

  /* increase the size for the padding */
  width += edge << 1;
  height += edge << 1;

  /* distribute the extra space as padding around the visible rectangle */
  align.padding_top = edge;
  align.padding_left = edge;
  align.padding_right = width - GST_VIDEO_INFO_WIDTH (info) - edge;
  align.padding_bottom = height - GST_VIDEO_INFO_HEIGHT (info) - edge;

  /* add extra padding to match libav buffer allocation sizes */
  align.padding_bottom++;

  gst_buffer_pool_config_get_allocator (config, &allocator, &params);

  /* merge our default stride alignment, the allocator's alignment and
   * libav's per-plane requirements into one alignment mask */
  max_align = DEFAULT_STRIDE_ALIGN;
  max_align |= params.align;

  for (i = 0; i < GST_VIDEO_MAX_PLANES; i++) {
    if (linesize_align[i] > 0)
      max_align |= linesize_align[i] - 1;
  }

  for (i = 0; i < GST_VIDEO_MAX_PLANES; i++)
    align.stride_align[i] = max_align;

  params.align = max_align;

  gst_buffer_pool_config_set_allocator (config, allocator, &params);

  GST_DEBUG_OBJECT (ffmpegdec, "aligned dimension %dx%d -> %dx%d "
      "padding t:%u l:%u r:%u b:%u, stride_align %d:%d:%d:%d",
      GST_VIDEO_INFO_WIDTH (info),
      GST_VIDEO_INFO_HEIGHT (info), width, height, align.padding_top,
      align.padding_left, align.padding_right, align.padding_bottom,
      align.stride_align[0], align.stride_align[1], align.stride_align[2],
      align.stride_align[3]);

  gst_buffer_pool_config_add_option (config,
      GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT);
  gst_buffer_pool_config_set_video_alignment (config, &align);
}

/* Make sure ffmpegdec->internal_pool exists and matches the dimensions and
 * pixel format of @picture; (re)creates and activates the pool otherwise.
 * Used by get_buffer2() on the direct-rendering path. */
static void
gst_ffmpegviddec_ensure_internal_pool (GstFFMpegVidDec * ffmpegdec,
    AVFrame * picture)
{
  GstAllocationParams params = DEFAULT_ALLOC_PARAM;
  GstVideoInfo info;
  GstVideoFormat format;
  GstCaps *caps;
  GstStructure *config;
  gint i;

  /* fast path: existing pool still matches the picture */
  if (ffmpegdec->internal_pool != NULL &&
      ffmpegdec->pool_width == picture->width &&
      ffmpegdec->pool_height == picture->height &&
      ffmpegdec->pool_format == picture->format)
    return;

  GST_DEBUG_OBJECT (ffmpegdec, "Updating internal pool (%i, %i)",
      picture->width, picture->height);

  format = gst_ffmpeg_pixfmt_to_videoformat (picture->format);
  gst_video_info_set_format (&info, format, picture->width, picture->height);

  /* new pool means new strides; reset what we remembered */
  for (i = 0; i < G_N_ELEMENTS (ffmpegdec->stride); i++)
    ffmpegdec->stride[i] = -1;

  if (ffmpegdec->internal_pool)
    gst_object_unref (ffmpegdec->internal_pool);

  ffmpegdec->internal_pool = gst_video_buffer_pool_new ();
  config = gst_buffer_pool_get_config (ffmpegdec->internal_pool);

  caps = gst_video_info_to_caps (&info);
  gst_buffer_pool_config_set_params (config, caps, info.size, 2, 0);
  gst_buffer_pool_config_set_allocator (config, NULL, &params);
  gst_buffer_pool_config_add_option (config, GST_BUFFER_POOL_OPTION_VIDEO_META);

  /* apply libav's alignment/padding requirements to the pool config */
  gst_ffmpegvideodec_prepare_dr_pool (ffmpegdec,
      ffmpegdec->internal_pool, &info, config);
  /* generic video pool never fails */
  gst_buffer_pool_set_config (ffmpegdec->internal_pool, config);
  gst_caps_unref (caps);

  gst_buffer_pool_set_active (ffmpegdec->internal_pool, TRUE);

  /* Remember pool size so we can detect changes */
  ffmpegdec->pool_width = picture->width;
  ffmpegdec->pool_height = picture->height;
  ffmpegdec->pool_format = picture->format;
  ffmpegdec->pool_info = info;
}

/* Direct rendering requires both the property to be enabled and the codec
 * to advertise DR1 support. */
static gboolean
gst_ffmpegviddec_can_direct_render (GstFFMpegVidDec * ffmpegdec)
{
  GstFFMpegVidDecClass *oclass;

  if (!ffmpegdec->direct_rendering)
    return FALSE;

  oclass = (GstFFMpegVidDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
  return (oclass->in_plugin->capabilities & CODEC_CAP_DR1) != 0;
}

729 730
/* called when ffmpeg wants us to allocate a buffer to write the decoded frame
 * into. We try to give it memory from our pool */
/* AVCodecContext::get_buffer2 callback.
 *
 * Looks up the GstVideoCodecFrame matching picture->reordered_opaque (the
 * system frame number set when the packet was submitted), acquires an
 * output buffer from the internal pool when direct rendering is possible,
 * and points picture->data/linesize at the mapped planes. Falls back to
 * libav's default allocator otherwise. Returns 0 on success, -1 on error. */
static int
gst_ffmpegviddec_get_buffer2 (AVCodecContext * context, AVFrame * picture,
    int flags)
{
  GstVideoCodecFrame *frame;
  GstFFMpegVidDecVideoFrame *dframe;
  GstFFMpegVidDec *ffmpegdec;
  gint c;
  GstFlowReturn ret;

  ffmpegdec = (GstFFMpegVidDec *) context->opaque;

  GST_DEBUG_OBJECT (ffmpegdec, "getting buffer picture %p", picture);

  /* apply the last info we have seen to this picture, when we get the
   * picture back from ffmpeg we can use this to correctly timestamp the output
   * buffer */
  GST_DEBUG_OBJECT (ffmpegdec, "opaque value SN %d",
      (gint32) picture->reordered_opaque);

  frame =
      gst_video_decoder_get_frame (GST_VIDEO_DECODER (ffmpegdec),
      picture->reordered_opaque);
  if (G_UNLIKELY (frame == NULL))
    goto no_frame;

  /* now it has a buffer allocated, so it is real and will also
   * be _released */
  GST_VIDEO_CODEC_FRAME_FLAG_UNSET (frame,
      GST_VIDEO_CODEC_FRAME_FLAG_DECODE_ONLY);

  if (G_UNLIKELY (frame->output_buffer != NULL))
    goto duplicate_frame;

  /* GstFFMpegVidDecVideoFrame receives the frame ref */
  if (picture->opaque) {
    /* reuse the bookkeeping already attached to this picture */
    dframe = picture->opaque;
    dframe->frame = frame;
  } else {
    picture->opaque = dframe =
        gst_ffmpegviddec_video_frame_new (ffmpegdec, frame);
  }

  GST_DEBUG_OBJECT (ffmpegdec, "storing opaque %p", dframe);

  if (!gst_ffmpegviddec_can_direct_render (ffmpegdec))
    goto no_dr;

  gst_ffmpegviddec_ensure_internal_pool (ffmpegdec, picture);

  ret = gst_buffer_pool_acquire_buffer (ffmpegdec->internal_pool,
      &frame->output_buffer, NULL);
  if (ret != GST_FLOW_OK)
    goto alloc_failed;

  /* piggy-backed alloc'ed on the frame,
   * and there was much rejoicing and we are grateful.
   * Now take away buffer from frame, we will give it back later when decoded.
   * This allows multiple request for a buffer per frame; unusual but possible. */
  gst_buffer_replace (&dframe->buffer, frame->output_buffer);
  gst_buffer_replace (&frame->output_buffer, NULL);

  /* Fill avpicture */
  if (!gst_video_frame_map (&dframe->vframe, &ffmpegdec->pool_info,
          dframe->buffer, GST_MAP_READWRITE))
    goto map_failed;
  dframe->mapped = TRUE;

  /* hand the mapped plane pointers and strides to libav */
  for (c = 0; c < AV_NUM_DATA_POINTERS; c++) {
    if (c < GST_VIDEO_INFO_N_PLANES (&ffmpegdec->pool_info)) {
      picture->data[c] = GST_VIDEO_FRAME_PLANE_DATA (&dframe->vframe, c);
      picture->linesize[c] = GST_VIDEO_FRAME_PLANE_STRIDE (&dframe->vframe, c);

      if (ffmpegdec->stride[c] == -1)
        ffmpegdec->stride[c] = picture->linesize[c];

      /* libav does not allow stride changes, decide allocation should check
       * before replacing the internal pool with a downstream pool.
       * https://bugzilla.gnome.org/show_bug.cgi?id=704769
       * https://bugzilla.libav.org/show_bug.cgi?id=556
       */
      g_assert (picture->linesize[c] == ffmpegdec->stride[c]);
    } else {
      picture->data[c] = NULL;
      picture->linesize[c] = 0;
    }
    GST_LOG_OBJECT (ffmpegdec, "linesize %d, data %p", picture->linesize[c],
        picture->data[c]);
  }

  /* a zero-sized AVBuffer whose free callback releases dframe when libav is
   * done with the picture */
  picture->buf[0] = av_buffer_create (NULL, 0, dummy_free_buffer, dframe, 0);

  GST_LOG_OBJECT (ffmpegdec, "returned frame %p", dframe->buffer);

  return 0;

no_dr:
  {
    int c;
    int ret = avcodec_default_get_buffer2 (context, picture, flags);

    GST_LOG_OBJECT (ffmpegdec, "direct rendering disabled, fallback alloc");

    for (c = 0; c < AV_NUM_DATA_POINTERS; c++) {
      ffmpegdec->stride[c] = picture->linesize[c];
    }
    /* Wrap our buffer around the default one to be able to have a callback
     * when our data can be freed. Just putting our data into the first free
     * buffer might not work if there are too many allocated already
     */
    if (picture->buf[0]) {
      dframe->avbuffer = picture->buf[0];
      picture->buf[0] =
          av_buffer_create (picture->buf[0]->data, picture->buf[0]->size,
          dummy_free_buffer, dframe, 0);
    } else {
      picture->buf[0] =
          av_buffer_create (NULL, 0, dummy_free_buffer, dframe, 0);
    }

    return ret;
  }
alloc_failed:
  {
    GST_ELEMENT_ERROR (ffmpegdec, RESOURCE, FAILED,
        ("Unable to allocate memory"),
        ("The downstream pool failed to allocated buffer."));
    return -1;
  }
map_failed:
  {
    GST_ELEMENT_ERROR (ffmpegdec, RESOURCE, OPEN_READ_WRITE,
        ("Cannot access memory for read and write operation."),
        ("The video memory allocated from downstream pool could not mapped for"
            "read and write."));
    return -1;
  }
duplicate_frame:
  {
    GST_WARNING_OBJECT (ffmpegdec, "already alloc'ed output buffer for frame");
    gst_video_codec_frame_unref (frame);
    return -1;
  }
no_frame:
  {
    GST_WARNING_OBJECT (ffmpegdec, "Couldn't get codec frame !");
    return -1;
  }
}

881
static gboolean
882
picture_changed (GstFFMpegVidDec * ffmpegdec, AVFrame * picture)
883
{
884 885 886 887 888 889 890 891 892 893 894 895
  return !(ffmpegdec->pic_width == picture->width
      && ffmpegdec->pic_height == picture->height
      && ffmpegdec->pic_pix_fmt == picture->format
      && ffmpegdec->pic_par_n == picture->sample_aspect_ratio.num
      && ffmpegdec->pic_par_d == picture->sample_aspect_ratio.den
      && ffmpegdec->pic_interlaced == picture->interlaced_frame);
}

/* Returns TRUE if the codec context timing parameters (ticks per frame
 * and time base) differ from the cached values, i.e. the output framerate
 * may need to be recomputed. */
static gboolean
context_changed (GstFFMpegVidDec * ffmpegdec, AVCodecContext * context)
{
  if (ffmpegdec->ctx_ticks != context->ticks_per_frame)
    return TRUE;
  if (ffmpegdec->ctx_time_n != context->time_base.num)
    return TRUE;
  if (ffmpegdec->ctx_time_d != context->time_base.den)
    return TRUE;

  return FALSE;
}

static gboolean
update_video_context (GstFFMpegVidDec * ffmpegdec, AVCodecContext * context,
902
    AVFrame * picture)
903
{
904
  if (!picture_changed (ffmpegdec, picture)
905
      && !context_changed (ffmpegdec, context))
906
    return FALSE;
907

908
  GST_DEBUG_OBJECT (ffmpegdec,
909 910 911
      "Renegotiating video from %dx%d@ %d:%d PAR %d/%d fps pixfmt %d to %dx%d@ %d:%d PAR %d/%d fps pixfmt %d",
      ffmpegdec->pic_width, ffmpegdec->pic_height,
      ffmpegdec->pic_par_n, ffmpegdec->pic_par_d,
912
      ffmpegdec->ctx_time_n, ffmpegdec->ctx_time_d,
913 914 915 916 917 918 919 920 921 922 923 924
      ffmpegdec->pic_pix_fmt,
      picture->width, picture->height,
      picture->sample_aspect_ratio.num,
      picture->sample_aspect_ratio.den,
      context->time_base.num, context->time_base.den, picture->format);

  ffmpegdec->pic_pix_fmt = picture->format;
  ffmpegdec->pic_width = picture->width;
  ffmpegdec->pic_height = picture->height;
  ffmpegdec->pic_par_n = picture->sample_aspect_ratio.num;
  ffmpegdec->pic_par_d = picture->sample_aspect_ratio.den;
  ffmpegdec->pic_interlaced = picture->interlaced_frame;
925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948
  ffmpegdec->ctx_ticks = context->ticks_per_frame;
  ffmpegdec->ctx_time_n = context->time_base.num;
  ffmpegdec->ctx_time_d = context->time_base.den;

  return TRUE;
}

/* Pick the pixel-aspect-ratio for the output caps.
 *
 * Both the demuxer (in_info) and the decoder may supply a PAR. Preference:
 * - only one side set: use it;
 * - both set and exactly one of them is 1:1: use the non-1:1 one;
 * - both set and both non-1:1: trust the demuxer;
 * - neither set: fall back to 1:1. */
static void
gst_ffmpegviddec_update_par (GstFFMpegVidDec * ffmpegdec,
    GstVideoInfo * in_info, GstVideoInfo * out_info)
{
  gboolean demuxer_par_set = FALSE;
  gboolean decoder_par_set = FALSE;
  gint demuxer_num = 1, demuxer_denom = 1;
  gint decoder_num = 1, decoder_denom = 1;
  gboolean prefer_decoder;

  if (in_info->par_n && in_info->par_d) {
    demuxer_num = in_info->par_n;
    demuxer_denom = in_info->par_d;
    demuxer_par_set = TRUE;
    GST_DEBUG_OBJECT (ffmpegdec, "Demuxer PAR: %d:%d", demuxer_num,
        demuxer_denom);
  }

  if (ffmpegdec->pic_par_n && ffmpegdec->pic_par_d) {
    decoder_num = ffmpegdec->pic_par_n;
    decoder_denom = ffmpegdec->pic_par_d;
    decoder_par_set = TRUE;
    GST_DEBUG_OBJECT (ffmpegdec, "Decoder PAR: %d:%d", decoder_num,
        decoder_denom);
  }

  if (!demuxer_par_set && !decoder_par_set) {
    GST_DEBUG_OBJECT (ffmpegdec,
        "Neither demuxer nor codec provide a pixel-aspect-ratio");
    out_info->par_n = 1;
    out_info->par_d = 1;
    return;
  }

  if (decoder_par_set && !demuxer_par_set) {
    prefer_decoder = TRUE;
  } else if (demuxer_par_set && !decoder_par_set) {
    prefer_decoder = FALSE;
  } else if (demuxer_num == demuxer_denom && decoder_num != decoder_denom) {
    /* demuxer PAR is 1:1 but the decoder's is not: use the decoder's */
    prefer_decoder = TRUE;
  } else {
    /* decoder PAR is 1:1 (and demuxer's is not), or both are non-1:1:
     * either way the demuxer wins */
    prefer_decoder = FALSE;
  }

  if (prefer_decoder) {
    GST_DEBUG_OBJECT (ffmpegdec,
        "Setting decoder provided pixel-aspect-ratio of %u:%u", decoder_num,
        decoder_denom);
    out_info->par_n = decoder_num;
    out_info->par_d = decoder_denom;
  } else {
    GST_DEBUG_OBJECT (ffmpegdec,
        "Setting demuxer provided pixel-aspect-ratio of %u:%u", demuxer_num,
        demuxer_denom);
    out_info->par_n = demuxer_num;
    out_info->par_d = demuxer_denom;
  }
}

static gboolean
1007
gst_ffmpegviddec_negotiate (GstFFMpegVidDec * ffmpegdec,
1008
    AVCodecContext * context, AVFrame * picture)
1009 1010 1011 1012 1013
{
  GstVideoFormat fmt;
  GstVideoInfo *in_info, *out_info;
  GstVideoCodecState *output_state;
  gint fps_n, fps_d;
1014
  GstClockTime latency;
1015

1016
  if (!update_video_context (ffmpegdec, context, picture))
1017
    return TRUE;
1018

1019
  fmt = gst_ffmpeg_pixfmt_to_videoformat (ffmpegdec->pic_pix_fmt);
1020 1021 1022
  if (G_UNLIKELY (fmt == GST_VIDEO_FORMAT_UNKNOWN))
    goto unknown_format;

1023
  output_state =
1024
      gst_video_decoder_set_output_state (GST_VIDEO_DECODER (ffmpegdec), fmt,
1025
      ffmpegdec->pic_width, ffmpegdec->pic_height, ffmpegdec->input_state);
1026 1027
  if (ffmpegdec->output_state)
    gst_video_codec_state_unref (ffmpegdec->output_state);
1028 1029 1030 1031 1032 1033
  ffmpegdec->output_state = output_state;

  in_info = &ffmpegdec->input_state->info;
  out_info = &ffmpegdec->output_state->info;

  /* set the interlaced flag */
1034
  if (ffmpegdec->pic_interlaced)
1035 1036 1037 1038
    out_info->interlace_mode = GST_VIDEO_INTERLACE_MODE_MIXED;
  else
    out_info->interlace_mode = GST_VIDEO_INTERLACE_MODE_PROGRESSIVE;

1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055
  switch (context->chroma_sample_location) {
    case 1:
      out_info->chroma_site = GST_VIDEO_CHROMA_SITE_MPEG2;
      break;
    case 2:
      out_info->chroma_site = GST_VIDEO_CHROMA_SITE_JPEG;
      break;
    case 3:
      out_info->chroma_site = GST_VIDEO_CHROMA_SITE_DV;
      break;
    case 4:
      out_info->chroma_site = GST_VIDEO_CHROMA_SITE_V_COSITED;
      break;
    default:
      break;
  }

1056
  /* try to find a good framerate */
1057 1058
  if ((in_info->fps_d && in_info->fps_n) ||
      GST_VIDEO_INFO_FLAG_IS_SET (in_info, GST_VIDEO_FLAG_VARIABLE_FPS)) {
1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077
    /* take framerate from input when it was specified (#313970) */
    fps_n = in_info->fps_n;
    fps_d = in_info->fps_d;
  } else {
    fps_n = ffmpegdec->ctx_time_d / ffmpegdec->ctx_ticks;
    fps_d = ffmpegdec->ctx_time_n;

    if (!fps_d) {
      GST_LOG_OBJECT (ffmpegdec, "invalid framerate: %d/0, -> %d/1", fps_n,
          fps_n);
      fps_d = 1;
    }
    if (gst_util_fraction_compare (fps_n, fps_d, 1000, 1) > 0) {
      GST_LOG_OBJECT (ffmpegdec, "excessive framerate: %d/%d, -> 0/1", fps_n,
          fps_d);
      fps_n = 0;
      fps_d = 1;
    }
  }
1078

1079 1080 1081 1082 1083 1084
  GST_LOG_OBJECT (ffmpegdec, "setting framerate: %d/%d", fps_n, fps_d);
  out_info->fps_n = fps_n;
  out_info->fps_d = fps_d;

  /* calculate and update par now */
  gst_ffmpegviddec_update_par (ffmpegdec, in_info, out_info);
1085

1086 1087 1088 1089 1090 1091 1092 1093
  /* Copy stereo/multiview info from upstream if set */
  if (GST_VIDEO_INFO_MULTIVIEW_MODE (in_info) != GST_VIDEO_MULTIVIEW_MODE_NONE) {
    GST_VIDEO_INFO_MULTIVIEW_MODE (out_info) =
        GST_VIDEO_INFO_MULTIVIEW_MODE (in_info);
    GST_VIDEO_INFO_MULTIVIEW_FLAGS (out_info) =
        GST_VIDEO_INFO_MULTIVIEW_FLAGS (in_info);
  }

1094 1095
  if (!gst_video_decoder_negotiate (GST_VIDEO_DECODER (ffmpegdec)))
    goto negotiate_failed;
1096

1097
  /* The decoder is configured, we now know the true latency */
1098 1099 1100 1101 1102 1103 1104
  if (fps_n) {
    latency =
        gst_util_uint64_scale_ceil (ffmpegdec->context->has_b_frames *
        GST_SECOND, fps_d, fps_n);
    gst_video_decoder_set_latency (GST_VIDEO_DECODER (ffmpegdec), latency,
        latency);
  }
1105

1106 1107 1108
  return TRUE;

  /* ERRORS */
1109
unknown_format:
1110
  {
1111 1112
    GST_ERROR_OBJECT (ffmpegdec,
        "decoder requires a video format unsupported by GStreamer");
1113 1114
    return FALSE;
  }
1115 1116 1117
negotiate_failed:
  {
    /* Reset so we try again next time even if force==FALSE */
1118 1119 1120 1121 1122
    ffmpegdec->pic_pix_fmt = 0;
    ffmpegdec->pic_width = 0;
    ffmpegdec->pic_height = 0;
    ffmpegdec->pic_par_n = 0;
    ffmpegdec->pic_par_d = 0;
1123 1124 1125 1126 1127 1128 1129
    ffmpegdec->ctx_ticks = 0;
    ffmpegdec->ctx_time_n = 0;
    ffmpegdec->ctx_time_d = 0;

    GST_ERROR_OBJECT (ffmpegdec, "negotiation failed");
    return FALSE;
  }
1130 1131 1132 1133 1134 1135
}

/* perform qos calculations before decoding the next frame.
 *
 * Sets the skip_frame flag and if things are really bad, skips to the next
 * keyframe.
1136
 *
1137
 */
1138
static void
1139 1140
gst_ffmpegviddec_do_qos (GstFFMpegVidDec * ffmpegdec,
    GstVideoCodecFrame * frame, gboolean * mode_switch)
1141 1142
{
  GstClockTimeDiff diff;
1143 1144
  GstSegmentFlags skip_flags =
      GST_VIDEO_DECODER_INPUT_SEGMENT (ffmpegdec).flags;
1145 1146 1147

  *mode_switch = FALSE;

1148
  if (frame == NULL)
1149
    return;
1150

1151 1152 1153 1154 1155 1156 1157 1158
  if (skip_flags & GST_SEGMENT_FLAG_TRICKMODE_KEY_UNITS) {
    ffmpegdec->context->skip_frame = AVDISCARD_NONKEY;
    *mode_switch = TRUE;
  } else if (skip_flags & GST_SEGMENT_FLAG_TRICKMODE) {
    ffmpegdec->context->skip_frame = AVDISCARD_NONREF;
    *mode_switch = TRUE;
  }

1159 1160 1161 1162 1163 1164
  if (*mode_switch == TRUE) {
    /* We've already switched mode, we can return straight away
     * without any further calculation */
    return;
  }

1165 1166 1167
  diff =
      gst_video_decoder_get_max_decode_time (GST_VIDEO_DECODER (ffmpegdec),
      frame);
1168

1169
  /* if we don't have timing info, then we don't do QoS */
1170 1171 1172
  if (G_UNLIKELY (diff == G_MAXINT64)) {
    /* Ensure the skipping strategy is the default one */
    ffmpegdec->context->skip_frame = ffmpegdec->skip_frame;
1173
    return;
1174
  }
1175

1176
  GST_DEBUG_OBJECT (ffmpegdec, "decoding time %" G_GINT64_FORMAT, diff);
1177

1178 1179 1180 1181 1182
  if (diff > 0 && ffmpegdec->context->skip_frame != AVDISCARD_DEFAULT) {
    ffmpegdec->context->skip_frame = AVDISCARD_DEFAULT;
    *mode_switch = TRUE;
    GST_DEBUG_OBJECT (ffmpegdec, "QOS: normal mode");
  }
1183

1184 1185 1186 1187 1188
  else if (diff <= 0 && ffmpegdec->context->skip_frame != AVDISCARD_NONREF) {
    ffmpegdec->context->skip_frame = AVDISCARD_NONREF;
    *mode_switch = TRUE;
    GST_DEBUG_OBJECT (ffmpegdec,
        "QOS: hurry up, diff %" G_GINT64_FORMAT " >= 0", diff);
1189 1190 1191 1192 1193
  }
}

/* get an outbuf buffer with the current picture */
static GstFlowReturn
1194
get_output_buffer (GstFFMpegVidDec * ffmpegdec, GstVideoCodecFrame * frame)
1195
{
1196
  GstFlowReturn ret = GST_FLOW_OK;
1197
  AVFrame pic, *outpic;
1198
  GstVideoFrame vframe;
1199 1200 1201 1202 1203
  GstVideoInfo *info;
  gint c;

  GST_LOG_OBJECT (ffmpegdec, "get output buffer");

1204 1205 1206
  if (!ffmpegdec->output_state)
    goto not_negotiated;

1207
  ret =
1208
      gst_video_decoder_allocate_output_frame (GST_VIDEO_DECODER (ffmpegdec),
1209
      frame);
1210 1211 1212 1213 1214 1215 1216
  if (G_UNLIKELY (ret != GST_FLOW_OK))
    goto alloc_failed;

  /* original ffmpeg code does not handle odd sizes correctly.
   * This patched up version does */
  /* Fill avpicture */
  info = &ffmpegdec->output_state->info;
1217 1218
  if (!gst_video_frame_map (&vframe, info, frame->output_buffer,
          GST_MAP_READ | GST_MAP_WRITE))
1219
    goto map_failed;
1220

1221 1222 1223 1224
  memset (&pic, 0, sizeof (pic));
  pic.format = ffmpegdec->pic_pix_fmt;
  pic.width = GST_VIDEO_FRAME_WIDTH (&vframe);
  pic.height = GST_VIDEO_FRAME_HEIGHT (&vframe);
1225
  for (c = 0; c < AV_NUM_DATA_POINTERS; c++) {
1226
    if (c < GST_VIDEO_INFO_N_PLANES (info)) {
1227
      pic.data[c] = GST_VIDEO_FRAME_PLANE_DATA (&vframe, c);
1228
      pic.linesize[c] = GST_VIDEO_FRAME_PLANE_STRIDE (&vframe, c);
1229 1230
      GST_LOG_OBJECT (ffmpegdec, "[%i] linesize %d, data %p", c,
          pic.linesize[c], pic.data[c]);
1231 1232 1233 1234 1235
    } else {
      pic.data[c] = NULL;
      pic.linesize[c] = 0;
    }
  }
1236

1237
  outpic = ffmpegdec->picture;
1238

1239 1240 1241 1242
  if (av_frame_copy (&pic, outpic) != 0) {
    GST_ERROR_OBJECT (ffmpegdec, "Failed to copy output frame");
    ret = GST_FLOW_ERROR;
  }
1243

1244 1245
  gst_video_frame_unmap (&vframe);

1246 1247 1248 1249 1250 1251 1252
  ffmpegdec->picture->reordered_opaque = -1;

  return ret;

  /* special cases */
alloc_failed:
  {
1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263
    GST_ELEMENT_ERROR (ffmpegdec, RESOURCE, FAILED,
        ("Unable to allocate memory"),
        ("The downstream pool failed to allocated buffer."));
    return ret;
  }
map_failed:
  {
    GST_ELEMENT_ERROR (ffmpegdec, RESOURCE, OPEN_READ_WRITE,
        ("Cannot access memory for read and write operation."),
        ("The video memory allocated from downstream pool could not mapped for"
            "read and write."));
1264 1265
    return ret;
  }
1266 1267 1268 1269 1270
not_negotiated:
  {
    GST_DEBUG_OBJECT (ffmpegdec, "not negotiated");
    return GST_FLOW_NOT_NEGOTIATED;
  }
1271 1272 1273 1274 1275 1276 1277 1278 1279 1280
}

/* Initialize a caller-provided AVPacket: zero all fields, then point it at
 * the given payload. The packet does not take ownership of the data. */
static void
gst_avpacket_init (AVPacket * packet, guint8 * data, guint size)
{
  memset (packet, 0, sizeof (*packet));
  packet->size = size;
  packet->data = data;
}

1281
/* gst_ffmpegviddec_[video|audio]_frame:
1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293
 * ffmpegdec:
 * data: pointer to the data to decode
 * size: size of data in bytes
 * in_timestamp: incoming timestamp.
 * in_duration: incoming duration.
 * in_offset: incoming offset (frame number).
 * ret: Return flow.
 *
 * Returns: number of bytes used in decoding. The check for successful decode is
 *   outbuf being non-NULL.
 */
static gint
1294
gst_ffmpegviddec_video_frame (GstFFMpegVidDec * ffmpegdec,
1295 1296
    guint8 * data, guint size, gint * have_data, GstVideoCodecFrame * frame,
    GstFlowReturn * ret)
1297 1298 1299
{
  gint len = -1;
  gboolean mode_switch;
1300
  GstVideoCodecFrame *out_frame;
1301
  GstFFMpegVidDecVideoFrame *out_dframe;
1302
  AVPacket packet;
1303
  GstBufferPool *pool;
1304 1305 1306 1307 1308 1309 1310 1311

  *ret = GST_FLOW_OK;

  /* in case we skip frames */
  ffmpegdec->picture->pict_type = -1;

  /* run QoS code, we don't stop decoding the frame when we are late because
   * else we might skip a reference frame */
1312
  gst_ffmpegviddec_do_qos (ffmpegdec, frame, &mode_switch);
1313

1314 1315 1316 1317
  if (frame) {
    /* save reference to the timing info */
    ffmpegdec->context->reordered_opaque = (gint64) frame->system_frame_number;
    ffmpegdec->picture->reordered_opaque = (gint64) frame->system_frame_number;
1318

1319 1320 1321
    GST_DEBUG_OBJECT (ffmpegdec, "stored opaque values idx %d",
        frame->system_frame_number);
  }
1322 1323 1324

  /* now decode the frame */
  gst_avpacket_init (&packet, data, size);
1325

1326 1327 1328 1329 1330 1331 1332 1333 1334
  if (ffmpegdec->palette) {
    guint8 *pal;

    pal = av_packet_new_side_data (&packet, AV_PKT_DATA_PALETTE,
        AVPALETTE_SIZE);
    gst_buffer_extract (ffmpegdec->palette, 0, pal, AVPALETTE_SIZE);
    GST_DEBUG_OBJECT (ffmpegdec, "copy pal %p %p", &packet, pal);
  }

1335 1336 1337 1338 1339 1340
  /* This might call into get_buffer() from another thread,
   * which would cause a deadlock. Release the lock here
   * and taking it again later seems safe
   * See https://bugzilla.gnome.org/show_bug.cgi?id=726020
   */
  GST_VIDEO_DECODER_STREAM_UNLOCK (ffmpegdec);
1341
  len = avcodec_decode_video2 (ffmpegdec->context,
1342
      ffmpegdec->picture, have_data, &packet);
1343
  GST_VIDEO_DECODER_STREAM_LOCK (ffmpegdec);
1344 1345

  GST_DEBUG_OBJECT (ffmpegdec, "after decode: len %d, have_data %d",
1346
      len, *have_data);
1347 1348 1349 1350 1351 1352 1353

  /* when we are in skip_frame mode, don't complain when ffmpeg returned
   * no data because we told it to skip stuff. */
  if (len < 0 && (mode_switch || ffmpegdec->context->skip_frame))
    len = 0;

  /* no data, we're done */
1354
  if (len < 0 || *have_data == 0)
1355 1356 1357
    goto beach;

  /* get the output picture timing info again */
1358
  out_dframe = ffmpegdec->picture->opaque;
1359
  out_frame = gst_video_codec_frame_ref (out_dframe->frame);
1360

1361 1362 1363 1364
  /* also give back a buffer allocated by the frame, if any */
  gst_buffer_replace (&out_frame->output_buffer, out_dframe->buffer);
  gst_buffer_replace (&out_dframe->buffer, NULL);

1365
  GST_DEBUG_OBJECT (ffmpegdec,
1366 1367
      "pts %" G_GUINT64_FORMAT " duration %" G_GUINT64_FORMAT,
      out_frame->pts, out_frame->duration);
1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379
  GST_DEBUG_OBJECT (ffmpegdec, "picture: pts %" G_GUINT64_FORMAT,
      (guint64) ffmpegdec->picture->pts);
  GST_DEBUG_OBJECT (ffmpegdec, "picture: num %d",
      ffmpegdec->picture->coded_picture_number);
  GST_DEBUG_OBJECT (ffmpegdec, "picture: display %d",
      ffmpegdec->picture->display_picture_number);
  GST_DEBUG_OBJECT (ffmpegdec, "picture: opaque %p",
      ffmpegdec->picture->opaque);
  GST_DEBUG_OBJECT (ffmpegdec, "picture: reordered opaque %" G_GUINT64_FORMAT,
      (guint64) ffmpegdec->picture->reordered_opaque);
  GST_DEBUG_OBJECT (ffmpegdec, "repeat_pict:%d",
      ffmpegdec->picture->repeat_pict);
1380 1381
  GST_DEBUG_OBJECT (ffmpegdec, "corrupted frame: %d",
      ! !(ffmpegdec->picture->flags & AV_FRAME_FLAG_CORRUPT));
1382

1383
  if (!gst_ffmpegviddec_negotiate (ffmpegdec, ffmpegdec->context,
1384
          ffmpegdec->picture))
1385
    goto negotiation_error;
1386

1387 1388 1389 1390 1391 1392
  pool = gst_video_decoder_get_buffer_pool (GST_VIDEO_DECODER (ffmpegdec));
  if (G_UNLIKELY (out_frame->output_buffer == NULL)) {
    *ret = get_output_buffer (ffmpegdec, out_frame);
  } else if (G_UNLIKELY (out_frame->output_buffer->pool != pool)) {
    GstBuffer *tmp = out_frame->output_buffer;
    out_frame->output_buffer = NULL;
1393
    *ret = get_output_buffer (ffmpegdec, out_frame);
1394 1395
    gst_buffer_unref (tmp);
  }
1396 1397 1398 1399 1400 1401 1402 1403 1404 1405
#ifndef G_DISABLE_ASSERT
  else {
    GstVideoMeta *vmeta = gst_buffer_get_video_meta (out_frame->output_buffer);
    if (vmeta) {
      GstVideoInfo *info = &ffmpegdec->output_state->info;
      g_assert (vmeta->width == GST_VIDEO_INFO_WIDTH (info));
      g_assert (vmeta->height == GST_VIDEO_INFO_HEIGHT (info));
    }
  }
#endif
1406
  gst_object_unref (pool);
1407

1408 1409 1410
  if (G_UNLIKELY (*ret != GST_FLOW_OK))
    goto no_output;

1411 1412 1413 1414
  /* Mark corrupted frames as corrupted */
  if (ffmpegdec->picture->flags & AV_FRAME_FLAG_CORRUPT)
    GST_BUFFER_FLAG_SET (out_frame->output_buffer, GST_BUFFER_FLAG_CORRUPTED);

1415
  if (ffmpegdec->pic_interlaced) {
1416 1417 1418 1419