/* GStreamer
 * Copyright (C) 2009 Igalia S.L.
 * Author: Iago Toral Quiroga <itoral@igalia.com>
 * Copyright (C) 2011 Mark Nauwelaerts <mark.nauwelaerts@collabora.co.uk>.
 * Copyright (C) 2011 Nokia Corporation. All rights reserved.
 *   Contact: Stefan Kost <stefan.kost@nokia.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public
 * License along with this library; if not, write to the
 * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

/**
 * SECTION:gstaudiodecoder
 * @title: GstAudioDecoder
 * @short_description: Base class for audio decoders
 * @see_also: #GstBaseTransform
 *
 * This base class is for audio decoders turning encoded data into
 * raw audio samples.
 *
 * GstAudioDecoder and subclass should cooperate as follows.
 *
 * ## Configuration
 *
 *   * Initially, GstAudioDecoder calls @start when the decoder element
 *     is activated, which allows subclass to perform any global setup.
 *     Base class (context) parameters can already be set according to subclass
 *     capabilities (or possibly upon receiving more information in subsequent
 *     @set_format).
 *   * GstAudioDecoder calls @set_format to inform subclass of the format
 *     of input audio data that it is about to receive.
 *     While unlikely, it might be called more than once, if changing input
 *     parameters require reconfiguration.
 *   * GstAudioDecoder calls @stop at end of all processing.
 *
 * As of configuration stage, and throughout processing, GstAudioDecoder
 * provides various (context) parameters, e.g. describing the format of
 * output audio data (valid when output caps have been set) or current parsing state.
 * Conversely, subclass can and should configure context to inform
 * base class of its expectation w.r.t. buffer handling.
 *
 * ## Data processing
 *     * Base class gathers input data, and optionally allows subclass
 *       to parse this into subsequently manageable (as defined by subclass)
 *       chunks.  Such chunks are subsequently referred to as 'frames',
 *       though they may or may not correspond to 1 (or more) audio format frame.
 *     * Input frame is provided to subclass' @handle_frame.
 *     * If codec processing results in decoded data, subclass should call
 *       @gst_audio_decoder_finish_frame to have decoded data pushed
 *       downstream.
 *     * Just prior to actually pushing a buffer downstream,
 *       it is passed to @pre_push.  Subclass should either use this callback
 *       to arrange for additional downstream pushing or otherwise ensure such
 *       custom pushing occurs after at least a method call has finished since
 *       setting src pad caps.
 *     * During the parsing process GstAudioDecoderClass will handle both
 *       srcpad and sinkpad events. Sink events will be passed to subclass
 *       if @event callback has been provided.
 *
 * ## Shutdown phase
 *
 *   * GstAudioDecoder class calls @stop to inform the subclass that data
 *     parsing will be stopped.
 *
 * Subclass is responsible for providing pad template caps for
 * source and sink pads. The pads need to be named "sink" and "src". It also
 * needs to set the fixed caps on srcpad, when the format is ensured.  This
 * is typically when base class calls subclass' @set_format function, though
 * it might be delayed until calling @gst_audio_decoder_finish_frame.
 *
 * In summary, above process should have subclass concentrating on
 * codec data processing while leaving other matters to base class,
 * such as most notably timestamp handling.  While it may exert more control
 * in this area (see e.g. @pre_push), it is very much not recommended.
 *
 * In particular, base class will try to arrange for perfect output timestamps
 * as much as possible while tracking upstream timestamps.
 * To this end, if deviation between the next ideal expected perfect timestamp
 * and upstream exceeds #GstAudioDecoder:tolerance, then resync to upstream
 * occurs (which would happen always if the tolerance mechanism is disabled).
 *
 * In non-live pipelines, baseclass can also (configurably) arrange for
 * output buffer aggregation which may help to reduce large(r) numbers of
 * small(er) buffers being pushed and processed downstream.
 *
 * On the other hand, it should be noted that baseclass only provides limited
 * seeking support (upon explicit subclass request), as full-fledged support
 * should rather be left to upstream demuxer, parser or alike.  This simple
 * approach caters for seeking and duration reporting using estimated input
 * bitrates.
 *
 * Things that subclass need to take care of:
 *
 *   * Provide pad templates
 *   * Set source pad caps when appropriate
 *   * Set user-configurable properties to sane defaults for format and
 *      implementing codec at hand, and convey some subclass capabilities and
 *      expectations in context.
 *
 *   * Accept data in @handle_frame and provide decoded results to
 *      @gst_audio_decoder_finish_frame.  If it is prepared to perform
 *      PLC, it should also accept NULL data in @handle_frame and provide for
 *      data for indicated duration.
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "gstaudiodecoder.h"
#include "gstaudioutilsprivate.h"
#include <gst/pbutils/descriptions.h>

#include <string.h>

/* debug category used by all GST_*_OBJECT logging in this file */
GST_DEBUG_CATEGORY (audiodecoder_debug);
#define GST_CAT_DEFAULT audiodecoder_debug

/* fetch the private-data struct attached to a GstAudioDecoder instance */
#define GST_AUDIO_DECODER_GET_PRIVATE(obj)  \
    (G_TYPE_INSTANCE_GET_PRIVATE ((obj), GST_TYPE_AUDIO_DECODER, \
        GstAudioDecoderPrivate))
135

136 137 138 139 140 141 142 143
/* signals (none defined so far) */
enum
{
  LAST_SIGNAL
};

/* GObject property ids */
enum
{
  PROP_0,
  PROP_LATENCY,
  PROP_TOLERANCE,
  PROP_PLC
};

/* defaults for properties and subclass-configurable behaviour */
#define DEFAULT_LATENCY    0
#define DEFAULT_TOLERANCE  0
#define DEFAULT_PLC        FALSE
#define DEFAULT_DRAINABLE  TRUE
#define DEFAULT_NEEDS_FORMAT  FALSE
154

155
/* per-stream context shared between base class and subclass;
 * reset (zeroed) on a full gst_audio_decoder_reset() */
typedef struct _GstAudioDecoderContext
{
  /* last negotiated input caps */
  GstCaps *input_caps;

  /* (output) audio format */
  GstAudioInfo info;
  gboolean output_format_changed;

  /* parsing state */
  gboolean eos;
  gboolean sync;

  /* track whether any data has passed in/out of the element yet */
  gboolean had_output_data;
  gboolean had_input_data;

  /* misc */
  gint delay;

  /* output */
  gboolean do_plc;
  gboolean do_estimate_rate;
  /* error tolerance before bailing out (see GST_AUDIO_DECODER_MAX_ERRORS) */
  gint max_errors;
  /* caps used for the downstream ALLOCATION query (defaults to output caps) */
  GstCaps *allocation_caps;
  /* MT-protected (with LOCK) */
  GstClockTime min_latency;
  GstClockTime max_latency;

  /* allocator negotiated with downstream */
  GstAllocator *allocator;
  GstAllocationParams params;
} GstAudioDecoderContext;
186

187
/* instance-private state of the base class */
struct _GstAudioDecoderPrivate
{
  /* activation status */
  gboolean active;

  /* input base/first ts as basis for output ts */
  GstClockTime base_ts;
  /* input samples processed and sent downstream so far (w.r.t. base_ts) */
  guint64 samples;

  /* collected input data */
  GstAdapter *adapter;
  /* tracking input ts for changes */
  GstClockTime prev_ts;
  guint64 prev_distance;
  /* frames obtained from input */
  GQueue frames;
  /* collected output data */
  GstAdapter *adapter_out;
  /* ts and duration for output data collected above */
  GstClockTime out_ts, out_dur;
  /* mark outgoing discont */
  gboolean discont;

  /* subclass gave all it could already */
  gboolean drained;
  /* subclass currently being forcibly drained */
  gboolean force;
  /* input_segment and output_segment identical */
  gboolean in_out_segment_sync;
  /* expecting the buffer with DISCONT flag */
  gboolean expecting_discont_buf;

  /* input bps estimation */
  /* global in bytes seen */
  guint64 bytes_in;
  /* global samples sent out */
  guint64 samples_out;
  /* bytes flushed during parsing */
  guint sync_flush;
  /* error count */
  gint error_count;

  /* upstream stream tags (global tags are passed through as-is) */
  GstTagList *upstream_tags;

  /* subclass tags */
  GstTagList *taglist;          /* FIXME: rename to decoder_tags */
  GstTagMergeMode decoder_tags_merge_mode;

  gboolean taglist_changed;     /* FIXME: rename to tags_changed */

  /* whether circumstances allow output aggregation;
   * -1 = undetermined, otherwise boolean (set in gst_audio_decoder_setup) */
  gint agg;

  /* reverse playback queues */
  /* collect input */
  GList *gather;
  /* to-be-decoded */
  GList *decode;
  /* reversed output */
  GList *queued;

  /* context storage */
  GstAudioDecoderContext ctx;

  /* properties */
  GstClockTime latency;
  GstClockTime tolerance;
  gboolean plc;
  gboolean drainable;
  gboolean needs_format;

  /* pending serialized sink events, will be sent from finish_frame() */
  GList *pending_events;

  /* flags */
  gboolean use_default_pad_acceptcaps;
};

268 269
/* forward declarations of the internal (static) implementation functions */
static void gst_audio_decoder_finalize (GObject * object);
static void gst_audio_decoder_set_property (GObject * object,
    guint prop_id, const GValue * value, GParamSpec * pspec);
static void gst_audio_decoder_get_property (GObject * object,
    guint prop_id, GValue * value, GParamSpec * pspec);

static void gst_audio_decoder_clear_queues (GstAudioDecoder * dec);
static GstFlowReturn gst_audio_decoder_chain_reverse (GstAudioDecoder *
    dec, GstBuffer * buf);

static GstStateChangeReturn gst_audio_decoder_change_state (GstElement *
    element, GstStateChange transition);
static gboolean gst_audio_decoder_sink_eventfunc (GstAudioDecoder * dec,
    GstEvent * event);
static gboolean gst_audio_decoder_src_eventfunc (GstAudioDecoder * dec,
    GstEvent * event);
static gboolean gst_audio_decoder_sink_event (GstPad * pad, GstObject * parent,
    GstEvent * event);
static gboolean gst_audio_decoder_src_event (GstPad * pad, GstObject * parent,
    GstEvent * event);
static gboolean gst_audio_decoder_sink_setcaps (GstAudioDecoder * dec,
    GstCaps * caps);
static GstFlowReturn gst_audio_decoder_chain (GstPad * pad, GstObject * parent,
    GstBuffer * buf);
static gboolean gst_audio_decoder_src_query (GstPad * pad, GstObject * parent,
    GstQuery * query);
static gboolean gst_audio_decoder_sink_query (GstPad * pad, GstObject * parent,
    GstQuery * query);
static void gst_audio_decoder_reset (GstAudioDecoder * dec, gboolean full);

static gboolean gst_audio_decoder_decide_allocation_default (GstAudioDecoder *
    dec, GstQuery * query);
static gboolean gst_audio_decoder_propose_allocation_default (GstAudioDecoder *
    dec, GstQuery * query);
static gboolean gst_audio_decoder_negotiate_default (GstAudioDecoder * dec);
static gboolean gst_audio_decoder_negotiate_unlocked (GstAudioDecoder * dec);
static gboolean gst_audio_decoder_handle_gap (GstAudioDecoder * dec,
    GstEvent * event);
static gboolean gst_audio_decoder_sink_query_default (GstAudioDecoder * dec,
    GstQuery * query);
static gboolean gst_audio_decoder_src_query_default (GstAudioDecoder * dec,
    GstQuery * query);

static gboolean gst_audio_decoder_transform_meta_default (GstAudioDecoder *
    decoder, GstBuffer * outbuf, GstMeta * meta, GstBuffer * inbuf);

314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344
static GstElementClass *parent_class = NULL;

static void gst_audio_decoder_class_init (GstAudioDecoderClass * klass);
static void gst_audio_decoder_init (GstAudioDecoder * dec,
    GstAudioDecoderClass * klass);

/* Register and return the GstAudioDecoder GType.
 * Registration happens exactly once (thread-safe via g_once_init_enter/
 * g_once_init_leave); the type is abstract, so only subclasses can be
 * instantiated. */
GType
gst_audio_decoder_get_type (void)
{
  static volatile gsize audio_decoder_type = 0;

  if (g_once_init_enter (&audio_decoder_type)) {
    GType _type;
    static const GTypeInfo audio_decoder_info = {
      sizeof (GstAudioDecoderClass),
      NULL,
      NULL,
      (GClassInitFunc) gst_audio_decoder_class_init,
      NULL,
      NULL,
      sizeof (GstAudioDecoder),
      0,
      (GInstanceInitFunc) gst_audio_decoder_init,
    };

    _type = g_type_register_static (GST_TYPE_ELEMENT,
        "GstAudioDecoder", &audio_decoder_info, G_TYPE_FLAG_ABSTRACT);
    g_once_init_leave (&audio_decoder_type, _type);
  }
  return audio_decoder_type;
}
345

346 347

/* Class initializer: installs properties, wires up GObject/GstElement
 * vmethods and sets the default implementations of the GstAudioDecoder
 * class vfuncs (subclasses may override any of them). */
static void
gst_audio_decoder_class_init (GstAudioDecoderClass * klass)
{
  GObjectClass *gobject_class;
  GstElementClass *element_class;
  GstAudioDecoderClass *audiodecoder_class;

  gobject_class = G_OBJECT_CLASS (klass);
  element_class = GST_ELEMENT_CLASS (klass);
  audiodecoder_class = GST_AUDIO_DECODER_CLASS (klass);

  parent_class = g_type_class_peek_parent (klass);

  g_type_class_add_private (klass, sizeof (GstAudioDecoderPrivate));

  GST_DEBUG_CATEGORY_INIT (audiodecoder_debug, "audiodecoder", 0,
      "audio decoder base class");

  gobject_class->set_property = gst_audio_decoder_set_property;
  gobject_class->get_property = gst_audio_decoder_get_property;
  gobject_class->finalize = gst_audio_decoder_finalize;

  element_class->change_state =
      GST_DEBUG_FUNCPTR (gst_audio_decoder_change_state);

  /* Properties */
  g_object_class_install_property (gobject_class, PROP_LATENCY,
      g_param_spec_int64 ("min-latency", "Minimum Latency",
          "Aggregate output data to a minimum of latency time (ns)",
          0, G_MAXINT64, DEFAULT_LATENCY,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  g_object_class_install_property (gobject_class, PROP_TOLERANCE,
      g_param_spec_int64 ("tolerance", "Tolerance",
          "Perfect ts while timestamp jitter/imperfection within tolerance (ns)",
          0, G_MAXINT64, DEFAULT_TOLERANCE,
          G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  g_object_class_install_property (gobject_class, PROP_PLC,
      g_param_spec_boolean ("plc", "Packet Loss Concealment",
          "Perform packet loss concealment (if supported)",
          DEFAULT_PLC, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));

  /* default vfunc implementations; subclasses chain up or replace */
  audiodecoder_class->sink_event =
      GST_DEBUG_FUNCPTR (gst_audio_decoder_sink_eventfunc);
  audiodecoder_class->src_event =
      GST_DEBUG_FUNCPTR (gst_audio_decoder_src_eventfunc);
  audiodecoder_class->propose_allocation =
      GST_DEBUG_FUNCPTR (gst_audio_decoder_propose_allocation_default);
  audiodecoder_class->decide_allocation =
      GST_DEBUG_FUNCPTR (gst_audio_decoder_decide_allocation_default);
  audiodecoder_class->negotiate =
      GST_DEBUG_FUNCPTR (gst_audio_decoder_negotiate_default);
  audiodecoder_class->sink_query =
      GST_DEBUG_FUNCPTR (gst_audio_decoder_sink_query_default);
  audiodecoder_class->src_query =
      GST_DEBUG_FUNCPTR (gst_audio_decoder_src_query_default);
  audiodecoder_class->transform_meta =
      GST_DEBUG_FUNCPTR (gst_audio_decoder_transform_meta_default);
}

/* Instance initializer: creates the sink/src pads from the subclass'
 * pad templates, installs pad functions, allocates the adapters and
 * initializes all state to defaults via gst_audio_decoder_reset(). */
static void
gst_audio_decoder_init (GstAudioDecoder * dec, GstAudioDecoderClass * klass)
{
  GstPadTemplate *pad_template;

  GST_DEBUG_OBJECT (dec, "gst_audio_decoder_init");

  dec->priv = GST_AUDIO_DECODER_GET_PRIVATE (dec);

  /* Setup sink pad (subclass must provide a "sink" pad template) */
  pad_template =
      gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "sink");
  g_return_if_fail (pad_template != NULL);

  dec->sinkpad = gst_pad_new_from_template (pad_template, "sink");
  gst_pad_set_event_function (dec->sinkpad,
      GST_DEBUG_FUNCPTR (gst_audio_decoder_sink_event));
  gst_pad_set_chain_function (dec->sinkpad,
      GST_DEBUG_FUNCPTR (gst_audio_decoder_chain));
  gst_pad_set_query_function (dec->sinkpad,
      GST_DEBUG_FUNCPTR (gst_audio_decoder_sink_query));
  gst_element_add_pad (GST_ELEMENT (dec), dec->sinkpad);
  GST_DEBUG_OBJECT (dec, "sinkpad created");

  /* Setup source pad (subclass must provide a "src" pad template) */
  pad_template =
      gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "src");
  g_return_if_fail (pad_template != NULL);

  dec->srcpad = gst_pad_new_from_template (pad_template, "src");
  gst_pad_set_event_function (dec->srcpad,
      GST_DEBUG_FUNCPTR (gst_audio_decoder_src_event));
  gst_pad_set_query_function (dec->srcpad,
      GST_DEBUG_FUNCPTR (gst_audio_decoder_src_query));
  gst_element_add_pad (GST_ELEMENT (dec), dec->srcpad);
  GST_DEBUG_OBJECT (dec, "srcpad created");

  dec->priv->adapter = gst_adapter_new ();
  dec->priv->adapter_out = gst_adapter_new ();
  g_queue_init (&dec->priv->frames);

  g_rec_mutex_init (&dec->stream_lock);

  /* property default */
  dec->priv->latency = DEFAULT_LATENCY;
  dec->priv->tolerance = DEFAULT_TOLERANCE;
  dec->priv->plc = DEFAULT_PLC;
  dec->priv->drainable = DEFAULT_DRAINABLE;
  dec->priv->needs_format = DEFAULT_NEEDS_FORMAT;

  /* init state */
  dec->priv->ctx.min_latency = 0;
  dec->priv->ctx.max_latency = 0;
  gst_audio_decoder_reset (dec, TRUE);
  GST_DEBUG_OBJECT (dec, "init ok");
}

/* Reset decoder state.  With @full == TRUE everything is reset (tags,
 * segments, pending events, context, counters) as done at init/stop;
 * with @full == FALSE only the per-flush processing state is cleared
 * (adapters, frame queue, timestamp tracking).
 * Takes the stream lock for the duration of the reset. */
static void
gst_audio_decoder_reset (GstAudioDecoder * dec, gboolean full)
{
  GST_DEBUG_OBJECT (dec, "gst_audio_decoder_reset");

  GST_AUDIO_DECODER_STREAM_LOCK (dec);

  if (full) {
    dec->priv->active = FALSE;
    /* rate-estimation counters are read under the object lock elsewhere */
    GST_OBJECT_LOCK (dec);
    dec->priv->bytes_in = 0;
    dec->priv->samples_out = 0;
    GST_OBJECT_UNLOCK (dec);
    dec->priv->agg = -1;
    dec->priv->error_count = 0;
    gst_audio_decoder_clear_queues (dec);

    if (dec->priv->taglist) {
      gst_tag_list_unref (dec->priv->taglist);
      dec->priv->taglist = NULL;
    }
    dec->priv->decoder_tags_merge_mode = GST_TAG_MERGE_KEEP_ALL;
    if (dec->priv->upstream_tags) {
      gst_tag_list_unref (dec->priv->upstream_tags);
      dec->priv->upstream_tags = NULL;
    }
    dec->priv->taglist_changed = FALSE;

    gst_segment_init (&dec->input_segment, GST_FORMAT_TIME);
    gst_segment_init (&dec->output_segment, GST_FORMAT_TIME);
    dec->priv->in_out_segment_sync = TRUE;

    g_list_foreach (dec->priv->pending_events, (GFunc) gst_event_unref, NULL);
    g_list_free (dec->priv->pending_events);
    dec->priv->pending_events = NULL;

    if (dec->priv->ctx.allocator)
      gst_object_unref (dec->priv->ctx.allocator);

    /* release caps refs before the ctx memset below wipes the pointers */
    GST_OBJECT_LOCK (dec);
    gst_caps_replace (&dec->priv->ctx.input_caps, NULL);
    gst_caps_replace (&dec->priv->ctx.allocation_caps, NULL);

    memset (&dec->priv->ctx, 0, sizeof (dec->priv->ctx));

    gst_audio_info_init (&dec->priv->ctx.info);
    GST_OBJECT_UNLOCK (dec);
    /* non-zero context defaults must be restored after the memset */
    dec->priv->ctx.max_errors = GST_AUDIO_DECODER_MAX_ERRORS;
    dec->priv->ctx.had_output_data = FALSE;
    dec->priv->ctx.had_input_data = FALSE;
  }

  /* per-flush state: drop any gathered input/output and ts tracking */
  g_queue_foreach (&dec->priv->frames, (GFunc) gst_buffer_unref, NULL);
  g_queue_clear (&dec->priv->frames);
  gst_adapter_clear (dec->priv->adapter);
  gst_adapter_clear (dec->priv->adapter_out);
  dec->priv->out_ts = GST_CLOCK_TIME_NONE;
  dec->priv->out_dur = 0;
  dec->priv->prev_ts = GST_CLOCK_TIME_NONE;
  dec->priv->prev_distance = 0;
  dec->priv->drained = TRUE;
  dec->priv->base_ts = GST_CLOCK_TIME_NONE;
  dec->priv->samples = 0;
  dec->priv->discont = TRUE;
  dec->priv->sync_flush = FALSE;

  GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
}

static void
535
gst_audio_decoder_finalize (GObject * object)
536
{
537
  GstAudioDecoder *dec;
538

539 540
  g_return_if_fail (GST_IS_AUDIO_DECODER (object));
  dec = GST_AUDIO_DECODER (object);
541

542 543
  if (dec->priv->adapter) {
    g_object_unref (dec->priv->adapter);
544
  }
545 546
  if (dec->priv->adapter_out) {
    g_object_unref (dec->priv->adapter_out);
547 548
  }

549
  g_rec_mutex_clear (&dec->stream_lock);
550

551 552 553
  G_OBJECT_CLASS (parent_class)->finalize (object);
}

554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579
/* Build a tag event from the upstream stream tags merged with the
 * subclass (decoder) tags, using the configured merge mode.
 * Returns NULL when the merge yields no tags (or only an empty list). */
static GstEvent *
gst_audio_decoder_create_merged_tags_event (GstAudioDecoder * dec)
{
  GstAudioDecoderPrivate *priv = dec->priv;
  GstTagList *merged;

  GST_LOG_OBJECT (dec, "upstream : %" GST_PTR_FORMAT, priv->upstream_tags);
  GST_LOG_OBJECT (dec, "decoder  : %" GST_PTR_FORMAT, priv->taglist);
  GST_LOG_OBJECT (dec, "mode     : %d", priv->decoder_tags_merge_mode);

  merged = gst_tag_list_merge (priv->upstream_tags, priv->taglist,
      priv->decoder_tags_merge_mode);

  GST_DEBUG_OBJECT (dec, "merged   : %" GST_PTR_FORMAT, merged);

  if (merged == NULL)
    return NULL;

  if (gst_tag_list_is_empty (merged)) {
    gst_tag_list_unref (merged);
    return NULL;
  }

  return gst_event_new_tag (merged);
}

580 581 582 583 584 585 586 587 588 589 590 591 592
/* Push an event on the source pad.  SEGMENT events additionally update
 * the decoder's output segment (under the stream lock) and the flag
 * tracking whether input and output segments are still identical. */
static gboolean
gst_audio_decoder_push_event (GstAudioDecoder * dec, GstEvent * event)
{
  if (GST_EVENT_TYPE (event) == GST_EVENT_SEGMENT) {
    GstSegment segment;

    GST_AUDIO_DECODER_STREAM_LOCK (dec);
    gst_event_copy_segment (event, &segment);

    GST_DEBUG_OBJECT (dec, "starting segment %" GST_SEGMENT_FORMAT, &segment);

    dec->output_segment = segment;
    dec->priv->in_out_segment_sync =
        gst_segment_is_equal (&dec->input_segment, &segment);
    GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
  }

  return gst_pad_push_event (dec->srcpad, event);
}

605 606
/* Default negotiate implementation: converts the configured output
 * GstAudioInfo to caps, flushes pending pre-CAPS serialized events,
 * sets the caps on the source pad (only when they changed) and runs
 * the downstream ALLOCATION query / decide_allocation to pick the
 * output allocator.  Returns FALSE when caps could not be set or the
 * subclass refused the allocation decision. */
static gboolean
gst_audio_decoder_negotiate_default (GstAudioDecoder * dec)
{
  GstAudioDecoderClass *klass;
  gboolean res = TRUE;
  GstCaps *caps;
  GstCaps *prevcaps;
  GstQuery *query = NULL;
  GstAllocator *allocator;
  GstAllocationParams params;

  g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);
  g_return_val_if_fail (GST_AUDIO_INFO_IS_VALID (&dec->priv->ctx.info), FALSE);

  klass = GST_AUDIO_DECODER_GET_CLASS (dec);

  caps = gst_audio_info_to_caps (&dec->priv->ctx.info);
  /* allocation caps default to the output caps unless set explicitly */
  if (dec->priv->ctx.allocation_caps == NULL)
    dec->priv->ctx.allocation_caps = gst_caps_ref (caps);

  GST_DEBUG_OBJECT (dec, "setting src caps %" GST_PTR_FORMAT, caps);

  /* serialized events queued before CAPS must be pushed before the
   * caps; the rest stays pending until finish_frame() */
  if (dec->priv->pending_events) {
    GList **pending_events, *l;

    pending_events = &dec->priv->pending_events;

    GST_DEBUG_OBJECT (dec, "Pushing pending events");
    for (l = *pending_events; l;) {
      GstEvent *event = GST_EVENT (l->data);
      GList *tmp;

      if (GST_EVENT_TYPE (event) < GST_EVENT_CAPS) {
        gst_audio_decoder_push_event (dec, l->data);
        tmp = l;
        l = l->next;
        *pending_events = g_list_delete_link (*pending_events, tmp);
      } else {
        l = l->next;
      }
    }
  }

  /* avoid a redundant caps event when nothing changed */
  prevcaps = gst_pad_get_current_caps (dec->srcpad);
  if (!prevcaps || !gst_caps_is_equal (prevcaps, caps))
    res = gst_pad_set_caps (dec->srcpad, caps);
  if (prevcaps)
    gst_caps_unref (prevcaps);

  if (!res)
    goto done;
  dec->priv->ctx.output_format_changed = FALSE;

  /* query downstream for allocation hints; failure to answer is OK */
  query = gst_query_new_allocation (dec->priv->ctx.allocation_caps, TRUE);
  if (!gst_pad_peer_query (dec->srcpad, query)) {
    GST_DEBUG_OBJECT (dec, "didn't get downstream ALLOCATION hints");
  }

  g_assert (klass->decide_allocation != NULL);
  res = klass->decide_allocation (dec, query);

  GST_DEBUG_OBJECT (dec, "ALLOCATION (%d) params: %" GST_PTR_FORMAT, res,
      query);

  if (!res)
    goto no_decide_allocation;

  /* we got configuration from our peer or the decide_allocation method,
   * parse them */
  if (gst_query_get_n_allocation_params (query) > 0) {
    gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
  } else {
    allocator = NULL;
    gst_allocation_params_init (&params);
  }

  /* transfer ownership of the (possibly NULL) allocator to the context */
  if (dec->priv->ctx.allocator)
    gst_object_unref (dec->priv->ctx.allocator);
  dec->priv->ctx.allocator = allocator;
  dec->priv->ctx.params = params;

done:

  if (query)
    gst_query_unref (query);
  gst_caps_unref (caps);

  return res;

  /* ERRORS */
no_decide_allocation:
  {
    GST_WARNING_OBJECT (dec, "Subclass failed to decide allocation");
    goto done;
  }
}

702 703 704 705 706 707 708 709 710 711 712 713
/* Run the class negotiate vfunc without touching the reconfigure flag
 * or any locking; callers must hold whatever locks are required.
 * Succeeds trivially when no negotiate vfunc is set. */
static gboolean
gst_audio_decoder_negotiate_unlocked (GstAudioDecoder * dec)
{
  GstAudioDecoderClass *klass;

  klass = GST_AUDIO_DECODER_GET_CLASS (dec);

  if (G_UNLIKELY (klass->negotiate == NULL))
    return TRUE;

  return klass->negotiate (dec);
}

714 715 716 717
/**
 * gst_audio_decoder_negotiate:
 * @dec: a #GstAudioDecoder
 *
 * Negotiate with downstream elements to currently configured #GstAudioInfo.
 * Unmark GST_PAD_FLAG_NEED_RECONFIGURE in any case. But mark it again if
 * negotiate fails.
 *
 * Returns: %TRUE if the negotiation succeeded, else %FALSE.
 */
gboolean
gst_audio_decoder_negotiate (GstAudioDecoder * dec)
{
  GstAudioDecoderClass *klass;
  gboolean res = TRUE;

  g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);

  klass = GST_AUDIO_DECODER_GET_CLASS (dec);

  GST_AUDIO_DECODER_STREAM_LOCK (dec);
  /* clear the reconfigure flag up-front; re-set below on failure */
  gst_pad_check_reconfigure (dec->srcpad);
  if (klass->negotiate) {
    res = klass->negotiate (dec);
    if (!res)
      gst_pad_mark_reconfigure (dec->srcpad);
  }
  GST_AUDIO_DECODER_STREAM_UNLOCK (dec);

  return res;
}

746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795
/**
 * gst_audio_decoder_set_output_format:
 * @dec: a #GstAudioDecoder
 * @info: #GstAudioInfo
 *
 * Configure output info on the srcpad of @dec.
 *
 * Returns: %TRUE on success.
 **/
gboolean
gst_audio_decoder_set_output_format (GstAudioDecoder * dec,
    const GstAudioInfo * info)
{
  gboolean res = TRUE;
  guint old_rate;
  GstCaps *caps = NULL;
  GstCaps *templ_caps;

  g_return_val_if_fail (GST_IS_AUDIO_DECODER (dec), FALSE);
  g_return_val_if_fail (GST_AUDIO_INFO_IS_VALID (info), FALSE);

  GST_DEBUG_OBJECT (dec, "Setting output format");

  GST_AUDIO_DECODER_STREAM_LOCK (dec);

  /* If the audio info can't be converted to caps,
   * it was invalid */
  caps = gst_audio_info_to_caps (info);
  if (!caps)
    goto refuse_caps;

  /* Only allow caps that are a subset of the template caps */
  templ_caps = gst_pad_get_pad_template_caps (dec->srcpad);
  if (!gst_caps_is_subset (caps, templ_caps)) {
    GST_WARNING_OBJECT (dec, "Requested output format %" GST_PTR_FORMAT
        " do not match template %" GST_PTR_FORMAT, caps, templ_caps);
    gst_caps_unref (templ_caps);
    goto refuse_caps;
  }
  gst_caps_unref (templ_caps);

  /* adjust ts tracking to new sample rate: fold samples counted at the
   * old rate into base_ts so the running ts stays continuous */
  old_rate = GST_AUDIO_INFO_RATE (&dec->priv->ctx.info);
  if (GST_CLOCK_TIME_IS_VALID (dec->priv->base_ts) && old_rate) {
    dec->priv->base_ts +=
        GST_FRAMES_TO_CLOCK_TIME (dec->priv->samples, old_rate);
    dec->priv->samples = 0;
  }

  /* copy the GstAudioInfo (object lock guards concurrent readers) */
  GST_OBJECT_LOCK (dec);
  dec->priv->ctx.info = *info;
  GST_OBJECT_UNLOCK (dec);
  /* actual (re)negotiation is deferred until output is pushed */
  dec->priv->ctx.output_format_changed = TRUE;

done:
  GST_AUDIO_DECODER_STREAM_UNLOCK (dec);

  if (caps)
    gst_caps_unref (caps);

  return res;

  /* ERRORS */
refuse_caps:
  {
    GST_WARNING_OBJECT (dec, "invalid output format");
    res = FALSE;
    goto done;
  }
}

/* Handle new input caps on the sink pad: skip the work if the caps are
 * unchanged, otherwise forward them to the subclass' @set_format and,
 * on acceptance, remember them as the current input caps. */
static gboolean
gst_audio_decoder_sink_setcaps (GstAudioDecoder * dec, GstCaps * caps)
{
  GstAudioDecoderClass *klass;
  gboolean res = TRUE;

  klass = GST_AUDIO_DECODER_GET_CLASS (dec);

  GST_DEBUG_OBJECT (dec, "caps: %" GST_PTR_FORMAT, caps);

  GST_AUDIO_DECODER_STREAM_LOCK (dec);

  if (dec->priv->ctx.input_caps
      && gst_caps_is_equal (dec->priv->ctx.input_caps, caps)) {
    GST_DEBUG_OBJECT (dec, "Caps did not change, not setting again");
    goto done;
  }

  /* NOTE pbutils only needed here */
  /* TODO maybe (only) upstream demuxer/parser etc should handle this ? */
#if 0
  if (!dec->priv->taglist)
    dec->priv->taglist = gst_tag_list_new ();
  dec->priv->taglist = gst_tag_list_make_writable (dec->priv->taglist);
  gst_pb_utils_add_codec_description_to_tag_list (dec->priv->taglist,
      GST_TAG_AUDIO_CODEC, caps);
  dec->priv->taglist_changed = TRUE;
#endif

  if (klass->set_format)
    res = klass->set_format (dec, caps);

  if (res)
    gst_caps_replace (&dec->priv->ctx.input_caps, caps);

done:
  GST_AUDIO_DECODER_STREAM_UNLOCK (dec);

  return res;
}
858

859
static void
860
gst_audio_decoder_setup (GstAudioDecoder * dec)
861 862 863 864 865 866 867 868 869 870 871 872
{
  GstQuery *query;
  gboolean res;

  /* check if in live pipeline, then latency messing is no-no */
  query = gst_query_new_latency ();
  res = gst_pad_peer_query (dec->sinkpad, query);
  if (res) {
    gst_query_parse_latency (query, &res, NULL, NULL);
    res = !res;
  }
  gst_query_unref (query);
873

874
  /* normalize to bool */
Wim Taymans's avatar
Wim Taymans committed
875
  dec->priv->agg = ! !res;
876 877 878
}

/* Pushes one decoded buffer downstream for forward playback:
 * clips it to the output segment, marks a pending discont, tracks the
 * segment position and gives the subclass a last chance via ->pre_push.
 * Takes ownership of @buf; returns the flow return from the push,
 * or GST_FLOW_EOS when past the segment. */
static GstFlowReturn
gst_audio_decoder_push_forward (GstAudioDecoder * dec, GstBuffer * buf)
{
  GstAudioDecoderClass *klass;
  GstAudioDecoderPrivate *priv;
  GstAudioDecoderContext *ctx;
  GstFlowReturn ret = GST_FLOW_OK;
  GstClockTime ts;

  klass = GST_AUDIO_DECODER_GET_CLASS (dec);
  priv = dec->priv;
  ctx = &dec->priv->ctx;

  /* caller must have negotiated a raw output format by now */
  g_return_val_if_fail (ctx->info.bpf != 0, GST_FLOW_ERROR);

  if (G_UNLIKELY (!buf)) {
    g_assert_not_reached ();
    return GST_FLOW_OK;
  }

  ctx->had_output_data = TRUE;
  /* keep the original ts; clipping below may consume the buffer */
  ts = GST_BUFFER_TIMESTAMP (buf);

  GST_LOG_OBJECT (dec,
      "clipping buffer of size %" G_GSIZE_FORMAT " with ts %" GST_TIME_FORMAT
      ", duration %" GST_TIME_FORMAT, gst_buffer_get_size (buf),
      GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
      GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));

  /* clip buffer */
  buf = gst_audio_buffer_clip (buf, &dec->output_segment, ctx->info.rate,
      ctx->info.bpf);
  if (G_UNLIKELY (!buf)) {
    GST_DEBUG_OBJECT (dec, "no data after clipping to segment");
    /* only check and return EOS if upstream still
     * in the same segment and interested as such */
    if (dec->priv->in_out_segment_sync) {
      if (dec->output_segment.rate >= 0) {
        if (ts >= dec->output_segment.stop)
          ret = GST_FLOW_EOS;
      } else if (ts < dec->output_segment.start) {
        /* reverse playback: before segment start means we are done */
        ret = GST_FLOW_EOS;
      }
    }
    goto exit;
  }

  /* decorate */
  if (G_UNLIKELY (priv->discont)) {
    GST_LOG_OBJECT (dec, "marking discont");
    GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
    priv->discont = FALSE;
  }

  /* track where we are */
  if (G_LIKELY (GST_BUFFER_TIMESTAMP_IS_VALID (buf))) {
    /* duration should always be valid for raw audio */
    g_assert (GST_BUFFER_DURATION_IS_VALID (buf));
    dec->output_segment.position =
        GST_BUFFER_TIMESTAMP (buf) + GST_BUFFER_DURATION (buf);
  }

  if (klass->pre_push) {
    /* last chance for subclass to do some dirty stuff */
    ret = klass->pre_push (dec, &buf);
    if (ret != GST_FLOW_OK || !buf) {
      GST_DEBUG_OBJECT (dec, "subclass returned %s, buf %p",
          gst_flow_get_name (ret), buf);
      if (buf)
        gst_buffer_unref (buf);
      goto exit;
    }
  }

  GST_LOG_OBJECT (dec,
      "pushing buffer of size %" G_GSIZE_FORMAT " with ts %" GST_TIME_FORMAT
      ", duration %" GST_TIME_FORMAT, gst_buffer_get_size (buf),
      GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
      GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));

  ret = gst_pad_push (dec->srcpad, buf);

exit:
  return ret;
}

964 965 966
/* mini aggregator combining output buffers into fewer larger ones,
 * if so allowed/configured.
 * Takes ownership of @buf (may be NULL to force a flush of the pending
 * fragment).  Small buffers are collected in priv->adapter_out until the
 * accumulated duration exceeds the configured latency or a timestamp
 * discontinuity is seen, at which point one combined buffer is sent. */
static GstFlowReturn
gst_audio_decoder_output (GstAudioDecoder * dec, GstBuffer * buf)
{
  GstAudioDecoderPrivate *priv;
  GstFlowReturn ret = GST_FLOW_OK;
  GstBuffer *inbuf = NULL;

  priv = dec->priv;

  /* agg < 0 means "not decided yet": probe the pipeline once */
  if (G_UNLIKELY (priv->agg < 0))
    gst_audio_decoder_setup (dec);

  if (G_LIKELY (buf)) {
    GST_LOG_OBJECT (dec,
        "output buffer of size %" G_GSIZE_FORMAT " with ts %" GST_TIME_FORMAT
        ", duration %" GST_TIME_FORMAT, gst_buffer_get_size (buf),
        GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)),
        GST_TIME_ARGS (GST_BUFFER_DURATION (buf)));
  }

again:
  inbuf = NULL;
  if (priv->agg && dec->priv->latency > 0) {
    gint av;
    gboolean assemble = FALSE;
    /* buffers more than 10 ms away from the expected ts start a new fragment */
    const GstClockTimeDiff tol = 10 * GST_MSECOND;
    GstClockTimeDiff diff = -100 * GST_MSECOND;

    av = gst_adapter_available (priv->adapter_out);
    if (G_UNLIKELY (!buf)) {
      /* forcibly send current */
      assemble = TRUE;
      GST_LOG_OBJECT (dec, "forcing fragment flush");
    } else if (av && (!GST_BUFFER_TIMESTAMP_IS_VALID (buf) ||
            !GST_CLOCK_TIME_IS_VALID (priv->out_ts) ||
            ((diff = GST_CLOCK_DIFF (GST_BUFFER_TIMESTAMP (buf),
                        priv->out_ts + priv->out_dur)) > tol) || diff < -tol)) {
      /* ts discontinuity: flush the pending fragment first */
      assemble = TRUE;
      GST_LOG_OBJECT (dec, "buffer %d ms apart from current fragment",
          (gint) (diff / GST_MSECOND));
    } else {
      /* add or start collecting */
      if (!av) {
        GST_LOG_OBJECT (dec, "starting new fragment");
        priv->out_ts = GST_BUFFER_TIMESTAMP (buf);
      } else {
        GST_LOG_OBJECT (dec, "adding to fragment");
      }
      /* adapter takes ownership of buf */
      gst_adapter_push (priv->adapter_out, buf);
      priv->out_dur += GST_BUFFER_DURATION (buf);
      av += gst_buffer_get_size (buf);
      buf = NULL;
    }
    /* fragment reached the configured latency: time to send it */
    if (priv->out_dur > dec->priv->latency)
      assemble = TRUE;
    if (av && assemble) {
      GST_LOG_OBJECT (dec, "assembling fragment");
      /* stash the buffer that triggered the flush; it is re-processed below */
      inbuf = buf;
      buf = gst_adapter_take_buffer (priv->adapter_out, av);
      GST_BUFFER_TIMESTAMP (buf) = priv->out_ts;
      GST_BUFFER_DURATION (buf) = priv->out_dur;
      priv->out_ts = GST_CLOCK_TIME_NONE;
      priv->out_dur = 0;
    }
  }

  if (G_LIKELY (buf)) {
    if (dec->output_segment.rate > 0.0) {
      ret = gst_audio_decoder_push_forward (dec, buf);
      GST_LOG_OBJECT (dec, "buffer pushed: %s", gst_flow_get_name (ret));
    } else {
      /* reverse playback: queue until the whole segment is decoded */
      ret = GST_FLOW_OK;
      priv->queued = g_list_prepend (priv->queued, buf);
      GST_LOG_OBJECT (dec, "buffer queued");
    }

    if (inbuf) {
      /* process the buffer that forced the fragment flush */
      buf = inbuf;
      goto again;
    }
  }

  return ret;
}

1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065
/* Takes ownership of the queued serialized events and pushes them all
 * downstream in their original order. */
static void
send_pending_events (GstAudioDecoder * dec)
{
  GList *events, *walk;

  /* steal the list first so new events can be queued while pushing */
  events = dec->priv->pending_events;
  dec->priv->pending_events = NULL;

  GST_DEBUG_OBJECT (dec, "Pushing pending events");
  for (walk = events; walk != NULL; walk = walk->next)
    gst_audio_decoder_push_event (dec, walk->data);
  g_list_free (events);
}

1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087
/* Iterate the list of pending events, and ensure
 * the current output segment is up to date for
 * decoding */
static void
apply_pending_events (GstAudioDecoder * dec)
{
  GList *walk;

  GST_DEBUG_OBJECT (dec, "Applying pending segments");
  for (walk = dec->priv->pending_events; walk != NULL; walk = walk->next) {
    GstEvent *event = GST_EVENT (walk->data);
    GstSegment seg;

    /* only segment events affect the decoding state here */
    if (GST_EVENT_TYPE (event) != GST_EVENT_SEGMENT)
      continue;

    GST_AUDIO_DECODER_STREAM_LOCK (dec);
    gst_event_copy_segment (event, &seg);

    GST_DEBUG_OBJECT (dec, "starting segment %" GST_SEGMENT_FORMAT, &seg);

    /* adopt the segment and note whether input and output still agree */
    dec->output_segment = seg;
    dec->priv->in_out_segment_sync =
        gst_segment_is_equal (&dec->input_segment, &seg);
    GST_AUDIO_DECODER_STREAM_UNLOCK (dec);
  }
}

1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122
/* Renegotiates the source pad when the output format changed or downstream
 * requested reconfiguration.  Returns GST_FLOW_OK on success (or when no
 * renegotiation was needed), GST_FLOW_FLUSHING / GST_FLOW_NOT_NEGOTIATED
 * when negotiation fails. */
static GstFlowReturn
check_pending_reconfigure (GstAudioDecoder * dec)
{
  GstAudioDecoderContext *ctx = &dec->priv->ctx;
  gboolean reconfigure;

  /* always poll the pad: this also clears its reconfigure flag */
  reconfigure = gst_pad_check_reconfigure (dec->srcpad);

  if (G_LIKELY (!ctx->output_format_changed &&
          !(reconfigure && GST_AUDIO_INFO_IS_VALID (&ctx->info))))
    return GST_FLOW_OK;

  if (gst_audio_decoder_negotiate_unlocked (dec))
    return GST_FLOW_OK;

  /* negotiation failed: re-arm the flag so we retry on the next buffer */
  gst_pad_mark_reconfigure (dec->srcpad);
  if (GST_PAD_IS_FLUSHING (dec->srcpad))
    return GST_FLOW_FLUSHING;

  return GST_FLOW_NOT_NEGOTIATED;
}

1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155
/* Default transform_meta implementation: a meta is copied to the output
 * buffer when it carries no API tags at all, or when its only tag is the
 * generic audio tag. */
static gboolean
gst_audio_decoder_transform_meta_default (GstAudioDecoder *
    decoder, GstBuffer * outbuf, GstMeta * meta, GstBuffer * inbuf)
{
  const gchar *const *tags;

  tags = gst_meta_api_type_get_tags (meta->info->api);

  /* untagged metadata is considered generic and safe to copy */
  if (tags == NULL)
    return TRUE;

  /* a single audio-specific tag is also fine for an audio decoder */
  if (g_strv_length ((gchar **) tags) == 1 &&
      gst_meta_api_type_has_tag (meta->info->api,
          g_quark_from_string (GST_META_TAG_AUDIO_STR)))
    return TRUE;

  return FALSE;
}

/* Closure passed to foreach_metadata() when transferring metadata
 * from an input buffer to a decoded output buffer. */
typedef struct
{
  GstAudioDecoder *decoder;     /* decoder whose transform_meta vfunc decides */
  GstBuffer *outbuf;            /* destination buffer for copied metas */
} CopyMetaData;

/* gst_buffer_foreach_meta() callback: decides per-meta whether to copy it
 * from @inbuf to the output buffer via the subclass transform_meta vfunc,
 * and performs the copy with the meta's own transform function.
 * Always returns TRUE so iteration continues over all metas. */
static gboolean
foreach_metadata (GstBuffer * inbuf, GstMeta ** meta, gpointer user_data)
{
  CopyMetaData *data = user_data;
  GstAudioDecoder *decoder = data->decoder;
  GstAudioDecoderClass *klass = GST_AUDIO_DECODER_GET_CLASS (decoder);
  GstBuffer *outbuf = data->outbuf;
  const GstMetaInfo *info = (*meta)->info;
  gboolean do_copy = FALSE;

  if (gst_meta_api_type_has_tag (info->api, _gst_meta_tag_memory)) {
    /* never call the transform_meta with memory specific metadata */
    GST_DEBUG_OBJECT (decoder, "not copying memory specific metadata %s",
        g_type_name (info->api));
    do_copy = FALSE;
  } else if (klass->transform_meta) {
    do_copy = klass->transform_meta (decoder, outbuf, *meta, inbuf);
    GST_DEBUG_OBJECT (decoder, "transformed metadata %s: copy: %d",
        g_type_name (info->api), do_copy);
  }

  /* we only copy metadata when the subclass implemented a transform_meta
   * function and when it returns %TRUE */
  if (do_copy && info->transform_func) {
    GstMetaTransformCopy copy_data = { FALSE, 0, -1 };
    GST_DEBUG_OBJECT (decoder, "copy metadata %s", g_type_name (info->api));
    /* simply copy then */
    info->transform_func (outbuf, *meta, inbuf,
        _gst_meta_transform_copy, &copy_data);
  }
  return TRUE;
}
1178

1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197
/**
 * gst_audio_decoder_finish_frame:
 * @dec: a #GstAudioDecoder
 * @buf: decoded data
 * @frames: number of decoded frames represented by decoded data
 *
 * Collects decoded data and pushes it downstream.
 *
 * @buf may be NULL in which case the indicated number of frames
 * are discarded and considered to have produced no output
 * (e.g. lead-in or setup frames).
 * Otherwise, source pad caps must be set when it is called with valid
 * data in @buf.
 *
 * Note that a frame received in gst_audio_decoder_handle_frame() may be
 * invalidated by a call to this function.
 *
 * Returns: a #GstFlowReturn that should be escalated to caller (of caller)
 */
1198
GstFlowReturn
1199
gst_audio_decoder_finish_frame (GstAudioDecoder * dec, GstBuffer * buf,
1200
    gint frames)
1201
{
1202 1203
  GstAudioDecoderPrivate *priv;
  GstAudioDecoderContext *ctx;
1204
  GstAudioDecoderClass *klass = GST_AUDIO_DECODER_GET_CLASS (dec);
1205 1206
  gint samples = 0;
  GstClockTime ts, next_ts;
Wim Taymans's avatar
Wim Taymans committed
1207
  gsize size;
1208
  GstFlowReturn ret = GST_FLOW_OK;
1209
  GQueue inbufs = G_QUEUE_INIT;
1210 1211

  /* subclass should not hand us no data */
Wim Taymans's avatar
Wim Taymans committed
1212
  g_return_val_if_fail (buf == NULL || gst_buffer_get_size (buf) > 0,
1213 1214 1215 1216 1217
      GST_FLOW_ERROR);
  /* no dummy calls please */
  g_return_val_if_fail (frames != 0, GST_FLOW_ERROR);

  priv = dec->priv;
1218
  ctx = &dec->priv->ctx;
Wim Taymans's avatar
Wim Taymans committed
1219
  size = buf ? gst_buffer_get_size (buf) : 0;
1220

1221
  /* must know the output format by now */
1222 1223
  g_return_val_if_fail (buf == NULL || GST_AUDIO_INFO_IS_VALID (&ctx->info),
      GST_FLOW_ERROR);
1224

1225 1226 1227 1228
  GST_LOG_OBJECT (dec,
      "accepting %" G_GSIZE_FORMAT " bytes == %" G_GSIZE_FORMAT
      " samples for %d frames", buf ? size : -1,
      buf ? size / ctx->info.bpf : -1, frames);
1229

1230 1231
  GST_AUDIO_DECODER_STREAM_LOCK (dec);

1232 1233
  if (buf) {
    ret = check_pending_reconfigure (dec);
1234 1235
    if (ret == GST_FLOW_FLUSHING || ret == GST_FLOW_NOT_NEGOTIATED) {
      gst_buffer_unref (buf);
1236
      goto exit;
1237
    }
1238

1239 1240
    if (priv->pending_events)
      send_pending_events (dec);
1241 1242
  }

1243
  /* output should be whole number of sample frames */
1244
  if (G_LIKELY (buf && ctx->info.bpf)) {
Wim Taymans's avatar
Wim Taymans committed
1245
    if (size % ctx->info.bpf)
1246 1247
      goto wrong_buffer;
    /* per channel least */
Wim Taymans's avatar
Wim Taymans committed
1248
    samples = size / ctx->info.bpf;
1249
  }
1250

1251 1252
  /* frame and ts book-keeping */
  if (G_UNLIKELY (frames < 0)) {
1253
    if (G_UNLIKELY (-frames - 1 > priv->frames.length)) {
1254
      GST_ELEMENT_WARNING (dec, STREAM, DECODE,
1255 1256 1257 1258 1259 1260
          ("received more decoded frames %d than provided %d", frames,
              priv->frames.length), (NULL));
      frames = 0;
    } else {
      frames = priv->frames.length + frames + 1;
    }
1261 1262
  } else if (G_UNLIKELY (frames > priv->frames.length)) {
    if (G_LIKELY (!priv->force)) {
1263
      GST_ELEMENT_WARNING (dec, STREAM, DECODE,
1264 1265
          ("received more decoded frames %d than provided %d", frames,
              priv->frames.length), (NULL));
1266
    }
1267
    frames = priv->frames.length;
1268 1269
  }

1270 1271 1272 1273
  if (G_LIKELY (priv->frames.length))
    ts = GST_BUFFER_TIMESTAMP (priv->frames.head->data);
  else
    ts = GST_CLOCK_TIME_NONE;
1274

1275 1276
  GST_DEBUG_OBJECT (dec, "leading frame ts %" GST_TIME_FORMAT,
      GST_TIME_ARGS (ts));
1277

1278
  while (priv->frames.length && frames) {
1279
    g_queue_push_tail (&inbufs, g_queue_pop_head (&priv->frames));
1280
    dec->priv->ctx.delay = dec->priv->frames.length;
1281 1282 1283
    frames--;
  }

1284 1285 1286
  if (G_UNLIKELY (!buf))
    goto exit;

1287 1288 1289 1290 1291 1292
  /* lock on */
  if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (priv->base_ts))) {
    priv->base_ts = ts;
    GST_DEBUG_OBJECT (dec, "base_ts now %" GST_TIME_FORMAT, GST_TIME_ARGS (ts));
  }

1293
  /* still no valid ts, track the segment one */
1294 1295
  if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (priv->base_ts)) &&
      dec->output_segment.rate > 0.0) {
1296 1297 1298
    priv->base_ts = dec->output_segment.start;
  }

1299 1300
  /* slightly convoluted approach caters for perfect ts if subclass desires */
  if (GST_CLOCK_TIME_IS_VALID (ts)) {
1301
    if (dec->priv->tolerance > 0) {
1302 1303 1304 1305
      GstClockTimeDiff diff;

      g_assert (GST_CLOCK_TIME_IS_VALID (priv->base_ts));
      next_ts = priv->base_ts +
1306
          gst_util_uint64_scale (priv->samples, GST_SECOND, ctx->info.rate);
1307 1308 1309
      GST_LOG_OBJECT (dec,
          "buffer is %" G_GUINT64_FORMAT " samples past base_ts %"
          GST_TIME_FORMAT ", expected ts %" GST_TIME_FORMAT, priv->samples,
1310
          GST_TIME_ARGS (priv->base_ts), GST_TIME_ARGS (next_ts));
1311
      diff = GST_CLOCK_DIFF (next_ts, ts);
1312 1313 1314 1315
      GST_LOG_OBJECT (dec, "ts diff %d ms", (gint) (diff / GST_MSECOND));
      /* if within tolerance,
       * discard buffer ts and carry on producing perfect stream,
       * otherwise resync to ts */
1316 1317
      if (G_UNLIKELY (diff < (gint64) - dec->priv->tolerance ||
              diff > (gint64) dec->priv->tolerance)) {
1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328
        GST_DEBUG_OBJECT (dec, "base_ts resync");
        priv->base_ts = ts;
        priv->samples = 0;
      }
    } else {
      GST_DEBUG_OBJECT (dec, "base_ts resync");
      priv->base_ts = ts;
      priv->samples = 0;
    }
  }

1329
  /* delayed one-shot stuff until confirmed data */
1330
  if (priv->taglist && priv->taglist_changed) {
1331 1332 1333 1334 1335 1336 1337
    GstEvent *tags_event;

    tags_event = gst_audio_decoder_create_merged_tags_event (dec);

    if (tags_event != NULL)
      gst_audio_decoder_push_event (dec, tags_event);

1338
    priv->taglist_changed = FALSE;
1339
  }
1340

Wim Taymans's avatar
Wim Taymans committed
1341
  buf = gst_buffer_make_writable (buf);
1342 1343 1344
  if (G_LIKELY (GST_CLOCK_TIME_IS_VALID (priv->base_ts))) {
    GST_BUFFER_TIMESTAMP (buf) =
        priv->base_ts +