Commit c2225781 authored by Sebastian Dröge's avatar Sebastian Dröge 🍵

qtmux: Allow configuring the interleave size in bytes/time

Previously we were switching from one chunk to another on every single
buffer. This wastes some space in the headers and, depending on the
software, might result in more reads (e.g. if the software is reading
multiple samples in one go if they're in the same chunk).

The ProRes guidelines suggest an interleave of 0.5s is common, but
specifies that for ProRes at most 2MB (for SD) and 4MB (for HD) should
be used per chunk. This will be handled in a follow-up commit.

https://bugzilla.gnome.org/show_bug.cgi?id=773217
parent cba6cc4f
......@@ -2201,12 +2201,20 @@ atom_stsc_copy_data (AtomSTSC * stsc, guint8 ** buffer, guint64 * size,
guint64 * offset)
{
guint64 original_offset = *offset;
guint i;
guint i, len;
if (!atom_full_copy_data (&stsc->header, buffer, size, offset)) {
return 0;
}
/* Last two entries might be the same size here as we only merge once the
* next chunk is started */
if ((len = atom_array_get_len (&stsc->entries)) > 1 &&
((atom_array_index (&stsc->entries, len - 1)).samples_per_chunk ==
(atom_array_index (&stsc->entries, len - 2)).samples_per_chunk)) {
stsc->entries.len--;
}
prop_copy_uint32 (atom_array_get_len (&stsc->entries), buffer, size, offset);
/* minimize realloc */
prop_copy_ensure_buffer (buffer, size, offset,
......@@ -2894,7 +2902,6 @@ atom_wave_copy_data (AtomWAVE * wave, guint8 ** buffer,
static void
atom_stsc_add_new_entry (AtomSTSC * stsc, guint32 first_chunk, guint32 nsamples)
{
STSCEntry nentry;
gint len;
if ((len = atom_array_get_len (&stsc->entries)) &&
......@@ -2902,10 +2909,37 @@ atom_stsc_add_new_entry (AtomSTSC * stsc, guint32 first_chunk, guint32 nsamples)
nsamples))
return;
nentry.first_chunk = first_chunk;
nentry.samples_per_chunk = nsamples;
nentry.sample_description_index = 1;
atom_array_append (&stsc->entries, nentry, 128);
if ((len = atom_array_get_len (&stsc->entries)) > 1 &&
((atom_array_index (&stsc->entries, len - 1)).samples_per_chunk ==
(atom_array_index (&stsc->entries, len - 2)).samples_per_chunk)) {
STSCEntry *nentry;
/* Merge last two entries as they have the same number of samples per chunk */
nentry = &atom_array_index (&stsc->entries, len - 1);
nentry->first_chunk = first_chunk;
nentry->samples_per_chunk = nsamples;
nentry->sample_description_index = 1;
} else {
STSCEntry nentry;
nentry.first_chunk = first_chunk;
nentry.samples_per_chunk = nsamples;
nentry.sample_description_index = 1;
atom_array_append (&stsc->entries, nentry, 128);
}
}
/* Fold nsamples into the most recent stsc entry. The table must be
 * non-empty and its last entry must describe the chunk first_chunk. */
static void
atom_stsc_update_entry (AtomSTSC * stsc, guint32 first_chunk, guint32 nsamples)
{
  gint n_entries = atom_array_get_len (&stsc->entries);
  STSCEntry *last;

  g_assert (n_entries != 0);

  last = &atom_array_index (&stsc->entries, n_entries - 1);
  g_assert (last->first_chunk == first_chunk);

  last->samples_per_chunk += nsamples;
}
static void
......@@ -2949,12 +2983,22 @@ atom_stco64_get_entry_count (AtomSTCO64 * stco64)
return atom_array_get_len (&stco64->entries);
}
static void
/* Append a chunk offset to the stco/co64 table unless it is identical to
 * the offset already stored in the last entry.
 * Returns TRUE if a new entry was added, FALSE otherwise. */
static gboolean
atom_stco64_add_entry (AtomSTCO64 * stco64, guint64 entry)
{
  guint32 n_entries = atom_array_get_len (&stco64->entries);

  /* Same offset as the last chunk: the sample continues that chunk */
  if (n_entries > 0
      && atom_array_index (&stco64->entries, n_entries - 1) == entry)
    return FALSE;

  atom_array_append (&stco64->entries, entry, 256);

  /* Offsets beyond 32 bits require the 64-bit co64 variant of the atom */
  if (entry > G_MAXUINT32)
    stco64->header.header.type = FOURCC_co64;

  return TRUE;
}
void
......@@ -3014,9 +3058,14 @@ atom_stbl_add_samples (AtomSTBL * stbl, guint32 nsamples, guint32 delta,
{
atom_stts_add_entry (&stbl->stts, nsamples, delta);
atom_stsz_add_entry (&stbl->stsz, nsamples, size);
atom_stco64_add_entry (&stbl->stco64, chunk_offset);
atom_stsc_add_new_entry (&stbl->stsc,
atom_stco64_get_entry_count (&stbl->stco64), nsamples);
if (atom_stco64_add_entry (&stbl->stco64, chunk_offset)) {
atom_stsc_add_new_entry (&stbl->stsc,
atom_stco64_get_entry_count (&stbl->stco64), nsamples);
} else {
atom_stsc_update_entry (&stbl->stsc,
atom_stco64_get_entry_count (&stbl->stco64), nsamples);
}
if (sync)
atom_stbl_add_stss_entry (stbl);
/* always store to arrange for consistent content */
......
This diff is collapsed.
......@@ -196,6 +196,12 @@ struct _GstQTMux
/* Last DTS across all pads (= duration) */
GstClockTime last_dts;
/* Last pad we used for writing the current chunk */
GstQTPad *current_pad;
guint64 current_chunk_size;
GstClockTime current_chunk_duration;
guint64 current_chunk_offset;
/* atom helper objects */
AtomsContext *context;
AtomFTYP *ftyp;
......@@ -247,6 +253,10 @@ struct _GstQTMux
/* Multiplier for conversion from reserved_max_duration to bytes */
guint reserved_bytes_per_sec_per_trak;
guint64 interleave_bytes;
GstClockTime interleave_time;
gboolean interleave_bytes_set, interleave_time_set;
/* Reserved minimum MOOV size in bytes
* This is converted from reserved_max_duration
* using the bytes/trak/sec estimate */
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment