Commit 571d3224 authored by Thibault Saunier

Merging gst-python

parents aee2edec d365954f
include: "https://gitlab.freedesktop.org/gstreamer/gst-ci/raw/master/gitlab/ci_template.yml"
This file will be autogenerated. Please read README-docs.
This is GStreamer gst-python 1.17.0.1.
The GStreamer team is thrilled to announce a new major feature release in the
stable 1.0 API series of your favourite cross-platform multimedia framework!
As always, this release is again packed with new features, bug fixes and
other improvements.
The 1.16 release series adds new features on top of the 1.14 series and is
part of the API and ABI-stable 1.x release series of the GStreamer multimedia
framework.
Full release notes will one day be found at:
https://gstreamer.freedesktop.org/releases/1.16/
Binaries for Android, iOS, Mac OS X and Windows will usually be provided
shortly after the release.
This module will not be very useful by itself and should be used in conjunction
with other GStreamer modules for a complete multimedia experience.
- gstreamer: provides the core GStreamer libraries and some generic plugins
- gst-plugins-base: a basic set of well-supported plugins and additional
media-specific GStreamer helper libraries for audio,
video, rtsp, rtp, tags, OpenGL, etc.
- gst-plugins-good: a set of well-supported plugins under our preferred
license
- gst-plugins-ugly: a set of well-supported plugins which might pose
problems for distributors
- gst-plugins-bad: a set of plugins of varying quality that have not made
their way into one of core/base/good/ugly yet, for one
                   reason or another. Many of these are production-quality
elements, but may still be missing documentation or unit
tests; others haven't passed the rigorous quality testing
we expect yet.
- gst-libav: a set of codec plugins based on the FFmpeg library. This is
where you can find audio and video decoders and encoders
for a wide variety of formats including H.264, AAC, etc.
- gstreamer-vaapi: hardware-accelerated video decoding and encoding using
VA-API on Linux. Primarily for Intel graphics hardware.
- gst-omx: hardware-accelerated video decoding and encoding, primarily for
             embedded Linux systems that provide an OpenMAX
implementation layer such as the Raspberry Pi.
- gst-rtsp-server: library to serve files or streaming pipelines via RTSP
- gst-editing-services: library and plugins for non-linear editing
==== Download ====
You can find source releases of gstreamer in the download
directory: https://gstreamer.freedesktop.org/src/gstreamer/
The git repository and details on how to clone it can be found at
https://cgit.freedesktop.org/gstreamer/gstreamer/
==== Homepage ====
The project's website is https://gstreamer.freedesktop.org/
==== Support and Bugs ====
We have recently moved from GNOME Bugzilla to GitLab on freedesktop.org
for bug reports and feature requests:
https://gitlab.freedesktop.org/gstreamer
Please submit patches via GitLab as well, in the form of Merge Requests. See
https://gstreamer.freedesktop.org/documentation/contribute/
for more details.
For help and support, please subscribe to and send questions to the
gstreamer-devel mailing list (see below for details).
There is also a #gstreamer IRC channel on the Freenode IRC network.
==== Developers ====
GStreamer source code repositories can be found on GitLab on freedesktop.org:
https://gitlab.freedesktop.org/gstreamer
and can be cloned from there. This is also where you can submit
Merge Requests or file issues for bugs or feature requests.
Interested developers of the core library, plugins, and applications should
subscribe to the gstreamer-devel list:
https://lists.freedesktop.org/mailman/listinfo/gstreamer-devel
Port old examples to GStreamer 1.0
# Dependencies
Some of the examples require external Python dependencies. For this purpose
an illustrative requirements.txt is provided, with annotations documenting
which example requires which dependency.
You can install all the dependencies with:
```
python3 -m pip install -r requirements.txt --user
```
#!/usr/bin/env python3
'''
Simple example to demonstrate dynamically adding and removing source elements
to a playing pipeline.
'''
import sys
import random
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GLib', '2.0')
gi.require_version('GObject', '2.0')
from gi.repository import GLib, GObject, Gst
class ProbeData:
def __init__(self, pipe, src):
self.pipe = pipe
self.src = src
def bus_call(bus, message, loop):
t = message.type
if t == Gst.MessageType.EOS:
sys.stdout.write("End-of-stream\n")
loop.quit()
elif t == Gst.MessageType.ERROR:
err, debug = message.parse_error()
sys.stderr.write("Error: %s: %s\n" % (err, debug))
loop.quit()
return True
def dispose_src_cb(src):
src.set_state(Gst.State.NULL)
def probe_cb(pad, info, pdata):
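    # This IDLE probe callback runs as soon as the pad has no buffer in
    # flight, so it is safe to unlink the old source and splice in a new one.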
peer = pad.get_peer()
pad.unlink(peer)
pdata.pipe.remove(pdata.src)
# Can't set the state of the src to NULL from its streaming thread
GLib.idle_add(dispose_src_cb, pdata.src)
pdata.src = Gst.ElementFactory.make('videotestsrc')
pdata.src.props.pattern = random.randint(0, 24)
pdata.pipe.add(pdata.src)
    srcpad = pdata.src.get_static_pad("src")
srcpad.link(peer)
pdata.src.sync_state_with_parent()
GLib.timeout_add_seconds(1, timeout_cb, pdata)
return Gst.PadProbeReturn.REMOVE
def timeout_cb(pdata):
srcpad = pdata.src.get_static_pad('src')
srcpad.add_probe(Gst.PadProbeType.IDLE, probe_cb, pdata)
return GLib.SOURCE_REMOVE
def main(args):
GObject.threads_init()
Gst.init(None)
pipe = Gst.Pipeline.new('dynamic')
src = Gst.ElementFactory.make('videotestsrc')
sink = Gst.ElementFactory.make('autovideosink')
pipe.add(src, sink)
src.link(sink)
pdata = ProbeData(pipe, src)
loop = GObject.MainLoop()
GLib.timeout_add_seconds(1, timeout_cb, pdata)
bus = pipe.get_bus()
bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    # start playback and listen to events
pipe.set_state(Gst.State.PLAYING)
try:
loop.run()
except:
pass
# cleanup
pipe.set_state(Gst.State.NULL)
if __name__ == '__main__':
sys.exit(main(sys.argv))
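To try it, run the script with no arguments (the filename dynamic_src.py below
is illustrative, not necessarily what the file is called in your checkout);
every second the probe swaps in a fresh videotestsrc with a random pattern:

```
python3 dynamic_src.py
```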
#!/usr/bin/env python3
import sys
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
def bus_call(bus, message, loop):
t = message.type
if t == Gst.MessageType.EOS:
sys.stdout.write("End-of-stream\n")
loop.quit()
elif t == Gst.MessageType.ERROR:
err, debug = message.parse_error()
sys.stderr.write("Error: %s: %s\n" % (err, debug))
loop.quit()
return True
def main(args):
if len(args) != 2:
sys.stderr.write("usage: %s <media file or uri>\n" % args[0])
sys.exit(1)
GObject.threads_init()
Gst.init(None)
playbin = Gst.ElementFactory.make("playbin", None)
if not playbin:
sys.stderr.write("'playbin' gstreamer plugin missing\n")
sys.exit(1)
# take the commandline argument and ensure that it is a uri
if Gst.uri_is_valid(args[1]):
uri = args[1]
else:
uri = Gst.filename_to_uri(args[1])
playbin.set_property('uri', uri)
    # create an event loop and feed GStreamer bus messages to it
loop = GObject.MainLoop()
bus = playbin.get_bus()
bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    # start playback and listen to events
playbin.set_state(Gst.State.PLAYING)
try:
loop.run()
except:
pass
# cleanup
playbin.set_state(Gst.State.NULL)
if __name__ == '__main__':
sys.exit(main(sys.argv))
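A quick way to exercise the script, assuming it is saved as playbin.py (an
illustrative name); it accepts either a local file path or a URI:

```
python3 playbin.py /path/to/media.ogg
python3 playbin.py file:///path/to/media.ogg
```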
'''
Element that transforms audio samples to video frames representing
the waveform.
Requires matplotlib, numpy and numpy_ringbuffer
Example pipeline:
gst-launch-1.0 audiotestsrc ! audioplot window-duration=0.01 ! videoconvert ! autovideosink
'''
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstBase', '1.0')
gi.require_version('GstAudio', '1.0')
gi.require_version('GstVideo', '1.0')
from gi.repository import Gst, GLib, GObject, GstBase, GstAudio, GstVideo
try:
import numpy as np
import matplotlib.patheffects as pe
from numpy_ringbuffer import RingBuffer
from matplotlib import pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg
except ImportError:
Gst.error('audioplot requires numpy, numpy_ringbuffer and matplotlib')
raise
Gst.init(None)
AUDIO_FORMATS = [f.strip() for f in
GstAudio.AUDIO_FORMATS_ALL.strip('{ }').split(',')]
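# Sink pad accepts any interleaved raw audio format, rate and channel count;
# src pad produces ARGB video at any resolution and framerate.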
ICAPS = Gst.Caps(Gst.Structure('audio/x-raw',
format=Gst.ValueList(AUDIO_FORMATS),
layout='interleaved',
                               rate=Gst.IntRange(range(1, GLib.MAXINT)),
                               channels=Gst.IntRange(range(1, GLib.MAXINT))))
OCAPS = Gst.Caps(Gst.Structure('video/x-raw',
format='ARGB',
width=Gst.IntRange(range(1, GLib.MAXINT)),
height=Gst.IntRange(range(1, GLib.MAXINT)),
framerate=Gst.FractionRange(Gst.Fraction(1, 1),
Gst.Fraction(GLib.MAXINT, 1))))
DEFAULT_WINDOW_DURATION = 1.0
DEFAULT_WIDTH = 640
DEFAULT_HEIGHT = 480
DEFAULT_FRAMERATE_NUM = 25
DEFAULT_FRAMERATE_DENOM = 1
class AudioPlotFilter(GstBase.BaseTransform):
    __gstmetadata__ = ('AudioPlotFilter', 'Filter',
                       'Plot audio waveforms', 'Mathieu Duponchelle')
__gsttemplates__ = (Gst.PadTemplate.new("src",
Gst.PadDirection.SRC,
Gst.PadPresence.ALWAYS,
OCAPS),
Gst.PadTemplate.new("sink",
Gst.PadDirection.SINK,
Gst.PadPresence.ALWAYS,
ICAPS))
__gproperties__ = {
"window-duration": (float,
"Window Duration",
"Duration of the sliding window, in seconds",
0.01,
100.0,
DEFAULT_WINDOW_DURATION,
GObject.ParamFlags.READWRITE
)
}
def __init__(self):
GstBase.BaseTransform.__init__(self)
self.window_duration = DEFAULT_WINDOW_DURATION
def do_get_property(self, prop):
if prop.name == 'window-duration':
return self.window_duration
else:
raise AttributeError('unknown property %s' % prop.name)
def do_set_property(self, prop, value):
if prop.name == 'window-duration':
self.window_duration = value
else:
raise AttributeError('unknown property %s' % prop.name)
def do_transform(self, inbuf, outbuf):
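        # Redraw only the waveform line, blit the changed region, then copy
        # the canvas' ARGB pixels and a timestamp into the output buffer.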
if not self.h:
self.h, = self.ax.plot(np.array(self.ringbuffer),
lw=0.5,
color='k',
path_effects=[pe.Stroke(linewidth=1.0,
foreground='g'),
pe.Normal()])
else:
self.h.set_ydata(np.array(self.ringbuffer))
self.fig.canvas.restore_region(self.background)
self.ax.draw_artist(self.h)
self.fig.canvas.blit(self.ax.bbox)
s = self.agg.tostring_argb()
outbuf.fill(0, s)
outbuf.pts = self.next_time
outbuf.duration = self.frame_duration
self.next_time += self.frame_duration
return Gst.FlowReturn.OK
def __append(self, data):
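        # Decimate by averaging groups of thinning_factor samples so the
        # whole window fits the ring buffer (about one value per pixel).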
arr = np.array(data)
end = self.thinning_factor * int(len(arr) / self.thinning_factor)
arr = np.mean(arr[:end].reshape(-1, self.thinning_factor), 1)
self.ringbuffer.extend(arr)
def do_generate_output(self):
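        # Convert the queued input buffer to S32, push the samples into the
        # ring buffer, and emit one video frame per samplesperbuffer samples.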
inbuf = self.queued_buf
_, info = inbuf.map(Gst.MapFlags.READ)
res, data = self.converter.convert(GstAudio.AudioConverterFlags.NONE,
info.data)
data = memoryview(data).cast('i')
nsamples = len(data) - self.buf_offset
if nsamples == 0:
self.buf_offset = 0
inbuf.unmap(info)
return Gst.FlowReturn.OK, None
if self.cur_offset + nsamples < self.next_offset:
self.__append(data[self.buf_offset:])
self.buf_offset = 0
self.cur_offset += nsamples
inbuf.unmap(info)
return Gst.FlowReturn.OK, None
consumed = self.next_offset - self.cur_offset
self.__append(data[self.buf_offset:self.buf_offset + consumed])
inbuf.unmap(info)
_, outbuf = GstBase.BaseTransform.do_prepare_output_buffer(self, inbuf)
ret = self.do_transform(inbuf, outbuf)
self.next_offset += self.samplesperbuffer
self.cur_offset += consumed
self.buf_offset += consumed
return ret, outbuf
def do_transform_caps(self, direction, caps, filter_):
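        # Given caps on the pad in `direction`, return the caps allowed on
        # the opposite pad: raw audio on the sink, ARGB video on the src.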
if direction == Gst.PadDirection.SRC:
res = ICAPS
else:
res = OCAPS
if filter_:
res = res.intersect(filter_)
return res
def do_fixate_caps(self, direction, caps, othercaps):
if direction == Gst.PadDirection.SRC:
return othercaps.fixate()
else:
so = othercaps.get_structure(0).copy()
so.fixate_field_nearest_fraction("framerate",
DEFAULT_FRAMERATE_NUM,
DEFAULT_FRAMERATE_DENOM)
so.fixate_field_nearest_int("width", DEFAULT_WIDTH)
so.fixate_field_nearest_int("height", DEFAULT_HEIGHT)
ret = Gst.Caps.new_empty()
ret.append_structure(so)
return ret.fixate()
def do_set_caps(self, icaps, ocaps):
in_info = GstAudio.AudioInfo()
in_info.from_caps(icaps)
out_info = GstVideo.VideoInfo()
out_info.from_caps(ocaps)
self.convert_info = GstAudio.AudioInfo()
self.convert_info.set_format(GstAudio.AudioFormat.S32,
in_info.rate,
in_info.channels,
in_info.position)
self.converter = GstAudio.AudioConverter.new(GstAudio.AudioConverterFlags.NONE,
in_info,
self.convert_info,
None)
self.fig = plt.figure()
dpi = self.fig.get_dpi()
self.fig.patch.set_alpha(0.3)
self.fig.set_size_inches(out_info.width / float(dpi),
out_info.height / float(dpi))
self.ax = plt.Axes(self.fig, [0., 0., 1., 1.])
self.fig.add_axes(self.ax)
self.ax.set_axis_off()
self.ax.set_ylim((GLib.MININT, GLib.MAXINT))
self.agg = self.fig.canvas.switch_backends(FigureCanvasAgg)
self.h = None
samplesperwindow = int(in_info.rate * in_info.channels * self.window_duration)
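        # Aim for roughly one ring buffer slot per output pixel column.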
self.thinning_factor = max(int(samplesperwindow / out_info.width - 1), 1)
cap = int(samplesperwindow / self.thinning_factor)
self.ax.set_xlim([0, cap])
self.ringbuffer = RingBuffer(capacity=cap)
self.ringbuffer.extend([0.0] * cap)
self.frame_duration = Gst.util_uint64_scale_int(Gst.SECOND,
out_info.fps_d,
out_info.fps_n)
self.next_time = self.frame_duration
self.agg.draw()
self.background = self.fig.canvas.copy_from_bbox(self.ax.bbox)
self.samplesperbuffer = Gst.util_uint64_scale_int(in_info.rate * in_info.channels,
out_info.fps_d,
out_info.fps_n)
self.next_offset = self.samplesperbuffer
self.cur_offset = 0
self.buf_offset = 0
return True
GObject.type_register(AudioPlotFilter)
__gstelementfactory__ = ("audioplot", Gst.Rank.NONE, AudioPlotFilter)
#!/usr/bin/python3
# exampleTransform.py
# 2019 Daniel Klamt <graphics@pengutronix.de>
# Inverts a grayscale image in place, requires numpy.
#
# gst-launch-1.0 videotestsrc ! ExampleTransform ! videoconvert ! xvimagesink
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstBase', '1.0')
gi.require_version('GstVideo', '1.0')
from gi.repository import Gst, GObject, GstBase, GstVideo
import numpy as np
Gst.init(None)
FIXED_CAPS = Gst.Caps.from_string('video/x-raw,format=GRAY8,width=[1,2147483647],height=[1,2147483647]')
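# GRAY8 only: one byte per pixel, so a mapped buffer can be viewed directly
# as a two-dimensional numpy array of shape (height, width).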
class ExampleTransform(GstBase.BaseTransform):
    __gstmetadata__ = ('ExampleTransform Python', 'Transform',
                       'Example gst-python element that inverts a GRAY8 buffer in place '
                       '(try: gst-launch-1.0 videotestsrc ! ExampleTransform ! videoconvert ! xvimagesink)',
                       'dkl')
__gsttemplates__ = (Gst.PadTemplate.new("src",
Gst.PadDirection.SRC,
Gst.PadPresence.ALWAYS,
FIXED_CAPS),
Gst.PadTemplate.new("sink",
Gst.PadDirection.SINK,
Gst.PadPresence.ALWAYS,
FIXED_CAPS))
def do_set_caps(self, incaps, outcaps):
struct = incaps.get_structure(0)
self.width = struct.get_int("width").value
self.height = struct.get_int("height").value
return True
def do_transform_ip(self, buf):
try:
with buf.map(Gst.MapFlags.READ | Gst.MapFlags.WRITE) as info:
# Create a NumPy ndarray from the memoryview and modify it in place:
A = np.ndarray(shape = (self.height, self.width), dtype = np.uint8, buffer = info.data)
A[:] = np.invert(A)
return Gst.FlowReturn.OK
except Gst.MapError as e:
Gst.error("Mapping error: %s" % e)
return Gst.FlowReturn.ERROR
GObject.type_register(ExampleTransform)
__gstelementfactory__ = ("ExampleTransform", Gst.Rank.NONE, ExampleTransform)
#!/usr/bin/env python
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
# identity.py
# 2016 Marianna S. Buschle <msb@qtec.com>
#
# Simple identity element in python
#
# You can run the example from the source doing from gst-python/:
#
# $ export GST_PLUGIN_PATH=$GST_PLUGIN_PATH:$PWD/plugin:$PWD/examples/plugins
# $ GST_DEBUG=python:4 gst-launch-1.0 fakesrc num-buffers=10 ! identity_py ! fakesink
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstBase', '1.0')
from gi.repository import Gst, GObject, GstBase
Gst.init(None)
#
# Simple Identity element created entirely in python
#
class Identity(GstBase.BaseTransform):
    __gstmetadata__ = ('Identity Python', 'Transform',
                       'Simple identity element written in python', 'Marianna S. Buschle')
__gsttemplates__ = (Gst.PadTemplate.new("src",
Gst.PadDirection.SRC,
Gst.PadPresence.ALWAYS,
Gst.Caps.new_any()),
Gst.PadTemplate.new("sink",
Gst.PadDirection.SINK,
Gst.PadPresence.ALWAYS,
Gst.Caps.new_any()))
def do_transform_ip(self, buffer):
Gst.info("timestamp(buffer):%s" % (Gst.TIME_ARGS(buffer.pts)))
return Gst.FlowReturn.OK
GObject.type_register(Identity)
__gstelementfactory__ = ("identity_py", Gst.Rank.NONE, Identity)
'''
Simple mixer element, accepts 320 x 240 RGBA at 30 fps
on any number of sinkpads.
Requires PIL (Python Imaging Library)
Example pipeline:
gst-launch-1.0 py_videomixer name=mixer ! videoconvert ! autovideosink \
videotestsrc ! mixer. \
videotestsrc pattern=ball ! mixer. \
videotestsrc pattern=snow ! mixer.
'''
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstBase', '1.0')
gi.require_version('GObject', '2.0')
from gi.repository import Gst, GObject, GstBase
Gst.init(None)
try:
from PIL import Image
except ImportError: