gitlab.freedesktop.org/gstreamer/gst-plugins-rs.git
Diffstat (limited to 'utils/gst-plugin-fallbackswitch/src')
-rw-r--r--  utils/gst-plugin-fallbackswitch/src/base/aggregator.rs | 95
-rw-r--r--  utils/gst-plugin-fallbackswitch/src/base/aggregator_pad.rs | 28
-rw-r--r--  utils/gst-plugin-fallbackswitch/src/base/auto/aggregator.rs | 190
-rw-r--r--  utils/gst-plugin-fallbackswitch/src/base/auto/aggregator_pad.rs | 182
-rw-r--r--  utils/gst-plugin-fallbackswitch/src/base/auto/mod.rs | 13
-rw-r--r--  utils/gst-plugin-fallbackswitch/src/base/gstaggregator.c | 3465
-rw-r--r--  utils/gst-plugin-fallbackswitch/src/base/gstaggregator.h | 393
-rw-r--r--  utils/gst-plugin-fallbackswitch/src/base/mod.rs | 27
-rw-r--r--  utils/gst-plugin-fallbackswitch/src/base/subclass/aggregator.rs | 1042
-rw-r--r--  utils/gst-plugin-fallbackswitch/src/base/subclass/aggregator_pad.rs | 147
-rw-r--r--  utils/gst-plugin-fallbackswitch/src/base/subclass/mod.rs | 17
-rw-r--r--  utils/gst-plugin-fallbackswitch/src/base/sys.rs | 235
-rw-r--r--  utils/gst-plugin-fallbackswitch/src/base/utils.rs | 30
-rw-r--r--  utils/gst-plugin-fallbackswitch/src/fallbackswitch.rs | 794
-rw-r--r--  utils/gst-plugin-fallbackswitch/src/lib.rs | 62
15 files changed, 6720 insertions, 0 deletions
diff --git a/utils/gst-plugin-fallbackswitch/src/base/aggregator.rs b/utils/gst-plugin-fallbackswitch/src/base/aggregator.rs
new file mode 100644
index 000000000..d6ac7fa60
--- /dev/null
+++ b/utils/gst-plugin-fallbackswitch/src/base/aggregator.rs
@@ -0,0 +1,95 @@
+// Copyright (C) 2018 Sebastian Dröge <sebastian@centricular.com>
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use super::gst_base_sys;
+use super::Aggregator;
+use glib::prelude::*;
+use glib::signal::{connect_raw, SignalHandlerId};
+use glib::translate::*;
+use glib::IsA;
+use glib::Value;
+use gst;
+use std::boxed::Box as Box_;
+use std::mem::transmute;
+
+pub trait AggregatorExtManual: 'static {
+ fn finish_buffer(&self, buffer: gst::Buffer) -> Result<gst::FlowSuccess, gst::FlowError>;
+ fn get_property_min_upstream_latency(&self) -> gst::ClockTime;
+
+ fn set_property_min_upstream_latency(&self, min_upstream_latency: gst::ClockTime);
+
+ fn connect_property_min_upstream_latency_notify<F: Fn(&Self) + Send + Sync + 'static>(
+ &self,
+ f: F,
+ ) -> SignalHandlerId;
+}
+
+impl<O: IsA<Aggregator>> AggregatorExtManual for O {
+ fn finish_buffer(&self, buffer: gst::Buffer) -> Result<gst::FlowSuccess, gst::FlowError> {
+ let ret: gst::FlowReturn = unsafe {
+ from_glib(gst_base_sys::gst_aggregator_finish_buffer(
+ self.as_ref().to_glib_none().0,
+ buffer.into_ptr(),
+ ))
+ };
+ ret.into_result()
+ }
+
+ fn get_property_min_upstream_latency(&self) -> gst::ClockTime {
+ unsafe {
+ let mut value = Value::from_type(<gst::ClockTime as StaticType>::static_type());
+ gobject_sys::g_object_get_property(
+ self.to_glib_none().0 as *mut gobject_sys::GObject,
+ b"min-upstream-latency\0".as_ptr() as *const _,
+ value.to_glib_none_mut().0,
+ );
+ value
+ .get()
+ .expect("AggregatorExtManual::get_property_min_upstream_latency")
+ .unwrap()
+ }
+ }
+
+ fn set_property_min_upstream_latency(&self, min_upstream_latency: gst::ClockTime) {
+ unsafe {
+ gobject_sys::g_object_set_property(
+ self.to_glib_none().0 as *mut gobject_sys::GObject,
+ b"min-upstream-latency\0".as_ptr() as *const _,
+ Value::from(&min_upstream_latency).to_glib_none().0,
+ );
+ }
+ }
+
+ fn connect_property_min_upstream_latency_notify<F: Fn(&Self) + Send + Sync + 'static>(
+ &self,
+ f: F,
+ ) -> SignalHandlerId {
+ unsafe {
+ let f: Box_<F> = Box_::new(f);
+ connect_raw(
+ self.as_ptr() as *mut _,
+ b"notify::min-upstream-latency\0".as_ptr() as *const _,
+ Some(transmute(
+ notify_min_upstream_latency_trampoline::<Self, F> as usize,
+ )),
+ Box_::into_raw(f),
+ )
+ }
+ }
+}
+
+unsafe extern "C" fn notify_min_upstream_latency_trampoline<P, F: Fn(&P) + Send + Sync + 'static>(
+ this: *mut gst_base_sys::GstAggregator,
+ _param_spec: glib_sys::gpointer,
+ f: glib_sys::gpointer,
+) where
+ P: IsA<Aggregator>,
+{
+ let f: &F = &*(f as *const F);
+ f(&Aggregator::from_glib_borrow(this).unsafe_cast_ref())
+}
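
A minimal usage sketch for the manual AggregatorExtManual bindings above, assuming a hypothetical `agg: &Aggregator` obtained elsewhere, the trait in scope, and the gstreamer-rs conventions used throughout this patch (ClockTime arithmetic with gst::MSECOND); the 20ms value is purely illustrative.

// Sketch: exercising AggregatorExtManual. `agg` is a hypothetical &Aggregator.
fn sketch_min_upstream_latency(agg: &Aggregator) {
    // Read the current value of the min-upstream-latency property.
    let current = agg.get_property_min_upstream_latency();

    // Bump it by 20ms; ClockTime supports arithmetic with gst::MSECOND.
    agg.set_property_min_upstream_latency(current + 20 * gst::MSECOND);

    // Get notified whenever the property changes again.
    agg.connect_property_min_upstream_latency_notify(|agg| {
        println!(
            "min-upstream-latency is now {}",
            agg.get_property_min_upstream_latency()
        );
    });
}
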
diff --git a/utils/gst-plugin-fallbackswitch/src/base/aggregator_pad.rs b/utils/gst-plugin-fallbackswitch/src/base/aggregator_pad.rs
new file mode 100644
index 000000000..eb57f6143
--- /dev/null
+++ b/utils/gst-plugin-fallbackswitch/src/base/aggregator_pad.rs
@@ -0,0 +1,28 @@
+// Copyright (C) 2018 Sebastian Dröge <sebastian@centricular.com>
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use super::gst_base_sys;
+use super::AggregatorPad;
+use glib::object::IsA;
+use glib::translate::*;
+use gst;
+use gst_sys;
+
+pub trait AggregatorPadExtManual: 'static {
+ fn get_segment(&self) -> gst::Segment;
+}
+
+impl<O: IsA<AggregatorPad>> AggregatorPadExtManual for O {
+ fn get_segment(&self) -> gst::Segment {
+ unsafe {
+ let ptr: &gst_base_sys::GstAggregatorPad = &*(self.as_ptr() as *const _);
+ super::utils::MutexGuard::lock(&ptr.parent.object.lock);
+ from_glib_none(&ptr.segment as *const gst_sys::GstSegment)
+ }
+ }
+}
diff --git a/utils/gst-plugin-fallbackswitch/src/base/auto/aggregator.rs b/utils/gst-plugin-fallbackswitch/src/base/auto/aggregator.rs
new file mode 100644
index 000000000..912892eef
--- /dev/null
+++ b/utils/gst-plugin-fallbackswitch/src/base/auto/aggregator.rs
@@ -0,0 +1,190 @@
+// This file was generated by gir (https://github.com/gtk-rs/gir)
+// from gir-files (https://github.com/gtk-rs/gir-files)
+// DO NOT EDIT
+
+use super::super::gst_base_sys;
+use glib::object::Cast;
+use glib::object::IsA;
+use glib::signal::connect_raw;
+use glib::signal::SignalHandlerId;
+use glib::translate::*;
+use glib::StaticType;
+use glib::Value;
+use glib_sys;
+use gobject_sys;
+use gst;
+use std::boxed::Box as Box_;
+use std::mem::transmute;
+
+glib_wrapper! {
+ pub struct Aggregator(Object<gst_base_sys::GstAggregator, gst_base_sys::GstAggregatorClass, AggregatorClass>) @extends gst::Element, gst::Object;
+
+ match fn {
+ get_type => || gst_base_sys::gst_aggregator_get_type(),
+ }
+}
+
+unsafe impl Send for Aggregator {}
+unsafe impl Sync for Aggregator {}
+
+pub const NONE_AGGREGATOR: Option<&Aggregator> = None;
+
+pub trait AggregatorExt: 'static {
+ fn get_buffer_pool(&self) -> Option<gst::BufferPool>;
+
+ fn get_latency(&self) -> gst::ClockTime;
+
+ fn set_latency(&self, min_latency: gst::ClockTime, max_latency: gst::ClockTime);
+
+ fn set_src_caps(&self, caps: &gst::Caps);
+
+ fn simple_get_next_time(&self) -> gst::ClockTime;
+
+ fn get_property_start_time(&self) -> u64;
+
+ fn set_property_start_time(&self, start_time: u64);
+
+ fn connect_property_latency_notify<F: Fn(&Self) + Send + Sync + 'static>(
+ &self,
+ f: F,
+ ) -> SignalHandlerId;
+
+ fn connect_property_start_time_notify<F: Fn(&Self) + Send + Sync + 'static>(
+ &self,
+ f: F,
+ ) -> SignalHandlerId;
+
+ fn negotiate(&self) -> bool;
+}
+
+impl<O: IsA<Aggregator>> AggregatorExt for O {
+ //fn get_allocator(&self, allocator: /*Ignored*/gst::Allocator, params: /*Ignored*/gst::AllocationParams) {
+ // unsafe { TODO: call gst_base_sys:gst_aggregator_get_allocator() }
+ //}
+
+ fn get_buffer_pool(&self) -> Option<gst::BufferPool> {
+ unsafe {
+ from_glib_full(gst_base_sys::gst_aggregator_get_buffer_pool(
+ self.as_ref().to_glib_none().0,
+ ))
+ }
+ }
+
+ fn get_latency(&self) -> gst::ClockTime {
+ unsafe {
+ from_glib(gst_base_sys::gst_aggregator_get_latency(
+ self.as_ref().to_glib_none().0,
+ ))
+ }
+ }
+
+ fn set_latency(&self, min_latency: gst::ClockTime, max_latency: gst::ClockTime) {
+ unsafe {
+ gst_base_sys::gst_aggregator_set_latency(
+ self.as_ref().to_glib_none().0,
+ min_latency.to_glib(),
+ max_latency.to_glib(),
+ );
+ }
+ }
+
+ fn set_src_caps(&self, caps: &gst::Caps) {
+ unsafe {
+ gst_base_sys::gst_aggregator_set_src_caps(
+ self.as_ref().to_glib_none().0,
+ caps.to_glib_none().0,
+ );
+ }
+ }
+
+ fn simple_get_next_time(&self) -> gst::ClockTime {
+ unsafe {
+ from_glib(gst_base_sys::gst_aggregator_simple_get_next_time(
+ self.as_ref().to_glib_none().0,
+ ))
+ }
+ }
+
+ fn get_property_start_time(&self) -> u64 {
+ unsafe {
+ let mut value = Value::from_type(<u64 as StaticType>::static_type());
+ gobject_sys::g_object_get_property(
+ self.to_glib_none().0 as *mut gobject_sys::GObject,
+ b"start-time\0".as_ptr() as *const _,
+ value.to_glib_none_mut().0,
+ );
+ value
+ .get()
+ .expect("Return Value for property `start-time` getter")
+ .unwrap()
+ }
+ }
+
+ fn set_property_start_time(&self, start_time: u64) {
+ unsafe {
+ gobject_sys::g_object_set_property(
+ self.to_glib_none().0 as *mut gobject_sys::GObject,
+ b"start-time\0".as_ptr() as *const _,
+ Value::from(&start_time).to_glib_none().0,
+ );
+ }
+ }
+
+ fn connect_property_latency_notify<F: Fn(&Self) + Send + Sync + 'static>(
+ &self,
+ f: F,
+ ) -> SignalHandlerId {
+ unsafe extern "C" fn notify_latency_trampoline<P, F: Fn(&P) + Send + Sync + 'static>(
+ this: *mut gst_base_sys::GstAggregator,
+ _param_spec: glib_sys::gpointer,
+ f: glib_sys::gpointer,
+ ) where
+ P: IsA<Aggregator>,
+ {
+ let f: &F = &*(f as *const F);
+ f(&Aggregator::from_glib_borrow(this).unsafe_cast_ref())
+ }
+ unsafe {
+ let f: Box_<F> = Box_::new(f);
+ connect_raw(
+ self.as_ptr() as *mut _,
+ b"notify::latency\0".as_ptr() as *const _,
+ Some(transmute(notify_latency_trampoline::<Self, F> as usize)),
+ Box_::into_raw(f),
+ )
+ }
+ }
+
+ fn connect_property_start_time_notify<F: Fn(&Self) + Send + Sync + 'static>(
+ &self,
+ f: F,
+ ) -> SignalHandlerId {
+ unsafe extern "C" fn notify_start_time_trampoline<P, F: Fn(&P) + Send + Sync + 'static>(
+ this: *mut gst_base_sys::GstAggregator,
+ _param_spec: glib_sys::gpointer,
+ f: glib_sys::gpointer,
+ ) where
+ P: IsA<Aggregator>,
+ {
+ let f: &F = &*(f as *const F);
+ f(&Aggregator::from_glib_borrow(this).unsafe_cast_ref())
+ }
+ unsafe {
+ let f: Box_<F> = Box_::new(f);
+ connect_raw(
+ self.as_ptr() as *mut _,
+ b"notify::start-time\0".as_ptr() as *const _,
+ Some(transmute(notify_start_time_trampoline::<Self, F> as usize)),
+ Box_::into_raw(f),
+ )
+ }
+ }
+
+ fn negotiate(&self) -> bool {
+ unsafe {
+ from_glib(gst_base_sys::gst_aggregator_negotiate(
+ self.as_ref().to_glib_none().0,
+ ))
+ }
+ }
+}
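
A hedged usage sketch for the generated AggregatorExt trait above; `agg` is again a hypothetical &Aggregator and the 10ms latency is an arbitrary illustration.

// Sketch: typical AggregatorExt calls on a hypothetical `agg`.
fn sketch_aggregator_ext(agg: &Aggregator) {
    // Advertise a fixed minimum latency of 10ms and no maximum.
    agg.set_latency(10 * gst::MSECOND, gst::CLOCK_TIME_NONE);

    // start-time is exposed as a plain u64 (nanoseconds).
    agg.set_property_start_time(0);

    // Renegotiate source caps with downstream, e.g. after a reconfigure.
    if !agg.negotiate() {
        println!("negotiation failed, current latency: {}", agg.get_latency());
    }
}
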
diff --git a/utils/gst-plugin-fallbackswitch/src/base/auto/aggregator_pad.rs b/utils/gst-plugin-fallbackswitch/src/base/auto/aggregator_pad.rs
new file mode 100644
index 000000000..7c8df57e5
--- /dev/null
+++ b/utils/gst-plugin-fallbackswitch/src/base/auto/aggregator_pad.rs
@@ -0,0 +1,182 @@
+// This file was generated by gir (https://github.com/gtk-rs/gir)
+// from gir-files (https://github.com/gtk-rs/gir-files)
+// DO NOT EDIT
+
+use super::super::gst_base_sys;
+use glib::object::Cast;
+use glib::object::IsA;
+use glib::signal::connect_raw;
+use glib::signal::SignalHandlerId;
+use glib::translate::*;
+use glib::StaticType;
+use glib::Value;
+use glib_sys;
+use gobject_sys;
+use gst;
+use gst_sys;
+use std::boxed::Box as Box_;
+use std::mem::transmute;
+
+glib_wrapper! {
+ pub struct AggregatorPad(Object<gst_base_sys::GstAggregatorPad, gst_base_sys::GstAggregatorPadClass, AggregatorPadClass>) @extends gst::Pad, gst::Object;
+
+ match fn {
+ get_type => || gst_base_sys::gst_aggregator_pad_get_type(),
+ }
+}
+
+unsafe impl Send for AggregatorPad {}
+unsafe impl Sync for AggregatorPad {}
+
+pub const NONE_AGGREGATOR_PAD: Option<&AggregatorPad> = None;
+
+pub trait AggregatorPadExt: 'static {
+ fn drop_buffer(&self) -> bool;
+
+ fn has_buffer(&self) -> bool;
+
+ fn is_eos(&self) -> bool;
+
+ fn peek_buffer(&self) -> Option<gst::Buffer>;
+
+ fn pop_buffer(&self) -> Option<gst::Buffer>;
+
+ fn get_property_emit_signals(&self) -> bool;
+
+ fn set_property_emit_signals(&self, emit_signals: bool);
+
+ fn connect_buffer_consumed<F: Fn(&Self, &gst::Buffer) + Send + Sync + 'static>(
+ &self,
+ f: F,
+ ) -> SignalHandlerId;
+
+ fn connect_property_emit_signals_notify<F: Fn(&Self) + Send + Sync + 'static>(
+ &self,
+ f: F,
+ ) -> SignalHandlerId;
+}
+
+impl<O: IsA<AggregatorPad>> AggregatorPadExt for O {
+ fn drop_buffer(&self) -> bool {
+ unsafe {
+ from_glib(gst_base_sys::gst_aggregator_pad_drop_buffer(
+ self.as_ref().to_glib_none().0,
+ ))
+ }
+ }
+
+ fn has_buffer(&self) -> bool {
+ unsafe {
+ from_glib(gst_base_sys::gst_aggregator_pad_has_buffer(
+ self.as_ref().to_glib_none().0,
+ ))
+ }
+ }
+
+ fn is_eos(&self) -> bool {
+ unsafe {
+ from_glib(gst_base_sys::gst_aggregator_pad_is_eos(
+ self.as_ref().to_glib_none().0,
+ ))
+ }
+ }
+
+ fn peek_buffer(&self) -> Option<gst::Buffer> {
+ unsafe {
+ from_glib_full(gst_base_sys::gst_aggregator_pad_peek_buffer(
+ self.as_ref().to_glib_none().0,
+ ))
+ }
+ }
+
+ fn pop_buffer(&self) -> Option<gst::Buffer> {
+ unsafe {
+ from_glib_full(gst_base_sys::gst_aggregator_pad_pop_buffer(
+ self.as_ref().to_glib_none().0,
+ ))
+ }
+ }
+
+ fn get_property_emit_signals(&self) -> bool {
+ unsafe {
+ let mut value = Value::from_type(<bool as StaticType>::static_type());
+ gobject_sys::g_object_get_property(
+ self.to_glib_none().0 as *mut gobject_sys::GObject,
+ b"emit-signals\0".as_ptr() as *const _,
+ value.to_glib_none_mut().0,
+ );
+ value
+ .get()
+ .expect("Return Value for property `emit-signals` getter")
+ .unwrap()
+ }
+ }
+
+ fn set_property_emit_signals(&self, emit_signals: bool) {
+ unsafe {
+ gobject_sys::g_object_set_property(
+ self.to_glib_none().0 as *mut gobject_sys::GObject,
+ b"emit-signals\0".as_ptr() as *const _,
+ Value::from(&emit_signals).to_glib_none().0,
+ );
+ }
+ }
+
+ fn connect_buffer_consumed<F: Fn(&Self, &gst::Buffer) + Send + Sync + 'static>(
+ &self,
+ f: F,
+ ) -> SignalHandlerId {
+ unsafe extern "C" fn buffer_consumed_trampoline<
+ P,
+ F: Fn(&P, &gst::Buffer) + Send + Sync + 'static,
+ >(
+ this: *mut gst_base_sys::GstAggregatorPad,
+ object: *mut gst_sys::GstBuffer,
+ f: glib_sys::gpointer,
+ ) where
+ P: IsA<AggregatorPad>,
+ {
+ let f: &F = &*(f as *const F);
+ f(
+ &AggregatorPad::from_glib_borrow(this).unsafe_cast_ref(),
+ &from_glib_borrow(object),
+ )
+ }
+ unsafe {
+ let f: Box_<F> = Box_::new(f);
+ connect_raw(
+ self.as_ptr() as *mut _,
+ b"buffer-consumed\0".as_ptr() as *const _,
+ Some(transmute(buffer_consumed_trampoline::<Self, F> as usize)),
+ Box_::into_raw(f),
+ )
+ }
+ }
+
+ fn connect_property_emit_signals_notify<F: Fn(&Self) + Send + Sync + 'static>(
+ &self,
+ f: F,
+ ) -> SignalHandlerId {
+ unsafe extern "C" fn notify_emit_signals_trampoline<P, F: Fn(&P) + Send + Sync + 'static>(
+ this: *mut gst_base_sys::GstAggregatorPad,
+ _param_spec: glib_sys::gpointer,
+ f: glib_sys::gpointer,
+ ) where
+ P: IsA<AggregatorPad>,
+ {
+ let f: &F = &*(f as *const F);
+ f(&AggregatorPad::from_glib_borrow(this).unsafe_cast_ref())
+ }
+ unsafe {
+ let f: Box_<F> = Box_::new(f);
+ connect_raw(
+ self.as_ptr() as *mut _,
+ b"notify::emit-signals\0".as_ptr() as *const _,
+ Some(transmute(
+ notify_emit_signals_trampoline::<Self, F> as usize,
+ )),
+ Box_::into_raw(f),
+ )
+ }
+ }
+}
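
A short sketch of the AggregatorPadExt trait above, assuming a hypothetical `pad: &AggregatorPad` with the trait in scope; the buffer-consumed signal is gated on the emit-signals property, as the generated bindings expose it.

// Sketch: draining a hypothetical pad `pad`.
fn sketch_drain_pad(pad: &AggregatorPad) {
    // buffer-consumed is only emitted when emit-signals is TRUE.
    pad.set_property_emit_signals(true);
    pad.connect_buffer_consumed(|pad, buffer| {
        println!("{:?} consumed {:?}", pad, buffer);
    });

    // Take buffers out of the pad queue until it is empty or EOS.
    while !pad.is_eos() {
        match pad.pop_buffer() {
            Some(buffer) => println!("got {:?}", buffer),
            None => break,
        }
    }
}
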
diff --git a/utils/gst-plugin-fallbackswitch/src/base/auto/mod.rs b/utils/gst-plugin-fallbackswitch/src/base/auto/mod.rs
new file mode 100644
index 000000000..0081d7c4f
--- /dev/null
+++ b/utils/gst-plugin-fallbackswitch/src/base/auto/mod.rs
@@ -0,0 +1,13 @@
+mod aggregator;
+pub use self::aggregator::AggregatorExt;
+pub use self::aggregator::{Aggregator, AggregatorClass, NONE_AGGREGATOR};
+
+mod aggregator_pad;
+pub use self::aggregator_pad::AggregatorPadExt;
+pub use self::aggregator_pad::{AggregatorPad, AggregatorPadClass, NONE_AGGREGATOR_PAD};
+
+#[doc(hidden)]
+pub mod traits {
+ pub use super::AggregatorExt;
+ pub use super::AggregatorPadExt;
+}
diff --git a/utils/gst-plugin-fallbackswitch/src/base/gstaggregator.c b/utils/gst-plugin-fallbackswitch/src/base/gstaggregator.c
new file mode 100644
index 000000000..06389895e
--- /dev/null
+++ b/utils/gst-plugin-fallbackswitch/src/base/gstaggregator.c
@@ -0,0 +1,3465 @@
+/* GStreamer aggregator base class
+ * Copyright (C) 2014 Mathieu Duponchelle <mathieu.duponchelle@opencreed.com>
+ * Copyright (C) 2014 Thibault Saunier <tsaunier@gnome.org>
+ *
+ * gstaggregator.c:
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ */
+/**
+ * SECTION: gstaggregator
+ * @title: GstAggregator
+ * @short_description: Base class for mixers and muxers, manages a set of input
+ * pads and aggregates their streams
+ * @see_also: gstcollectpads for historical reasons.
+ *
+ * Manages a set of pads with the purpose of aggregating their buffers.
+ * Control is given to the subclass when all pads have data.
+ *
+ * * Base class for mixers and muxers. Subclasses should at least implement
+ * the #GstAggregatorClass.aggregate() virtual method.
+ *
+ * * Installs a #GstPadChainFunction, a #GstPadEventFullFunction and a
+ * #GstPadQueryFunction to queue all serialized data packets per sink pad.
+ * Subclasses should not overwrite those, but instead implement
+ * #GstAggregatorClass.sink_event() and #GstAggregatorClass.sink_query() as
+ * needed.
+ *
+ * * When data is queued on all pads, the aggregate vmethod is called.
+ *
+ * * One can peek at the data on any given GstAggregatorPad with the
+ * gst_aggregator_pad_peek_buffer () method, and remove it from the pad
+ * with the gst_aggregator_pad_pop_buffer () method. When a buffer
+ * has been taken with pop_buffer (), a new buffer can be queued
+ * on that pad.
+ *
+ * * If the subclass wishes to push a buffer downstream in its aggregate
+ * implementation, it should do so through the
+ * gst_aggregator_finish_buffer () method. This method will take care
+ * of sending and ordering mandatory events such as stream start, caps
+ * and segment.
+ *
+ * * The same goes for EOS events, which should not be pushed directly by the
+ *   subclass; it should instead return GST_FLOW_EOS from its aggregate
+ *   implementation.
+ *
+ * * Note that the aggregator logic regarding gap event handling is to turn
+ * these into gap buffers with matching PTS and duration. It will also
+ * flag these buffers with GST_BUFFER_FLAG_GAP and GST_BUFFER_FLAG_DROPPABLE
+ * to ease their identification and subsequent processing.
+ *
+ * * Subclasses must use (a subclass of) #GstAggregatorPad for both their
+ * sink and source pads.
+ * See gst_element_class_add_static_pad_template_with_gtype().
+ *
+ * This class used to live in gst-plugins-bad and was moved to core.
+ *
+ * Since: 1.14
+ */
+
+/**
+ * SECTION: gstaggregatorpad
+ * @title: GstAggregatorPad
+ * @short_description: #GstPad subclass for pads managed by #GstAggregator
+ * @see_also: gstcollectpads for historical reasons.
+ *
+ * Pads managed by a #GstAggregator subclass.
+ *
+ * This class used to live in gst-plugins-bad and was moved to core.
+ *
+ * Since: 1.14
+ */
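
The aggregate cycle described above can also be sketched with the Rust bindings vendored in this directory (AggregatorPadExt and AggregatorExtManual). This is illustration only: `agg` and `pads` are hypothetical, and a real subclass would do this from its aggregate() vmethod via the subclass module rather than a free function.

// Sketch: one pass of the aggregate cycle, expressed with the Rust bindings.
fn sketch_aggregate_cycle(
    agg: &Aggregator,
    pads: &[AggregatorPad],
) -> Result<gst::FlowSuccess, gst::FlowError> {
    for pad in pads {
        // peek_buffer() would allow inspecting the queued data first;
        // pop_buffer() removes it so a new buffer can be queued on the pad.
        if let Some(buffer) = pad.pop_buffer() {
            // finish_buffer() pushes downstream and takes care of sending the
            // mandatory stream-start, caps and segment events first.
            agg.finish_buffer(buffer)?;
        }
    }
    // A subclass signals end of stream by returning GST_FLOW_EOS
    // (gst::FlowError::Eos) rather than pushing an EOS event itself.
    Ok(gst::FlowSuccess::Ok)
}
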
+
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include <string.h> /* strlen */
+
+#include "gstaggregator.h"
+
+typedef enum
+{
+ GST_AGGREGATOR_START_TIME_SELECTION_ZERO,
+ GST_AGGREGATOR_START_TIME_SELECTION_FIRST,
+ GST_AGGREGATOR_START_TIME_SELECTION_SET
+} GstAggregatorStartTimeSelection;
+
+static GType
+gst_aggregator_start_time_selection_get_type (void)
+{
+ static GType gtype = 0;
+
+ if (gtype == 0) {
+ static const GEnumValue values[] = {
+ {GST_AGGREGATOR_START_TIME_SELECTION_ZERO,
+ "Start at 0 running time (default)", "zero"},
+ {GST_AGGREGATOR_START_TIME_SELECTION_FIRST,
+ "Start at first observed input running time", "first"},
+ {GST_AGGREGATOR_START_TIME_SELECTION_SET,
+ "Set start time with start-time property", "set"},
+ {0, NULL, NULL}
+ };
+
+ gtype =
+ g_enum_register_static ("GstAggregatorFallbackStartTimeSelection",
+ values);
+ }
+ return gtype;
+}
+
+/* Might become API */
+#if 0
+static void gst_aggregator_merge_tags (GstAggregator * aggregator,
+ const GstTagList * tags, GstTagMergeMode mode);
+#endif
+static void gst_aggregator_set_latency_property (GstAggregator * agg,
+ GstClockTime latency);
+static GstClockTime gst_aggregator_get_latency_property (GstAggregator * agg);
+
+static GstClockTime gst_aggregator_get_latency_unlocked (GstAggregator * self);
+
+static void gst_aggregator_pad_buffer_consumed (GstAggregatorPad * pad,
+ GstBuffer * buffer);
+
+GST_DEBUG_CATEGORY_STATIC (aggregator_debug);
+#define GST_CAT_DEFAULT aggregator_debug
+
+/* Locking order, locks in this element must always be taken in this order
+ *
+ * standard sink pad stream lock -> GST_PAD_STREAM_LOCK (aggpad)
+ * Aggregator pad flush lock -> PAD_FLUSH_LOCK(aggpad)
+ * standard src pad stream lock -> GST_PAD_STREAM_LOCK (srcpad)
+ * Aggregator src lock -> SRC_LOCK(agg) w/ SRC_WAIT/BROADCAST
+ * standard element object lock -> GST_OBJECT_LOCK(agg)
+ * Aggregator pad lock -> PAD_LOCK (aggpad) w/ PAD_WAIT/BROADCAST_EVENT(aggpad)
+ * standard src pad object lock -> GST_OBJECT_LOCK(srcpad)
+ * standard sink pad object lock -> GST_OBJECT_LOCK(aggpad)
+ */
+
+/* GstAggregatorPad definitions */
+#define PAD_LOCK(pad) G_STMT_START { \
+ GST_TRACE_OBJECT (pad, "Taking PAD lock from thread %p", \
+ g_thread_self()); \
+ g_mutex_lock(&pad->priv->lock); \
+ GST_TRACE_OBJECT (pad, "Took PAD lock from thread %p", \
+ g_thread_self()); \
+ } G_STMT_END
+
+#define PAD_UNLOCK(pad) G_STMT_START { \
+ GST_TRACE_OBJECT (pad, "Releasing PAD lock from thread %p", \
+ g_thread_self()); \
+ g_mutex_unlock(&pad->priv->lock); \
+ GST_TRACE_OBJECT (pad, "Release PAD lock from thread %p", \
+ g_thread_self()); \
+ } G_STMT_END
+
+
+#define PAD_WAIT_EVENT(pad) G_STMT_START { \
+ GST_LOG_OBJECT (pad, "Waiting for buffer to be consumed thread %p", \
+ g_thread_self()); \
+ g_cond_wait(&(((GstAggregatorPad* )pad)->priv->event_cond), \
+ (&((GstAggregatorPad*)pad)->priv->lock)); \
+ GST_LOG_OBJECT (pad, "DONE Waiting for buffer to be consumed on thread %p", \
+ g_thread_self()); \
+ } G_STMT_END
+
+#define PAD_BROADCAST_EVENT(pad) G_STMT_START { \
+ GST_LOG_OBJECT (pad, "Signaling buffer consumed from thread %p", \
+ g_thread_self()); \
+ g_cond_broadcast(&(((GstAggregatorPad* )pad)->priv->event_cond)); \
+ } G_STMT_END
+
+
+#define PAD_FLUSH_LOCK(pad) G_STMT_START { \
+ GST_TRACE_OBJECT (pad, "Taking lock from thread %p", \
+ g_thread_self()); \
+ g_mutex_lock(&pad->priv->flush_lock); \
+ GST_TRACE_OBJECT (pad, "Took lock from thread %p", \
+ g_thread_self()); \
+ } G_STMT_END
+
+#define PAD_FLUSH_UNLOCK(pad) G_STMT_START { \
+ GST_TRACE_OBJECT (pad, "Releasing lock from thread %p", \
+ g_thread_self()); \
+ g_mutex_unlock(&pad->priv->flush_lock); \
+ GST_TRACE_OBJECT (pad, "Release lock from thread %p", \
+ g_thread_self()); \
+ } G_STMT_END
+
+#define SRC_LOCK(self) G_STMT_START { \
+ GST_TRACE_OBJECT (self, "Taking src lock from thread %p", \
+ g_thread_self()); \
+ g_mutex_lock(&self->priv->src_lock); \
+ GST_TRACE_OBJECT (self, "Took src lock from thread %p", \
+ g_thread_self()); \
+ } G_STMT_END
+
+#define SRC_UNLOCK(self) G_STMT_START { \
+ GST_TRACE_OBJECT (self, "Releasing src lock from thread %p", \
+ g_thread_self()); \
+ g_mutex_unlock(&self->priv->src_lock); \
+ GST_TRACE_OBJECT (self, "Released src lock from thread %p", \
+ g_thread_self()); \
+ } G_STMT_END
+
+#define SRC_WAIT(self) G_STMT_START { \
+ GST_LOG_OBJECT (self, "Waiting for src on thread %p", \
+ g_thread_self()); \
+ g_cond_wait(&(self->priv->src_cond), &(self->priv->src_lock)); \
+ GST_LOG_OBJECT (self, "DONE Waiting for src on thread %p", \
+ g_thread_self()); \
+ } G_STMT_END
+
+#define SRC_BROADCAST(self) G_STMT_START { \
+ GST_LOG_OBJECT (self, "Signaling src from thread %p", \
+ g_thread_self()); \
+ if (self->priv->aggregate_id) \
+ gst_clock_id_unschedule (self->priv->aggregate_id); \
+ g_cond_broadcast(&(self->priv->src_cond)); \
+ } G_STMT_END
+
+struct _GstAggregatorPadPrivate
+{
+ /* Following fields are protected by the PAD_LOCK */
+ GstFlowReturn flow_return;
+
+ guint32 last_flush_start_seqnum;
+ guint32 last_flush_stop_seqnum;
+
+ gboolean first_buffer;
+
+ GQueue data; /* buffers, events and queries */
+ GstBuffer *clipped_buffer;
+ guint num_buffers;
+
+ /* used to track fill state of queues, only used with live-src and when
+ * latency property is set to > 0 */
+ GstClockTime head_position;
+ GstClockTime tail_position;
+ GstClockTime head_time; /* running time */
+ GstClockTime tail_time;
+ GstClockTime time_level; /* how much head is ahead of tail */
+ GstSegment head_segment; /* segment before the queue */
+
+ gboolean negotiated;
+
+ gboolean eos;
+
+ GMutex lock;
+ GCond event_cond;
+ /* This lock prevents a flush start processing happening while
+ * the chain function is also happening.
+ */
+ GMutex flush_lock;
+
+ /* properties */
+ gboolean emit_signals;
+};
+
+/* Must be called with PAD_LOCK held */
+static void
+gst_aggregator_pad_reset_unlocked (GstAggregatorPad * aggpad)
+{
+ aggpad->priv->eos = FALSE;
+ aggpad->priv->flow_return = GST_FLOW_OK;
+ GST_OBJECT_LOCK (aggpad);
+ gst_segment_init (&aggpad->segment, GST_FORMAT_UNDEFINED);
+ gst_segment_init (&aggpad->priv->head_segment, GST_FORMAT_UNDEFINED);
+ GST_OBJECT_UNLOCK (aggpad);
+ aggpad->priv->head_position = GST_CLOCK_TIME_NONE;
+ aggpad->priv->tail_position = GST_CLOCK_TIME_NONE;
+ aggpad->priv->head_time = GST_CLOCK_TIME_NONE;
+ aggpad->priv->tail_time = GST_CLOCK_TIME_NONE;
+ aggpad->priv->time_level = 0;
+ aggpad->priv->first_buffer = TRUE;
+}
+
+static gboolean
+gst_aggregator_pad_flush (GstAggregatorPad * aggpad, GstAggregator * agg)
+{
+ GstAggregatorPadClass *klass = GST_AGGREGATOR_PAD_GET_CLASS (aggpad);
+
+ PAD_LOCK (aggpad);
+ gst_aggregator_pad_reset_unlocked (aggpad);
+ PAD_UNLOCK (aggpad);
+
+ if (klass->flush)
+ return (klass->flush (aggpad, agg) == GST_FLOW_OK);
+
+ return TRUE;
+}
+
+/*************************************
+ * GstAggregator implementation *
+ *************************************/
+static GstElementClass *aggregator_parent_class = NULL;
+static gint aggregator_private_offset = 0;
+
+/* All members are protected by the object lock unless otherwise noted */
+
+struct _GstAggregatorPrivate
+{
+ gint max_padserial;
+
+ /* Our state is >= PAUSED */
+ gboolean running; /* protected by src_lock */
+
+ /* seqnum from last seek or common seqnum to flush start events received
+ * on all pads, for flushing without a seek */
+ guint32 next_seqnum;
+ /* seqnum to apply to synthetic segment/eos events */
+ guint32 seqnum;
+ gboolean send_stream_start; /* protected by srcpad stream lock */
+ gboolean send_segment;
+ gboolean flushing;
+ gboolean send_eos; /* protected by srcpad stream lock */
+
+ GstCaps *srccaps; /* protected by the srcpad stream lock */
+
+ GstTagList *tags;
+ gboolean tags_changed;
+
+ gboolean peer_latency_live; /* protected by src_lock */
+ GstClockTime peer_latency_min; /* protected by src_lock */
+ GstClockTime peer_latency_max; /* protected by src_lock */
+ gboolean has_peer_latency; /* protected by src_lock */
+
+ GstClockTime sub_latency_min; /* protected by src_lock */
+ GstClockTime sub_latency_max; /* protected by src_lock */
+
+ GstClockTime upstream_latency_min; /* protected by src_lock */
+
+ /* aggregate */
+ GstClockID aggregate_id; /* protected by src_lock */
+ GMutex src_lock;
+ GCond src_cond;
+
+ gboolean first_buffer; /* protected by object lock */
+ GstAggregatorStartTimeSelection start_time_selection;
+ GstClockTime start_time;
+
+ /* protected by the object lock */
+ GstQuery *allocation_query;
+ GstAllocator *allocator;
+ GstBufferPool *pool;
+ GstAllocationParams allocation_params;
+
+ /* properties */
+ gint64 latency; /* protected by both src_lock and all pad locks */
+};
+
+/* Seek event forwarding helper */
+typedef struct
+{
+ /* parameters */
+ GstEvent *event;
+ gboolean flush;
+ gboolean only_to_active_pads;
+
+ /* results */
+ gboolean result;
+ gboolean one_actually_seeked;
+} EventData;
+
+#define DEFAULT_LATENCY 0
+#define DEFAULT_MIN_UPSTREAM_LATENCY 0
+#define DEFAULT_START_TIME_SELECTION GST_AGGREGATOR_START_TIME_SELECTION_ZERO
+#define DEFAULT_START_TIME (-1)
+
+enum
+{
+ PROP_0,
+ PROP_LATENCY,
+ PROP_MIN_UPSTREAM_LATENCY,
+ PROP_START_TIME_SELECTION,
+ PROP_START_TIME,
+ PROP_LAST
+};
+
+static GstFlowReturn gst_aggregator_pad_chain_internal (GstAggregator * self,
+ GstAggregatorPad * aggpad, GstBuffer * buffer, gboolean head);
+
+static gboolean
+gst_aggregator_pad_queue_is_empty (GstAggregatorPad * pad)
+{
+ return (g_queue_peek_tail (&pad->priv->data) == NULL &&
+ pad->priv->clipped_buffer == NULL);
+}
+
+/* Will return FALSE if any non-EOS pad has no buffer available, or
+ * if at least one of the pads has an event or query at the top of its queue.
+ *
+ * Only returns TRUE if all non-EOS pads have a buffer available at the top of
+ * their queue.
+ */
+static gboolean
+gst_aggregator_check_pads_ready (GstAggregator * self,
+ gboolean * have_event_or_query_ret)
+{
+ GstAggregatorPad *pad = NULL;
+ GList *l, *sinkpads;
+ gboolean have_buffer = TRUE;
+ gboolean have_event_or_query = FALSE;
+
+ GST_LOG_OBJECT (self, "checking pads");
+
+ GST_OBJECT_LOCK (self);
+
+ sinkpads = GST_ELEMENT_CAST (self)->sinkpads;
+ if (sinkpads == NULL)
+ goto no_sinkpads;
+
+ for (l = sinkpads; l != NULL; l = l->next) {
+ pad = l->data;
+
+ PAD_LOCK (pad);
+
+    /* If there's an event or query at the top of the queue and we haven't
+     * yet taken the top buffer out and stored it as clipped_buffer, remember
+     * that and exit the loop. We first have to handle all events/queries
+     * before we handle any buffers. */
+ if (!pad->priv->clipped_buffer
+ && (GST_IS_EVENT (g_queue_peek_tail (&pad->priv->data))
+ || GST_IS_QUERY (g_queue_peek_tail (&pad->priv->data)))) {
+ PAD_UNLOCK (pad);
+ have_event_or_query = TRUE;
+ break;
+ }
+
+ /* Otherwise check if we have a clipped buffer or a buffer at the top of
+ * the queue, and if not then this pad is not ready unless it is also EOS */
+ if (!pad->priv->clipped_buffer
+ && !GST_IS_BUFFER (g_queue_peek_tail (&pad->priv->data))) {
+ /* We must not have any buffers at all in this pad then as otherwise we
+ * would've had an event/query at the top of the queue */
+ g_assert (pad->priv->num_buffers == 0);
+
+ /* Only consider this pad as worth waiting for if it's not already EOS.
+ * There's no point in waiting for buffers on EOS pads */
+ if (!pad->priv->eos)
+ have_buffer = FALSE;
+ } else if (self->priv->peer_latency_live) {
+ /* In live mode, having a single pad with buffers is enough to
+ * generate a start time from it. In non-live mode all pads need
+ * to have a buffer
+ */
+ self->priv->first_buffer = FALSE;
+ }
+
+ PAD_UNLOCK (pad);
+ }
+
+ if (have_event_or_query)
+ goto pad_not_ready_but_event_or_query;
+
+ if (!have_buffer)
+ goto pad_not_ready;
+
+ if (have_buffer)
+ self->priv->first_buffer = FALSE;
+
+ GST_OBJECT_UNLOCK (self);
+ GST_LOG_OBJECT (self, "pads are ready");
+
+ if (have_event_or_query_ret)
+ *have_event_or_query_ret = have_event_or_query;
+
+ return TRUE;
+
+no_sinkpads:
+ {
+ GST_LOG_OBJECT (self, "pads not ready: no sink pads");
+ GST_OBJECT_UNLOCK (self);
+
+ if (have_event_or_query_ret)
+ *have_event_or_query_ret = have_event_or_query;
+
+ return FALSE;
+ }
+pad_not_ready:
+ {
+ GST_LOG_OBJECT (pad, "pad not ready to be aggregated yet");
+ GST_OBJECT_UNLOCK (self);
+
+ if (have_event_or_query_ret)
+ *have_event_or_query_ret = have_event_or_query;
+
+ return FALSE;
+ }
+pad_not_ready_but_event_or_query:
+ {
+ GST_LOG_OBJECT (pad,
+ "pad not ready to be aggregated yet, need to handle serialized event or query first");
+ GST_OBJECT_UNLOCK (self);
+
+ if (have_event_or_query_ret)
+ *have_event_or_query_ret = have_event_or_query;
+
+ return FALSE;
+ }
+}
+
+static void
+gst_aggregator_reset_flow_values (GstAggregator * self)
+{
+ GST_OBJECT_LOCK (self);
+ self->priv->send_stream_start = TRUE;
+ self->priv->send_segment = TRUE;
+ gst_segment_init (&GST_AGGREGATOR_PAD (self->srcpad)->segment,
+ GST_FORMAT_TIME);
+ self->priv->first_buffer = TRUE;
+ GST_OBJECT_UNLOCK (self);
+}
+
+static inline void
+gst_aggregator_push_mandatory_events (GstAggregator * self)
+{
+ GstAggregatorPrivate *priv = self->priv;
+ GstEvent *segment = NULL;
+ GstEvent *tags = NULL;
+
+ if (self->priv->send_stream_start) {
+ gchar s_id[32];
+
+ GST_INFO_OBJECT (self, "pushing stream start");
+ /* stream-start (FIXME: create id based on input ids) */
+ g_snprintf (s_id, sizeof (s_id), "agg-%08x", g_random_int ());
+ if (!gst_pad_push_event (GST_PAD (self->srcpad),
+ gst_event_new_stream_start (s_id))) {
+ GST_WARNING_OBJECT (self->srcpad, "Sending stream start event failed");
+ }
+ self->priv->send_stream_start = FALSE;
+ }
+
+ if (self->priv->srccaps) {
+
+ GST_INFO_OBJECT (self, "pushing caps: %" GST_PTR_FORMAT,
+ self->priv->srccaps);
+ if (!gst_pad_push_event (GST_PAD (self->srcpad),
+ gst_event_new_caps (self->priv->srccaps))) {
+ GST_WARNING_OBJECT (self->srcpad, "Sending caps event failed");
+ }
+ gst_caps_unref (self->priv->srccaps);
+ self->priv->srccaps = NULL;
+ }
+
+ GST_OBJECT_LOCK (self);
+ if (self->priv->send_segment && !self->priv->flushing) {
+ segment =
+ gst_event_new_segment (&GST_AGGREGATOR_PAD (self->srcpad)->segment);
+
+ if (!self->priv->seqnum)
+ /* This code-path is in preparation to be able to run without a source
+ * connected. Then we won't have a seq-num from a segment event. */
+ self->priv->seqnum = gst_event_get_seqnum (segment);
+ else
+ gst_event_set_seqnum (segment, self->priv->seqnum);
+ self->priv->send_segment = FALSE;
+
+ GST_DEBUG_OBJECT (self, "pushing segment %" GST_PTR_FORMAT, segment);
+ }
+
+ if (priv->tags && priv->tags_changed && !self->priv->flushing) {
+ tags = gst_event_new_tag (gst_tag_list_ref (priv->tags));
+ priv->tags_changed = FALSE;
+ }
+ GST_OBJECT_UNLOCK (self);
+
+ if (segment)
+ gst_pad_push_event (self->srcpad, segment);
+ if (tags)
+ gst_pad_push_event (self->srcpad, tags);
+
+}
+
+/**
+ * gst_aggregator_set_src_caps:
+ * @self: The #GstAggregator
+ * @caps: The #GstCaps to set on the src pad.
+ *
+ * Sets the caps to be used on the src pad.
+ */
+void
+gst_aggregator_set_src_caps (GstAggregator * self, GstCaps * caps)
+{
+ GST_PAD_STREAM_LOCK (self->srcpad);
+ gst_caps_replace (&self->priv->srccaps, caps);
+ gst_aggregator_push_mandatory_events (self);
+ GST_PAD_STREAM_UNLOCK (self->srcpad);
+}
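
From the Rust side this entry point is reached through AggregatorExt::set_src_caps in auto/aggregator.rs; a hedged sketch follows, where `agg` and the caps contents are hypothetical.

// Sketch: setting src caps through the generated binding.
fn sketch_set_src_caps(agg: &Aggregator) {
    let caps = gst::Caps::new_simple("video/x-raw", &[("format", &"I420")]);
    agg.set_src_caps(&caps);
}
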
+
+static GstFlowReturn
+gst_aggregator_default_finish_buffer (GstAggregator * self, GstBuffer * buffer)
+{
+ gst_aggregator_push_mandatory_events (self);
+
+ GST_OBJECT_LOCK (self);
+ if (!self->priv->flushing && gst_pad_is_active (self->srcpad)) {
+ GST_TRACE_OBJECT (self, "pushing buffer %" GST_PTR_FORMAT, buffer);
+ GST_OBJECT_UNLOCK (self);
+ return gst_pad_push (self->srcpad, buffer);
+ } else {
+ GST_INFO_OBJECT (self, "Not pushing (active: %i, flushing: %i)",
+ self->priv->flushing, gst_pad_is_active (self->srcpad));
+ GST_OBJECT_UNLOCK (self);
+ gst_buffer_unref (buffer);
+ return GST_FLOW_OK;
+ }
+}
+
+/**
+ * gst_aggregator_finish_buffer:
+ * @aggregator: The #GstAggregator
+ * @buffer: (transfer full): the #GstBuffer to push.
+ *
+ * This method will push the provided output buffer downstream. If needed,
+ * mandatory events such as stream-start, caps, and segment events will be
+ * sent before pushing the buffer.
+ */
+GstFlowReturn
+gst_aggregator_finish_buffer (GstAggregator * aggregator, GstBuffer * buffer)
+{
+ GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (aggregator);
+
+ g_assert (klass->finish_buffer != NULL);
+
+ return klass->finish_buffer (aggregator, buffer);
+}
+
+static void
+gst_aggregator_push_eos (GstAggregator * self)
+{
+ GstEvent *event;
+ gst_aggregator_push_mandatory_events (self);
+
+ event = gst_event_new_eos ();
+
+ GST_OBJECT_LOCK (self);
+ self->priv->send_eos = FALSE;
+ gst_event_set_seqnum (event, self->priv->seqnum);
+ GST_OBJECT_UNLOCK (self);
+
+ gst_pad_push_event (self->srcpad, event);
+}
+
+static GstClockTime
+gst_aggregator_get_next_time (GstAggregator * self)
+{
+ GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (self);
+
+ if (klass->get_next_time)
+ return klass->get_next_time (self);
+
+ return GST_CLOCK_TIME_NONE;
+}
+
+static gboolean
+gst_aggregator_wait_and_check (GstAggregator * self, gboolean * timeout)
+{
+ GstClockTime latency;
+ GstClockTime start;
+ gboolean res;
+ gboolean have_event_or_query = FALSE;
+
+ *timeout = FALSE;
+
+ SRC_LOCK (self);
+
+ latency = gst_aggregator_get_latency_unlocked (self);
+
+ if (gst_aggregator_check_pads_ready (self, &have_event_or_query)) {
+ GST_DEBUG_OBJECT (self, "all pads have data");
+ SRC_UNLOCK (self);
+
+ return TRUE;
+ }
+
+  /* If we have an event or query, return FALSE immediately instead of
+   * waiting, so that it can be handled right away */
+ if (have_event_or_query) {
+ GST_DEBUG_OBJECT (self, "Have serialized event or query to handle first");
+ SRC_UNLOCK (self);
+ return FALSE;
+ }
+
+ /* Before waiting, check if we're actually still running */
+ if (!self->priv->running || !self->priv->send_eos) {
+ SRC_UNLOCK (self);
+
+ return FALSE;
+ }
+
+ start = gst_aggregator_get_next_time (self);
+
+ /* If we're not live, or if we use the running time
+ * of the first buffer as start time, we wait until
+ * all pads have buffers.
+ * Otherwise (i.e. if we are live!), we wait on the clock
+ * and if a pad does not have a buffer in time we ignore
+ * that pad.
+ */
+ GST_OBJECT_LOCK (self);
+ if (!GST_CLOCK_TIME_IS_VALID (latency) ||
+ !GST_IS_CLOCK (GST_ELEMENT_CLOCK (self)) ||
+ !GST_CLOCK_TIME_IS_VALID (start) ||
+ (self->priv->first_buffer
+ && self->priv->start_time_selection ==
+ GST_AGGREGATOR_START_TIME_SELECTION_FIRST)) {
+ /* We wake up here when something happened, and below
+ * then check if we're ready now. If we return FALSE,
+ * we will be directly called again.
+ */
+ GST_OBJECT_UNLOCK (self);
+ SRC_WAIT (self);
+ } else {
+ GstClockTime base_time, time;
+ GstClock *clock;
+ GstClockReturn status;
+ GstClockTimeDiff jitter;
+
+ GST_DEBUG_OBJECT (self, "got subclass start time: %" GST_TIME_FORMAT,
+ GST_TIME_ARGS (start));
+
+ base_time = GST_ELEMENT_CAST (self)->base_time;
+ clock = gst_object_ref (GST_ELEMENT_CLOCK (self));
+ GST_OBJECT_UNLOCK (self);
+
+ time = base_time + start;
+ time += latency;
+
+ GST_DEBUG_OBJECT (self, "possibly waiting for clock to reach %"
+ GST_TIME_FORMAT " (base %" GST_TIME_FORMAT " start %" GST_TIME_FORMAT
+ " latency %" GST_TIME_FORMAT " current %" GST_TIME_FORMAT ")",
+ GST_TIME_ARGS (time),
+ GST_TIME_ARGS (base_time),
+ GST_TIME_ARGS (start), GST_TIME_ARGS (latency),
+ GST_TIME_ARGS (gst_clock_get_time (clock)));
+
+ self->priv->aggregate_id = gst_clock_new_single_shot_id (clock, time);
+ gst_object_unref (clock);
+ SRC_UNLOCK (self);
+
+ jitter = 0;
+ status = gst_clock_id_wait (self->priv->aggregate_id, &jitter);
+
+ SRC_LOCK (self);
+ if (self->priv->aggregate_id) {
+ gst_clock_id_unref (self->priv->aggregate_id);
+ self->priv->aggregate_id = NULL;
+ }
+
+ GST_DEBUG_OBJECT (self,
+ "clock returned %d (jitter: %" GST_STIME_FORMAT ")",
+ status, GST_STIME_ARGS (jitter));
+
+ /* we timed out */
+ if (status == GST_CLOCK_OK || status == GST_CLOCK_EARLY) {
+ SRC_UNLOCK (self);
+ *timeout = TRUE;
+ return TRUE;
+ }
+ }
+
+ res = gst_aggregator_check_pads_ready (self, &have_event_or_query);
+ SRC_UNLOCK (self);
+
+ return res;
+}
+
+typedef struct
+{
+ gboolean processed_event;
+ GstFlowReturn flow_ret;
+} DoHandleEventsAndQueriesData;
+
+static gboolean
+gst_aggregator_do_events_and_queries (GstElement * self, GstPad * epad,
+ gpointer user_data)
+{
+ GstAggregatorPad *pad = GST_AGGREGATOR_PAD_CAST (epad);
+ GstAggregator *aggregator = GST_AGGREGATOR_CAST (self);
+ GstEvent *event = NULL;
+ GstQuery *query = NULL;
+ GstAggregatorClass *klass = NULL;
+ DoHandleEventsAndQueriesData *data = user_data;
+
+ do {
+ event = NULL;
+ query = NULL;
+
+ PAD_LOCK (pad);
+ if (pad->priv->clipped_buffer == NULL &&
+ !GST_IS_BUFFER (g_queue_peek_tail (&pad->priv->data))) {
+ if (GST_IS_EVENT (g_queue_peek_tail (&pad->priv->data)))
+ event = gst_event_ref (g_queue_peek_tail (&pad->priv->data));
+ if (GST_IS_QUERY (g_queue_peek_tail (&pad->priv->data)))
+ query = g_queue_peek_tail (&pad->priv->data);
+ }
+ PAD_UNLOCK (pad);
+ if (event || query) {
+ gboolean ret;
+
+ data->processed_event = TRUE;
+ if (klass == NULL)
+ klass = GST_AGGREGATOR_GET_CLASS (self);
+
+ if (event) {
+ GST_LOG_OBJECT (pad, "Processing %" GST_PTR_FORMAT, event);
+ gst_event_ref (event);
+ ret = klass->sink_event (aggregator, pad, event);
+
+ PAD_LOCK (pad);
+ if (GST_EVENT_TYPE (event) == GST_EVENT_CAPS) {
+ pad->priv->negotiated = ret;
+ if (!ret)
+ pad->priv->flow_return = data->flow_ret = GST_FLOW_NOT_NEGOTIATED;
+ }
+ if (g_queue_peek_tail (&pad->priv->data) == event)
+ gst_event_unref (g_queue_pop_tail (&pad->priv->data));
+ gst_event_unref (event);
+ } else if (query) {
+ GST_LOG_OBJECT (pad, "Processing %" GST_PTR_FORMAT, query);
+ ret = klass->sink_query (aggregator, pad, query);
+
+ PAD_LOCK (pad);
+ if (g_queue_peek_tail (&pad->priv->data) == query) {
+ GstStructure *s;
+
+ s = gst_query_writable_structure (query);
+ gst_structure_set (s, "gst-aggregator-retval", G_TYPE_BOOLEAN, ret,
+ NULL);
+ g_queue_pop_tail (&pad->priv->data);
+ }
+ }
+
+ PAD_BROADCAST_EVENT (pad);
+ PAD_UNLOCK (pad);
+ }
+ } while (event || query);
+
+ return TRUE;
+}
+
+static gboolean
+gst_aggregator_pad_skip_buffers (GstElement * self, GstPad * epad,
+ gpointer user_data)
+{
+ GList *item;
+ GstAggregatorPad *aggpad = (GstAggregatorPad *) epad;
+ GstAggregator *agg = (GstAggregator *) self;
+ GstAggregatorPadClass *klass = GST_AGGREGATOR_PAD_GET_CLASS (aggpad);
+
+ if (!klass->skip_buffer)
+ return FALSE;
+
+ PAD_LOCK (aggpad);
+
+ item = g_queue_peek_head_link (&aggpad->priv->data);
+ while (item) {
+ GList *next = item->next;
+
+ if (GST_IS_BUFFER (item->data)
+ && klass->skip_buffer (aggpad, agg, item->data)) {
+ GST_LOG_OBJECT (aggpad, "Skipping %" GST_PTR_FORMAT, item->data);
+ gst_aggregator_pad_buffer_consumed (aggpad, GST_BUFFER (item->data));
+ gst_buffer_unref (item->data);
+ g_queue_delete_link (&aggpad->priv->data, item);
+ } else {
+ break;
+ }
+
+ item = next;
+ }
+
+ PAD_UNLOCK (aggpad);
+
+ return TRUE;
+}
+
+static void
+gst_aggregator_pad_set_flushing (GstAggregatorPad * aggpad,
+ GstFlowReturn flow_return, gboolean full)
+{
+ GList *item;
+
+ PAD_LOCK (aggpad);
+ if (flow_return == GST_FLOW_NOT_LINKED)
+ aggpad->priv->flow_return = MIN (flow_return, aggpad->priv->flow_return);
+ else
+ aggpad->priv->flow_return = flow_return;
+
+ item = g_queue_peek_head_link (&aggpad->priv->data);
+ while (item) {
+ GList *next = item->next;
+
+    /* In a partial flush, we behave like a regular pad: we get rid of
+     * non-sticky events and EOS/SEGMENT.
+     */
+ if (full || GST_IS_BUFFER (item->data) ||
+ GST_EVENT_TYPE (item->data) == GST_EVENT_EOS ||
+ GST_EVENT_TYPE (item->data) == GST_EVENT_SEGMENT ||
+ !GST_EVENT_IS_STICKY (item->data)) {
+ if (!GST_IS_QUERY (item->data))
+ gst_mini_object_unref (item->data);
+ g_queue_delete_link (&aggpad->priv->data, item);
+ }
+ item = next;
+ }
+ aggpad->priv->num_buffers = 0;
+ gst_buffer_replace (&aggpad->priv->clipped_buffer, NULL);
+
+ PAD_BROADCAST_EVENT (aggpad);
+ PAD_UNLOCK (aggpad);
+}
+
+static GstFlowReturn
+gst_aggregator_default_update_src_caps (GstAggregator * agg, GstCaps * caps,
+ GstCaps ** ret)
+{
+ *ret = gst_caps_ref (caps);
+
+ return GST_FLOW_OK;
+}
+
+static GstCaps *
+gst_aggregator_default_fixate_src_caps (GstAggregator * agg, GstCaps * caps)
+{
+ caps = gst_caps_fixate (caps);
+
+ return caps;
+}
+
+static gboolean
+gst_aggregator_default_negotiated_src_caps (GstAggregator * agg, GstCaps * caps)
+{
+ return TRUE;
+}
+
+
+/* takes ownership of the pool, allocator and query */
+static gboolean
+gst_aggregator_set_allocation (GstAggregator * self,
+ GstBufferPool * pool, GstAllocator * allocator,
+ GstAllocationParams * params, GstQuery * query)
+{
+ GstAllocator *oldalloc;
+ GstBufferPool *oldpool;
+ GstQuery *oldquery;
+
+ GST_DEBUG ("storing allocation query");
+
+ GST_OBJECT_LOCK (self);
+ oldpool = self->priv->pool;
+ self->priv->pool = pool;
+
+ oldalloc = self->priv->allocator;
+ self->priv->allocator = allocator;
+
+ oldquery = self->priv->allocation_query;
+ self->priv->allocation_query = query;
+
+ if (params)
+ self->priv->allocation_params = *params;
+ else
+ gst_allocation_params_init (&self->priv->allocation_params);
+ GST_OBJECT_UNLOCK (self);
+
+ if (oldpool) {
+ GST_DEBUG_OBJECT (self, "deactivating old pool %p", oldpool);
+ gst_buffer_pool_set_active (oldpool, FALSE);
+ gst_object_unref (oldpool);
+ }
+ if (oldalloc) {
+ gst_object_unref (oldalloc);
+ }
+ if (oldquery) {
+ gst_query_unref (oldquery);
+ }
+ return TRUE;
+}
+
+
+static gboolean
+gst_aggregator_decide_allocation (GstAggregator * self, GstQuery * query)
+{
+ GstAggregatorClass *aggclass = GST_AGGREGATOR_GET_CLASS (self);
+
+ if (aggclass->decide_allocation)
+ if (!aggclass->decide_allocation (self, query))
+ return FALSE;
+
+ return TRUE;
+}
+
+static gboolean
+gst_aggregator_do_allocation (GstAggregator * self, GstCaps * caps)
+{
+ GstQuery *query;
+ gboolean result = TRUE;
+ GstBufferPool *pool = NULL;
+ GstAllocator *allocator;
+ GstAllocationParams params;
+
+ /* find a pool for the negotiated caps now */
+ GST_DEBUG_OBJECT (self, "doing allocation query");
+ query = gst_query_new_allocation (caps, TRUE);
+ if (!gst_pad_peer_query (self->srcpad, query)) {
+ /* not a problem, just debug a little */
+ GST_DEBUG_OBJECT (self, "peer ALLOCATION query failed");
+ }
+
+ GST_DEBUG_OBJECT (self, "calling decide_allocation");
+ result = gst_aggregator_decide_allocation (self, query);
+
+ GST_DEBUG_OBJECT (self, "ALLOCATION (%d) params: %" GST_PTR_FORMAT, result,
+ query);
+
+ if (!result)
+ goto no_decide_allocation;
+
+ /* we got configuration from our peer or the decide_allocation method,
+ * parse them */
+ if (gst_query_get_n_allocation_params (query) > 0) {
+ gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
+ } else {
+ allocator = NULL;
+ gst_allocation_params_init (&params);
+ }
+
+ if (gst_query_get_n_allocation_pools (query) > 0)
+ gst_query_parse_nth_allocation_pool (query, 0, &pool, NULL, NULL, NULL);
+
+ /* now store */
+ result =
+ gst_aggregator_set_allocation (self, pool, allocator, &params, query);
+
+ return result;
+
+ /* Errors */
+no_decide_allocation:
+ {
+ GST_WARNING_OBJECT (self, "Failed to decide allocation");
+ gst_query_unref (query);
+
+ return result;
+ }
+
+}
+
+static gboolean
+gst_aggregator_default_negotiate (GstAggregator * self)
+{
+ GstAggregatorClass *agg_klass = GST_AGGREGATOR_GET_CLASS (self);
+ GstCaps *downstream_caps, *template_caps, *caps = NULL;
+ GstFlowReturn ret = GST_FLOW_OK;
+
+ template_caps = gst_pad_get_pad_template_caps (self->srcpad);
+ downstream_caps = gst_pad_peer_query_caps (self->srcpad, template_caps);
+
+ if (gst_caps_is_empty (downstream_caps)) {
+ GST_INFO_OBJECT (self, "Downstream caps (%"
+ GST_PTR_FORMAT ") not compatible with pad template caps (%"
+ GST_PTR_FORMAT ")", downstream_caps, template_caps);
+ ret = GST_FLOW_NOT_NEGOTIATED;
+ goto done;
+ }
+
+ g_assert (agg_klass->update_src_caps);
+ GST_DEBUG_OBJECT (self, "updating caps from %" GST_PTR_FORMAT,
+ downstream_caps);
+ ret = agg_klass->update_src_caps (self, downstream_caps, &caps);
+ if (ret < GST_FLOW_OK) {
+ GST_WARNING_OBJECT (self, "Subclass failed to update provided caps");
+ goto done;
+ } else if (ret == GST_AGGREGATOR_FLOW_NEED_DATA) {
+ GST_DEBUG_OBJECT (self, "Subclass needs more data to decide on caps");
+ goto done;
+ }
+ if ((caps == NULL || gst_caps_is_empty (caps)) && ret >= GST_FLOW_OK) {
+ ret = GST_FLOW_NOT_NEGOTIATED;
+ goto done;
+ }
+ GST_DEBUG_OBJECT (self, " to %" GST_PTR_FORMAT, caps);
+
+#ifdef GST_ENABLE_EXTRA_CHECKS
+ if (!gst_caps_is_subset (caps, template_caps)) {
+ GstCaps *intersection;
+
+ GST_ERROR_OBJECT (self,
+ "update_src_caps returned caps %" GST_PTR_FORMAT
+ " which are not a real subset of the template caps %"
+ GST_PTR_FORMAT, caps, template_caps);
+ g_warning ("%s: update_src_caps returned caps which are not a real "
+ "subset of the filter caps", GST_ELEMENT_NAME (self));
+
+ intersection =
+ gst_caps_intersect_full (template_caps, caps, GST_CAPS_INTERSECT_FIRST);
+ gst_caps_unref (caps);
+ caps = intersection;
+ }
+#endif
+
+ if (gst_caps_is_any (caps)) {
+ goto done;
+ }
+
+ if (!gst_caps_is_fixed (caps)) {
+ g_assert (agg_klass->fixate_src_caps);
+
+ GST_DEBUG_OBJECT (self, "fixate caps from %" GST_PTR_FORMAT, caps);
+ if (!(caps = agg_klass->fixate_src_caps (self, caps))) {
+ GST_WARNING_OBJECT (self, "Subclass failed to fixate provided caps");
+ ret = GST_FLOW_NOT_NEGOTIATED;
+ goto done;
+ }
+ GST_DEBUG_OBJECT (self, " to %" GST_PTR_FORMAT, caps);
+ }
+
+ if (agg_klass->negotiated_src_caps) {
+ if (!agg_klass->negotiated_src_caps (self, caps)) {
+ GST_WARNING_OBJECT (self, "Subclass failed to accept negotiated caps");
+ ret = GST_FLOW_NOT_NEGOTIATED;
+ goto done;
+ }
+ }
+
+ gst_aggregator_set_src_caps (self, caps);
+
+ if (!gst_aggregator_do_allocation (self, caps)) {
+ GST_WARNING_OBJECT (self, "Allocation negotiation failed");
+ ret = GST_FLOW_NOT_NEGOTIATED;
+ }
+
+done:
+ gst_caps_unref (downstream_caps);
+ gst_caps_unref (template_caps);
+
+ if (caps)
+ gst_caps_unref (caps);
+
+ return ret >= GST_FLOW_OK || ret == GST_AGGREGATOR_FLOW_NEED_DATA;
+}
+
+/* WITH SRC_LOCK held */
+static gboolean
+gst_aggregator_negotiate_unlocked (GstAggregator * self)
+{
+ GstAggregatorClass *agg_klass = GST_AGGREGATOR_GET_CLASS (self);
+
+ if (agg_klass->negotiate)
+ return agg_klass->negotiate (self);
+
+ return TRUE;
+}
+
+/**
+ * gst_aggregator_negotiate:
+ * @self: a #GstAggregator
+ *
+ * Negotiates src pad caps with downstream elements.
+ * Unmarks GST_PAD_FLAG_NEED_RECONFIGURE in any case. But marks it again
+ * if #GstAggregatorClass.negotiate() fails.
+ *
+ * Returns: %TRUE if the negotiation succeeded, else %FALSE.
+ *
+ * Since: 1.18
+ */
+gboolean
+gst_aggregator_negotiate (GstAggregator * self)
+{
+ gboolean ret = TRUE;
+
+ g_return_val_if_fail (GST_IS_AGGREGATOR (self), FALSE);
+
+ GST_PAD_STREAM_LOCK (GST_AGGREGATOR_SRC_PAD (self));
+ gst_pad_check_reconfigure (GST_AGGREGATOR_SRC_PAD (self));
+ ret = gst_aggregator_negotiate_unlocked (self);
+ if (!ret)
+ gst_pad_mark_reconfigure (GST_AGGREGATOR_SRC_PAD (self));
+ GST_PAD_STREAM_UNLOCK (GST_AGGREGATOR_SRC_PAD (self));
+
+ return ret;
+}
+
+static void
+gst_aggregator_aggregate_func (GstAggregator * self)
+{
+ GstAggregatorPrivate *priv = self->priv;
+ GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (self);
+ gboolean timeout = FALSE;
+
+ if (self->priv->running == FALSE) {
+ GST_DEBUG_OBJECT (self, "Not running anymore");
+ return;
+ }
+
+ GST_LOG_OBJECT (self, "Checking aggregate");
+ while (priv->send_eos && priv->running) {
+ GstFlowReturn flow_return = GST_FLOW_OK;
+ DoHandleEventsAndQueriesData events_query_data = { FALSE, GST_FLOW_OK };
+
+ gst_element_foreach_sink_pad (GST_ELEMENT_CAST (self),
+ gst_aggregator_do_events_and_queries, &events_query_data);
+
+ if ((flow_return = events_query_data.flow_ret) != GST_FLOW_OK)
+ goto handle_error;
+
+ if (self->priv->peer_latency_live)
+ gst_element_foreach_sink_pad (GST_ELEMENT_CAST (self),
+ gst_aggregator_pad_skip_buffers, NULL);
+
+    /* Ensure we have buffers ready (either in clipped_buffer or at the head of
+     * the queue) */
+ if (!gst_aggregator_wait_and_check (self, &timeout))
+ continue;
+
+ if (gst_pad_check_reconfigure (GST_AGGREGATOR_SRC_PAD (self))) {
+ if (!gst_aggregator_negotiate_unlocked (self)) {
+ gst_pad_mark_reconfigure (GST_AGGREGATOR_SRC_PAD (self));
+ if (GST_PAD_IS_FLUSHING (GST_AGGREGATOR_SRC_PAD (self))) {
+ flow_return = GST_FLOW_FLUSHING;
+ } else {
+ flow_return = GST_FLOW_NOT_NEGOTIATED;
+ }
+ }
+ }
+
+ if (timeout || flow_return >= GST_FLOW_OK) {
+ GST_TRACE_OBJECT (self, "Actually aggregating!");
+ flow_return = klass->aggregate (self, timeout);
+ }
+
+ if (flow_return == GST_AGGREGATOR_FLOW_NEED_DATA)
+ continue;
+
+ GST_OBJECT_LOCK (self);
+ if (flow_return == GST_FLOW_FLUSHING && priv->flushing) {
+ /* We don't want to set the pads to flushing, but we want to
+ * stop the thread, so just break here */
+ GST_OBJECT_UNLOCK (self);
+ break;
+ }
+ GST_OBJECT_UNLOCK (self);
+
+ if (flow_return == GST_FLOW_EOS || flow_return == GST_FLOW_ERROR) {
+ gst_aggregator_push_eos (self);
+ }
+
+ handle_error:
+ GST_LOG_OBJECT (self, "flow return is %s", gst_flow_get_name (flow_return));
+
+ if (flow_return != GST_FLOW_OK) {
+ GList *item;
+
+ GST_OBJECT_LOCK (self);
+ for (item = GST_ELEMENT (self)->sinkpads; item; item = item->next) {
+ GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (item->data);
+
+ gst_aggregator_pad_set_flushing (aggpad, flow_return, TRUE);
+ }
+ GST_OBJECT_UNLOCK (self);
+ break;
+ }
+ }
+
+ /* Pause the task here, the only ways to get here are:
+ * 1) We're stopping, in which case the task is stopped anyway
+ * 2) We got a flow error above, in which case it might take
+ * some time to forward the flow return upstream and we
+ * would otherwise call the task function over and over
+ * again without doing anything
+ */
+ gst_pad_pause_task (self->srcpad);
+}
+
+static gboolean
+gst_aggregator_start (GstAggregator * self)
+{
+ GstAggregatorClass *klass;
+ gboolean result;
+
+ self->priv->send_stream_start = TRUE;
+ self->priv->send_segment = TRUE;
+ self->priv->send_eos = TRUE;
+ self->priv->srccaps = NULL;
+
+ gst_aggregator_set_allocation (self, NULL, NULL, NULL, NULL);
+
+ klass = GST_AGGREGATOR_GET_CLASS (self);
+
+ if (klass->start)
+ result = klass->start (self);
+ else
+ result = TRUE;
+
+ return result;
+}
+
+static gboolean
+gst_aggregator_stop_srcpad_task (GstAggregator * self, GstEvent * flush_start)
+{
+ gboolean res = TRUE;
+
+ GST_INFO_OBJECT (self, "%s srcpad task",
+ flush_start ? "Pausing" : "Stopping");
+
+ SRC_LOCK (self);
+ self->priv->running = FALSE;
+ SRC_BROADCAST (self);
+ SRC_UNLOCK (self);
+
+ if (flush_start) {
+ res = gst_pad_push_event (self->srcpad, flush_start);
+ }
+
+ gst_pad_stop_task (self->srcpad);
+
+ return res;
+}
+
+static void
+gst_aggregator_start_srcpad_task (GstAggregator * self)
+{
+ GST_INFO_OBJECT (self, "Starting srcpad task");
+
+ self->priv->running = TRUE;
+ gst_pad_start_task (GST_PAD (self->srcpad),
+ (GstTaskFunction) gst_aggregator_aggregate_func, self, NULL);
+}
+
+static GstFlowReturn
+gst_aggregator_flush (GstAggregator * self)
+{
+ GstFlowReturn ret = GST_FLOW_OK;
+ GstAggregatorPrivate *priv = self->priv;
+ GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (self);
+
+ GST_DEBUG_OBJECT (self, "Flushing everything");
+ GST_OBJECT_LOCK (self);
+ priv->send_segment = TRUE;
+ priv->flushing = FALSE;
+ priv->tags_changed = FALSE;
+ GST_OBJECT_UNLOCK (self);
+ if (klass->flush)
+ ret = klass->flush (self);
+
+ return ret;
+}
+
+
+/* Called with GstAggregator's object lock held */
+
+static gboolean
+gst_aggregator_all_flush_stop_received (GstAggregator * self, guint32 seqnum)
+{
+ GList *tmp;
+ GstAggregatorPad *tmppad;
+
+ for (tmp = GST_ELEMENT (self)->sinkpads; tmp; tmp = tmp->next) {
+ tmppad = (GstAggregatorPad *) tmp->data;
+
+ if (tmppad->priv->last_flush_stop_seqnum != seqnum)
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+/* Called with GstAggregator's object lock held */
+
+static gboolean
+gst_aggregator_all_flush_start_received (GstAggregator * self, guint32 seqnum)
+{
+ GList *tmp;
+ GstAggregatorPad *tmppad;
+
+ for (tmp = GST_ELEMENT (self)->sinkpads; tmp; tmp = tmp->next) {
+ tmppad = (GstAggregatorPad *) tmp->data;
+
+ if (tmppad->priv->last_flush_start_seqnum != seqnum) {
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
+
+static void
+gst_aggregator_flush_start (GstAggregator * self, GstAggregatorPad * aggpad,
+ GstEvent * event)
+{
+ GstAggregatorPrivate *priv = self->priv;
+ GstAggregatorPadPrivate *padpriv = aggpad->priv;
+ guint32 seqnum = gst_event_get_seqnum (event);
+
+ gst_aggregator_pad_set_flushing (aggpad, GST_FLOW_FLUSHING, FALSE);
+
+ PAD_FLUSH_LOCK (aggpad);
+ PAD_LOCK (aggpad);
+ padpriv->last_flush_start_seqnum = seqnum;
+ PAD_UNLOCK (aggpad);
+
+ GST_OBJECT_LOCK (self);
+
+ if (!priv->flushing && gst_aggregator_all_flush_start_received (self, seqnum)) {
+ /* Make sure we don't forward more than one FLUSH_START */
+ priv->flushing = TRUE;
+ priv->next_seqnum = seqnum;
+ GST_OBJECT_UNLOCK (self);
+
+ GST_INFO_OBJECT (self, "Flushing, pausing srcpad task");
+ gst_aggregator_stop_srcpad_task (self, event);
+
+ event = NULL;
+ } else {
+ gst_event_unref (event);
+ GST_OBJECT_UNLOCK (self);
+ }
+
+ PAD_FLUSH_UNLOCK (aggpad);
+}
+
+/* Must be called with the PAD_LOCK held */
+static void
+update_time_level (GstAggregatorPad * aggpad, gboolean head)
+{
+ GstAggregatorPadPrivate *priv = aggpad->priv;
+
+ if (head) {
+ if (GST_CLOCK_TIME_IS_VALID (priv->head_position) &&
+ priv->head_segment.format == GST_FORMAT_TIME)
+ priv->head_time = gst_segment_to_running_time (&priv->head_segment,
+ GST_FORMAT_TIME, priv->head_position);
+ else
+ priv->head_time = GST_CLOCK_TIME_NONE;
+
+ if (!GST_CLOCK_TIME_IS_VALID (priv->tail_time))
+ priv->tail_time = priv->head_time;
+ } else {
+ if (GST_CLOCK_TIME_IS_VALID (priv->tail_position) &&
+ aggpad->segment.format == GST_FORMAT_TIME)
+ priv->tail_time = gst_segment_to_running_time (&aggpad->segment,
+ GST_FORMAT_TIME, priv->tail_position);
+ else
+ priv->tail_time = priv->head_time;
+ }
+
+ if (priv->head_time == GST_CLOCK_TIME_NONE ||
+ priv->tail_time == GST_CLOCK_TIME_NONE) {
+ priv->time_level = 0;
+ return;
+ }
+
+ if (priv->tail_time > priv->head_time)
+ priv->time_level = 0;
+ else
+ priv->time_level = priv->head_time - priv->tail_time;
+}
+
+
+/* GstAggregator vmethods default implementations */
+static gboolean
+gst_aggregator_default_sink_event (GstAggregator * self,
+ GstAggregatorPad * aggpad, GstEvent * event)
+{
+ gboolean res = TRUE;
+ GstPad *pad = GST_PAD (aggpad);
+ GstAggregatorPrivate *priv = self->priv;
+
+ GST_DEBUG_OBJECT (aggpad, "Got event: %" GST_PTR_FORMAT, event);
+
+ switch (GST_EVENT_TYPE (event)) {
+ case GST_EVENT_FLUSH_START:
+ {
+ gst_aggregator_flush_start (self, aggpad, event);
+ /* We forward only in one case: right after flushing */
+ event = NULL;
+ goto eat;
+ }
+ case GST_EVENT_FLUSH_STOP:
+ {
+ guint32 seqnum = gst_event_get_seqnum (event);
+
+ PAD_FLUSH_LOCK (aggpad);
+ PAD_LOCK (aggpad);
+ aggpad->priv->last_flush_stop_seqnum = seqnum;
+ PAD_UNLOCK (aggpad);
+
+ gst_aggregator_pad_flush (aggpad, self);
+
+ GST_OBJECT_LOCK (self);
+ if (priv->flushing
+ && gst_aggregator_all_flush_stop_received (self, seqnum)) {
+ GST_OBJECT_UNLOCK (self);
+ /* That means we received FLUSH_STOP on all sinkpads --
+ * seeking is done, sending FLUSH_STOP downstream */
+ gst_aggregator_flush (self);
+ gst_pad_push_event (self->srcpad, event);
+ event = NULL;
+ SRC_LOCK (self);
+ priv->send_eos = TRUE;
+ SRC_BROADCAST (self);
+ SRC_UNLOCK (self);
+
+ GST_INFO_OBJECT (self, "Flush stopped");
+
+ gst_aggregator_start_srcpad_task (self);
+ } else {
+ GST_OBJECT_UNLOCK (self);
+ }
+
+ PAD_FLUSH_UNLOCK (aggpad);
+
+ /* We never forward the event */
+ goto eat;
+ }
+ case GST_EVENT_EOS:
+ {
+ SRC_LOCK (self);
+ PAD_LOCK (aggpad);
+ g_assert (aggpad->priv->num_buffers == 0);
+ aggpad->priv->eos = TRUE;
+ PAD_UNLOCK (aggpad);
+ SRC_BROADCAST (self);
+ SRC_UNLOCK (self);
+ goto eat;
+ }
+ case GST_EVENT_SEGMENT:
+ {
+ PAD_LOCK (aggpad);
+ GST_OBJECT_LOCK (aggpad);
+ gst_event_copy_segment (event, &aggpad->segment);
+ /* We've got a new segment, tail_position is now meaningless
+ * and may interfere with the time_level calculation
+ */
+ aggpad->priv->tail_position = GST_CLOCK_TIME_NONE;
+ update_time_level (aggpad, FALSE);
+ GST_OBJECT_UNLOCK (aggpad);
+ PAD_UNLOCK (aggpad);
+
+ GST_OBJECT_LOCK (self);
+ self->priv->seqnum = gst_event_get_seqnum (event);
+ GST_OBJECT_UNLOCK (self);
+ goto eat;
+ }
+ case GST_EVENT_STREAM_START:
+ {
+ goto eat;
+ }
+ case GST_EVENT_GAP:
+ {
+ GstClockTime pts, endpts;
+ GstClockTime duration;
+ GstBuffer *gapbuf;
+
+ gst_event_parse_gap (event, &pts, &duration);
+
+ if (GST_CLOCK_TIME_IS_VALID (duration))
+ endpts = pts + duration;
+ else
+ endpts = GST_CLOCK_TIME_NONE;
+
+ GST_OBJECT_LOCK (aggpad);
+ res = gst_segment_clip (&aggpad->segment, GST_FORMAT_TIME, pts, endpts,
+ &pts, &endpts);
+ GST_OBJECT_UNLOCK (aggpad);
+
+ if (!res) {
+ GST_WARNING_OBJECT (self, "GAP event outside segment, dropping");
+ goto eat;
+ }
+
+ if (GST_CLOCK_TIME_IS_VALID (endpts) && GST_CLOCK_TIME_IS_VALID (pts))
+ duration = endpts - pts;
+ else
+ duration = GST_CLOCK_TIME_NONE;
+
+ gapbuf = gst_buffer_new ();
+ GST_BUFFER_PTS (gapbuf) = pts;
+ GST_BUFFER_DURATION (gapbuf) = duration;
+ GST_BUFFER_FLAG_SET (gapbuf, GST_BUFFER_FLAG_GAP);
+ GST_BUFFER_FLAG_SET (gapbuf, GST_BUFFER_FLAG_DROPPABLE);
+
+ /* Remove GAP event so we can replace it with the buffer */
+ PAD_LOCK (aggpad);
+ if (g_queue_peek_tail (&aggpad->priv->data) == event)
+ gst_event_unref (g_queue_pop_tail (&aggpad->priv->data));
+ PAD_UNLOCK (aggpad);
+
+ if (gst_aggregator_pad_chain_internal (self, aggpad, gapbuf, FALSE) !=
+ GST_FLOW_OK) {
+ GST_WARNING_OBJECT (self, "Failed to chain gap buffer");
+ res = FALSE;
+ }
+
+ goto eat;
+ }
+ case GST_EVENT_TAG:
+ goto eat;
+ default:
+ {
+ break;
+ }
+ }
+
+ GST_DEBUG_OBJECT (pad, "Forwarding event: %" GST_PTR_FORMAT, event);
+ return gst_pad_event_default (pad, GST_OBJECT (self), event);
+
+eat:
+ GST_DEBUG_OBJECT (pad, "Eating event: %" GST_PTR_FORMAT, event);
+ if (event)
+ gst_event_unref (event);
+
+ return res;
+}
+
+/* Queue serialized events and let the others go through directly.
+ * The queued events will be handled from the src-pad task in
+ * gst_aggregator_do_events_and_queries().
+ */
+static GstFlowReturn
+gst_aggregator_default_sink_event_pre_queue (GstAggregator * self,
+ GstAggregatorPad * aggpad, GstEvent * event)
+{
+ GstFlowReturn ret = GST_FLOW_OK;
+
+ if (GST_EVENT_IS_SERIALIZED (event)
+ && GST_EVENT_TYPE (event) != GST_EVENT_FLUSH_STOP) {
+ SRC_LOCK (self);
+ PAD_LOCK (aggpad);
+
+ if (aggpad->priv->flow_return != GST_FLOW_OK)
+ goto flushing;
+
+ if (GST_EVENT_TYPE (event) == GST_EVENT_SEGMENT) {
+ GST_OBJECT_LOCK (aggpad);
+ gst_event_copy_segment (event, &aggpad->priv->head_segment);
+ aggpad->priv->head_position = aggpad->priv->head_segment.position;
+ update_time_level (aggpad, TRUE);
+ GST_OBJECT_UNLOCK (aggpad);
+ }
+
+ GST_DEBUG_OBJECT (aggpad, "Store event in queue: %" GST_PTR_FORMAT, event);
+ g_queue_push_head (&aggpad->priv->data, event);
+ SRC_BROADCAST (self);
+ PAD_UNLOCK (aggpad);
+ SRC_UNLOCK (self);
+ } else {
+ GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (self);
+
+ if (!klass->sink_event (self, aggpad, event)) {
+ /* Copied from GstPad to convert boolean to a GstFlowReturn in
+ * the event handling func */
+ ret = GST_FLOW_ERROR;
+ }
+ }
+
+ return ret;
+
+flushing:
+ GST_DEBUG_OBJECT (aggpad, "Pad is %s, dropping event",
+ gst_flow_get_name (aggpad->priv->flow_return));
+ PAD_UNLOCK (aggpad);
+ SRC_UNLOCK (self);
+ if (GST_EVENT_IS_STICKY (event))
+ gst_pad_store_sticky_event (GST_PAD (aggpad), event);
+ gst_event_unref (event);
+
+ return aggpad->priv->flow_return;
+}
+
+static gboolean
+gst_aggregator_stop_pad (GstElement * self, GstPad * epad, gpointer user_data)
+{
+ GstAggregatorPad *pad = GST_AGGREGATOR_PAD_CAST (epad);
+ GstAggregator *agg = GST_AGGREGATOR_CAST (self);
+
+ gst_aggregator_pad_flush (pad, agg);
+
+ PAD_LOCK (pad);
+ pad->priv->flow_return = GST_FLOW_FLUSHING;
+ pad->priv->negotiated = FALSE;
+ PAD_BROADCAST_EVENT (pad);
+ PAD_UNLOCK (pad);
+
+ return TRUE;
+}
+
+static gboolean
+gst_aggregator_stop (GstAggregator * agg)
+{
+ GstAggregatorClass *klass;
+ gboolean result;
+
+ gst_aggregator_reset_flow_values (agg);
+
+ /* Application needs to make sure no pads are added while it shuts us down */
+ gst_element_foreach_sink_pad (GST_ELEMENT_CAST (agg),
+ gst_aggregator_stop_pad, NULL);
+
+ klass = GST_AGGREGATOR_GET_CLASS (agg);
+
+ if (klass->stop)
+ result = klass->stop (agg);
+ else
+ result = TRUE;
+
+ agg->priv->has_peer_latency = FALSE;
+ agg->priv->peer_latency_live = FALSE;
+ agg->priv->peer_latency_min = agg->priv->peer_latency_max = 0;
+
+ if (agg->priv->tags)
+ gst_tag_list_unref (agg->priv->tags);
+ agg->priv->tags = NULL;
+
+ gst_aggregator_set_allocation (agg, NULL, NULL, NULL, NULL);
+
+ if (agg->priv->running) {
+ /* As sinkpads get deactivated after the src pad, we
+ * may have restarted the source pad task after receiving
+ * flush events on one of our sinkpads. Stop our src pad
+ * task again if that is the case */
+ gst_aggregator_stop_srcpad_task (agg, NULL);
+ }
+
+ return result;
+}
+
+/* GstElement vmethods implementations */
+static GstStateChangeReturn
+gst_aggregator_change_state (GstElement * element, GstStateChange transition)
+{
+ GstStateChangeReturn ret;
+ GstAggregator *self = GST_AGGREGATOR (element);
+
+ switch (transition) {
+ case GST_STATE_CHANGE_READY_TO_PAUSED:
+ if (!gst_aggregator_start (self))
+ goto error_start;
+ break;
+ default:
+ break;
+ }
+
+ if ((ret =
+ GST_ELEMENT_CLASS (aggregator_parent_class)->change_state (element,
+ transition)) == GST_STATE_CHANGE_FAILURE)
+ goto failure;
+
+
+ switch (transition) {
+ case GST_STATE_CHANGE_PAUSED_TO_READY:
+ if (!gst_aggregator_stop (self)) {
+ /* What to do in this case? Error out? */
+ GST_ERROR_OBJECT (self, "Subclass failed to stop.");
+ }
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+
+/* ERRORS */
+failure:
+ {
+ GST_ERROR_OBJECT (element, "parent failed state change");
+ return ret;
+ }
+error_start:
+ {
+ GST_ERROR_OBJECT (element, "Subclass failed to start");
+ return GST_STATE_CHANGE_FAILURE;
+ }
+}
+
+static void
+gst_aggregator_release_pad (GstElement * element, GstPad * pad)
+{
+ GstAggregator *self = GST_AGGREGATOR (element);
+ GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (pad);
+
+ GST_INFO_OBJECT (pad, "Removing pad");
+
+ SRC_LOCK (self);
+ gst_aggregator_pad_set_flushing (aggpad, GST_FLOW_FLUSHING, TRUE);
+ gst_element_remove_pad (element, pad);
+
+ self->priv->has_peer_latency = FALSE;
+ SRC_BROADCAST (self);
+ SRC_UNLOCK (self);
+}
+
+static GstAggregatorPad *
+gst_aggregator_default_create_new_pad (GstAggregator * self,
+ GstPadTemplate * templ, const gchar * req_name, const GstCaps * caps)
+{
+ GstAggregatorPad *agg_pad;
+ GstAggregatorPrivate *priv = self->priv;
+ gint serial = 0;
+ gchar *name = NULL;
+ GType pad_type =
+ GST_PAD_TEMPLATE_GTYPE (templ) ==
+ G_TYPE_NONE ? GST_TYPE_AGGREGATOR_PAD : GST_PAD_TEMPLATE_GTYPE (templ);
+
+ if (templ->direction != GST_PAD_SINK)
+ goto not_sink;
+
+ if (templ->presence != GST_PAD_REQUEST)
+ goto not_request;
+
+ GST_OBJECT_LOCK (self);
+ if (req_name == NULL || strlen (req_name) < 6
+ || !g_str_has_prefix (req_name, "sink_")
+ || strrchr (req_name, '%') != NULL) {
+ /* no name given when requesting the pad, use next available int */
+ serial = ++priv->max_padserial;
+ } else {
+ gchar *endptr = NULL;
+
+ /* parse serial number from requested padname */
+ serial = g_ascii_strtoull (&req_name[5], &endptr, 10);
+ if (endptr != NULL && *endptr == '\0') {
+ if (serial > priv->max_padserial) {
+ priv->max_padserial = serial;
+ }
+ } else {
+ serial = ++priv->max_padserial;
+ }
+ }
+
+ name = g_strdup_printf ("sink_%u", serial);
+ g_assert (g_type_is_a (pad_type, GST_TYPE_AGGREGATOR_PAD));
+ agg_pad = g_object_new (pad_type,
+ "name", name, "direction", GST_PAD_SINK, "template", templ, NULL);
+ g_free (name);
+
+ GST_OBJECT_UNLOCK (self);
+
+ return agg_pad;
+
+ /* errors */
+not_sink:
+ {
+ GST_WARNING_OBJECT (self, "request new pad that is not a SINK pad");
+ return NULL;
+ }
+not_request:
+ {
+ GST_WARNING_OBJECT (self, "request new pad that is not a REQUEST pad");
+ return NULL;
+ }
+}
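+
+/* An illustrative sketch (hypothetical application code, not part of the
+ * original file) of how the "sink_%u" naming scheme parsed above is
+ * typically used when requesting pads on an aggregator-based element
+ * "agg_element":
+ *
+ *   GstPad *p1 = gst_element_get_request_pad (agg_element, "sink_%u");
+ *   GstPad *p2 = gst_element_get_request_pad (agg_element, "sink_7");
+ *
+ * The first call picks the next free serial, the second forces serial 7,
+ * matching the parsing branch above.
+ */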
+
+static GstPad *
+gst_aggregator_request_new_pad (GstElement * element,
+ GstPadTemplate * templ, const gchar * req_name, const GstCaps * caps)
+{
+ GstAggregator *self;
+ GstAggregatorPad *agg_pad;
+ GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (element);
+ GstAggregatorPrivate *priv = GST_AGGREGATOR (element)->priv;
+
+ self = GST_AGGREGATOR (element);
+
+ agg_pad = klass->create_new_pad (self, templ, req_name, caps);
+ if (!agg_pad) {
+ GST_ERROR_OBJECT (element, "Couldn't create new pad");
+ return NULL;
+ }
+
+ GST_DEBUG_OBJECT (element, "Adding pad %s", GST_PAD_NAME (agg_pad));
+
+ if (priv->running)
+ gst_pad_set_active (GST_PAD (agg_pad), TRUE);
+
+ /* add the pad to the element */
+ gst_element_add_pad (element, GST_PAD (agg_pad));
+
+ return GST_PAD (agg_pad);
+}
+
+/* Must be called with SRC_LOCK held */
+
+static gboolean
+gst_aggregator_query_latency_unlocked (GstAggregator * self, GstQuery * query)
+{
+ gboolean query_ret, live;
+ GstClockTime our_latency, min, max;
+
+ query_ret = gst_pad_query_default (self->srcpad, GST_OBJECT (self), query);
+
+ if (!query_ret) {
+ GST_WARNING_OBJECT (self, "Latency query failed");
+ return FALSE;
+ }
+
+ gst_query_parse_latency (query, &live, &min, &max);
+
+ if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (min))) {
+ GST_ERROR_OBJECT (self, "Invalid minimum latency %" GST_TIME_FORMAT
+ ". Please file a bug at " PACKAGE_BUGREPORT ".", GST_TIME_ARGS (min));
+ return FALSE;
+ }
+
+ if (self->priv->upstream_latency_min > min) {
+ GstClockTimeDiff diff =
+ GST_CLOCK_DIFF (min, self->priv->upstream_latency_min);
+
+ min += diff;
+ if (GST_CLOCK_TIME_IS_VALID (max)) {
+ max += diff;
+ }
+ }
+
+ if (min > max && GST_CLOCK_TIME_IS_VALID (max)) {
+ GST_ELEMENT_WARNING (self, CORE, CLOCK, (NULL),
+ ("Impossible to configure latency: max %" GST_TIME_FORMAT " < min %"
+ GST_TIME_FORMAT ". Add queues or other buffering elements.",
+ GST_TIME_ARGS (max), GST_TIME_ARGS (min)));
+ return FALSE;
+ }
+
+ our_latency = self->priv->latency;
+
+ self->priv->peer_latency_live = live;
+ self->priv->peer_latency_min = min;
+ self->priv->peer_latency_max = max;
+ self->priv->has_peer_latency = TRUE;
+
+ /* add our own */
+ min += our_latency;
+ min += self->priv->sub_latency_min;
+ if (GST_CLOCK_TIME_IS_VALID (self->priv->sub_latency_max)
+ && GST_CLOCK_TIME_IS_VALID (max))
+ max += self->priv->sub_latency_max + our_latency;
+ else
+ max = GST_CLOCK_TIME_NONE;
+
+ SRC_BROADCAST (self);
+
+ GST_DEBUG_OBJECT (self, "configured latency live:%s min:%" G_GINT64_FORMAT
+ " max:%" G_GINT64_FORMAT, live ? "true" : "false", min, max);
+
+ gst_query_set_latency (query, live, min, max);
+
+ return query_ret;
+}
+
+/*
+ * MUST be called with the src_lock held.
+ *
+ * See gst_aggregator_get_latency() for doc
+ */
+static GstClockTime
+gst_aggregator_get_latency_unlocked (GstAggregator * self)
+{
+ GstClockTime latency;
+
+ g_return_val_if_fail (GST_IS_AGGREGATOR (self), 0);
+
+ if (!self->priv->has_peer_latency) {
+ GstQuery *query = gst_query_new_latency ();
+ gboolean ret;
+
+ ret = gst_aggregator_query_latency_unlocked (self, query);
+ gst_query_unref (query);
+ if (!ret)
+ return GST_CLOCK_TIME_NONE;
+ }
+
+ if (!self->priv->has_peer_latency || !self->priv->peer_latency_live)
+ return GST_CLOCK_TIME_NONE;
+
+ /* latency_min is never GST_CLOCK_TIME_NONE by construction */
+ latency = self->priv->peer_latency_min;
+
+ /* add our own */
+ latency += self->priv->latency;
+ latency += self->priv->sub_latency_min;
+
+ return latency;
+}
+
+/**
+ * gst_aggregator_get_latency:
+ * @self: a #GstAggregator
+ *
+ * Retrieves the latency values reported by @self in response to the latency
+ * query, or %GST_CLOCK_TIME_NONE if there is no live source connected and the element
+ * will not wait for the clock.
+ *
+ * Typically only called by subclasses.
+ *
+ * Returns: The latency or %GST_CLOCK_TIME_NONE if the element does not sync
+ */
+GstClockTime
+gst_aggregator_get_latency (GstAggregator * self)
+{
+ GstClockTime ret;
+
+ SRC_LOCK (self);
+ ret = gst_aggregator_get_latency_unlocked (self);
+ SRC_UNLOCK (self);
+
+ return ret;
+}
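+
+/* A minimal usage sketch (hypothetical subclass code, not part of the
+ * original file): querying the configured latency, e.g. to decide how long
+ * to wait for data on late pads.
+ *
+ *   GstClockTime lat = gst_aggregator_get_latency (GST_AGGREGATOR (self));
+ *
+ *   if (GST_CLOCK_TIME_IS_VALID (lat))
+ *     GST_DEBUG_OBJECT (self, "latency %" GST_TIME_FORMAT,
+ *         GST_TIME_ARGS (lat));
+ *   else
+ *     GST_DEBUG_OBJECT (self, "not live, no deadline to honour");
+ */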
+
+static gboolean
+gst_aggregator_send_event (GstElement * element, GstEvent * event)
+{
+ GstAggregator *self = GST_AGGREGATOR (element);
+
+ GST_STATE_LOCK (element);
+ if (GST_EVENT_TYPE (event) == GST_EVENT_SEEK &&
+ GST_STATE (element) < GST_STATE_PAUSED) {
+ gdouble rate;
+ GstFormat fmt;
+ GstSeekFlags flags;
+ GstSeekType start_type, stop_type;
+ gint64 start, stop;
+
+ gst_event_parse_seek (event, &rate, &fmt, &flags, &start_type,
+ &start, &stop_type, &stop);
+
+ GST_OBJECT_LOCK (self);
+ gst_segment_do_seek (&GST_AGGREGATOR_PAD (self->srcpad)->segment, rate, fmt,
+ flags, start_type, start, stop_type, stop, NULL);
+ self->priv->next_seqnum = gst_event_get_seqnum (event);
+ self->priv->first_buffer = FALSE;
+ GST_OBJECT_UNLOCK (self);
+
+ GST_DEBUG_OBJECT (element, "Storing segment %" GST_PTR_FORMAT, event);
+ }
+ GST_STATE_UNLOCK (element);
+
+ return GST_ELEMENT_CLASS (aggregator_parent_class)->send_event (element,
+ event);
+}
+
+static gboolean
+gst_aggregator_default_src_query (GstAggregator * self, GstQuery * query)
+{
+ gboolean res = TRUE;
+
+ switch (GST_QUERY_TYPE (query)) {
+ case GST_QUERY_SEEKING:
+ {
+ GstFormat format;
+
+ /* don't pass it along, as some (file)sink might claim it does seeking
+ * whereas with an aggregator in between that will likely not work */
+ gst_query_parse_seeking (query, &format, NULL, NULL, NULL);
+ gst_query_set_seeking (query, format, FALSE, 0, -1);
+ res = TRUE;
+
+ break;
+ }
+ case GST_QUERY_LATENCY:
+ SRC_LOCK (self);
+ res = gst_aggregator_query_latency_unlocked (self, query);
+ SRC_UNLOCK (self);
+ break;
+ default:
+ return gst_pad_query_default (self->srcpad, GST_OBJECT (self), query);
+ }
+
+ return res;
+}
+
+static gboolean
+gst_aggregator_event_forward_func (GstPad * pad, gpointer user_data)
+{
+ EventData *evdata = user_data;
+ gboolean ret = TRUE;
+ GstPad *peer = gst_pad_get_peer (pad);
+ GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (pad);
+
+ if (peer) {
+ if (evdata->only_to_active_pads && aggpad->priv->first_buffer) {
+ GST_DEBUG_OBJECT (pad, "not sending event to inactive pad");
+ ret = TRUE;
+ } else {
+ ret = gst_pad_send_event (peer, gst_event_ref (evdata->event));
+ GST_DEBUG_OBJECT (pad, "return of event push is %d", ret);
+ }
+ }
+
+ if (ret == FALSE) {
+ if (GST_EVENT_TYPE (evdata->event) == GST_EVENT_SEEK) {
+ GstQuery *seeking = gst_query_new_seeking (GST_FORMAT_TIME);
+
+ GST_DEBUG_OBJECT (pad, "Event %" GST_PTR_FORMAT " failed", evdata->event);
+
+ if (gst_pad_query (peer, seeking)) {
+ gboolean seekable;
+
+ gst_query_parse_seeking (seeking, NULL, &seekable, NULL, NULL);
+
+ if (seekable == FALSE) {
+ GST_INFO_OBJECT (pad,
+ "Source not seekable, We failed but it does not matter!");
+
+ ret = TRUE;
+ }
+ } else {
+ GST_ERROR_OBJECT (pad, "Query seeking FAILED");
+ }
+
+ gst_query_unref (seeking);
+ }
+ } else {
+ evdata->one_actually_seeked = TRUE;
+ }
+
+ evdata->result &= ret;
+
+ if (peer)
+ gst_object_unref (peer);
+
+ /* Always send to all pads */
+ return FALSE;
+}
+
+static void
+gst_aggregator_forward_event_to_all_sinkpads (GstAggregator * self,
+ EventData * evdata)
+{
+ evdata->result = TRUE;
+ evdata->one_actually_seeked = FALSE;
+
+ gst_pad_forward (self->srcpad, gst_aggregator_event_forward_func, evdata);
+
+ gst_event_unref (evdata->event);
+}
+
+static gboolean
+gst_aggregator_do_seek (GstAggregator * self, GstEvent * event)
+{
+ gdouble rate;
+ GstFormat fmt;
+ GstSeekFlags flags;
+ GstSeekType start_type, stop_type;
+ gint64 start, stop;
+ gboolean flush;
+ EventData evdata = { 0, };
+ GstAggregatorPrivate *priv = self->priv;
+
+ gst_event_parse_seek (event, &rate, &fmt, &flags, &start_type,
+ &start, &stop_type, &stop);
+
+ GST_INFO_OBJECT (self, "starting SEEK");
+
+ flush = flags & GST_SEEK_FLAG_FLUSH;
+
+ GST_OBJECT_LOCK (self);
+
+ if (gst_event_get_seqnum (event) == self->priv->next_seqnum) {
+ evdata.result = TRUE;
+ GST_DEBUG_OBJECT (self, "Dropping duplicated seek event with seqnum %d",
+ self->priv->next_seqnum);
+ GST_OBJECT_UNLOCK (self);
+ goto done;
+ }
+
+ self->priv->next_seqnum = gst_event_get_seqnum (event);
+
+ gst_segment_do_seek (&GST_AGGREGATOR_PAD (self->srcpad)->segment, rate, fmt,
+ flags, start_type, start, stop_type, stop, NULL);
+
+ /* Seeking sets a position */
+ self->priv->first_buffer = FALSE;
+
+ if (flush)
+ priv->flushing = TRUE;
+
+ GST_OBJECT_UNLOCK (self);
+
+ if (flush) {
+ GstEvent *event = gst_event_new_flush_start ();
+
+ gst_event_set_seqnum (event, self->priv->next_seqnum);
+ gst_aggregator_stop_srcpad_task (self, event);
+ }
+
+ /* forward the seek upstream */
+ evdata.event = event;
+ evdata.flush = flush;
+ evdata.only_to_active_pads = FALSE;
+ gst_aggregator_forward_event_to_all_sinkpads (self, &evdata);
+ event = NULL;
+
+ if (!evdata.result || !evdata.one_actually_seeked) {
+ GST_OBJECT_LOCK (self);
+ priv->flushing = FALSE;
+ GST_OBJECT_UNLOCK (self);
+
+ /* No flush stop is inbound for us to forward */
+ if (flush) {
+ GstEvent *event = gst_event_new_flush_stop (TRUE);
+
+ gst_event_set_seqnum (event, self->priv->next_seqnum);
+ gst_pad_push_event (self->srcpad, event);
+ }
+ }
+
+done:
+ GST_INFO_OBJECT (self, "seek done, result: %d", evdata.result);
+
+ return evdata.result;
+}
+
+static gboolean
+gst_aggregator_default_src_event (GstAggregator * self, GstEvent * event)
+{
+ EventData evdata = { 0, };
+
+ switch (GST_EVENT_TYPE (event)) {
+ case GST_EVENT_SEEK:
+ /* _do_seek() unrefs the event. */
+ return gst_aggregator_do_seek (self, event);
+ case GST_EVENT_NAVIGATION:
+ /* navigation is rather pointless. */
+ gst_event_unref (event);
+ return FALSE;
+ default:
+ break;
+ }
+
+ /* Don't forward QOS events to pads that had no active buffer yet. Otherwise
+ * they will receive a QOS event that has earliest_time=0 (because we can't
+ * have negative timestamps), and consider their buffer as too late */
+ evdata.event = event;
+ evdata.flush = FALSE;
+ evdata.only_to_active_pads = GST_EVENT_TYPE (event) == GST_EVENT_QOS;
+ gst_aggregator_forward_event_to_all_sinkpads (self, &evdata);
+ return evdata.result;
+}
+
+static gboolean
+gst_aggregator_src_pad_event_func (GstPad * pad, GstObject * parent,
+ GstEvent * event)
+{
+ GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (parent);
+
+ return klass->src_event (GST_AGGREGATOR (parent), event);
+}
+
+static gboolean
+gst_aggregator_src_pad_query_func (GstPad * pad, GstObject * parent,
+ GstQuery * query)
+{
+ GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (parent);
+
+ return klass->src_query (GST_AGGREGATOR (parent), query);
+}
+
+static gboolean
+gst_aggregator_src_pad_activate_mode_func (GstPad * pad,
+ GstObject * parent, GstPadMode mode, gboolean active)
+{
+ GstAggregator *self = GST_AGGREGATOR (parent);
+ GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (parent);
+
+ if (klass->src_activate) {
+ if (klass->src_activate (self, mode, active) == FALSE) {
+ return FALSE;
+ }
+ }
+
+ if (active == TRUE) {
+ switch (mode) {
+ case GST_PAD_MODE_PUSH:
+ {
+ GST_INFO_OBJECT (pad, "Activating pad!");
+ gst_aggregator_start_srcpad_task (self);
+ return TRUE;
+ }
+ default:
+ {
+ GST_ERROR_OBJECT (pad, "Only supported mode is PUSH");
+ return FALSE;
+ }
+ }
+ }
+
+ /* deactivating */
+ GST_INFO_OBJECT (self, "Deactivating srcpad");
+
+ gst_aggregator_stop_srcpad_task (self, NULL);
+
+ return TRUE;
+}
+
+static gboolean
+gst_aggregator_default_sink_query (GstAggregator * self,
+ GstAggregatorPad * aggpad, GstQuery * query)
+{
+ GstPad *pad = GST_PAD (aggpad);
+
+ if (GST_QUERY_TYPE (query) == GST_QUERY_ALLOCATION) {
+ GstQuery *decide_query = NULL;
+ GstAggregatorClass *agg_class;
+ gboolean ret;
+
+ GST_OBJECT_LOCK (self);
+ PAD_LOCK (aggpad);
+ if (G_UNLIKELY (!aggpad->priv->negotiated)) {
+ GST_DEBUG_OBJECT (self,
+ "not negotiated yet, can't answer ALLOCATION query");
+ PAD_UNLOCK (aggpad);
+ GST_OBJECT_UNLOCK (self);
+
+ return FALSE;
+ }
+
+ if ((decide_query = self->priv->allocation_query))
+ gst_query_ref (decide_query);
+ PAD_UNLOCK (aggpad);
+ GST_OBJECT_UNLOCK (self);
+
+ GST_DEBUG_OBJECT (self,
+ "calling propose allocation with query %" GST_PTR_FORMAT, decide_query);
+
+ agg_class = GST_AGGREGATOR_GET_CLASS (self);
+
+ /* pass the query to the propose_allocation vmethod if any */
+ if (agg_class->propose_allocation)
+ ret = agg_class->propose_allocation (self, aggpad, decide_query, query);
+ else
+ ret = FALSE;
+
+ if (decide_query)
+ gst_query_unref (decide_query);
+
+ GST_DEBUG_OBJECT (self, "ALLOCATION ret %d, %" GST_PTR_FORMAT, ret, query);
+ return ret;
+ }
+
+ return gst_pad_query_default (pad, GST_OBJECT (self), query);
+}
+
+static gboolean
+gst_aggregator_default_sink_query_pre_queue (GstAggregator * self,
+ GstAggregatorPad * aggpad, GstQuery * query)
+{
+ if (GST_QUERY_IS_SERIALIZED (query)) {
+ GstStructure *s;
+ gboolean ret = FALSE;
+
+ SRC_LOCK (self);
+ PAD_LOCK (aggpad);
+
+ if (aggpad->priv->flow_return != GST_FLOW_OK) {
+ SRC_UNLOCK (self);
+ goto flushing;
+ }
+
+ g_queue_push_head (&aggpad->priv->data, query);
+ SRC_BROADCAST (self);
+ SRC_UNLOCK (self);
+
+ while (!gst_aggregator_pad_queue_is_empty (aggpad)
+ && aggpad->priv->flow_return == GST_FLOW_OK) {
+ GST_DEBUG_OBJECT (aggpad, "Waiting for buffer to be consumed");
+ PAD_WAIT_EVENT (aggpad);
+ }
+
+ s = gst_query_writable_structure (query);
+ if (gst_structure_get_boolean (s, "gst-aggregator-retval", &ret))
+ gst_structure_remove_field (s, "gst-aggregator-retval");
+ else
+ g_queue_remove (&aggpad->priv->data, query);
+
+ if (aggpad->priv->flow_return != GST_FLOW_OK)
+ goto flushing;
+
+ PAD_UNLOCK (aggpad);
+
+ return ret;
+ } else {
+ GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (self);
+
+ return klass->sink_query (self, aggpad, query);
+ }
+
+flushing:
+ GST_DEBUG_OBJECT (aggpad, "Pad is %s, dropping query",
+ gst_flow_get_name (aggpad->priv->flow_return));
+ PAD_UNLOCK (aggpad);
+
+ return FALSE;
+}
+
+static void
+gst_aggregator_finalize (GObject * object)
+{
+ GstAggregator *self = (GstAggregator *) object;
+
+ g_mutex_clear (&self->priv->src_lock);
+ g_cond_clear (&self->priv->src_cond);
+
+ G_OBJECT_CLASS (aggregator_parent_class)->finalize (object);
+}
+
+/*
+ * gst_aggregator_set_latency_property:
+ * @agg: a #GstAggregator
+ * @latency: the new latency value (in nanoseconds).
+ *
+ * Sets the new latency value to @latency. This value is used to limit the
+ * amount of time a pad waits for data to appear before considering the pad
+ * as unresponsive.
+ */
+static void
+gst_aggregator_set_latency_property (GstAggregator * self, GstClockTime latency)
+{
+ gboolean changed;
+
+ g_return_if_fail (GST_IS_AGGREGATOR (self));
+ g_return_if_fail (GST_CLOCK_TIME_IS_VALID (latency));
+
+ SRC_LOCK (self);
+ changed = (self->priv->latency != latency);
+
+ if (changed) {
+ GList *item;
+
+ GST_OBJECT_LOCK (self);
+ /* First lock all the pads */
+ for (item = GST_ELEMENT_CAST (self)->sinkpads; item; item = item->next) {
+ GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (item->data);
+ PAD_LOCK (aggpad);
+ }
+
+ self->priv->latency = latency;
+
+ SRC_BROADCAST (self);
+
+ /* Now wake up the pads */
+ for (item = GST_ELEMENT_CAST (self)->sinkpads; item; item = item->next) {
+ GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (item->data);
+ PAD_BROADCAST_EVENT (aggpad);
+ PAD_UNLOCK (aggpad);
+ }
+ GST_OBJECT_UNLOCK (self);
+ }
+
+ SRC_UNLOCK (self);
+
+ if (changed)
+ gst_element_post_message (GST_ELEMENT_CAST (self),
+ gst_message_new_latency (GST_OBJECT_CAST (self)));
+}
+
+/*
+ * gst_aggregator_get_latency_property:
+ * @agg: a #GstAggregator
+ *
+ * Gets the latency value. See gst_aggregator_set_latency for
+ * more details.
+ *
+ * Returns: The time in nanoseconds to wait for data to arrive on a sink pad
+ * before a pad is deemed unresponsive. A value of -1 means an
+ * unlimited time.
+ */
+static GstClockTime
+gst_aggregator_get_latency_property (GstAggregator * agg)
+{
+ GstClockTime res;
+
+ g_return_val_if_fail (GST_IS_AGGREGATOR (agg), GST_CLOCK_TIME_NONE);
+
+ GST_OBJECT_LOCK (agg);
+ res = agg->priv->latency;
+ GST_OBJECT_UNLOCK (agg);
+
+ return res;
+}
+
+static void
+gst_aggregator_set_property (GObject * object, guint prop_id,
+ const GValue * value, GParamSpec * pspec)
+{
+ GstAggregator *agg = GST_AGGREGATOR (object);
+
+ switch (prop_id) {
+ case PROP_LATENCY:
+ gst_aggregator_set_latency_property (agg, g_value_get_uint64 (value));
+ break;
+ case PROP_MIN_UPSTREAM_LATENCY:
+ SRC_LOCK (agg);
+ agg->priv->upstream_latency_min = g_value_get_uint64 (value);
+ SRC_UNLOCK (agg);
+ break;
+ case PROP_START_TIME_SELECTION:
+ agg->priv->start_time_selection = g_value_get_enum (value);
+ break;
+ case PROP_START_TIME:
+ agg->priv->start_time = g_value_get_uint64 (value);
+ break;
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ break;
+ }
+}
+
+static void
+gst_aggregator_get_property (GObject * object, guint prop_id,
+ GValue * value, GParamSpec * pspec)
+{
+ GstAggregator *agg = GST_AGGREGATOR (object);
+
+ switch (prop_id) {
+ case PROP_LATENCY:
+ g_value_set_uint64 (value, gst_aggregator_get_latency_property (agg));
+ break;
+ case PROP_MIN_UPSTREAM_LATENCY:
+ SRC_LOCK (agg);
+ g_value_set_uint64 (value, agg->priv->upstream_latency_min);
+ SRC_UNLOCK (agg);
+ break;
+ case PROP_START_TIME_SELECTION:
+ g_value_set_enum (value, agg->priv->start_time_selection);
+ break;
+ case PROP_START_TIME:
+ g_value_set_uint64 (value, agg->priv->start_time);
+ break;
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ break;
+ }
+}
+
+/* GObject vmethods implementations */
+static void
+gst_aggregator_class_init (GstAggregatorClass * klass)
+{
+ GObjectClass *gobject_class = (GObjectClass *) klass;
+ GstElementClass *gstelement_class = (GstElementClass *) klass;
+
+ aggregator_parent_class = g_type_class_peek_parent (klass);
+
+ GST_DEBUG_CATEGORY_INIT (aggregator_debug, "aggregator",
+ GST_DEBUG_FG_MAGENTA, "GstAggregator");
+
+ if (aggregator_private_offset != 0)
+ g_type_class_adjust_private_offset (klass, &aggregator_private_offset);
+
+ klass->finish_buffer = gst_aggregator_default_finish_buffer;
+
+ klass->sink_event = gst_aggregator_default_sink_event;
+ klass->sink_query = gst_aggregator_default_sink_query;
+
+ klass->src_event = gst_aggregator_default_src_event;
+ klass->src_query = gst_aggregator_default_src_query;
+
+ klass->create_new_pad = gst_aggregator_default_create_new_pad;
+ klass->update_src_caps = gst_aggregator_default_update_src_caps;
+ klass->fixate_src_caps = gst_aggregator_default_fixate_src_caps;
+ klass->negotiated_src_caps = gst_aggregator_default_negotiated_src_caps;
+
+ klass->negotiate = gst_aggregator_default_negotiate;
+
+ klass->sink_event_pre_queue = gst_aggregator_default_sink_event_pre_queue;
+ klass->sink_query_pre_queue = gst_aggregator_default_sink_query_pre_queue;
+
+ gstelement_class->request_new_pad =
+ GST_DEBUG_FUNCPTR (gst_aggregator_request_new_pad);
+ gstelement_class->send_event = GST_DEBUG_FUNCPTR (gst_aggregator_send_event);
+ gstelement_class->release_pad =
+ GST_DEBUG_FUNCPTR (gst_aggregator_release_pad);
+ gstelement_class->change_state =
+ GST_DEBUG_FUNCPTR (gst_aggregator_change_state);
+
+ gobject_class->set_property = gst_aggregator_set_property;
+ gobject_class->get_property = gst_aggregator_get_property;
+ gobject_class->finalize = gst_aggregator_finalize;
+
+ g_object_class_install_property (gobject_class, PROP_LATENCY,
+ g_param_spec_uint64 ("latency", "Buffer latency",
+ "Additional latency in live mode to allow upstream "
+ "to take longer to produce buffers for the current "
+ "position (in nanoseconds)", 0, G_MAXUINT64,
+ DEFAULT_LATENCY, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ /**
+ * GstAggregator:min-upstream-latency:
+ *
+ * Force minimum upstream latency (in nanoseconds). When sources with a
+ * higher latency are expected to be plugged in dynamically after the
+ * aggregator has started playing, this allows overriding the minimum
+ * latency reported by the initial source(s). This is only taken into
+ * account when larger than the actually reported minimum latency.
+ *
+ * Since: 1.16
+ */
+ g_object_class_install_property (gobject_class, PROP_MIN_UPSTREAM_LATENCY,
+ g_param_spec_uint64 ("min-upstream-latency", "Buffer latency",
+ "When sources with a higher latency are expected to be plugged "
+ "in dynamically after the aggregator has started playing, "
+ "this allows overriding the minimum latency reported by the "
+ "initial source(s). This is only taken into account when larger "
+ "than the actually reported minimum latency. (nanoseconds)",
+ 0, G_MAXUINT64,
+ DEFAULT_MIN_UPSTREAM_LATENCY,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property (gobject_class, PROP_START_TIME_SELECTION,
+ g_param_spec_enum ("start-time-selection", "Start Time Selection",
+ "Decides which start time is output",
+ gst_aggregator_start_time_selection_get_type (),
+ DEFAULT_START_TIME_SELECTION,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+
+ g_object_class_install_property (gobject_class, PROP_START_TIME,
+ g_param_spec_uint64 ("start-time", "Start Time",
+ "Start time to use if start-time-selection=set", 0,
+ G_MAXUINT64,
+ DEFAULT_START_TIME, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+}
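+
+/* A small configuration sketch (assuming "agg" is an instance of some
+ * subclass of this base class; illustrative only). The properties installed
+ * above are regular GObject properties:
+ *
+ *   g_object_set (agg,
+ *       "latency", (guint64) (200 * GST_MSECOND),
+ *       "min-upstream-latency", (guint64) (30 * GST_MSECOND),
+ *       "start-time-selection", GST_AGGREGATOR_START_TIME_SELECTION_FIRST,
+ *       NULL);
+ */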
+
+static inline gpointer
+gst_aggregator_get_instance_private (GstAggregator * self)
+{
+ return (G_STRUCT_MEMBER_P (self, aggregator_private_offset));
+}
+
+static void
+gst_aggregator_init (GstAggregator * self, GstAggregatorClass * klass)
+{
+ GstPadTemplate *pad_template;
+ GstAggregatorPrivate *priv;
+ GType pad_type;
+
+ g_return_if_fail (klass->aggregate != NULL);
+
+ self->priv = gst_aggregator_get_instance_private (self);
+
+ priv = self->priv;
+
+ pad_template =
+ gst_element_class_get_pad_template (GST_ELEMENT_CLASS (klass), "src");
+ g_return_if_fail (pad_template != NULL);
+
+ priv->max_padserial = -1;
+ priv->tags_changed = FALSE;
+
+ self->priv->peer_latency_live = FALSE;
+ self->priv->peer_latency_min = self->priv->sub_latency_min = 0;
+ self->priv->peer_latency_max = self->priv->sub_latency_max = 0;
+ self->priv->has_peer_latency = FALSE;
+
+ pad_type =
+ GST_PAD_TEMPLATE_GTYPE (pad_template) ==
+ G_TYPE_NONE ? GST_TYPE_AGGREGATOR_PAD :
+ GST_PAD_TEMPLATE_GTYPE (pad_template);
+ g_assert (g_type_is_a (pad_type, GST_TYPE_AGGREGATOR_PAD));
+ self->srcpad =
+ g_object_new (pad_type, "name", "src", "direction", GST_PAD_SRC,
+ "template", pad_template, NULL);
+
+ gst_aggregator_reset_flow_values (self);
+
+ gst_pad_set_event_function (self->srcpad,
+ GST_DEBUG_FUNCPTR (gst_aggregator_src_pad_event_func));
+ gst_pad_set_query_function (self->srcpad,
+ GST_DEBUG_FUNCPTR (gst_aggregator_src_pad_query_func));
+ gst_pad_set_activatemode_function (self->srcpad,
+ GST_DEBUG_FUNCPTR (gst_aggregator_src_pad_activate_mode_func));
+
+ gst_element_add_pad (GST_ELEMENT (self), self->srcpad);
+
+ self->priv->upstream_latency_min = DEFAULT_MIN_UPSTREAM_LATENCY;
+ self->priv->latency = DEFAULT_LATENCY;
+ self->priv->start_time_selection = DEFAULT_START_TIME_SELECTION;
+ self->priv->start_time = DEFAULT_START_TIME;
+
+ g_mutex_init (&self->priv->src_lock);
+ g_cond_init (&self->priv->src_cond);
+}
+
+/* we can't use G_DEFINE_ABSTRACT_TYPE because we need the klass in the _init
+ * method to get to the padtemplates */
+GType
+gst_aggregator_get_type (void)
+{
+ static volatile gsize type = 0;
+
+ if (g_once_init_enter (&type)) {
+ GType _type;
+ static const GTypeInfo info = {
+ sizeof (GstAggregatorClass),
+ NULL,
+ NULL,
+ (GClassInitFunc) gst_aggregator_class_init,
+ NULL,
+ NULL,
+ sizeof (GstAggregator),
+ 0,
+ (GInstanceInitFunc) gst_aggregator_init,
+ };
+
+ _type = g_type_register_static (GST_TYPE_ELEMENT,
+ "GstAggregatorFallback", &info, G_TYPE_FLAG_ABSTRACT);
+
+ aggregator_private_offset =
+ g_type_add_instance_private (_type, sizeof (GstAggregatorPrivate));
+
+ g_once_init_leave (&type, _type);
+ }
+ return type;
+}
+
+/* Must be called with SRC lock and PAD lock held */
+static gboolean
+gst_aggregator_pad_has_space (GstAggregator * self, GstAggregatorPad * aggpad)
+{
+ /* Empty queue always has space */
+ if (aggpad->priv->num_buffers == 0 && aggpad->priv->clipped_buffer == NULL)
+ return TRUE;
+
+ /* In live mode we also want at least two buffers queued: one being
+ * processed and one ready for the next iteration. */
+ if (self->priv->peer_latency_live && aggpad->priv->num_buffers < 2)
+ return TRUE;
+
+ /* zero latency, if there is a buffer, it's full */
+ if (self->priv->latency == 0)
+ return FALSE;
+
+ /* Allow no more buffers than the latency */
+ return (aggpad->priv->time_level <= self->priv->latency);
+}
+
+/* Must be called with the PAD_LOCK held */
+static void
+apply_buffer (GstAggregatorPad * aggpad, GstBuffer * buffer, gboolean head)
+{
+ GstClockTime timestamp;
+
+ if (GST_BUFFER_DTS_IS_VALID (buffer))
+ timestamp = GST_BUFFER_DTS (buffer);
+ else
+ timestamp = GST_BUFFER_PTS (buffer);
+
+ if (timestamp == GST_CLOCK_TIME_NONE) {
+ if (head)
+ timestamp = aggpad->priv->head_position;
+ else
+ timestamp = aggpad->priv->tail_position;
+ }
+
+ /* add duration */
+ if (GST_BUFFER_DURATION_IS_VALID (buffer))
+ timestamp += GST_BUFFER_DURATION (buffer);
+
+ if (head)
+ aggpad->priv->head_position = timestamp;
+ else
+ aggpad->priv->tail_position = timestamp;
+
+ update_time_level (aggpad, head);
+}
+
+/*
+ * Can be called either from the sinkpad's chain function or from the srcpad's
+ * thread in the case of a buffer synthesized from a GAP event.
+ * Because of this second case, FLUSH_LOCK can't be used here.
+ */
+
+static GstFlowReturn
+gst_aggregator_pad_chain_internal (GstAggregator * self,
+ GstAggregatorPad * aggpad, GstBuffer * buffer, gboolean head)
+{
+ GstFlowReturn flow_return;
+ GstClockTime buf_pts;
+
+ PAD_LOCK (aggpad);
+ flow_return = aggpad->priv->flow_return;
+ if (flow_return != GST_FLOW_OK)
+ goto flushing;
+
+ PAD_UNLOCK (aggpad);
+
+ buf_pts = GST_BUFFER_PTS (buffer);
+
+ for (;;) {
+ SRC_LOCK (self);
+ GST_OBJECT_LOCK (self);
+ PAD_LOCK (aggpad);
+
+ if (aggpad->priv->first_buffer) {
+ self->priv->has_peer_latency = FALSE;
+ aggpad->priv->first_buffer = FALSE;
+ }
+
+ if ((gst_aggregator_pad_has_space (self, aggpad) || !head)
+ && aggpad->priv->flow_return == GST_FLOW_OK) {
+ if (head)
+ g_queue_push_head (&aggpad->priv->data, buffer);
+ else
+ g_queue_push_tail (&aggpad->priv->data, buffer);
+ apply_buffer (aggpad, buffer, head);
+ aggpad->priv->num_buffers++;
+ buffer = NULL;
+ SRC_BROADCAST (self);
+ break;
+ }
+
+ flow_return = aggpad->priv->flow_return;
+ if (flow_return != GST_FLOW_OK) {
+ GST_OBJECT_UNLOCK (self);
+ SRC_UNLOCK (self);
+ goto flushing;
+ }
+ GST_DEBUG_OBJECT (aggpad, "Waiting for buffer to be consumed");
+ GST_OBJECT_UNLOCK (self);
+ SRC_UNLOCK (self);
+ PAD_WAIT_EVENT (aggpad);
+
+ PAD_UNLOCK (aggpad);
+ }
+
+ if (self->priv->first_buffer) {
+ GstClockTime start_time;
+ GstAggregatorPad *srcpad = GST_AGGREGATOR_PAD (self->srcpad);
+
+ switch (self->priv->start_time_selection) {
+ case GST_AGGREGATOR_START_TIME_SELECTION_ZERO:
+ default:
+ start_time = 0;
+ break;
+ case GST_AGGREGATOR_START_TIME_SELECTION_FIRST:
+ GST_OBJECT_LOCK (aggpad);
+ if (aggpad->priv->head_segment.format == GST_FORMAT_TIME) {
+ start_time = buf_pts;
+ if (start_time != -1) {
+ start_time = MAX (start_time, aggpad->priv->head_segment.start);
+ start_time =
+ gst_segment_to_running_time (&aggpad->priv->head_segment,
+ GST_FORMAT_TIME, start_time);
+ }
+ } else {
+ start_time = 0;
+ GST_WARNING_OBJECT (aggpad,
+ "Ignoring request of selecting the first start time "
+ "as the segment is a %s segment instead of a time segment",
+ gst_format_get_name (aggpad->segment.format));
+ }
+ GST_OBJECT_UNLOCK (aggpad);
+ break;
+ case GST_AGGREGATOR_START_TIME_SELECTION_SET:
+ start_time = self->priv->start_time;
+ if (start_time == -1)
+ start_time = 0;
+ break;
+ }
+
+ if (start_time != -1) {
+ if (srcpad->segment.position == -1)
+ srcpad->segment.position = start_time;
+ else
+ srcpad->segment.position = MIN (start_time, srcpad->segment.position);
+
+ GST_DEBUG_OBJECT (self, "Selecting start time %" GST_TIME_FORMAT,
+ GST_TIME_ARGS (start_time));
+ }
+ }
+
+ PAD_UNLOCK (aggpad);
+ GST_OBJECT_UNLOCK (self);
+ SRC_UNLOCK (self);
+
+ GST_DEBUG_OBJECT (aggpad, "Done chaining");
+
+ return flow_return;
+
+flushing:
+ PAD_UNLOCK (aggpad);
+
+ GST_DEBUG_OBJECT (aggpad, "Pad is %s, dropping buffer",
+ gst_flow_get_name (flow_return));
+ if (buffer)
+ gst_buffer_unref (buffer);
+
+ return flow_return;
+}
+
+static GstFlowReturn
+gst_aggregator_pad_chain (GstPad * pad, GstObject * object, GstBuffer * buffer)
+{
+ GstFlowReturn ret;
+ GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (pad);
+
+ PAD_FLUSH_LOCK (aggpad);
+
+ ret = gst_aggregator_pad_chain_internal (GST_AGGREGATOR_CAST (object),
+ aggpad, buffer, TRUE);
+
+ PAD_FLUSH_UNLOCK (aggpad);
+
+ return ret;
+}
+
+static gboolean
+gst_aggregator_pad_query_func (GstPad * pad, GstObject * parent,
+ GstQuery * query)
+{
+ GstAggregator *self = GST_AGGREGATOR (parent);
+ GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (self);
+ GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (pad);
+
+ g_assert (klass->sink_query_pre_queue);
+ return klass->sink_query_pre_queue (self, aggpad, query);
+}
+
+static GstFlowReturn
+gst_aggregator_pad_event_func (GstPad * pad, GstObject * parent,
+ GstEvent * event)
+{
+ GstAggregator *self = GST_AGGREGATOR (parent);
+ GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (self);
+ GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (pad);
+
+ g_assert (klass->sink_event_pre_queue);
+ return klass->sink_event_pre_queue (self, aggpad, event);
+}
+
+static gboolean
+gst_aggregator_pad_activate_mode_func (GstPad * pad,
+ GstObject * parent, GstPadMode mode, gboolean active)
+{
+ GstAggregator *self = GST_AGGREGATOR (parent);
+ GstAggregatorPad *aggpad = GST_AGGREGATOR_PAD (pad);
+
+ if (active == FALSE) {
+ SRC_LOCK (self);
+ gst_aggregator_pad_set_flushing (aggpad, GST_FLOW_FLUSHING, TRUE);
+ SRC_BROADCAST (self);
+ SRC_UNLOCK (self);
+ } else {
+ PAD_LOCK (aggpad);
+ aggpad->priv->flow_return = GST_FLOW_OK;
+ PAD_BROADCAST_EVENT (aggpad);
+ PAD_UNLOCK (aggpad);
+ }
+
+ return TRUE;
+}
+
+/***********************************
+ * GstAggregatorPad implementation *
+ ************************************/
+G_DEFINE_TYPE_WITH_PRIVATE (GstAggregatorPad, gst_aggregator_pad, GST_TYPE_PAD);
+
+#define DEFAULT_PAD_EMIT_SIGNALS FALSE
+
+enum
+{
+ PAD_PROP_0,
+ PAD_PROP_EMIT_SIGNALS,
+};
+
+enum
+{
+ PAD_SIGNAL_BUFFER_CONSUMED,
+ PAD_LAST_SIGNAL,
+};
+
+static guint gst_aggregator_pad_signals[PAD_LAST_SIGNAL] = { 0 };
+
+static void
+gst_aggregator_pad_constructed (GObject * object)
+{
+ GstPad *pad = GST_PAD (object);
+
+ if (GST_PAD_IS_SINK (pad)) {
+ gst_pad_set_chain_function (pad,
+ GST_DEBUG_FUNCPTR (gst_aggregator_pad_chain));
+ gst_pad_set_event_full_function_full (pad,
+ GST_DEBUG_FUNCPTR (gst_aggregator_pad_event_func), NULL, NULL);
+ gst_pad_set_query_function (pad,
+ GST_DEBUG_FUNCPTR (gst_aggregator_pad_query_func));
+ gst_pad_set_activatemode_function (pad,
+ GST_DEBUG_FUNCPTR (gst_aggregator_pad_activate_mode_func));
+ }
+}
+
+static void
+gst_aggregator_pad_finalize (GObject * object)
+{
+ GstAggregatorPad *pad = (GstAggregatorPad *) object;
+
+ g_cond_clear (&pad->priv->event_cond);
+ g_mutex_clear (&pad->priv->flush_lock);
+ g_mutex_clear (&pad->priv->lock);
+
+ G_OBJECT_CLASS (gst_aggregator_pad_parent_class)->finalize (object);
+}
+
+static void
+gst_aggregator_pad_dispose (GObject * object)
+{
+ GstAggregatorPad *pad = (GstAggregatorPad *) object;
+
+ gst_aggregator_pad_set_flushing (pad, GST_FLOW_FLUSHING, TRUE);
+
+ G_OBJECT_CLASS (gst_aggregator_pad_parent_class)->dispose (object);
+}
+
+static void
+gst_aggregator_pad_set_property (GObject * object, guint prop_id,
+ const GValue * value, GParamSpec * pspec)
+{
+ GstAggregatorPad *pad = GST_AGGREGATOR_PAD (object);
+
+ switch (prop_id) {
+ case PAD_PROP_EMIT_SIGNALS:
+ pad->priv->emit_signals = g_value_get_boolean (value);
+ break;
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ break;
+ }
+}
+
+static void
+gst_aggregator_pad_get_property (GObject * object, guint prop_id,
+ GValue * value, GParamSpec * pspec)
+{
+ GstAggregatorPad *pad = GST_AGGREGATOR_PAD (object);
+
+ switch (prop_id) {
+ case PAD_PROP_EMIT_SIGNALS:
+ g_value_set_boolean (value, pad->priv->emit_signals);
+ break;
+ default:
+ G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
+ break;
+ }
+}
+
+static void
+gst_aggregator_pad_class_init (GstAggregatorPadClass * klass)
+{
+ GObjectClass *gobject_class = (GObjectClass *) klass;
+
+ gobject_class->constructed = gst_aggregator_pad_constructed;
+ gobject_class->finalize = gst_aggregator_pad_finalize;
+ gobject_class->dispose = gst_aggregator_pad_dispose;
+ gobject_class->set_property = gst_aggregator_pad_set_property;
+ gobject_class->get_property = gst_aggregator_pad_get_property;
+
+ /**
+ * GstAggregatorPad:buffer-consumed:
+ * @buffer: The buffer that was consumed
+ *
+ * Signals that a buffer was consumed. As aggregator pads store buffers
+ * in an internal queue, there is no direct match between input and output
+ * buffers at any given time. This signal can be useful to forward metas
+ * such as #GstVideoTimeCodeMeta or #GstVideoCaptionMeta at the right time.
+ *
+ * Since: 1.16
+ */
+ gst_aggregator_pad_signals[PAD_SIGNAL_BUFFER_CONSUMED] =
+ g_signal_new ("buffer-consumed", G_TYPE_FROM_CLASS (klass),
+ G_SIGNAL_RUN_FIRST, 0, NULL, NULL, g_cclosure_marshal_generic,
+ G_TYPE_NONE, 1, GST_TYPE_BUFFER);
+
+ /**
+ * GstAggregatorPad:emit-signals:
+ *
+ * Enables the emission of signals such as #GstAggregatorPad::buffer-consumed
+ *
+ * Since: 1.16
+ */
+ g_object_class_install_property (gobject_class, PAD_PROP_EMIT_SIGNALS,
+ g_param_spec_boolean ("emit-signals", "Emit signals",
+ "Send signals to signal data consumption", DEFAULT_PAD_EMIT_SIGNALS,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
+}
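+
+/* A hedged usage sketch for the signal above (hypothetical application code):
+ * enable "emit-signals" on a sink pad and get notified whenever the
+ * aggregator actually consumes one of its queued buffers, e.g. to forward
+ * per-buffer metas downstream at the right moment.
+ *
+ *   static void
+ *   on_buffer_consumed (GstAggregatorPad * pad, GstBuffer * buffer,
+ *       gpointer user_data)
+ *   {
+ *     GST_LOG_OBJECT (pad, "consumed %" GST_PTR_FORMAT, buffer);
+ *   }
+ *
+ *   g_object_set (sinkpad, "emit-signals", TRUE, NULL);
+ *   g_signal_connect (sinkpad, "buffer-consumed",
+ *       G_CALLBACK (on_buffer_consumed), NULL);
+ */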
+
+static void
+gst_aggregator_pad_init (GstAggregatorPad * pad)
+{
+ pad->priv = gst_aggregator_pad_get_instance_private (pad);
+
+ g_queue_init (&pad->priv->data);
+ g_cond_init (&pad->priv->event_cond);
+
+ g_mutex_init (&pad->priv->flush_lock);
+ g_mutex_init (&pad->priv->lock);
+
+ gst_aggregator_pad_reset_unlocked (pad);
+ pad->priv->negotiated = FALSE;
+ pad->priv->emit_signals = DEFAULT_PAD_EMIT_SIGNALS;
+}
+
+/* Must be called with the PAD_LOCK held */
+static void
+gst_aggregator_pad_buffer_consumed (GstAggregatorPad * pad, GstBuffer * buffer)
+{
+ pad->priv->num_buffers--;
+ GST_TRACE_OBJECT (pad, "Consuming buffer %" GST_PTR_FORMAT, buffer);
+ if (buffer && pad->priv->emit_signals) {
+ g_signal_emit (pad, gst_aggregator_pad_signals[PAD_SIGNAL_BUFFER_CONSUMED],
+ 0, buffer);
+ }
+ PAD_BROADCAST_EVENT (pad);
+}
+
+/* Must be called with the PAD_LOCK held */
+static void
+gst_aggregator_pad_clip_buffer_unlocked (GstAggregatorPad * pad)
+{
+ GstAggregator *self = NULL;
+ GstAggregatorClass *aggclass = NULL;
+ GstBuffer *buffer = NULL;
+
+ while (pad->priv->clipped_buffer == NULL &&
+ GST_IS_BUFFER (g_queue_peek_tail (&pad->priv->data))) {
+ buffer = g_queue_pop_tail (&pad->priv->data);
+
+ apply_buffer (pad, buffer, FALSE);
+
+ /* We only take the parent here so that it's not taken if the buffer is
+ * already clipped or if the queue is empty.
+ */
+ if (self == NULL) {
+ self = GST_AGGREGATOR (gst_pad_get_parent_element (GST_PAD (pad)));
+ if (self == NULL) {
+ gst_buffer_unref (buffer);
+ return;
+ }
+
+ aggclass = GST_AGGREGATOR_GET_CLASS (self);
+ }
+
+ if (aggclass->clip) {
+ GST_TRACE_OBJECT (pad, "Clipping: %" GST_PTR_FORMAT, buffer);
+
+ buffer = aggclass->clip (self, pad, buffer);
+
+ if (buffer == NULL) {
+ gst_aggregator_pad_buffer_consumed (pad, buffer);
+ GST_TRACE_OBJECT (pad, "Clipping consumed the buffer");
+ }
+ }
+
+ pad->priv->clipped_buffer = buffer;
+ }
+
+ if (self)
+ gst_object_unref (self);
+}
+
+/**
+ * gst_aggregator_pad_pop_buffer:
+ * @pad: the pad to get buffer from
+ *
+ * Steal the ref to the buffer currently queued in @pad.
+ *
+ * Returns: (transfer full): The buffer in @pad or NULL if no buffer was
+ * queued. You should unref the buffer after usage.
+ */
+GstBuffer *
+gst_aggregator_pad_pop_buffer (GstAggregatorPad * pad)
+{
+ GstBuffer *buffer;
+
+ PAD_LOCK (pad);
+
+ if (pad->priv->flow_return != GST_FLOW_OK) {
+ PAD_UNLOCK (pad);
+ return NULL;
+ }
+
+ gst_aggregator_pad_clip_buffer_unlocked (pad);
+
+ buffer = pad->priv->clipped_buffer;
+
+ if (buffer) {
+ pad->priv->clipped_buffer = NULL;
+ gst_aggregator_pad_buffer_consumed (pad, buffer);
+ GST_DEBUG_OBJECT (pad, "Consumed: %" GST_PTR_FORMAT, buffer);
+ }
+
+ PAD_UNLOCK (pad);
+
+ return buffer;
+}
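+
+/* A simplified sketch (hypothetical subclass code) of an aggregate()
+ * implementation draining the per-pad queues with the API above; "outbuf"
+ * and mix_into_output() are placeholders, and a real implementation would
+ * also handle timeouts, caps and output allocation.
+ *
+ *   GList *l;
+ *
+ *   GST_OBJECT_LOCK (agg);
+ *   for (l = GST_ELEMENT (agg)->sinkpads; l; l = l->next) {
+ *     GstAggregatorPad *pad = GST_AGGREGATOR_PAD (l->data);
+ *     GstBuffer *buf = gst_aggregator_pad_pop_buffer (pad);
+ *
+ *     if (buf != NULL) {
+ *       mix_into_output (outbuf, buf);
+ *       gst_buffer_unref (buf);
+ *     } else if (gst_aggregator_pad_is_eos (pad)) {
+ *       GST_LOG_OBJECT (pad, "pad is EOS");
+ *     }
+ *   }
+ *   GST_OBJECT_UNLOCK (agg);
+ */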
+
+/**
+ * gst_aggregator_pad_drop_buffer:
+ * @pad: the pad where to drop any pending buffer
+ *
+ * Drop the buffer currently queued in @pad.
+ *
+ * Returns: TRUE if there was a buffer queued in @pad, or FALSE if not.
+ */
+gboolean
+gst_aggregator_pad_drop_buffer (GstAggregatorPad * pad)
+{
+ GstBuffer *buf;
+
+ buf = gst_aggregator_pad_pop_buffer (pad);
+
+ if (buf == NULL)
+ return FALSE;
+
+ gst_buffer_unref (buf);
+ return TRUE;
+}
+
+/**
+ * gst_aggregator_pad_peek_buffer:
+ * @pad: the pad to get buffer from
+ *
+ * Returns: (transfer full): A reference to the buffer in @pad or
+ * NULL if no buffer was queued. You should unref the buffer after
+ * usage.
+ */
+GstBuffer *
+gst_aggregator_pad_peek_buffer (GstAggregatorPad * pad)
+{
+ GstBuffer *buffer;
+
+ PAD_LOCK (pad);
+
+ if (pad->priv->flow_return != GST_FLOW_OK) {
+ PAD_UNLOCK (pad);
+ return NULL;
+ }
+
+ gst_aggregator_pad_clip_buffer_unlocked (pad);
+
+ if (pad->priv->clipped_buffer) {
+ buffer = gst_buffer_ref (pad->priv->clipped_buffer);
+ } else {
+ buffer = NULL;
+ }
+ PAD_UNLOCK (pad);
+
+ return buffer;
+}
+
+/**
+ * gst_aggregator_pad_has_buffer:
+ * @pad: the pad to check the buffer on
+ *
+ * This checks if a pad has a buffer available that will be returned by
+ * a call to gst_aggregator_pad_peek_buffer() or
+ * gst_aggregator_pad_pop_buffer().
+ *
+ * Returns: %TRUE if the pad has a buffer queued that will be returned next.
+ *
+ * Since: 1.14.1
+ */
+gboolean
+gst_aggregator_pad_has_buffer (GstAggregatorPad * pad)
+{
+ gboolean has_buffer;
+
+ PAD_LOCK (pad);
+ gst_aggregator_pad_clip_buffer_unlocked (pad);
+ has_buffer = (pad->priv->clipped_buffer != NULL);
+ PAD_UNLOCK (pad);
+
+ return has_buffer;
+}
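+
+/* A minimal sketch (hypothetical subclass code): checking whether every
+ * sink pad already has data queued, without dequeuing anything yet.
+ *
+ *   gboolean all_ready = TRUE;
+ *   GList *l;
+ *
+ *   GST_OBJECT_LOCK (agg);
+ *   for (l = GST_ELEMENT (agg)->sinkpads; l; l = l->next) {
+ *     GstAggregatorPad *pad = GST_AGGREGATOR_PAD (l->data);
+ *
+ *     if (!gst_aggregator_pad_has_buffer (pad) &&
+ *         !gst_aggregator_pad_is_eos (pad)) {
+ *       all_ready = FALSE;
+ *       break;
+ *     }
+ *   }
+ *   GST_OBJECT_UNLOCK (agg);
+ */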
+
+/**
+ * gst_aggregator_pad_is_eos:
+ * @pad: an aggregator pad
+ *
+ * Returns: %TRUE if the pad is EOS, otherwise %FALSE.
+ */
+gboolean
+gst_aggregator_pad_is_eos (GstAggregatorPad * pad)
+{
+ gboolean is_eos;
+
+ PAD_LOCK (pad);
+ is_eos = pad->priv->eos;
+ PAD_UNLOCK (pad);
+
+ return is_eos;
+}
+
+#if 0
+/*
+ * gst_aggregator_merge_tags:
+ * @self: a #GstAggregator
+ * @tags: a #GstTagList to merge
+ * @mode: the #GstTagMergeMode to use
+ *
+ * Adds tags to so-called pending tags, which will be processed
+ * before pushing out data downstream.
+ *
+ * Note that this is provided for convenience, and the subclass is
+ * not required to use this and can still do tag handling on its own.
+ *
+ * MT safe.
+ */
+void
+gst_aggregator_merge_tags (GstAggregator * self,
+ const GstTagList * tags, GstTagMergeMode mode)
+{
+ GstTagList *otags;
+
+ g_return_if_fail (GST_IS_AGGREGATOR (self));
+ g_return_if_fail (tags == NULL || GST_IS_TAG_LIST (tags));
+
+ /* FIXME Check if we can use OBJECT lock here! */
+ GST_OBJECT_LOCK (self);
+ if (tags)
+ GST_DEBUG_OBJECT (self, "merging tags %" GST_PTR_FORMAT, tags);
+ otags = self->priv->tags;
+ self->priv->tags = gst_tag_list_merge (self->priv->tags, tags, mode);
+ if (otags)
+ gst_tag_list_unref (otags);
+ self->priv->tags_changed = TRUE;
+ GST_OBJECT_UNLOCK (self);
+}
+#endif
+
+/**
+ * gst_aggregator_set_latency:
+ * @self: a #GstAggregator
+ * @min_latency: minimum latency
+ * @max_latency: maximum latency
+ *
+ * Lets #GstAggregator sub-classes tell the baseclass what their internal
+ * latency is. Will also post a LATENCY message on the bus so the pipeline
+ * can reconfigure its global latency.
+ */
+void
+gst_aggregator_set_latency (GstAggregator * self,
+ GstClockTime min_latency, GstClockTime max_latency)
+{
+ gboolean changed = FALSE;
+
+ g_return_if_fail (GST_IS_AGGREGATOR (self));
+ g_return_if_fail (GST_CLOCK_TIME_IS_VALID (min_latency));
+ g_return_if_fail (max_latency >= min_latency);
+
+ SRC_LOCK (self);
+ if (self->priv->sub_latency_min != min_latency) {
+ self->priv->sub_latency_min = min_latency;
+ changed = TRUE;
+ }
+ if (self->priv->sub_latency_max != max_latency) {
+ self->priv->sub_latency_max = max_latency;
+ changed = TRUE;
+ }
+
+ if (changed)
+ SRC_BROADCAST (self);
+ SRC_UNLOCK (self);
+
+ if (changed) {
+ gst_element_post_message (GST_ELEMENT_CAST (self),
+ gst_message_new_latency (GST_OBJECT_CAST (self)));
+ }
+}
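+
+/* A hedged sketch of the typical call site (hypothetical subclass code):
+ * an element that adds one frame of processing delay at 25 fps could report
+ * it once that is known, for example from its negotiated_src_caps() vmethod:
+ *
+ *   gst_aggregator_set_latency (GST_AGGREGATOR (self),
+ *       40 * GST_MSECOND, 40 * GST_MSECOND);
+ */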
+
+/**
+ * gst_aggregator_get_buffer_pool:
+ * @self: a #GstAggregator
+ *
+ * Returns: (transfer full): the instance of the #GstBufferPool used
+ * by @self; unref it after usage
+ */
+GstBufferPool *
+gst_aggregator_get_buffer_pool (GstAggregator * self)
+{
+ GstBufferPool *pool;
+
+ g_return_val_if_fail (GST_IS_AGGREGATOR (self), NULL);
+
+ GST_OBJECT_LOCK (self);
+ pool = self->priv->pool;
+ if (pool)
+ gst_object_ref (pool);
+ GST_OBJECT_UNLOCK (self);
+
+ return pool;
+}
+
+/**
+ * gst_aggregator_get_allocator:
+ * @self: a #GstAggregator
+ * @allocator: (out) (allow-none) (transfer full): the #GstAllocator
+ * used
+ * @params: (out) (allow-none) (transfer full): the
+ * #GstAllocationParams of @allocator
+ *
+ * Lets #GstAggregator sub-classes get the memory @allocator
+ * acquired by the base class and its @params.
+ *
+ * Unref the @allocator after usage.
+ */
+void
+gst_aggregator_get_allocator (GstAggregator * self,
+ GstAllocator ** allocator, GstAllocationParams * params)
+{
+ g_return_if_fail (GST_IS_AGGREGATOR (self));
+
+ if (allocator)
+ *allocator = self->priv->allocator ?
+ gst_object_ref (self->priv->allocator) : NULL;
+
+ if (params)
+ *params = self->priv->allocation_params;
+}
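+
+/* An illustrative sketch (hypothetical subclass code) of using the
+ * negotiated allocation objects when producing an output buffer; "size"
+ * stands for the required output size, and the exact allocation strategy is
+ * up to the subclass.
+ *
+ *   GstAllocator *allocator = NULL;
+ *   GstAllocationParams params;
+ *   GstBufferPool *pool;
+ *   GstBuffer *outbuf = NULL;
+ *
+ *   gst_aggregator_get_allocator (self, &allocator, &params);
+ *   pool = gst_aggregator_get_buffer_pool (self);
+ *
+ *   if (pool)
+ *     gst_buffer_pool_acquire_buffer (pool, &outbuf, NULL);
+ *   else
+ *     outbuf = gst_buffer_new_allocate (allocator, size, &params);
+ *
+ *   if (allocator)
+ *     gst_object_unref (allocator);
+ *   if (pool)
+ *     gst_object_unref (pool);
+ */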
+
+/**
+ * gst_aggregator_simple_get_next_time:
+ * @self: A #GstAggregator
+ *
+ * This is a simple #GstAggregatorClass.get_next_time() implementation that
+ * just looks at the #GstSegment on the srcpad of the aggregator and bases
+ * the next time on the running time there.
+ *
+ * This is the desired behaviour in most cases where you have a live source
+ * and a deadline-based aggregator subclass.
+ *
+ * Returns: The running time based on the position
+ *
+ * Since: 1.16
+ */
+GstClockTime
+gst_aggregator_simple_get_next_time (GstAggregator * self)
+{
+ GstClockTime next_time;
+ GstAggregatorPad *srcpad = GST_AGGREGATOR_PAD (self->srcpad);
+ GstSegment *segment = &srcpad->segment;
+
+ GST_OBJECT_LOCK (self);
+ if (segment->position == -1 || segment->position < segment->start)
+ next_time = segment->start;
+ else
+ next_time = segment->position;
+
+ if (segment->stop != -1 && next_time > segment->stop)
+ next_time = segment->stop;
+
+ next_time = gst_segment_to_running_time (segment, GST_FORMAT_TIME, next_time);
+ GST_OBJECT_UNLOCK (self);
+
+ return next_time;
+}
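+
+/* A small sketch (hypothetical subclass code) of opting into this default
+ * from a subclass class_init; MyAggClass and my_agg_aggregate are
+ * placeholder names.
+ *
+ *   static void
+ *   my_agg_class_init (MyAggClass * klass)
+ *   {
+ *     GstAggregatorClass *agg_class = GST_AGGREGATOR_CLASS (klass);
+ *
+ *     agg_class->get_next_time = gst_aggregator_simple_get_next_time;
+ *     agg_class->aggregate = my_agg_aggregate;
+ *   }
+ */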
diff --git a/utils/gst-plugin-fallbackswitch/src/base/gstaggregator.h b/utils/gst-plugin-fallbackswitch/src/base/gstaggregator.h
new file mode 100644
index 000000000..b8f4216e1
--- /dev/null
+++ b/utils/gst-plugin-fallbackswitch/src/base/gstaggregator.h
@@ -0,0 +1,393 @@
+/* GStreamer aggregator base class
+ * Copyright (C) 2014 Mathieu Duponchelle <mathieu.duponchelle@opencreed.com>
+ * Copyright (C) 2014 Thibault Saunier <tsaunier@gnome.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __GST_AGGREGATOR_H__
+#define __GST_AGGREGATOR_H__
+
+#include <gst/gst.h>
+
+G_BEGIN_DECLS
+
+/**************************
+ * GstAggregator Structs *
+ *************************/
+
+typedef struct _GstAggregator GstAggregator;
+typedef struct _GstAggregatorPrivate GstAggregatorPrivate;
+typedef struct _GstAggregatorClass GstAggregatorClass;
+
+/************************
+ * GstAggregatorPad API *
+ ***********************/
+
+#define GST_TYPE_AGGREGATOR_PAD (gst_aggregator_pad_get_type())
+#define GST_AGGREGATOR_PAD(obj) (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_AGGREGATOR_PAD, GstAggregatorPad))
+#define GST_AGGREGATOR_PAD_CAST(obj) ((GstAggregatorPad *)(obj))
+#define GST_AGGREGATOR_PAD_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_AGGREGATOR_PAD, GstAggregatorPadClass))
+#define GST_AGGREGATOR_PAD_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS ((obj),GST_TYPE_AGGREGATOR_PAD, GstAggregatorPadClass))
+#define GST_IS_AGGREGATOR_PAD(obj) (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_AGGREGATOR_PAD))
+#define GST_IS_AGGREGATOR_PAD_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_AGGREGATOR_PAD))
+
+/****************************
+ * GstAggregatorPad Structs *
+ ***************************/
+
+typedef struct _GstAggregatorPad GstAggregatorPad;
+typedef struct _GstAggregatorPadClass GstAggregatorPadClass;
+typedef struct _GstAggregatorPadPrivate GstAggregatorPadPrivate;
+
+/**
+ * GstAggregatorPad:
+ * @segment: last segment received.
+ *
+ * The implementation of the GstPad to use with #GstAggregator
+ *
+ * Since: 1.14
+ */
+struct _GstAggregatorPad
+{
+ GstPad parent;
+
+ /*< public >*/
+ /* Protected by the OBJECT_LOCK */
+ GstSegment segment;
+
+ /* < private > */
+ GstAggregatorPadPrivate * priv;
+
+ gpointer _gst_reserved[GST_PADDING];
+};
+
+/**
+ * GstAggregatorPadClass:
+ * @flush: Optional
+ * Called when the pad has received a flush stop, this is the place
+ * to flush any information specific to the pad, it allows for individual
+ * pads to be flushed while others might not be.
+ * @skip_buffer: Optional
+ * Called before input buffers are queued in the pad, return %TRUE
+ * if the buffer should be skipped.
+ *
+ * Since: 1.14
+ */
+struct _GstAggregatorPadClass
+{
+ GstPadClass parent_class;
+
+ GstFlowReturn (*flush) (GstAggregatorPad * aggpad, GstAggregator * aggregator);
+ gboolean (*skip_buffer) (GstAggregatorPad * aggpad, GstAggregator * aggregator, GstBuffer * buffer);
+
+ /*< private >*/
+ gpointer _gst_reserved[GST_PADDING_LARGE];
+};
+
+GST_BASE_API
+GType gst_aggregator_pad_get_type (void);
+
+/****************************
+ * GstAggregatorPad methods *
+ ***************************/
+
+GST_BASE_API
+GstBuffer * gst_aggregator_pad_pop_buffer (GstAggregatorPad * pad);
+
+GST_BASE_API
+GstBuffer * gst_aggregator_pad_peek_buffer (GstAggregatorPad * pad);
+
+GST_BASE_API
+gboolean gst_aggregator_pad_drop_buffer (GstAggregatorPad * pad);
+
+GST_BASE_API
+gboolean gst_aggregator_pad_has_buffer (GstAggregatorPad * pad);
+
+GST_BASE_API
+gboolean gst_aggregator_pad_is_eos (GstAggregatorPad * pad);
+
+/*********************
+ * GstAggregator API *
+ ********************/
+
+#define GST_TYPE_AGGREGATOR (gst_aggregator_get_type())
+#define GST_AGGREGATOR(obj) (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_AGGREGATOR,GstAggregator))
+#define GST_AGGREGATOR_CAST(obj) ((GstAggregator *)(obj))
+#define GST_AGGREGATOR_CLASS(klass) (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_AGGREGATOR,GstAggregatorClass))
+#define GST_AGGREGATOR_GET_CLASS(obj) (G_TYPE_INSTANCE_GET_CLASS ((obj),GST_TYPE_AGGREGATOR,GstAggregatorClass))
+#define GST_IS_AGGREGATOR(obj) (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_AGGREGATOR))
+#define GST_IS_AGGREGATOR_CLASS(klass) (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_AGGREGATOR))
+
+#define GST_AGGREGATOR_FLOW_NEED_DATA GST_FLOW_CUSTOM_ERROR
+
+/**
+ * GstAggregator:
+ * @srcpad: the aggregator's source pad
+ *
+ * Aggregator base class object structure.
+ *
+ * Since: 1.14
+ */
+struct _GstAggregator
+{
+ GstElement parent;
+
+ /*< public >*/
+ GstPad * srcpad;
+
+ /*< private >*/
+ GstAggregatorPrivate * priv;
+
+ gpointer _gst_reserved[GST_PADDING_LARGE];
+};
+
+/**
+ * GstAggregatorClass:
+ * @flush: Optional.
+ * Called after a successful flushing seek, once all the flush
+ * stops have been received. Flush pad-specific data in
+ * #GstAggregatorPad->flush.
+ * @clip: Optional.
+ * Called when a buffer is received on a sink pad, the task of
+ * clipping it and translating it to the current segment falls
+ * on the subclass. The function should use the segment of data
+ * and the negotiated media type on the pad to perform
+ *                  clipping of the input buffer. This function takes ownership
+ *                  of @buf and should output a buffer, or return %NULL if the
+ *                  buffer should be dropped.
+ * @finish_buffer: Optional.
+ * Called when a subclass calls gst_aggregator_finish_buffer()
+ * from their aggregate function to push out a buffer.
+ * Subclasses can override this to modify or decorate buffers
+ * before they get pushed out. This function takes ownership
+ * of the buffer passed. Subclasses that override this method
+ * should always chain up to the parent class virtual method.
+ * @sink_event: Optional.
+ * Called when an event is received on a sink pad, the subclass
+ * should always chain up.
+ * @sink_query: Optional.
+ * Called when a query is received on a sink pad, the subclass
+ * should always chain up.
+ * @src_event: Optional.
+ * Called when an event is received on the src pad, the subclass
+ * should always chain up.
+ * @src_query: Optional.
+ * Called when a query is received on the src pad, the subclass
+ * should always chain up.
+ * @src_activate: Optional.
+ * Called when the src pad is activated, it will start/stop its
+ * pad task right after that call.
+ * @aggregate: Mandatory.
+ * Called when buffers are queued on all sinkpads. Classes
+ * should iterate the GstElement->sinkpads and peek or steal
+ * buffers from the #GstAggregatorPads. If the subclass returns
+ * GST_FLOW_EOS, sending of the eos event will be taken care
+ * of. Once / if a buffer has been constructed from the
+ * aggregated buffers, the subclass should call _finish_buffer.
+ * @stop: Optional.
+ * Called when the element goes from PAUSED to READY.
+ * The subclass should free all resources and reset its state.
+ * @start: Optional.
+ * Called when the element goes from READY to PAUSED.
+ * The subclass should get ready to process
+ * aggregated buffers.
+ * @get_next_time: Optional.
+ * Called when the element needs to know the running time of the next
+ * rendered buffer for live pipelines. This causes deadline
+ * based aggregation to occur. Defaults to returning
+ * GST_CLOCK_TIME_NONE causing the element to wait for buffers
+ * on all sink pads before aggregating.
+ * @create_new_pad: Optional.
+ *                  Called when a new pad needs to be created. Allows subclasses that
+ * don't have a single sink pad template to provide a pad based
+ * on the provided information.
+ * @update_src_caps: Lets subclasses update the #GstCaps representing
+ * the src pad caps before usage. The result should end up
+ * in @ret. Return %GST_AGGREGATOR_FLOW_NEED_DATA to indicate that the
+ * element needs more information (caps, a buffer, etc) to
+ * choose the correct caps. Should return ANY caps if the
+ *                  stream has no caps at all.
+ * @fixate_src_caps: Optional.
+ * Fixate and return the src pad caps provided. The function takes
+ * ownership of @caps and returns a fixated version of
+ * @caps. @caps is not guaranteed to be writable.
+ * @negotiated_src_caps: Optional.
+ * Notifies subclasses what caps format has been negotiated
+ * @decide_allocation: Optional.
+ * Allows the subclass to influence the allocation choices.
+ * Setup the allocation parameters for allocating output
+ * buffers. The passed in query contains the result of the
+ * downstream allocation query.
+ * @propose_allocation: Optional.
+ * Allows the subclass to handle the allocation query from upstream.
+ * @negotiate: Optional.
+ * Negotiate the caps with the peer (Since: 1.18).
+ * @sink_event_pre_queue: Optional.
+ * Called when an event is received on a sink pad before queueing up
+ * serialized events. The subclass should always chain up (Since: 1.18).
+ * @sink_query_pre_queue: Optional.
+ * Called when a query is received on a sink pad before queueing up
+ * serialized queries. The subclass should always chain up (Since: 1.18).
+ *
+ * The aggregator base class will handle in a thread-safe way all manner of
+ * concurrent flushes, seeks, pad additions and removals, leaving to the
+ * subclass the responsibility of clipping buffers, and aggregating buffers in
+ * the way the implementor sees fit.
+ *
+ * It will also take care of event ordering (stream-start, segment, eos).
+ *
+ * Basically, a simple implementation will override @aggregate, and call
+ * _finish_buffer from inside that function.
+ *
+ * Since: 1.14
+ */
+struct _GstAggregatorClass {
+ GstElementClass parent_class;
+
+ GstFlowReturn (*flush) (GstAggregator * aggregator);
+
+ GstBuffer * (*clip) (GstAggregator * aggregator,
+ GstAggregatorPad * aggregator_pad,
+ GstBuffer * buf);
+
+ GstFlowReturn (*finish_buffer) (GstAggregator * aggregator,
+ GstBuffer * buffer);
+
+ /* sinkpads virtual methods */
+ gboolean (*sink_event) (GstAggregator * aggregator,
+ GstAggregatorPad * aggregator_pad,
+ GstEvent * event);
+
+ gboolean (*sink_query) (GstAggregator * aggregator,
+ GstAggregatorPad * aggregator_pad,
+ GstQuery * query);
+
+ /* srcpad virtual methods */
+ gboolean (*src_event) (GstAggregator * aggregator,
+ GstEvent * event);
+
+ gboolean (*src_query) (GstAggregator * aggregator,
+ GstQuery * query);
+
+ gboolean (*src_activate) (GstAggregator * aggregator,
+ GstPadMode mode,
+ gboolean active);
+
+ GstFlowReturn (*aggregate) (GstAggregator * aggregator,
+ gboolean timeout);
+
+ gboolean (*stop) (GstAggregator * aggregator);
+
+ gboolean (*start) (GstAggregator * aggregator);
+
+ GstClockTime (*get_next_time) (GstAggregator * aggregator);
+
+ GstAggregatorPad * (*create_new_pad) (GstAggregator * self,
+ GstPadTemplate * templ,
+ const gchar * req_name,
+ const GstCaps * caps);
+
+ /**
+ * GstAggregatorClass::update_src_caps:
+ * @ret: (out) (allow-none):
+ */
+ GstFlowReturn (*update_src_caps) (GstAggregator * self,
+ GstCaps * caps,
+ GstCaps ** ret);
+ GstCaps * (*fixate_src_caps) (GstAggregator * self,
+ GstCaps * caps);
+ gboolean (*negotiated_src_caps) (GstAggregator * self,
+ GstCaps * caps);
+ gboolean (*decide_allocation) (GstAggregator * self,
+ GstQuery * query);
+ gboolean (*propose_allocation) (GstAggregator * self,
+ GstAggregatorPad * pad,
+ GstQuery * decide_query,
+ GstQuery * query);
+
+ gboolean (*negotiate) (GstAggregator * self);
+
+ gboolean (*sink_event_pre_queue) (GstAggregator * aggregator,
+ GstAggregatorPad * aggregator_pad,
+ GstEvent * event);
+
+ gboolean (*sink_query_pre_queue) (GstAggregator * aggregator,
+ GstAggregatorPad * aggregator_pad,
+ GstQuery * query);
+
+ /*< private >*/
+ gpointer _gst_reserved[GST_PADDING_LARGE-3];
+};
+
+/************************************
+ * GstAggregator convenience macros *
+ ***********************************/
+
+/**
+ * GST_AGGREGATOR_SRC_PAD:
+ * @agg: a #GstAggregator
+ *
+ * Convenience macro to access the source pad of #GstAggregator
+ *
+ * Since: 1.6
+ */
+#define GST_AGGREGATOR_SRC_PAD(agg) (((GstAggregator *)(agg))->srcpad)
+
+/*************************
+ * GstAggregator methods *
+ ************************/
+
+GST_BASE_API
+GstFlowReturn gst_aggregator_finish_buffer (GstAggregator * aggregator,
+ GstBuffer * buffer);
+
+GST_BASE_API
+void gst_aggregator_set_src_caps (GstAggregator * self,
+ GstCaps * caps);
+
+GST_BASE_API
+gboolean gst_aggregator_negotiate (GstAggregator * self);
+
+GST_BASE_API
+void gst_aggregator_set_latency (GstAggregator * self,
+ GstClockTime min_latency,
+ GstClockTime max_latency);
+
+GST_BASE_API
+GType gst_aggregator_get_type(void);
+
+GST_BASE_API
+GstClockTime gst_aggregator_get_latency (GstAggregator * self);
+
+GST_BASE_API
+GstBufferPool * gst_aggregator_get_buffer_pool (GstAggregator * self);
+
+GST_BASE_API
+void gst_aggregator_get_allocator (GstAggregator * self,
+ GstAllocator ** allocator,
+ GstAllocationParams * params);
+
+GST_BASE_API
+GstClockTime gst_aggregator_simple_get_next_time (GstAggregator * self);
+
+
+G_END_DECLS
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(GstAggregator, gst_object_unref)
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(GstAggregatorPad, gst_object_unref)
+
+#endif /* __GST_AGGREGATOR_H__ */
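The class documentation above notes that a simple implementation overrides aggregate() and calls gst_aggregator_finish_buffer() from inside it. Below is a minimal, hypothetical Rust sketch of that pattern using the vendored bindings; the pad name "sink_0", the trait context, and the imports are assumptions and are not part of the diff:

    // Hypothetical sketch of a trivial aggregate(): take one queued buffer
    // from a sink pad and push it out through finish_buffer().
    fn aggregate(
        &self,
        aggregator: &Aggregator,
        _timeout: bool,
    ) -> Result<gst::FlowSuccess, gst::FlowError> {
        let sinkpad = aggregator
            .get_static_pad("sink_0")
            .and_then(|pad| pad.downcast::<AggregatorPad>().ok())
            .ok_or(gst::FlowError::Error)?;

        // Returning Eos lets the base class take care of sending the EOS event.
        let buffer = sinkpad.pop_buffer().ok_or(gst::FlowError::Eos)?;

        aggregator.finish_buffer(buffer)
    }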
diff --git a/utils/gst-plugin-fallbackswitch/src/base/mod.rs b/utils/gst-plugin-fallbackswitch/src/base/mod.rs
new file mode 100644
index 000000000..2d0973560
--- /dev/null
+++ b/utils/gst-plugin-fallbackswitch/src/base/mod.rs
@@ -0,0 +1,27 @@
+#[allow(clippy::unreadable_literal)]
+#[allow(clippy::too_many_arguments)]
+#[allow(clippy::match_same_arms)]
+#[allow(clippy::type_complexity)]
+mod auto;
+pub use auto::*;
+
+mod utils;
+
+mod aggregator;
+mod aggregator_pad;
+
+pub mod prelude {
+ pub use glib::prelude::*;
+ pub use gst::prelude::*;
+
+ pub use super::aggregator::AggregatorExtManual;
+ pub use super::aggregator_pad::AggregatorPadExtManual;
+ pub use super::auto::traits::*;
+}
+
+pub mod subclass;
+
+mod sys;
+use sys as gst_base_sys;
+
+pub const AGGREGATOR_FLOW_NEED_DATA: gst::FlowError = gst::FlowError::CustomError;
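AGGREGATOR_FLOW_NEED_DATA mirrors GST_AGGREGATOR_FLOW_NEED_DATA from the C header: it is what update_src_caps() returns when the element cannot decide on output caps yet. A hypothetical sketch of such a vfunc (trait context assumed, not part of the diff):

    // Hypothetical sketch: ask the base class to wait for more data before
    // fixing the source caps.
    fn update_src_caps(
        &self,
        _aggregator: &Aggregator,
        caps: &gst::Caps,
    ) -> Result<gst::Caps, gst::FlowError> {
        if caps.is_any() {
            // Not enough information yet (nothing negotiated on the sink pads).
            return Err(AGGREGATOR_FLOW_NEED_DATA);
        }
        Ok(caps.clone())
    }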
diff --git a/utils/gst-plugin-fallbackswitch/src/base/subclass/aggregator.rs b/utils/gst-plugin-fallbackswitch/src/base/subclass/aggregator.rs
new file mode 100644
index 000000000..5f8e27444
--- /dev/null
+++ b/utils/gst-plugin-fallbackswitch/src/base/subclass/aggregator.rs
@@ -0,0 +1,1042 @@
+// Copyright (C) 2017-2019 Sebastian Dröge <sebastian@centricular.com>
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use libc;
+
+use super::super::gst_base_sys;
+use glib_sys;
+use gst_sys;
+
+use glib::translate::*;
+
+use glib::subclass::prelude::*;
+use gst;
+use gst::prelude::*;
+use gst::subclass::prelude::*;
+
+use std::ptr;
+
+use super::super::Aggregator;
+use super::super::AggregatorClass;
+use super::super::AggregatorPad;
+
+pub trait AggregatorImpl: AggregatorImplExt + ElementImpl + Send + Sync + 'static {
+ fn flush(&self, aggregator: &Aggregator) -> Result<gst::FlowSuccess, gst::FlowError> {
+ self.parent_flush(aggregator)
+ }
+
+ fn clip(
+ &self,
+ aggregator: &Aggregator,
+ aggregator_pad: &AggregatorPad,
+ buffer: gst::Buffer,
+ ) -> Option<gst::Buffer> {
+ self.parent_clip(aggregator, aggregator_pad, buffer)
+ }
+
+ fn finish_buffer(
+ &self,
+ aggregator: &Aggregator,
+ buffer: gst::Buffer,
+ ) -> Result<gst::FlowSuccess, gst::FlowError> {
+ self.parent_finish_buffer(aggregator, buffer)
+ }
+
+ fn sink_event(
+ &self,
+ aggregator: &Aggregator,
+ aggregator_pad: &AggregatorPad,
+ event: gst::Event,
+ ) -> bool {
+ self.parent_sink_event(aggregator, aggregator_pad, event)
+ }
+
+ fn sink_event_pre_queue(
+ &self,
+ aggregator: &Aggregator,
+ aggregator_pad: &AggregatorPad,
+ event: gst::Event,
+ ) -> bool {
+ self.parent_sink_event_pre_queue(aggregator, aggregator_pad, event)
+ }
+
+ fn sink_query(
+ &self,
+ aggregator: &Aggregator,
+ aggregator_pad: &AggregatorPad,
+ query: &mut gst::QueryRef,
+ ) -> bool {
+ self.parent_sink_query(aggregator, aggregator_pad, query)
+ }
+
+ fn sink_query_pre_queue(
+ &self,
+ aggregator: &Aggregator,
+ aggregator_pad: &AggregatorPad,
+ query: &mut gst::QueryRef,
+ ) -> bool {
+ self.parent_sink_query_pre_queue(aggregator, aggregator_pad, query)
+ }
+
+ fn src_event(&self, aggregator: &Aggregator, event: gst::Event) -> bool {
+ self.parent_src_event(aggregator, event)
+ }
+
+ fn src_query(&self, aggregator: &Aggregator, query: &mut gst::QueryRef) -> bool {
+ self.parent_src_query(aggregator, query)
+ }
+
+ fn src_activate(
+ &self,
+ aggregator: &Aggregator,
+ mode: gst::PadMode,
+ active: bool,
+ ) -> Result<(), gst::LoggableError> {
+ self.parent_src_activate(aggregator, mode, active)
+ }
+
+ fn aggregate(
+ &self,
+ aggregator: &Aggregator,
+ timeout: bool,
+ ) -> Result<gst::FlowSuccess, gst::FlowError>;
+
+ fn start(&self, aggregator: &Aggregator) -> Result<(), gst::ErrorMessage> {
+ self.parent_start(aggregator)
+ }
+
+ fn stop(&self, aggregator: &Aggregator) -> Result<(), gst::ErrorMessage> {
+ self.parent_stop(aggregator)
+ }
+
+ fn get_next_time(&self, aggregator: &Aggregator) -> gst::ClockTime {
+ self.parent_get_next_time(aggregator)
+ }
+
+ fn create_new_pad(
+ &self,
+ aggregator: &Aggregator,
+ templ: &gst::PadTemplate,
+ req_name: Option<&str>,
+ caps: Option<&gst::Caps>,
+ ) -> Option<AggregatorPad> {
+ self.parent_create_new_pad(aggregator, templ, req_name, caps)
+ }
+
+ fn update_src_caps(
+ &self,
+ aggregator: &Aggregator,
+ caps: &gst::Caps,
+ ) -> Result<gst::Caps, gst::FlowError> {
+ self.parent_update_src_caps(aggregator, caps)
+ }
+
+ fn fixate_src_caps(&self, aggregator: &Aggregator, caps: gst::Caps) -> gst::Caps {
+ self.parent_fixate_src_caps(aggregator, caps)
+ }
+
+ fn negotiated_src_caps(
+ &self,
+ aggregator: &Aggregator,
+ caps: &gst::Caps,
+ ) -> Result<(), gst::LoggableError> {
+ self.parent_negotiated_src_caps(aggregator, caps)
+ }
+
+ fn negotiate(&self, aggregator: &Aggregator) -> bool {
+ self.parent_negotiate(aggregator)
+ }
+}
+
+pub trait AggregatorImplExt {
+ fn parent_flush(&self, aggregator: &Aggregator) -> Result<gst::FlowSuccess, gst::FlowError>;
+
+ fn parent_clip(
+ &self,
+ aggregator: &Aggregator,
+ aggregator_pad: &AggregatorPad,
+ buffer: gst::Buffer,
+ ) -> Option<gst::Buffer>;
+
+ fn parent_finish_buffer(
+ &self,
+ aggregator: &Aggregator,
+ buffer: gst::Buffer,
+ ) -> Result<gst::FlowSuccess, gst::FlowError>;
+
+ fn parent_sink_event(
+ &self,
+ aggregator: &Aggregator,
+ aggregator_pad: &AggregatorPad,
+ event: gst::Event,
+ ) -> bool;
+
+ fn parent_sink_event_pre_queue(
+ &self,
+ aggregator: &Aggregator,
+ aggregator_pad: &AggregatorPad,
+ event: gst::Event,
+ ) -> bool;
+
+ fn parent_sink_query(
+ &self,
+ aggregator: &Aggregator,
+ aggregator_pad: &AggregatorPad,
+ query: &mut gst::QueryRef,
+ ) -> bool;
+
+ fn parent_sink_query_pre_queue(
+ &self,
+ aggregator: &Aggregator,
+ aggregator_pad: &AggregatorPad,
+ query: &mut gst::QueryRef,
+ ) -> bool;
+
+ fn parent_src_event(&self, aggregator: &Aggregator, event: gst::Event) -> bool;
+
+ fn parent_src_query(&self, aggregator: &Aggregator, query: &mut gst::QueryRef) -> bool;
+
+ fn parent_src_activate(
+ &self,
+ aggregator: &Aggregator,
+ mode: gst::PadMode,
+ active: bool,
+ ) -> Result<(), gst::LoggableError>;
+
+ fn parent_aggregate(
+ &self,
+ aggregator: &Aggregator,
+ timeout: bool,
+ ) -> Result<gst::FlowSuccess, gst::FlowError>;
+
+ fn parent_start(&self, aggregator: &Aggregator) -> Result<(), gst::ErrorMessage>;
+
+ fn parent_stop(&self, aggregator: &Aggregator) -> Result<(), gst::ErrorMessage>;
+
+ fn parent_get_next_time(&self, aggregator: &Aggregator) -> gst::ClockTime;
+
+ fn parent_create_new_pad(
+ &self,
+ aggregator: &Aggregator,
+ templ: &gst::PadTemplate,
+ req_name: Option<&str>,
+ caps: Option<&gst::Caps>,
+ ) -> Option<AggregatorPad>;
+
+ fn parent_update_src_caps(
+ &self,
+ aggregator: &Aggregator,
+ caps: &gst::Caps,
+ ) -> Result<gst::Caps, gst::FlowError>;
+
+ fn parent_fixate_src_caps(&self, aggregator: &Aggregator, caps: gst::Caps) -> gst::Caps;
+
+ fn parent_negotiated_src_caps(
+ &self,
+ aggregator: &Aggregator,
+ caps: &gst::Caps,
+ ) -> Result<(), gst::LoggableError>;
+
+ fn parent_negotiate(&self, aggregator: &Aggregator) -> bool;
+}
+
+impl<T: AggregatorImpl + ObjectImpl> AggregatorImplExt for T {
+ fn parent_flush(&self, aggregator: &Aggregator) -> Result<gst::FlowSuccess, gst::FlowError> {
+ unsafe {
+ let data = self.get_type_data();
+ let parent_class =
+ data.as_ref().get_parent_class() as *mut gst_base_sys::GstAggregatorClass;
+ (*parent_class)
+ .flush
+ .map(|f| from_glib(f(aggregator.to_glib_none().0)))
+ .unwrap_or(gst::FlowReturn::Ok)
+ .into_result()
+ }
+ }
+
+ fn parent_clip(
+ &self,
+ aggregator: &Aggregator,
+ aggregator_pad: &AggregatorPad,
+ buffer: gst::Buffer,
+ ) -> Option<gst::Buffer> {
+ unsafe {
+ let data = self.get_type_data();
+ let parent_class =
+ data.as_ref().get_parent_class() as *mut gst_base_sys::GstAggregatorClass;
+ match (*parent_class).clip {
+ None => Some(buffer),
+ Some(ref func) => from_glib_full(func(
+ aggregator.to_glib_none().0,
+ aggregator_pad.to_glib_none().0,
+ buffer.into_ptr(),
+ )),
+ }
+ }
+ }
+
+ fn parent_finish_buffer(
+ &self,
+ aggregator: &Aggregator,
+ buffer: gst::Buffer,
+ ) -> Result<gst::FlowSuccess, gst::FlowError> {
+ unsafe {
+ let data = self.get_type_data();
+ let parent_class =
+ data.as_ref().get_parent_class() as *mut gst_base_sys::GstAggregatorClass;
+ let f = (*parent_class)
+ .finish_buffer
+ .expect("Missing parent function `finish_buffer`");
+ gst::FlowReturn::from_glib(f(aggregator.to_glib_none().0, buffer.into_ptr()))
+ .into_result()
+ }
+ }
+
+ fn parent_sink_event(
+ &self,
+ aggregator: &Aggregator,
+ aggregator_pad: &AggregatorPad,
+ event: gst::Event,
+ ) -> bool {
+ unsafe {
+ let data = self.get_type_data();
+ let parent_class =
+ data.as_ref().get_parent_class() as *mut gst_base_sys::GstAggregatorClass;
+ let f = (*parent_class)
+ .sink_event
+ .expect("Missing parent function `sink_event`");
+ from_glib(f(
+ aggregator.to_glib_none().0,
+ aggregator_pad.to_glib_none().0,
+ event.into_ptr(),
+ ))
+ }
+ }
+
+ fn parent_sink_event_pre_queue(
+ &self,
+ aggregator: &Aggregator,
+ aggregator_pad: &AggregatorPad,
+ event: gst::Event,
+ ) -> bool {
+ unsafe {
+ let data = self.get_type_data();
+ let parent_class =
+ data.as_ref().get_parent_class() as *mut gst_base_sys::GstAggregatorClass;
+ let f = (*parent_class)
+ .sink_event_pre_queue
+ .expect("Missing parent function `sink_event_pre_queue`");
+ from_glib(f(
+ aggregator.to_glib_none().0,
+ aggregator_pad.to_glib_none().0,
+ event.into_ptr(),
+ ))
+ }
+ }
+
+ fn parent_sink_query(
+ &self,
+ aggregator: &Aggregator,
+ aggregator_pad: &AggregatorPad,
+ query: &mut gst::QueryRef,
+ ) -> bool {
+ unsafe {
+ let data = self.get_type_data();
+ let parent_class =
+ data.as_ref().get_parent_class() as *mut gst_base_sys::GstAggregatorClass;
+ let f = (*parent_class)
+ .sink_query
+ .expect("Missing parent function `sink_query`");
+ from_glib(f(
+ aggregator.to_glib_none().0,
+ aggregator_pad.to_glib_none().0,
+ query.as_mut_ptr(),
+ ))
+ }
+ }
+
+ fn parent_sink_query_pre_queue(
+ &self,
+ aggregator: &Aggregator,
+ aggregator_pad: &AggregatorPad,
+ query: &mut gst::QueryRef,
+ ) -> bool {
+ unsafe {
+ let data = self.get_type_data();
+ let parent_class =
+ data.as_ref().get_parent_class() as *mut gst_base_sys::GstAggregatorClass;
+ let f = (*parent_class)
+ .sink_query_pre_queue
+ .expect("Missing parent function `sink_query`");
+ from_glib(f(
+ aggregator.to_glib_none().0,
+ aggregator_pad.to_glib_none().0,
+ query.as_mut_ptr(),
+ ))
+ }
+ }
+
+ fn parent_src_event(&self, aggregator: &Aggregator, event: gst::Event) -> bool {
+ unsafe {
+ let data = self.get_type_data();
+ let parent_class =
+ data.as_ref().get_parent_class() as *mut gst_base_sys::GstAggregatorClass;
+ let f = (*parent_class)
+ .src_event
+ .expect("Missing parent function `src_event`");
+ from_glib(f(aggregator.to_glib_none().0, event.into_ptr()))
+ }
+ }
+
+ fn parent_src_query(&self, aggregator: &Aggregator, query: &mut gst::QueryRef) -> bool {
+ unsafe {
+ let data = self.get_type_data();
+ let parent_class =
+ data.as_ref().get_parent_class() as *mut gst_base_sys::GstAggregatorClass;
+ let f = (*parent_class)
+ .src_query
+ .expect("Missing parent function `src_query`");
+ from_glib(f(aggregator.to_glib_none().0, query.as_mut_ptr()))
+ }
+ }
+
+ fn parent_src_activate(
+ &self,
+ aggregator: &Aggregator,
+ mode: gst::PadMode,
+ active: bool,
+ ) -> Result<(), gst::LoggableError> {
+ unsafe {
+ let data = self.get_type_data();
+ let parent_class =
+ data.as_ref().get_parent_class() as *mut gst_base_sys::GstAggregatorClass;
+ match (*parent_class).src_activate {
+ None => Ok(()),
+ Some(f) => gst_result_from_gboolean!(
+ f(
+ aggregator.to_glib_none().0,
+ mode.to_glib(),
+ active.to_glib()
+ ),
+ gst::CAT_RUST,
+ "Parent function `src_activate` failed"
+ ),
+ }
+ }
+ }
+
+ fn parent_aggregate(
+ &self,
+ aggregator: &Aggregator,
+ timeout: bool,
+ ) -> Result<gst::FlowSuccess, gst::FlowError> {
+ unsafe {
+ let data = self.get_type_data();
+ let parent_class =
+ data.as_ref().get_parent_class() as *mut gst_base_sys::GstAggregatorClass;
+ let f = (*parent_class)
+ .aggregate
+ .expect("Missing parent function `aggregate`");
+ gst::FlowReturn::from_glib(f(aggregator.to_glib_none().0, timeout.to_glib()))
+ .into_result()
+ }
+ }
+
+ fn parent_start(&self, aggregator: &Aggregator) -> Result<(), gst::ErrorMessage> {
+ unsafe {
+ let data = self.get_type_data();
+ let parent_class =
+ data.as_ref().get_parent_class() as *mut gst_base_sys::GstAggregatorClass;
+ (*parent_class)
+ .start
+ .map(|f| {
+ if from_glib(f(aggregator.to_glib_none().0)) {
+ Ok(())
+ } else {
+ Err(gst_error_msg!(
+ gst::CoreError::Failed,
+ ["Parent function `start` failed"]
+ ))
+ }
+ })
+ .unwrap_or(Ok(()))
+ }
+ }
+
+ fn parent_stop(&self, aggregator: &Aggregator) -> Result<(), gst::ErrorMessage> {
+ unsafe {
+ let data = self.get_type_data();
+ let parent_class =
+ data.as_ref().get_parent_class() as *mut gst_base_sys::GstAggregatorClass;
+ (*parent_class)
+ .stop
+ .map(|f| {
+ if from_glib(f(aggregator.to_glib_none().0)) {
+ Ok(())
+ } else {
+ Err(gst_error_msg!(
+ gst::CoreError::Failed,
+ ["Parent function `stop` failed"]
+ ))
+ }
+ })
+ .unwrap_or(Ok(()))
+ }
+ }
+
+ fn parent_get_next_time(&self, aggregator: &Aggregator) -> gst::ClockTime {
+ unsafe {
+ let data = self.get_type_data();
+ let parent_class =
+ data.as_ref().get_parent_class() as *mut gst_base_sys::GstAggregatorClass;
+ (*parent_class)
+ .get_next_time
+ .map(|f| from_glib(f(aggregator.to_glib_none().0)))
+ .unwrap_or(gst::CLOCK_TIME_NONE)
+ }
+ }
+
+ fn parent_create_new_pad(
+ &self,
+ aggregator: &Aggregator,
+ templ: &gst::PadTemplate,
+ req_name: Option<&str>,
+ caps: Option<&gst::Caps>,
+ ) -> Option<AggregatorPad> {
+ unsafe {
+ let data = self.get_type_data();
+ let parent_class =
+ data.as_ref().get_parent_class() as *mut gst_base_sys::GstAggregatorClass;
+ let f = (*parent_class)
+ .create_new_pad
+ .expect("Missing parent function `create_new_pad`");
+ from_glib_full(f(
+ aggregator.to_glib_none().0,
+ templ.to_glib_none().0,
+ req_name.to_glib_none().0,
+ caps.to_glib_none().0,
+ ))
+ }
+ }
+
+ fn parent_update_src_caps(
+ &self,
+ aggregator: &Aggregator,
+ caps: &gst::Caps,
+ ) -> Result<gst::Caps, gst::FlowError> {
+ unsafe {
+ let data = self.get_type_data();
+ let parent_class =
+ data.as_ref().get_parent_class() as *mut gst_base_sys::GstAggregatorClass;
+ let f = (*parent_class)
+ .update_src_caps
+ .expect("Missing parent function `update_src_caps`");
+
+ let mut out_caps = ptr::null_mut();
+ gst::FlowReturn::from_glib(f(
+ aggregator.to_glib_none().0,
+ caps.as_mut_ptr(),
+ &mut out_caps,
+ ))
+ .into_result_value(|| from_glib_full(out_caps))
+ }
+ }
+
+ fn parent_fixate_src_caps(&self, aggregator: &Aggregator, caps: gst::Caps) -> gst::Caps {
+ unsafe {
+ let data = self.get_type_data();
+ let parent_class =
+ data.as_ref().get_parent_class() as *mut gst_base_sys::GstAggregatorClass;
+
+ let f = (*parent_class)
+ .fixate_src_caps
+ .expect("Missing parent function `fixate_src_caps`");
+ from_glib_full(f(aggregator.to_glib_none().0, caps.into_ptr()))
+ }
+ }
+
+ fn parent_negotiated_src_caps(
+ &self,
+ aggregator: &Aggregator,
+ caps: &gst::Caps,
+ ) -> Result<(), gst::LoggableError> {
+ unsafe {
+ let data = self.get_type_data();
+ let parent_class =
+ data.as_ref().get_parent_class() as *mut gst_base_sys::GstAggregatorClass;
+ (*parent_class)
+ .negotiated_src_caps
+ .map(|f| {
+ gst_result_from_gboolean!(
+ f(aggregator.to_glib_none().0, caps.to_glib_none().0),
+ gst::CAT_RUST,
+ "Parent function `negotiated_src_caps` failed"
+ )
+ })
+ .unwrap_or(Ok(()))
+ }
+ }
+
+ fn parent_negotiate(&self, aggregator: &Aggregator) -> bool {
+ unsafe {
+ let data = self.get_type_data();
+ let parent_class =
+ data.as_ref().get_parent_class() as *mut gst_base_sys::GstAggregatorClass;
+ (*parent_class)
+ .negotiate
+ .map(|f| from_glib(f(aggregator.to_glib_none().0)))
+ .unwrap_or(true)
+ }
+ }
+}
+
+unsafe impl<T: ObjectSubclass + AggregatorImpl> IsSubclassable<T> for AggregatorClass
+where
+ <T as ObjectSubclass>::Instance: PanicPoison,
+{
+ fn override_vfuncs(&mut self) {
+ <gst::ElementClass as IsSubclassable<T>>::override_vfuncs(self);
+ unsafe {
+ let klass = &mut *(self as *mut Self as *mut gst_base_sys::GstAggregatorClass);
+ klass.flush = Some(aggregator_flush::<T>);
+ klass.clip = Some(aggregator_clip::<T>);
+ klass.finish_buffer = Some(aggregator_finish_buffer::<T>);
+ klass.sink_event = Some(aggregator_sink_event::<T>);
+ klass.sink_event_pre_queue = Some(aggregator_sink_event_pre_queue::<T>);
+ klass.sink_query = Some(aggregator_sink_query::<T>);
+ klass.sink_query_pre_queue = Some(aggregator_sink_query_pre_queue::<T>);
+ klass.src_event = Some(aggregator_src_event::<T>);
+ klass.src_query = Some(aggregator_src_query::<T>);
+ klass.src_activate = Some(aggregator_src_activate::<T>);
+ klass.aggregate = Some(aggregator_aggregate::<T>);
+ klass.start = Some(aggregator_start::<T>);
+ klass.stop = Some(aggregator_stop::<T>);
+ klass.get_next_time = Some(aggregator_get_next_time::<T>);
+ klass.create_new_pad = Some(aggregator_create_new_pad::<T>);
+ klass.update_src_caps = Some(aggregator_update_src_caps::<T>);
+ klass.fixate_src_caps = Some(aggregator_fixate_src_caps::<T>);
+ klass.negotiated_src_caps = Some(aggregator_negotiated_src_caps::<T>);
+ klass.negotiate = Some(aggregator_negotiate::<T>);
+ }
+ }
+}
+
+unsafe extern "C" fn aggregator_flush<T: ObjectSubclass>(
+ ptr: *mut gst_base_sys::GstAggregator,
+) -> gst_sys::GstFlowReturn
+where
+ T: AggregatorImpl,
+ T::Instance: PanicPoison,
+{
+ let instance = &*(ptr as *mut T::Instance);
+ let imp = instance.get_impl();
+ let wrap: Borrowed<Aggregator> = from_glib_borrow(ptr);
+
+ gst_panic_to_error!(&wrap, &instance.panicked(), gst::FlowReturn::Error, {
+ imp.flush(&wrap).into()
+ })
+ .to_glib()
+}
+
+unsafe extern "C" fn aggregator_clip<T: ObjectSubclass>(
+ ptr: *mut gst_base_sys::GstAggregator,
+ aggregator_pad: *mut gst_base_sys::GstAggregatorPad,
+ buffer: *mut gst_sys::GstBuffer,
+) -> *mut gst_sys::GstBuffer
+where
+ T: AggregatorImpl,
+ T::Instance: PanicPoison,
+{
+ let instance = &*(ptr as *mut T::Instance);
+ let imp = instance.get_impl();
+ let wrap: Borrowed<Aggregator> = from_glib_borrow(ptr);
+
+ let ret = gst_panic_to_error!(&wrap, &instance.panicked(), None, {
+ imp.clip(
+ &wrap,
+ &from_glib_borrow(aggregator_pad),
+ from_glib_full(buffer),
+ )
+ });
+
+ ret.map(|r| r.into_ptr()).unwrap_or(ptr::null_mut())
+}
+
+unsafe extern "C" fn aggregator_finish_buffer<T: ObjectSubclass>(
+ ptr: *mut gst_base_sys::GstAggregator,
+ buffer: *mut gst_sys::GstBuffer,
+) -> gst_sys::GstFlowReturn
+where
+ T: AggregatorImpl,
+ T::Instance: PanicPoison,
+{
+ let instance = &*(ptr as *mut T::Instance);
+ let imp = instance.get_impl();
+ let wrap: Borrowed<Aggregator> = from_glib_borrow(ptr);
+
+ gst_panic_to_error!(&wrap, &instance.panicked(), gst::FlowReturn::Error, {
+ imp.finish_buffer(&wrap, from_glib_full(buffer)).into()
+ })
+ .to_glib()
+}
+
+unsafe extern "C" fn aggregator_sink_event<T: ObjectSubclass>(
+ ptr: *mut gst_base_sys::GstAggregator,
+ aggregator_pad: *mut gst_base_sys::GstAggregatorPad,
+ event: *mut gst_sys::GstEvent,
+) -> glib_sys::gboolean
+where
+ T: AggregatorImpl,
+ T::Instance: PanicPoison,
+{
+ let instance = &*(ptr as *mut T::Instance);
+ let imp = instance.get_impl();
+ let wrap: Borrowed<Aggregator> = from_glib_borrow(ptr);
+
+ gst_panic_to_error!(&wrap, &instance.panicked(), false, {
+ imp.sink_event(
+ &wrap,
+ &from_glib_borrow(aggregator_pad),
+ from_glib_full(event),
+ )
+ })
+ .to_glib()
+}
+
+unsafe extern "C" fn aggregator_sink_event_pre_queue<T: ObjectSubclass>(
+ ptr: *mut gst_base_sys::GstAggregator,
+ aggregator_pad: *mut gst_base_sys::GstAggregatorPad,
+ event: *mut gst_sys::GstEvent,
+) -> glib_sys::gboolean
+where
+ T: AggregatorImpl,
+ T::Instance: PanicPoison,
+{
+ let instance = &*(ptr as *mut T::Instance);
+ let imp = instance.get_impl();
+ let wrap: Borrowed<Aggregator> = from_glib_borrow(ptr);
+
+ gst_panic_to_error!(&wrap, &instance.panicked(), false, {
+ imp.sink_event_pre_queue(
+ &wrap,
+ &from_glib_borrow(aggregator_pad),
+ from_glib_full(event),
+ )
+ })
+ .to_glib()
+}
+
+unsafe extern "C" fn aggregator_sink_query<T: ObjectSubclass>(
+ ptr: *mut gst_base_sys::GstAggregator,
+ aggregator_pad: *mut gst_base_sys::GstAggregatorPad,
+ query: *mut gst_sys::GstQuery,
+) -> glib_sys::gboolean
+where
+ T: AggregatorImpl,
+ T::Instance: PanicPoison,
+{
+ let instance = &*(ptr as *mut T::Instance);
+ let imp = instance.get_impl();
+ let wrap: Borrowed<Aggregator> = from_glib_borrow(ptr);
+
+ gst_panic_to_error!(&wrap, &instance.panicked(), false, {
+ imp.sink_query(
+ &wrap,
+ &from_glib_borrow(aggregator_pad),
+ gst::QueryRef::from_mut_ptr(query),
+ )
+ })
+ .to_glib()
+}
+
+unsafe extern "C" fn aggregator_sink_query_pre_queue<T: ObjectSubclass>(
+ ptr: *mut gst_base_sys::GstAggregator,
+ aggregator_pad: *mut gst_base_sys::GstAggregatorPad,
+ query: *mut gst_sys::GstQuery,
+) -> glib_sys::gboolean
+where
+ T: AggregatorImpl,
+ T::Instance: PanicPoison,
+{
+ let instance = &*(ptr as *mut T::Instance);
+ let imp = instance.get_impl();
+ let wrap: Borrowed<Aggregator> = from_glib_borrow(ptr);
+
+ gst_panic_to_error!(&wrap, &instance.panicked(), false, {
+ imp.sink_query_pre_queue(
+ &wrap,
+ &from_glib_borrow(aggregator_pad),
+ gst::QueryRef::from_mut_ptr(query),
+ )
+ })
+ .to_glib()
+}
+
+unsafe extern "C" fn aggregator_src_event<T: ObjectSubclass>(
+ ptr: *mut gst_base_sys::GstAggregator,
+ event: *mut gst_sys::GstEvent,
+) -> glib_sys::gboolean
+where
+ T: AggregatorImpl,
+ T::Instance: PanicPoison,
+{
+ let instance = &*(ptr as *mut T::Instance);
+ let imp = instance.get_impl();
+ let wrap: Borrowed<Aggregator> = from_glib_borrow(ptr);
+
+ gst_panic_to_error!(&wrap, &instance.panicked(), false, {
+ imp.src_event(&wrap, from_glib_full(event))
+ })
+ .to_glib()
+}
+
+unsafe extern "C" fn aggregator_src_query<T: ObjectSubclass>(
+ ptr: *mut gst_base_sys::GstAggregator,
+ query: *mut gst_sys::GstQuery,
+) -> glib_sys::gboolean
+where
+ T: AggregatorImpl,
+ T::Instance: PanicPoison,
+{
+ let instance = &*(ptr as *mut T::Instance);
+ let imp = instance.get_impl();
+ let wrap: Borrowed<Aggregator> = from_glib_borrow(ptr);
+
+ gst_panic_to_error!(&wrap, &instance.panicked(), false, {
+ imp.src_query(&wrap, gst::QueryRef::from_mut_ptr(query))
+ })
+ .to_glib()
+}
+
+unsafe extern "C" fn aggregator_src_activate<T: ObjectSubclass>(
+ ptr: *mut gst_base_sys::GstAggregator,
+ mode: gst_sys::GstPadMode,
+ active: glib_sys::gboolean,
+) -> glib_sys::gboolean
+where
+ T: AggregatorImpl,
+ T::Instance: PanicPoison,
+{
+ let instance = &*(ptr as *mut T::Instance);
+ let imp = instance.get_impl();
+ let wrap: Borrowed<Aggregator> = from_glib_borrow(ptr);
+
+ gst_panic_to_error!(&wrap, &instance.panicked(), false, {
+ match imp.src_activate(&wrap, from_glib(mode), from_glib(active)) {
+ Ok(()) => true,
+ Err(err) => {
+ err.log_with_object(&*wrap);
+ false
+ }
+ }
+ })
+ .to_glib()
+}
+
+unsafe extern "C" fn aggregator_aggregate<T: ObjectSubclass>(
+ ptr: *mut gst_base_sys::GstAggregator,
+ timeout: glib_sys::gboolean,
+) -> gst_sys::GstFlowReturn
+where
+ T: AggregatorImpl,
+ T::Instance: PanicPoison,
+{
+ let instance = &*(ptr as *mut T::Instance);
+ let imp = instance.get_impl();
+ let wrap: Borrowed<Aggregator> = from_glib_borrow(ptr);
+
+ gst_panic_to_error!(&wrap, &instance.panicked(), gst::FlowReturn::Error, {
+ imp.aggregate(&wrap, from_glib(timeout)).into()
+ })
+ .to_glib()
+}
+
+unsafe extern "C" fn aggregator_start<T: ObjectSubclass>(
+ ptr: *mut gst_base_sys::GstAggregator,
+) -> glib_sys::gboolean
+where
+ T: AggregatorImpl,
+ T::Instance: PanicPoison,
+{
+ let instance = &*(ptr as *mut T::Instance);
+ let imp = instance.get_impl();
+ let wrap: Borrowed<Aggregator> = from_glib_borrow(ptr);
+
+ gst_panic_to_error!(&wrap, &instance.panicked(), false, {
+ match imp.start(&wrap) {
+ Ok(()) => true,
+ Err(err) => {
+ wrap.post_error_message(&err);
+ false
+ }
+ }
+ })
+ .to_glib()
+}
+
+unsafe extern "C" fn aggregator_stop<T: ObjectSubclass>(
+ ptr: *mut gst_base_sys::GstAggregator,
+) -> glib_sys::gboolean
+where
+ T: AggregatorImpl,
+ T::Instance: PanicPoison,
+{
+ let instance = &*(ptr as *mut T::Instance);
+ let imp = instance.get_impl();
+ let wrap: Borrowed<Aggregator> = from_glib_borrow(ptr);
+
+ gst_panic_to_error!(&wrap, &instance.panicked(), false, {
+ match imp.stop(&wrap) {
+ Ok(()) => true,
+ Err(err) => {
+ wrap.post_error_message(&err);
+ false
+ }
+ }
+ })
+ .to_glib()
+}
+
+unsafe extern "C" fn aggregator_get_next_time<T: ObjectSubclass>(
+ ptr: *mut gst_base_sys::GstAggregator,
+) -> gst_sys::GstClockTime
+where
+ T: AggregatorImpl,
+ T::Instance: PanicPoison,
+{
+ let instance = &*(ptr as *mut T::Instance);
+ let imp = instance.get_impl();
+ let wrap: Borrowed<Aggregator> = from_glib_borrow(ptr);
+
+ gst_panic_to_error!(&wrap, &instance.panicked(), gst::CLOCK_TIME_NONE, {
+ imp.get_next_time(&wrap)
+ })
+ .to_glib()
+}
+
+unsafe extern "C" fn aggregator_create_new_pad<T: ObjectSubclass>(
+ ptr: *mut gst_base_sys::GstAggregator,
+ templ: *mut gst_sys::GstPadTemplate,
+ req_name: *const libc::c_char,
+ caps: *const gst_sys::GstCaps,
+) -> *mut gst_base_sys::GstAggregatorPad
+where
+ T: AggregatorImpl,
+ T::Instance: PanicPoison,
+{
+ let instance = &*(ptr as *mut T::Instance);
+ let imp = instance.get_impl();
+ let wrap: Borrowed<Aggregator> = from_glib_borrow(ptr);
+
+ gst_panic_to_error!(&wrap, &instance.panicked(), None, {
+ let req_name: Option<String> = from_glib_none(req_name);
+
+ // FIXME: Easier way to convert Option<String> to Option<&str>?
+ let mut _tmp = String::new();
+ let req_name = match req_name {
+ Some(n) => {
+ _tmp = n;
+ Some(_tmp.as_str())
+ }
+ None => None,
+ };
+
+ imp.create_new_pad(
+ &wrap,
+ &from_glib_borrow(templ),
+ req_name,
+ Option::<gst::Caps>::from_glib_borrow(caps)
+ .as_ref()
+ .as_ref(),
+ )
+ })
+ .to_glib_full()
+}
+
+unsafe extern "C" fn aggregator_update_src_caps<T: ObjectSubclass>(
+ ptr: *mut gst_base_sys::GstAggregator,
+ caps: *mut gst_sys::GstCaps,
+ res: *mut *mut gst_sys::GstCaps,
+) -> gst_sys::GstFlowReturn
+where
+ T: AggregatorImpl,
+ T::Instance: PanicPoison,
+{
+ let instance = &*(ptr as *mut T::Instance);
+ let imp = instance.get_impl();
+ let wrap: Borrowed<Aggregator> = from_glib_borrow(ptr);
+
+ *res = ptr::null_mut();
+
+ gst_panic_to_error!(&wrap, &instance.panicked(), gst::FlowReturn::Error, {
+ match imp.update_src_caps(&wrap, &from_glib_borrow(caps)) {
+ Ok(res_caps) => {
+ *res = res_caps.into_ptr();
+ gst::FlowReturn::Ok
+ }
+ Err(err) => err.into(),
+ }
+ })
+ .to_glib()
+}
+
+unsafe extern "C" fn aggregator_fixate_src_caps<T: ObjectSubclass>(
+ ptr: *mut gst_base_sys::GstAggregator,
+ caps: *mut gst_sys::GstCaps,
+) -> *mut gst_sys::GstCaps
+where
+ T: AggregatorImpl,
+ T::Instance: PanicPoison,
+{
+ let instance = &*(ptr as *mut T::Instance);
+ let imp = instance.get_impl();
+ let wrap: Borrowed<Aggregator> = from_glib_borrow(ptr);
+
+ gst_panic_to_error!(&wrap, &instance.panicked(), gst::Caps::new_empty(), {
+ imp.fixate_src_caps(&wrap, from_glib_full(caps))
+ })
+ .into_ptr()
+}
+
+unsafe extern "C" fn aggregator_negotiated_src_caps<T: ObjectSubclass>(
+ ptr: *mut gst_base_sys::GstAggregator,
+ caps: *mut gst_sys::GstCaps,
+) -> glib_sys::gboolean
+where
+ T: AggregatorImpl,
+ T::Instance: PanicPoison,
+{
+ let instance = &*(ptr as *mut T::Instance);
+ let imp = instance.get_impl();
+ let wrap: Borrowed<Aggregator> = from_glib_borrow(ptr);
+
+ gst_panic_to_error!(&wrap, &instance.panicked(), false, {
+ match imp.negotiated_src_caps(&wrap, &from_glib_borrow(caps)) {
+ Ok(()) => true,
+ Err(err) => {
+ err.log_with_object(&*wrap);
+ false
+ }
+ }
+ })
+ .to_glib()
+}
+
+unsafe extern "C" fn aggregator_negotiate<T: ObjectSubclass>(
+ ptr: *mut gst_base_sys::GstAggregator,
+) -> glib_sys::gboolean
+where
+ T: AggregatorImpl,
+ T::Instance: PanicPoison,
+{
+ let instance = &*(ptr as *mut T::Instance);
+ let imp = instance.get_impl();
+ let wrap: Borrowed<Aggregator> = from_glib_borrow(ptr);
+
+ gst_panic_to_error!(&wrap, &instance.panicked(), false, { imp.negotiate(&wrap) }).to_glib()
+}
diff --git a/utils/gst-plugin-fallbackswitch/src/base/subclass/aggregator_pad.rs b/utils/gst-plugin-fallbackswitch/src/base/subclass/aggregator_pad.rs
new file mode 100644
index 000000000..c09c4a36d
--- /dev/null
+++ b/utils/gst-plugin-fallbackswitch/src/base/subclass/aggregator_pad.rs
@@ -0,0 +1,147 @@
+// Copyright (C) 2018 Sebastian Dröge <sebastian@centricular.com>
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use super::super::gst_base_sys;
+use glib_sys;
+use gst_sys;
+
+use glib::translate::*;
+use gst;
+
+use glib::subclass::prelude::*;
+use gst::subclass::prelude::*;
+
+use super::super::Aggregator;
+use super::super::AggregatorPad;
+use super::super::AggregatorPadClass;
+
+pub trait AggregatorPadImpl: AggregatorPadImplExt + PadImpl + Send + Sync + 'static {
+ fn flush(
+ &self,
+ aggregator_pad: &AggregatorPad,
+ aggregator: &Aggregator,
+ ) -> Result<gst::FlowSuccess, gst::FlowError> {
+ self.parent_flush(aggregator_pad, aggregator)
+ }
+
+ fn skip_buffer(
+ &self,
+ aggregator_pad: &AggregatorPad,
+ aggregator: &Aggregator,
+ buffer: &gst::Buffer,
+ ) -> bool {
+ self.parent_skip_buffer(aggregator_pad, aggregator, buffer)
+ }
+}
+
+pub trait AggregatorPadImplExt {
+ fn parent_flush(
+ &self,
+ aggregator_pad: &AggregatorPad,
+ aggregator: &Aggregator,
+ ) -> Result<gst::FlowSuccess, gst::FlowError>;
+
+ fn parent_skip_buffer(
+ &self,
+ aggregator_pad: &AggregatorPad,
+ aggregator: &Aggregator,
+ buffer: &gst::Buffer,
+ ) -> bool;
+}
+
+impl<T: AggregatorPadImpl + ObjectImpl> AggregatorPadImplExt for T {
+ fn parent_flush(
+ &self,
+ aggregator_pad: &AggregatorPad,
+ aggregator: &Aggregator,
+ ) -> Result<gst::FlowSuccess, gst::FlowError> {
+ unsafe {
+ let data = self.get_type_data();
+ let parent_class =
+ data.as_ref().get_parent_class() as *mut gst_base_sys::GstAggregatorPadClass;
+ (*parent_class)
+ .flush
+ .map(|f| {
+ from_glib(f(
+ aggregator_pad.to_glib_none().0,
+ aggregator.to_glib_none().0,
+ ))
+ })
+ .unwrap_or(gst::FlowReturn::Ok)
+ .into_result()
+ }
+ }
+
+ fn parent_skip_buffer(
+ &self,
+ aggregator_pad: &AggregatorPad,
+ aggregator: &Aggregator,
+ buffer: &gst::Buffer,
+ ) -> bool {
+ unsafe {
+ let data = self.get_type_data();
+ let parent_class =
+ data.as_ref().get_parent_class() as *mut gst_base_sys::GstAggregatorPadClass;
+ (*parent_class)
+ .skip_buffer
+ .map(|f| {
+ from_glib(f(
+ aggregator_pad.to_glib_none().0,
+ aggregator.to_glib_none().0,
+ buffer.to_glib_none().0,
+ ))
+ })
+ .unwrap_or(false)
+ }
+ }
+}
+unsafe impl<T: ObjectSubclass + AggregatorPadImpl> IsSubclassable<T> for AggregatorPadClass {
+ fn override_vfuncs(&mut self) {
+ <gst::PadClass as IsSubclassable<T>>::override_vfuncs(self);
+ unsafe {
+ let klass = &mut *(self as *mut Self as *mut gst_base_sys::GstAggregatorPadClass);
+ klass.flush = Some(aggregator_pad_flush::<T>);
+ klass.skip_buffer = Some(aggregator_pad_skip_buffer::<T>);
+ }
+ }
+}
+
+unsafe extern "C" fn aggregator_pad_flush<T: ObjectSubclass>(
+ ptr: *mut gst_base_sys::GstAggregatorPad,
+ aggregator: *mut gst_base_sys::GstAggregator,
+) -> gst_sys::GstFlowReturn
+where
+ T: AggregatorPadImpl,
+{
+ let instance = &*(ptr as *mut T::Instance);
+ let imp = instance.get_impl();
+ let wrap: Borrowed<AggregatorPad> = from_glib_borrow(ptr);
+
+ let res: gst::FlowReturn = imp.flush(&wrap, &from_glib_borrow(aggregator)).into();
+ res.to_glib()
+}
+
+unsafe extern "C" fn aggregator_pad_skip_buffer<T: ObjectSubclass>(
+ ptr: *mut gst_base_sys::GstAggregatorPad,
+ aggregator: *mut gst_base_sys::GstAggregator,
+ buffer: *mut gst_sys::GstBuffer,
+) -> glib_sys::gboolean
+where
+ T: AggregatorPadImpl,
+{
+ let instance = &*(ptr as *mut T::Instance);
+ let imp = instance.get_impl();
+ let wrap: Borrowed<AggregatorPad> = from_glib_borrow(ptr);
+
+ imp.skip_buffer(
+ &wrap,
+ &from_glib_borrow(aggregator),
+ &from_glib_borrow(buffer),
+ )
+ .to_glib()
+}
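The skip_buffer() vfunc wrapped above lets a pad subclass reject buffers before they are queued. A hypothetical sketch (trait context and imports assumed, not part of the diff) that skips GAP-flagged buffers:

    // Hypothetical sketch: never queue buffers that only mark a gap in the stream.
    fn skip_buffer(
        &self,
        _aggregator_pad: &AggregatorPad,
        _aggregator: &Aggregator,
        buffer: &gst::Buffer,
    ) -> bool {
        buffer.get_flags().contains(gst::BufferFlags::GAP)
    }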
diff --git a/utils/gst-plugin-fallbackswitch/src/base/subclass/mod.rs b/utils/gst-plugin-fallbackswitch/src/base/subclass/mod.rs
new file mode 100644
index 000000000..319dc1311
--- /dev/null
+++ b/utils/gst-plugin-fallbackswitch/src/base/subclass/mod.rs
@@ -0,0 +1,17 @@
+// Copyright (C) 2016-2018 Sebastian Dröge <sebastian@centricular.com>
+// 2016 Luis de Bethencourt <luisbg@osg.samsung.com>
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![allow(clippy::cast_ptr_alignment)]
+
+pub mod aggregator;
+pub mod aggregator_pad;
+
+pub mod prelude {
+ pub use super::aggregator::{AggregatorImpl, AggregatorImplExt};
+ pub use super::aggregator_pad::{AggregatorPadImpl, AggregatorPadImplExt};
+}
diff --git a/utils/gst-plugin-fallbackswitch/src/base/sys.rs b/utils/gst-plugin-fallbackswitch/src/base/sys.rs
new file mode 100644
index 000000000..ac3e8ffcd
--- /dev/null
+++ b/utils/gst-plugin-fallbackswitch/src/base/sys.rs
@@ -0,0 +1,235 @@
+#![allow(non_camel_case_types, non_upper_case_globals, non_snake_case)]
+#![allow(
+ clippy::approx_constant,
+ clippy::type_complexity,
+ clippy::unreadable_literal
+)]
+
+extern crate gstreamer_sys as gst;
+
+#[allow(unused_imports)]
+use libc::{
+ c_char, c_double, c_float, c_int, c_long, c_short, c_uchar, c_uint, c_ulong, c_ushort, c_void,
+ intptr_t, size_t, ssize_t, time_t, uintptr_t, FILE,
+};
+
+#[allow(unused_imports)]
+use glib_sys::{gboolean, gconstpointer, gpointer, GType};
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct GstAggregatorClass {
+ pub parent_class: gst::GstElementClass,
+ pub flush: Option<unsafe extern "C" fn(*mut GstAggregator) -> gst::GstFlowReturn>,
+ pub clip: Option<
+ unsafe extern "C" fn(
+ *mut GstAggregator,
+ *mut GstAggregatorPad,
+ *mut gst::GstBuffer,
+ ) -> *mut gst::GstBuffer,
+ >,
+ pub finish_buffer:
+ Option<unsafe extern "C" fn(*mut GstAggregator, *mut gst::GstBuffer) -> gst::GstFlowReturn>,
+ pub sink_event: Option<
+ unsafe extern "C" fn(
+ *mut GstAggregator,
+ *mut GstAggregatorPad,
+ *mut gst::GstEvent,
+ ) -> gboolean,
+ >,
+ pub sink_query: Option<
+ unsafe extern "C" fn(
+ *mut GstAggregator,
+ *mut GstAggregatorPad,
+ *mut gst::GstQuery,
+ ) -> gboolean,
+ >,
+ pub src_event: Option<unsafe extern "C" fn(*mut GstAggregator, *mut gst::GstEvent) -> gboolean>,
+ pub src_query: Option<unsafe extern "C" fn(*mut GstAggregator, *mut gst::GstQuery) -> gboolean>,
+ pub src_activate:
+ Option<unsafe extern "C" fn(*mut GstAggregator, gst::GstPadMode, gboolean) -> gboolean>,
+ pub aggregate: Option<unsafe extern "C" fn(*mut GstAggregator, gboolean) -> gst::GstFlowReturn>,
+ pub stop: Option<unsafe extern "C" fn(*mut GstAggregator) -> gboolean>,
+ pub start: Option<unsafe extern "C" fn(*mut GstAggregator) -> gboolean>,
+ pub get_next_time: Option<unsafe extern "C" fn(*mut GstAggregator) -> gst::GstClockTime>,
+ pub create_new_pad: Option<
+ unsafe extern "C" fn(
+ *mut GstAggregator,
+ *mut gst::GstPadTemplate,
+ *const c_char,
+ *const gst::GstCaps,
+ ) -> *mut GstAggregatorPad,
+ >,
+ pub update_src_caps: Option<
+ unsafe extern "C" fn(
+ *mut GstAggregator,
+ *mut gst::GstCaps,
+ *mut *mut gst::GstCaps,
+ ) -> gst::GstFlowReturn,
+ >,
+ pub fixate_src_caps:
+ Option<unsafe extern "C" fn(*mut GstAggregator, *mut gst::GstCaps) -> *mut gst::GstCaps>,
+ pub negotiated_src_caps:
+ Option<unsafe extern "C" fn(*mut GstAggregator, *mut gst::GstCaps) -> gboolean>,
+ pub decide_allocation:
+ Option<unsafe extern "C" fn(*mut GstAggregator, *mut gst::GstQuery) -> gboolean>,
+ pub propose_allocation: Option<
+ unsafe extern "C" fn(
+ *mut GstAggregator,
+ *mut GstAggregatorPad,
+ *mut gst::GstQuery,
+ *mut gst::GstQuery,
+ ) -> gboolean,
+ >,
+ pub negotiate: Option<unsafe extern "C" fn(*mut GstAggregator) -> gboolean>,
+ pub sink_event_pre_queue: Option<
+ unsafe extern "C" fn(
+ *mut GstAggregator,
+ *mut GstAggregatorPad,
+ *mut gst::GstEvent,
+ ) -> gboolean,
+ >,
+ pub sink_query_pre_queue: Option<
+ unsafe extern "C" fn(
+ *mut GstAggregator,
+ *mut GstAggregatorPad,
+ *mut gst::GstQuery,
+ ) -> gboolean,
+ >,
+ pub _gst_reserved: [gpointer; 17],
+}
+
+impl ::std::fmt::Debug for GstAggregatorClass {
+ fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
+ f.debug_struct(&format!("GstAggregatorClass @ {:?}", self as *const _))
+ .field("parent_class", &self.parent_class)
+ .field("flush", &self.flush)
+ .field("clip", &self.clip)
+ .field("finish_buffer", &self.finish_buffer)
+ .field("sink_event", &self.sink_event)
+ .field("sink_query", &self.sink_query)
+ .field("src_event", &self.src_event)
+ .field("src_query", &self.src_query)
+ .field("src_activate", &self.src_activate)
+ .field("aggregate", &self.aggregate)
+ .field("stop", &self.stop)
+ .field("start", &self.start)
+ .field("get_next_time", &self.get_next_time)
+ .field("create_new_pad", &self.create_new_pad)
+ .field("update_src_caps", &self.update_src_caps)
+ .field("fixate_src_caps", &self.fixate_src_caps)
+ .field("negotiated_src_caps", &self.negotiated_src_caps)
+ .field("decide_allocation", &self.decide_allocation)
+ .field("propose_allocation", &self.propose_allocation)
+ .finish()
+ }
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct GstAggregatorPadClass {
+ pub parent_class: gst::GstPadClass,
+ pub flush: Option<
+ unsafe extern "C" fn(*mut GstAggregatorPad, *mut GstAggregator) -> gst::GstFlowReturn,
+ >,
+ pub skip_buffer: Option<
+ unsafe extern "C" fn(
+ *mut GstAggregatorPad,
+ *mut GstAggregator,
+ *mut gst::GstBuffer,
+ ) -> gboolean,
+ >,
+ pub _gst_reserved: [gpointer; 20],
+}
+
+impl ::std::fmt::Debug for GstAggregatorPadClass {
+ fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
+ f.debug_struct(&format!("GstAggregatorPadClass @ {:?}", self as *const _))
+ .field("parent_class", &self.parent_class)
+ .field("flush", &self.flush)
+ .field("skip_buffer", &self.skip_buffer)
+ .finish()
+ }
+}
+
+#[repr(C)]
+pub struct _GstAggregatorPadPrivate(c_void);
+
+pub type GstAggregatorPadPrivate = *mut _GstAggregatorPadPrivate;
+
+#[repr(C)]
+pub struct _GstAggregatorPrivate(c_void);
+
+pub type GstAggregatorPrivate = *mut _GstAggregatorPrivate;
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct GstAggregator {
+ pub parent: gst::GstElement,
+ pub srcpad: *mut gst::GstPad,
+ pub priv_: *mut GstAggregatorPrivate,
+ pub _gst_reserved: [gpointer; 20],
+}
+
+impl ::std::fmt::Debug for GstAggregator {
+ fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
+ f.debug_struct(&format!("GstAggregator @ {:?}", self as *const _))
+ .field("parent", &self.parent)
+ .field("srcpad", &self.srcpad)
+ .finish()
+ }
+}
+
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct GstAggregatorPad {
+ pub parent: gst::GstPad,
+ pub segment: gst::GstSegment,
+ pub priv_: *mut GstAggregatorPadPrivate,
+ pub _gst_reserved: [gpointer; 4],
+}
+
+impl ::std::fmt::Debug for GstAggregatorPad {
+ fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
+ f.debug_struct(&format!("GstAggregatorPad @ {:?}", self as *const _))
+ .field("parent", &self.parent)
+ .field("segment", &self.segment)
+ .finish()
+ }
+}
+
+extern "C" {
+ //=========================================================================
+ // GstAggregator
+ //=========================================================================
+ pub fn gst_aggregator_get_type() -> GType;
+ pub fn gst_aggregator_finish_buffer(
+ aggregator: *mut GstAggregator,
+ buffer: *mut gst::GstBuffer,
+ ) -> gst::GstFlowReturn;
+ pub fn gst_aggregator_negotiate(aggregator: *mut GstAggregator) -> gboolean;
+ pub fn gst_aggregator_get_allocator(
+ self_: *mut GstAggregator,
+ allocator: *mut *mut gst::GstAllocator,
+ params: *mut gst::GstAllocationParams,
+ );
+ pub fn gst_aggregator_get_buffer_pool(self_: *mut GstAggregator) -> *mut gst::GstBufferPool;
+ pub fn gst_aggregator_get_latency(self_: *mut GstAggregator) -> gst::GstClockTime;
+ pub fn gst_aggregator_set_latency(
+ self_: *mut GstAggregator,
+ min_latency: gst::GstClockTime,
+ max_latency: gst::GstClockTime,
+ );
+ pub fn gst_aggregator_set_src_caps(self_: *mut GstAggregator, caps: *mut gst::GstCaps);
+ pub fn gst_aggregator_simple_get_next_time(self_: *mut GstAggregator) -> gst::GstClockTime;
+
+ //=========================================================================
+ // GstAggregatorPad
+ //=========================================================================
+ pub fn gst_aggregator_pad_get_type() -> GType;
+ pub fn gst_aggregator_pad_drop_buffer(pad: *mut GstAggregatorPad) -> gboolean;
+ pub fn gst_aggregator_pad_has_buffer(pad: *mut GstAggregatorPad) -> gboolean;
+ pub fn gst_aggregator_pad_is_eos(pad: *mut GstAggregatorPad) -> gboolean;
+ pub fn gst_aggregator_pad_peek_buffer(pad: *mut GstAggregatorPad) -> *mut gst::GstBuffer;
+ pub fn gst_aggregator_pad_pop_buffer(pad: *mut GstAggregatorPad) -> *mut gst::GstBuffer;
+}
diff --git a/utils/gst-plugin-fallbackswitch/src/base/utils.rs b/utils/gst-plugin-fallbackswitch/src/base/utils.rs
new file mode 100644
index 000000000..dca8cf691
--- /dev/null
+++ b/utils/gst-plugin-fallbackswitch/src/base/utils.rs
@@ -0,0 +1,30 @@
+// Copyright (C) 2017 Sebastian Dröge <sebastian@centricular.com>
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use glib::translate::mut_override;
+use glib_sys;
+
+pub struct MutexGuard<'a>(&'a glib_sys::GMutex);
+
+impl<'a> MutexGuard<'a> {
+ #[allow(clippy::trivially_copy_pass_by_ref)]
+ pub fn lock(mutex: &'a glib_sys::GMutex) -> Self {
+ unsafe {
+ glib_sys::g_mutex_lock(mut_override(mutex));
+ }
+ MutexGuard(mutex)
+ }
+}
+
+impl<'a> Drop for MutexGuard<'a> {
+ fn drop(&mut self) {
+ unsafe {
+ glib_sys::g_mutex_unlock(mut_override(self.0));
+ }
+ }
+}
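MutexGuard is a small RAII wrapper around a raw GMutex: lock() acquires the mutex and the Drop impl releases it, so the lock cannot be leaked on an early return. A hypothetical usage sketch (the surrounding function is an assumption, not part of the diff):

    // Hypothetical sketch: hold the lock only for the duration of this scope.
    fn with_state_locked(mutex: &glib_sys::GMutex) {
        let _guard = MutexGuard::lock(mutex);
        // ... access the state protected by `mutex` here; it is unlocked on drop.
    }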
diff --git a/utils/gst-plugin-fallbackswitch/src/fallbackswitch.rs b/utils/gst-plugin-fallbackswitch/src/fallbackswitch.rs
new file mode 100644
index 000000000..ddf4cca43
--- /dev/null
+++ b/utils/gst-plugin-fallbackswitch/src/fallbackswitch.rs
@@ -0,0 +1,794 @@
+// Copyright (C) 2019 Sebastian Dröge <sebastian@centricular.com>
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Library General Public
+// License as published by the Free Software Foundation; either
+// version 2 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Library General Public License for more details.
+//
+// You should have received a copy of the GNU Library General Public
+// License along with this library; if not, write to the
+// Free Software Foundation, Inc., 51 Franklin Street, Suite 500,
+// Boston, MA 02110-1335, USA.
+
+#[cfg(not(feature = "v1_18"))]
+use super::base as gst_base;
+use glib;
+use glib::prelude::*;
+use glib::subclass;
+use glib::subclass::prelude::*;
+use gst;
+use gst::prelude::*;
+use gst::subclass::prelude::*;
+use gst_audio;
+#[cfg(feature = "v1_18")]
+use gst_base;
+use gst_base::prelude::*;
+use gst_base::subclass::prelude::*;
+use gst_video;
+
+use std::sync::{Mutex, RwLock};
+
+struct FallbackSwitch {
+ sinkpad: gst_base::AggregatorPad,
+ fallback_sinkpad: RwLock<Option<gst_base::AggregatorPad>>,
+ active_sinkpad: Mutex<gst::Pad>,
+ output_state: Mutex<OutputState>,
+ pad_states: RwLock<PadStates>,
+ settings: Mutex<Settings>,
+}
+
+lazy_static! {
+ static ref CAT: gst::DebugCategory = gst::DebugCategory::new(
+ "fallbackswitch",
+ gst::DebugColorFlags::empty(),
+ Some("Fallback switch Element"),
+ );
+}
+
+#[derive(Debug)]
+struct OutputState {
+ last_sinkpad_time: gst::ClockTime,
+}
+
+#[derive(Debug, Default)]
+struct PadStates {
+ sinkpad: PadState,
+ fallback_sinkpad: Option<PadState>,
+}
+
+#[derive(Debug, Default)]
+struct PadState {
+ caps: Option<gst::Caps>,
+ audio_info: Option<gst_audio::AudioInfo>,
+ video_info: Option<gst_video::VideoInfo>,
+}
+
+const DEFAULT_TIMEOUT: u64 = 5 * gst::SECOND_VAL;
+
+#[derive(Debug, Clone)]
+struct Settings {
+ timeout: gst::ClockTime,
+}
+
+impl Default for OutputState {
+ fn default() -> Self {
+ OutputState {
+ last_sinkpad_time: gst::CLOCK_TIME_NONE,
+ }
+ }
+}
+
+impl Default for Settings {
+ fn default() -> Self {
+ Settings {
+ timeout: DEFAULT_TIMEOUT.into(),
+ }
+ }
+}
+
+static PROPERTIES: [subclass::Property; 2] = [
+ subclass::Property("timeout", |name| {
+ glib::ParamSpec::uint64(
+ name,
+ "Timeout",
+ "Timeout in nanoseconds",
+ 0,
+ std::u64::MAX,
+ DEFAULT_TIMEOUT,
+ glib::ParamFlags::READWRITE,
+ )
+ }),
+ subclass::Property("active-pad", |name| {
+ glib::ParamSpec::object(
+ name,
+ "Active Pad",
+ "Currently active pad",
+ gst::Pad::static_type(),
+ glib::ParamFlags::READABLE,
+ )
+ }),
+];
+
+impl FallbackSwitch {
+ fn handle_main_buffer(
+ &self,
+ agg: &gst_base::Aggregator,
+ state: &mut OutputState,
+ mut buffer: gst::Buffer,
+ fallback_sinkpad: Option<&gst_base::AggregatorPad>,
+ ) -> Result<Option<(gst::Buffer, gst::Caps, bool)>, gst::FlowError> {
+ // If we got a buffer on the sinkpad, just handle it
+ gst_debug!(CAT, obj: agg, "Got buffer on sinkpad {:?}", buffer);
+
+ if buffer.get_pts().is_none() {
+ gst_error!(CAT, obj: agg, "Only buffers with PTS supported");
+ return Err(gst::FlowError::Error);
+ }
+
+ let segment = self
+ .sinkpad
+ .get_segment()
+ .downcast::<gst::ClockTime>()
+ .map_err(|_| {
+ gst_error!(CAT, obj: agg, "Only TIME segments supported");
+ gst::FlowError::Error
+ })?;
+
+ {
+ // FIXME: This will not work correctly for negative DTS
+ let buffer = buffer.make_mut();
+ buffer.set_pts(segment.to_running_time(buffer.get_pts()));
+ buffer.set_dts(segment.to_running_time(buffer.get_dts()));
+ }
+
+ let mut active_sinkpad = self.active_sinkpad.lock().unwrap();
+ let pad_change = &*active_sinkpad != self.sinkpad.upcast_ref::<gst::Pad>();
+ if pad_change {
+ if buffer.get_flags().contains(gst::BufferFlags::DELTA_UNIT) {
+ gst_info!(
+ CAT,
+ obj: agg,
+ "Can't change back to sinkpad, waiting for keyframe"
+ );
+ self.sinkpad.push_event(
+ gst_video::new_upstream_force_key_unit_event()
+ .all_headers(true)
+ .build(),
+ );
+ return Ok(None);
+ }
+
+ gst_info!(CAT, obj: agg, "Active pad changed to sinkpad");
+ *active_sinkpad = self.sinkpad.clone().upcast();
+ }
+ drop(active_sinkpad);
+
+ state.last_sinkpad_time = segment.to_running_time(buffer.get_dts_or_pts());
+
+ // Drop all older buffers from the fallback sinkpad
+ if let Some(fallback_sinkpad) = fallback_sinkpad {
+ let fallback_segment = fallback_sinkpad
+ .get_segment()
+ .downcast::<gst::ClockTime>()
+ .map_err(|_| {
+ gst_error!(CAT, obj: agg, "Only TIME segments supported");
+ gst::FlowError::Error
+ })?;
+
+ while let Some(fallback_buffer) = fallback_sinkpad.peek_buffer() {
+ let fallback_pts = fallback_buffer.get_dts_or_pts();
+ if fallback_pts.is_none()
+ || fallback_segment.to_running_time(fallback_pts) <= state.last_sinkpad_time
+ {
+ gst_debug!(
+ CAT,
+ obj: agg,
+ "Dropping fallback buffer {:?}",
+ fallback_buffer
+ );
+ fallback_sinkpad.drop_buffer();
+ } else {
+ break;
+ }
+ }
+ }
+
+ let pad_states = self.pad_states.read().unwrap();
+ let active_caps = pad_states.sinkpad.caps.as_ref().unwrap().clone();
+ drop(pad_states);
+
+ Ok(Some((buffer, active_caps, pad_change)))
+ }
+
+ fn get_fallback_buffer(
+ &self,
+ agg: &gst_base::Aggregator,
+ state: &mut OutputState,
+ settings: &Settings,
+ fallback_sinkpad: &gst_base::AggregatorPad,
+ ) -> Result<(gst::Buffer, gst::Caps, bool), gst::FlowError> {
+ // If we have a fallback sinkpad and the timeout has expired, try to get a fallback
+ // buffer from it, dropping all buffers that are too old in the process
+ loop {
+ let mut buffer = fallback_sinkpad
+ .pop_buffer()
+ .ok_or(gst_base::AGGREGATOR_FLOW_NEED_DATA)?;
+
+ gst_debug!(CAT, obj: agg, "Got buffer on fallback sinkpad {:?}", buffer);
+
+ if buffer.get_pts().is_none() {
+ gst_error!(CAT, obj: agg, "Only buffers with PTS supported");
+ return Err(gst::FlowError::Error);
+ }
+
+ let fallback_segment = fallback_sinkpad
+ .get_segment()
+ .downcast::<gst::ClockTime>()
+ .map_err(|_| {
+ gst_error!(CAT, obj: agg, "Only TIME segments supported");
+ gst::FlowError::Error
+ })?;
+ let running_time = fallback_segment.to_running_time(buffer.get_dts_or_pts());
+
+ {
+ // FIXME: This will not work correctly for negative DTS
+ let buffer = buffer.make_mut();
+ buffer.set_pts(fallback_segment.to_running_time(buffer.get_pts()));
+ buffer.set_dts(fallback_segment.to_running_time(buffer.get_dts()));
+ }
+
+ // If we never had a real buffer, initialize with the running time of the fallback
+ // sinkpad so that we still output fallback buffers after the timeout
+ if state.last_sinkpad_time.is_none() {
+ state.last_sinkpad_time = running_time;
+ }
+
+ // Get the next one if this one is before the timeout
+ if state.last_sinkpad_time + settings.timeout > running_time {
+ gst_debug!(
+ CAT,
+ obj: agg,
+ "Timeout not reached yet: {} + {} > {}",
+ state.last_sinkpad_time,
+ settings.timeout,
+ running_time
+ );
+
+ continue;
+ }
+
+ gst_debug!(
+ CAT,
+ obj: agg,
+ "Timeout reached: {} + {} <= {}",
+ state.last_sinkpad_time,
+ settings.timeout,
+ running_time
+ );
+
+ let mut active_sinkpad = self.active_sinkpad.lock().unwrap();
+ let pad_change = &*active_sinkpad != fallback_sinkpad.upcast_ref::<gst::Pad>();
+ if pad_change {
+ if buffer.get_flags().contains(gst::BufferFlags::DELTA_UNIT) {
+ gst_info!(
+ CAT,
+ obj: agg,
+ "Can't change to fallback sinkpad yet, waiting for keyframe"
+ );
+ fallback_sinkpad.push_event(
+ gst_video::new_upstream_force_key_unit_event()
+ .all_headers(true)
+ .build(),
+ );
+ continue;
+ }
+
+ gst_info!(CAT, obj: agg, "Active pad changed to fallback sinkpad");
+ *active_sinkpad = fallback_sinkpad.clone().upcast();
+ }
+ drop(active_sinkpad);
+
+ let pad_states = self.pad_states.read().unwrap();
+ let active_caps = match pad_states.fallback_sinkpad {
+ None => {
+ // This can happen if the pad is removed in the meantime,
+ // not a problem really
+ return Err(gst_base::AGGREGATOR_FLOW_NEED_DATA);
+ }
+ Some(ref fallback_sinkpad) => fallback_sinkpad.caps.as_ref().unwrap().clone(),
+ };
+ drop(pad_states);
+
+ break Ok((buffer, active_caps, pad_change));
+ }
+ }
+
+ fn get_next_buffer(
+ &self,
+ agg: &gst_base::Aggregator,
+ timeout: bool,
+ ) -> Result<(gst::Buffer, gst::Caps, bool), gst::FlowError> {
+ let settings = self.settings.lock().unwrap().clone();
+ let mut state = self.output_state.lock().unwrap();
+ let fallback_sinkpad = self.fallback_sinkpad.read().unwrap();
+
+ gst_debug!(CAT, obj: agg, "Aggregate called: timeout {}", timeout);
+
+ if let Some(buffer) = self.sinkpad.pop_buffer() {
+ if let Some(res) =
+ self.handle_main_buffer(agg, &mut *state, buffer, fallback_sinkpad.as_ref())?
+ {
+ return Ok(res);
+ }
+ } else if self.sinkpad.is_eos() {
+ gst_log!(CAT, obj: agg, "Sinkpad is EOS");
+ return Err(gst::FlowError::Eos);
+ }
+
+ if let (false, Some(_)) = (timeout, &*fallback_sinkpad) {
+ gst_debug!(CAT, obj: agg, "Have fallback sinkpad but no timeout yet");
+
+ Err(gst_base::AGGREGATOR_FLOW_NEED_DATA)
+ } else if let (true, Some(fallback_sinkpad)) = (timeout, &*fallback_sinkpad) {
+ self.get_fallback_buffer(agg, &mut *state, &settings, fallback_sinkpad)
+ } else {
+ // Otherwise there's not much we can do at this point
+ gst_debug!(
+ CAT,
+ obj: agg,
+ "Got no buffer on sinkpad and have no fallback sinkpad"
+ );
+ Err(gst_base::AGGREGATOR_FLOW_NEED_DATA)
+ }
+ }
+}
+
+impl ObjectSubclass for FallbackSwitch {
+ const NAME: &'static str = "FallbackSwitch";
+ type ParentType = gst_base::Aggregator;
+ type Instance = gst::subclass::ElementInstanceStruct<Self>;
+ type Class = subclass::simple::ClassStruct<Self>;
+
+ glib_object_subclass!();
+
+ fn new_with_class(klass: &subclass::simple::ClassStruct<Self>) -> Self {
+ let templ = klass.get_pad_template("sink").unwrap();
+ let sinkpad: gst_base::AggregatorPad = glib::Object::new(
+ gst_base::AggregatorPad::static_type(),
+ &[
+ ("name", &"sink"),
+ ("direction", &gst::PadDirection::Sink),
+ ("template", &templ),
+ ],
+ )
+ .unwrap()
+ .downcast()
+ .unwrap();
+
+ Self {
+ sinkpad: sinkpad.clone(),
+ fallback_sinkpad: RwLock::new(None),
+ active_sinkpad: Mutex::new(sinkpad.upcast()),
+ output_state: Mutex::new(OutputState::default()),
+ pad_states: RwLock::new(PadStates::default()),
+ settings: Mutex::new(Settings::default()),
+ }
+ }
+
+ fn class_init(klass: &mut subclass::simple::ClassStruct<Self>) {
+ klass.set_metadata(
+ "Fallback Switch",
+ "Generic",
+ "Allows switching to a fallback input after a given timeout",
+ "Sebastian Dröge <sebastian@centricular.com>",
+ );
+
+ let caps = gst::Caps::new_any();
+ let src_pad_template = gst::PadTemplate::new_with_gtype(
+ "src",
+ gst::PadDirection::Src,
+ gst::PadPresence::Always,
+ &caps,
+ gst_base::AggregatorPad::static_type(),
+ )
+ .unwrap();
+ klass.add_pad_template(src_pad_template);
+
+ let sink_pad_template = gst::PadTemplate::new_with_gtype(
+ "sink",
+ gst::PadDirection::Sink,
+ gst::PadPresence::Always,
+ &caps,
+ gst_base::AggregatorPad::static_type(),
+ )
+ .unwrap();
+ klass.add_pad_template(sink_pad_template);
+
+ let fallbacksink_pad_template = gst::PadTemplate::new_with_gtype(
+ "fallback_sink",
+ gst::PadDirection::Sink,
+ gst::PadPresence::Request,
+ &caps,
+ gst_base::AggregatorPad::static_type(),
+ )
+ .unwrap();
+ klass.add_pad_template(fallbacksink_pad_template);
+
+ klass.install_properties(&PROPERTIES);
+ }
+}
+
+impl ObjectImpl for FallbackSwitch {
+ glib_object_impl!();
+
+ fn constructed(&self, obj: &glib::Object) {
+ self.parent_constructed(obj);
+
+ let agg = obj.downcast_ref::<gst_base::Aggregator>().unwrap();
+ agg.add_pad(&self.sinkpad).unwrap();
+ }
+
+ fn set_property(&self, obj: &glib::Object, id: usize, value: &glib::Value) {
+ let prop = &PROPERTIES[id];
+ let agg = obj.downcast_ref::<gst_base::Aggregator>().unwrap();
+
+ match *prop {
+ subclass::Property("timeout", ..) => {
+ let mut settings = self.settings.lock().unwrap();
+ let timeout = value.get_some().expect("type checked upstream");
+ gst_info!(
+ CAT,
+ obj: agg,
+ "Changing timeout from {} to {}",
+ settings.timeout,
+ timeout
+ );
+ settings.timeout = timeout;
+ drop(settings);
+ }
+ _ => unimplemented!(),
+ }
+ }
+
+ fn get_property(&self, _obj: &glib::Object, id: usize) -> Result<glib::Value, ()> {
+ let prop = &PROPERTIES[id];
+
+ match *prop {
+ subclass::Property("timeout", ..) => {
+ let settings = self.settings.lock().unwrap();
+ Ok(settings.timeout.to_value())
+ }
+ subclass::Property("active-pad", ..) => {
+ let active_pad = self.active_sinkpad.lock().unwrap().clone();
+ Ok(active_pad.to_value())
+ }
+ _ => unimplemented!(),
+ }
+ }
+}
+
+impl ElementImpl for FallbackSwitch {
+ fn request_new_pad(
+ &self,
+ element: &gst::Element,
+ templ: &gst::PadTemplate,
+ name: Option<String>,
+ _caps: Option<&gst::Caps>,
+ ) -> Option<gst::Pad> {
+ let agg = element.downcast_ref::<gst_base::Aggregator>().unwrap();
+ let fallback_sink_templ = agg.get_pad_template("fallback_sink").unwrap();
+ if templ != &fallback_sink_templ
+ || (name.is_some() && name.as_deref() != Some("fallback_sink"))
+ {
+ gst_error!(CAT, obj: agg, "Wrong pad template or name");
+ return None;
+ }
+
+ let mut fallback_sinkpad = self.fallback_sinkpad.write().unwrap();
+ if fallback_sinkpad.is_some() {
+ gst_error!(CAT, obj: agg, "Already have a fallback sinkpad");
+ return None;
+ }
+
+ let sinkpad: gst_base::AggregatorPad = glib::Object::new(
+ gst_base::AggregatorPad::static_type(),
+ &[
+ ("name", &"fallback_sink"),
+ ("direction", &gst::PadDirection::Sink),
+ ("template", templ),
+ ],
+ )
+ .unwrap()
+ .downcast()
+ .unwrap();
+ *fallback_sinkpad = Some(sinkpad.clone());
+ drop(fallback_sinkpad);
+
+ agg.add_pad(&sinkpad).unwrap();
+
+ Some(sinkpad.upcast())
+ }
+
+ fn release_pad(&self, element: &gst::Element, pad: &gst::Pad) {
+ let agg = element.downcast_ref::<gst_base::Aggregator>().unwrap();
+ let mut fallback_sinkpad = self.fallback_sinkpad.write().unwrap();
+ let mut pad_states = self.pad_states.write().unwrap();
+
+ if fallback_sinkpad.as_ref().map(|p| p.upcast_ref()) == Some(pad) {
+ *fallback_sinkpad = None;
+ pad_states.fallback_sinkpad = None;
+ drop(pad_states);
+ drop(fallback_sinkpad);
+ agg.remove_pad(pad).unwrap();
+ gst_debug!(CAT, obj: agg, "Removed fallback sinkpad {:?}", pad);
+ }
+ }
+}
+
+impl AggregatorImpl for FallbackSwitch {
+ fn start(&self, _agg: &gst_base::Aggregator) -> Result<(), gst::ErrorMessage> {
+ *self.active_sinkpad.lock().unwrap() = self.sinkpad.clone().upcast();
+ *self.output_state.lock().unwrap() = OutputState::default();
+ *self.pad_states.write().unwrap() = PadStates::default();
+
+ Ok(())
+ }
+
+ fn sink_event_pre_queue(
+ &self,
+ agg: &gst_base::Aggregator,
+ agg_pad: &gst_base::AggregatorPad,
+ event: gst::Event,
+ ) -> bool {
+ use gst::EventView;
+
+ match event.view() {
+ EventView::Gap(_) => {
+ gst_debug!(CAT, obj: agg_pad, "Dropping gap event");
+ true
+ }
+ _ => self.parent_sink_event_pre_queue(agg, agg_pad, event),
+ }
+ }
+
+ fn sink_event(
+ &self,
+ agg: &gst_base::Aggregator,
+ agg_pad: &gst_base::AggregatorPad,
+ event: gst::Event,
+ ) -> bool {
+ use gst::EventView;
+
+ match event.view() {
+ EventView::Caps(caps) => {
+ let caps = caps.get_caps_owned();
+ gst_debug!(CAT, obj: agg_pad, "Received caps {}", caps);
+
+ let audio_info;
+ let video_info;
+ if caps.get_structure(0).unwrap().get_name() == "audio/x-raw" {
+ audio_info = gst_audio::AudioInfo::from_caps(&caps).ok();
+ video_info = None;
+ } else if caps.get_structure(0).unwrap().get_name() == "video/x-raw" {
+ audio_info = None;
+ video_info = gst_video::VideoInfo::from_caps(&caps).ok();
+ } else {
+ audio_info = None;
+ video_info = None;
+ }
+
+ let new_pad_state = PadState {
+ caps: Some(caps),
+ audio_info,
+ video_info,
+ };
+
+ let mut pad_states = self.pad_states.write().unwrap();
+ if agg_pad == &self.sinkpad {
+ pad_states.sinkpad = new_pad_state;
+ } else if Some(agg_pad) == self.fallback_sinkpad.read().unwrap().as_ref() {
+ pad_states.fallback_sinkpad = Some(new_pad_state);
+ }
+ drop(pad_states);
+
+ self.parent_sink_event(agg, agg_pad, event)
+ }
+ _ => self.parent_sink_event(agg, agg_pad, event),
+ }
+ }
+
+ fn get_next_time(&self, agg: &gst_base::Aggregator) -> gst::ClockTime {
+ // If we have a buffer on the sinkpad then the timeout is always going to be immediately,
+ // i.e. 0. We want to output that buffer immediately, no matter what.
+ //
+ // Otherwise if we have a fallback sinkpad and it has a buffer, then the timeout is going
+ // to be its running time. We will then either output the buffer or drop it, depending on
+ // its distance from the last sinkpad time
+ if self.sinkpad.peek_buffer().is_some() {
+ gst_debug!(CAT, obj: agg, "Have buffer on sinkpad, immediate timeout");
+ 0.into()
+ } else if self.sinkpad.is_eos() {
+ gst_debug!(CAT, obj: agg, "Sinkpad is EOS, immediate timeout");
+ 0.into()
+ } else if let Some((buffer, fallback_sinkpad)) = self
+ .fallback_sinkpad
+ .read()
+ .unwrap()
+ .as_ref()
+ .and_then(|p| p.peek_buffer().map(|buffer| (buffer, p)))
+ {
+ if buffer.get_pts().is_none() {
+ gst_error!(CAT, obj: agg, "Only buffers with PTS supported");
+ // Trigger aggregate immediately to error out immediately
+ return 0.into();
+ }
+
+ let segment = match fallback_sinkpad.get_segment().downcast::<gst::ClockTime>() {
+ Ok(segment) => segment,
+ Err(_) => {
+ gst_error!(CAT, obj: agg, "Only TIME segments supported");
+ // Trigger aggregate immediately to error out immediately
+ return 0.into();
+ }
+ };
+
+ let running_time = segment.to_running_time(buffer.get_dts_or_pts());
+ gst_debug!(
+ CAT,
+ obj: agg,
+ "Have buffer on fallback sinkpad, timeout at {}",
+ running_time
+ );
+ running_time
+ } else {
+ gst_debug!(CAT, obj: agg, "Have no buffer at all yet");
+ gst::CLOCK_TIME_NONE
+ }
+ }
+
+ // Clip the raw audio/video buffers we have to the segment boundaries to ensure that
+ // calculating the running times later works correctly
+ fn clip(
+ &self,
+ agg: &gst_base::Aggregator,
+ agg_pad: &gst_base::AggregatorPad,
+ mut buffer: gst::Buffer,
+ ) -> Option<gst::Buffer> {
+ let segment = match agg_pad.get_segment().downcast::<gst::ClockTime>() {
+ Ok(segment) => segment,
+ Err(_) => {
+ gst_error!(CAT, obj: agg, "Only TIME segments supported");
+ return Some(buffer);
+ }
+ };
+
+ let pts = buffer.get_pts();
+ if pts.is_none() {
+ gst_error!(CAT, obj: agg, "Only buffers with PTS supported");
+ return Some(buffer);
+ }
+
+ let pad_states = self.pad_states.read().unwrap();
+ let pad_state = if agg_pad == &self.sinkpad {
+ &pad_states.sinkpad
+ } else if Some(agg_pad) == self.fallback_sinkpad.read().unwrap().as_ref() {
+ if let Some(ref pad_state) = pad_states.fallback_sinkpad {
+ pad_state
+ } else {
+ return Some(buffer);
+ }
+ } else {
+ unreachable!()
+ };
+
+ if pad_state.audio_info.is_none() && pad_state.video_info.is_none() {
+ // No clipping possible for non-raw formats
+ return Some(buffer);
+ }
+
+ let duration = if buffer.get_duration().is_some() {
+ buffer.get_duration()
+ } else if let Some(ref audio_info) = pad_state.audio_info {
+ gst::SECOND
+ .mul_div_floor(
+ buffer.get_size() as u64,
+ audio_info.rate() as u64 * audio_info.bpf() as u64,
+ )
+ .unwrap()
+ } else if let Some(ref video_info) = pad_state.video_info {
+ if *video_info.fps().numer() > 0 {
+ gst::SECOND
+ .mul_div_floor(
+ *video_info.fps().denom() as u64,
+ *video_info.fps().numer() as u64,
+ )
+ .unwrap()
+ } else {
+ gst::CLOCK_TIME_NONE
+ }
+ } else {
+ unreachable!()
+ };
+
+ gst_debug!(
+ CAT,
+ obj: agg_pad,
+ "Clipping buffer {:?} with PTS {} and duration {}",
+ buffer,
+ pts,
+ duration
+ );
+ if let Some(ref audio_info) = pad_state.audio_info {
+ gst_audio::audio_buffer_clip(
+ buffer,
+ segment.upcast_ref(),
+ audio_info.rate(),
+ audio_info.bpf(),
+ )
+ } else if pad_state.video_info.is_some() {
+ segment.clip(pts, pts + duration).map(|(start, stop)| {
+ {
+ let buffer = buffer.make_mut();
+ buffer.set_pts(start);
+ buffer.set_dts(start);
+ if duration.is_some() {
+ buffer.set_duration(stop - start);
+ }
+ }
+
+ buffer
+ })
+ } else {
+ unreachable!();
+ }
+ }
+
+ fn aggregate(
+ &self,
+ agg: &gst_base::Aggregator,
+ timeout: bool,
+ ) -> Result<gst::FlowSuccess, gst::FlowError> {
+ gst_debug!(CAT, obj: agg, "Aggregate called: timeout {}", timeout);
+
+ let (mut buffer, active_caps, pad_change) = self.get_next_buffer(agg, timeout)?;
+
+ let current_src_caps = agg.get_static_pad("src").unwrap().get_current_caps();
+ if Some(&active_caps) != current_src_caps.as_ref() {
+ gst_info!(
+ CAT,
+ obj: agg,
+ "Caps change from {:?} to {:?}",
+ current_src_caps,
+ active_caps
+ );
+ agg.set_src_caps(&active_caps);
+ }
+
+ if pad_change {
+ agg.notify("active-pad");
+ buffer.make_mut().set_flags(gst::BufferFlags::DISCONT);
+ }
+
+ gst_debug!(CAT, obj: agg, "Finishing buffer {:?}", buffer);
+ agg.finish_buffer(buffer)
+ }
+
+ fn negotiate(&self, _agg: &gst_base::Aggregator) -> bool {
+ true
+ }
+}
+
+pub fn register(plugin: &gst::Plugin) -> Result<(), glib::BoolError> {
+ gst::Element::register(
+ Some(plugin),
+ "fallbackswitch",
+ gst::Rank::None,
+ FallbackSwitch::get_type(),
+ )
+}
diff --git a/utils/gst-plugin-fallbackswitch/src/lib.rs b/utils/gst-plugin-fallbackswitch/src/lib.rs
new file mode 100644
index 000000000..c2b2b0fdf
--- /dev/null
+++ b/utils/gst-plugin-fallbackswitch/src/lib.rs
@@ -0,0 +1,62 @@
+// Copyright (C) 2019 Sebastian Dröge <sebastian@centricular.com>
+//
+// This library is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Library General Public
+// License as published by the Free Software Foundation; either
+// version 2 of the License, or (at your option) any later version.
+//
+// This library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+// Library General Public License for more details.
+//
+// You should have received a copy of the GNU Library General Public
+// License along with this library; if not, write to the
+// Free Software Foundation, Inc., 51 Franklin Street, Suite 500,
+// Boston, MA 02110-1335, USA.
+
+#[macro_use]
+extern crate glib;
+#[macro_use]
+extern crate gstreamer as gst;
+
+extern crate gstreamer_audio as gst_audio;
+extern crate gstreamer_video as gst_video;
+
+#[cfg(not(feature = "v1_18"))]
+extern crate glib_sys;
+#[cfg(not(feature = "v1_18"))]
+extern crate gobject_sys;
+#[cfg(feature = "v1_18")]
+extern crate gstreamer_base as gst_base;
+#[cfg(not(feature = "v1_18"))]
+extern crate gstreamer_sys as gst_sys;
+#[cfg(not(feature = "v1_18"))]
+#[allow(dead_code)]
+mod base;
+#[cfg(not(feature = "v1_18"))]
+mod gst_base {
+ pub use super::base::*;
+}
+
+#[macro_use]
+extern crate lazy_static;
+
+mod fallbackswitch;
+
+fn plugin_init(plugin: &gst::Plugin) -> Result<(), glib::BoolError> {
+ fallbackswitch::register(plugin)?;
+ Ok(())
+}
+
+gst_plugin_define!(
+ fallbackswitch,
+ env!("CARGO_PKG_DESCRIPTION"),
+ plugin_init,
+ concat!(env!("CARGO_PKG_VERSION"), "-", env!("COMMIT_ID")),
+ "MIT/X11",
+ env!("CARGO_PKG_NAME"),
+ env!("CARGO_PKG_NAME"),
+ env!("CARGO_PKG_REPOSITORY"),
+ env!("BUILD_REL_DATE")
+);
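For context (not part of the patch): once the plugin is built and visible to GStreamer, the element can be exercised roughly as follows. The primary stream feeds the always "sink" pad, the fallback stream is linked to the "fallback_sink" request pad, and after "timeout" nanoseconds without data on the primary input the element switches to the fallback and emits a notify for "active-pad". The snippet below is a sketch; the sources, sinks and the 1-second timeout are arbitrary, and it assumes a gstreamer-rs API of the same vintage as this patch:

    use gstreamer as gst;
    use gst::prelude::*;

    fn main() -> Result<(), Box<dyn std::error::Error>> {
        gst::init()?;
        // Switch to a snow test pattern if the live source stalls for 1 second.
        let pipeline = gst::parse_launch(
            "videotestsrc is-live=true ! fallbackswitch name=fs timeout=1000000000 \
             videotestsrc pattern=snow ! fs.fallback_sink \
             fs. ! videoconvert ! autovideosink",
        )?;
        pipeline.set_state(gst::State::Playing)?;

        let bus = pipeline.get_bus().unwrap();
        for msg in bus.iter_timed(gst::CLOCK_TIME_NONE) {
            match msg.view() {
                gst::MessageView::Error(err) => return Err(err.get_error().into()),
                gst::MessageView::Eos(..) => break,
                _ => (),
            }
        }
        pipeline.set_state(gst::State::Null)?;
        Ok(())
    }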