[WIP] Output audio queue #1253

Draft · wants to merge 6 commits into base: master
103 changes: 69 additions & 34 deletions src/engine/gstenginepipeline.cpp
@@ -107,7 +107,8 @@ GstEnginePipeline::GstEnginePipeline(QObject *parent)
pipeline_(nullptr),
audiobin_(nullptr),
audiosink_(nullptr),
audioqueue_(nullptr),
inputaudioqueue_(nullptr),
outputaudioqueue_(nullptr),
audioqueueconverter_(nullptr),
ebur128_volume_(nullptr),
volume_(nullptr),
@@ -486,8 +487,8 @@ bool GstEnginePipeline::InitAudioBin(QString &error) {

// Create all the other elements

audioqueue_ = CreateElement("queue2", "audioqueue", audiobin_, error);
if (!audioqueue_) {
inputaudioqueue_ = CreateElement("queue2", "inputaudioqueue", audiobin_, error);
if (!inputaudioqueue_) {
return false;
}

@@ -575,8 +576,6 @@ bool GstEnginePipeline::InitAudioBin(QString &error) {

}

eventprobe_ = audioqueueconverter_;

// Create the replaygain elements if it's enabled.
GstElement *rgvolume = nullptr;
GstElement *rglimiter = nullptr;
@@ -594,7 +593,6 @@
if (!rgconverter) {
return false;
}
eventprobe_ = rgconverter;
// Set replaygain settings
g_object_set(G_OBJECT(rgvolume), "album-mode", rg_mode_, nullptr);
g_object_set(G_OBJECT(rgvolume), "pre-amp", rg_preamp_, nullptr);
@@ -611,7 +609,6 @@

UpdateEBUR128LoudnessNormalizingGaindB();

eventprobe_ = ebur128_volume_;
}

GstElement *bs2b = nullptr;
@@ -622,42 +619,44 @@ bool GstEnginePipeline::InitAudioBin(QString &error) {
}
}

{ // Create a pad on the outside of the audiobin and connect it to the pad of the first element.
GstPad *pad = gst_element_get_static_pad(audioqueue_, "sink");
if (pad) {
gst_element_add_pad(audiobin_, gst_ghost_pad_new("sink", pad));
gst_object_unref(pad);
}

outputaudioqueue_ = CreateElement("queue2", "outputaudioqueue", audiobin_, error);
if (!outputaudioqueue_) {
return false;
}

// Add a data probe on the src pad of the audioconvert element for our scope.
// We do it here because we want pre-equalized and pre-volume samples, so that our visualizations are not affected by them.
{
GstPad *pad = gst_element_get_static_pad(eventprobe_, "src");
{ // Create a pad on the outside of the audiobin and connect it to the pad of the first element.
GstPad *pad = gst_element_get_static_pad(inputaudioqueue_, "sink");
if (pad) {
upstream_events_probe_cb_id_ = gst_pad_add_probe(pad, GST_PAD_PROBE_TYPE_EVENT_UPSTREAM, &UpstreamEventsProbeCallback, this, nullptr);
gst_element_add_pad(audiobin_, gst_ghost_pad_new("sink", pad));
gst_object_unref(pad);
}
}

// Set the buffer duration.
// We set this on this queue instead of the playbin because setting it on the playbin only affects network sources.
// We set this on our queues instead of the playbin because setting it on the playbin only affects network sources.
// Disable the default buffer and byte limits, so we only buffer based on time.

g_object_set(G_OBJECT(audioqueue_), "max-size-buffers", 0, nullptr);
g_object_set(G_OBJECT(audioqueue_), "max-size-bytes", 0, nullptr);
if (buffer_duration_nanosec_ > 0) {
qLog(Debug) << "Setting buffer duration:" << buffer_duration_nanosec_ << "low watermark:" << buffer_low_watermark_ << "high watermark:" << buffer_high_watermark_;
g_object_set(G_OBJECT(audioqueue_), "use-buffering", true, nullptr);
g_object_set(G_OBJECT(audioqueue_), "max-size-time", buffer_duration_nanosec_, nullptr);
g_object_set(G_OBJECT(audioqueue_), "low-watermark", buffer_low_watermark_, nullptr);
g_object_set(G_OBJECT(audioqueue_), "high-watermark", buffer_high_watermark_, nullptr);
for (GstElement *audioqueue : {inputaudioqueue_, outputaudioqueue_}) {
g_object_set(G_OBJECT(audioqueue), "max-size-buffers", 0, nullptr);
g_object_set(G_OBJECT(audioqueue), "max-size-bytes", 0, nullptr);
if (buffer_duration_nanosec_ > 0) {
qLog(Debug) << "Setting buffer duration:" << buffer_duration_nanosec_ << "low watermark:" << buffer_low_watermark_ << "high watermark:" << buffer_high_watermark_;
g_object_set(G_OBJECT(audioqueue), "use-buffering", true, nullptr);
g_object_set(G_OBJECT(audioqueue), "max-size-time", buffer_duration_nanosec_, nullptr);
g_object_set(G_OBJECT(audioqueue), "low-watermark", buffer_low_watermark_, nullptr);
g_object_set(G_OBJECT(audioqueue), "high-watermark", buffer_high_watermark_, nullptr);
}
}

// We do not care about the buffering status of the input queue,
// only of the output one.
g_object_set(G_OBJECT(inputaudioqueue_), "use-buffering", false, nullptr);

// Link all elements

if (!gst_element_link(audioqueue_, audioqueueconverter_)) {
error = "Failed to link audio queue to audio queue converter.";
if (!gst_element_link(inputaudioqueue_, audioqueueconverter_)) {
error = "Failed to link input audio queue to audio queue converter.";
return false;
}

@@ -736,6 +735,7 @@ bool GstEnginePipeline::InitAudioBin(QString &error) {
error = "Failed to link audio sink converter.";
return false;
}
element_link = audiosinkconverter;

{
GstCaps *caps = gst_caps_new_empty_simple("audio/x-raw");
@@ -747,17 +747,26 @@
qLog(Debug) << "Setting channels to" << channels_;
gst_caps_set_simple(caps, "channels", G_TYPE_INT, channels_, nullptr);
}
const bool link_filtered_result = gst_element_link_filtered(audiosinkconverter, audiosink_, caps);
const bool link_filtered_result = gst_element_link_filtered(element_link, outputaudioqueue_, caps);
gst_caps_unref(caps);
if (!link_filtered_result) {
error = "Failed to link audio sink converter to audio sink with filter for " + output_;
error = "Failed to link audio sink converter to output audio queue with filter";
return false;
}
eventprobe_ = outputaudioqueue_;
element_link = outputaudioqueue_;
}

if (!gst_element_link(element_link, audiosink_)) {
error = "Failed to link output audio queue to audio sink for " + output_;
return false;
}
element_link = audiosink_;

{ // Add probes and handlers.
GstPad *pad = gst_element_get_static_pad(audioqueueconverter_, "src");
GstPad *pad = gst_element_get_static_pad(eventprobe_, "src");
if (pad) {
upstream_events_probe_cb_id_ = gst_pad_add_probe(pad, GST_PAD_PROBE_TYPE_EVENT_UPSTREAM, &UpstreamEventsProbeCallback, this, nullptr);
buffer_probe_cb_id_ = gst_pad_add_probe(pad, GST_PAD_PROBE_TYPE_BUFFER, BufferProbeCallback, this, nullptr);
gst_object_unref(pad);
}
@@ -1459,8 +1468,12 @@ void GstEnginePipeline::StateChangedMessageReceived(GstMessage *msg) {

void GstEnginePipeline::BufferingMessageReceived(GstMessage *msg) {

// Only handle buffering messages from the queue2 element in audiobin - not the one that's created automatically by playbin.
if (GST_ELEMENT(GST_MESSAGE_SRC(msg)) != audioqueue_) {
// Only handle buffering messages from the output queue2 element in audiobin - not the one that's created automatically by playbin.
// We monitor the output queue specifically, not the input queue,
// because there are other processing elements in between,
// and if the *output* queue refills more slowly than real-time,
// playback will certainly stutter.
if (GST_ELEMENT(GST_MESSAGE_SRC(msg)) != outputaudioqueue_) {
return;
}

@@ -1545,6 +1558,17 @@ bool GstEnginePipeline::Seek(const qint64 nanosec) {

}

static void flush_pipeline(GstElement *pipeline) {
// FIXME: is this really the right way to flush the pipeline without seeking?
gint64 position_ns = 0;
if (gst_element_query_position(pipeline, GST_FORMAT_TIME, &position_ns)) {
gst_element_seek_simple(pipeline, GST_FORMAT_TIME, GST_SEEK_FLAG_FLUSH, position_ns);
}
}

QFuture<void> GstEnginePipeline::FlushPipeline() {
return QtConcurrent::run(&set_state_threadpool_, &flush_pipeline, pipeline_);
}

void GstEnginePipeline::SetEBUR128LoudnessNormalizingGain_dB(const double ebur128_loudness_normalizing_gain_db) {

ebur128_loudness_normalizing_gain_db_ = ebur128_loudness_normalizing_gain_db;
@@ -1558,6 +1582,8 @@ void GstEnginePipeline::UpdateEBUR128LoudnessNormalizingGaindB() {
auto dB_to_mult = [](const double gain_dB) { return std::pow(10., gain_dB / 20.); };

g_object_set(G_OBJECT(ebur128_volume_), "volume", dB_to_mult(ebur128_loudness_normalizing_gain_db_), nullptr);

FlushPipeline();
}

}
@@ -1575,6 +1601,10 @@ void GstEnginePipeline::SetVolume(const uint volume_percent) {
}
}

if (volume_ == volume_sw_) {
FlushPipeline();
}

volume_percent_ = volume_percent;

}
@@ -1583,6 +1613,7 @@ void GstEnginePipeline::SetFaderVolume(const qreal volume) {

if (volume_fading_) {
g_object_set(G_OBJECT(volume_fading_), "volume", volume, nullptr);
FlushPipeline(); // FIXME: this is not the right solution.
}

}
@@ -1598,6 +1629,8 @@ void GstEnginePipeline::UpdateStereoBalance() {

if (audiopanorama_) {
g_object_set(G_OBJECT(audiopanorama_), "panorama", stereo_balance_, nullptr);

FlushPipeline();
}

}
@@ -1637,6 +1670,8 @@ void GstEnginePipeline::UpdateEqualizer() {

g_object_set(G_OBJECT(equalizer_preamp_), "volume", preamp, nullptr);

FlushPipeline();

}

void GstEnginePipeline::StartFader(const qint64 duration_nanosec, const QTimeLine::Direction direction, const QEasingCurve::Type shape, const bool use_fudge_timer) {
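For context on the buffering handler changed above: only the source-filtering condition of BufferingMessageReceived() appears in this diff, not the rest of the handler. The sketch below shows one typical way such a handler reacts to the reported percentage; the function name and the pause/resume policy are illustrative assumptions, not necessarily what Strawberry actually does.

```cpp
#include <gst/gst.h>

// Sketch only: react to GST_MESSAGE_BUFFERING coming from the output queue.
// The pause/resume policy below is an assumed illustration.
static void HandleBufferingMessage(GstElement *pipeline, GstElement *outputaudioqueue, GstMessage *msg) {

  // Ignore buffering messages from any element other than the output queue2.
  if (GST_ELEMENT(GST_MESSAGE_SRC(msg)) != outputaudioqueue) return;

  gint percent = 0;
  gst_message_parse_buffering(msg, &percent);

  if (percent < 100) {
    // The output queue is draining faster than it refills: pause to rebuffer.
    gst_element_set_state(pipeline, GST_STATE_PAUSED);
  }
  else {
    // Buffer refilled: resume playback.
    gst_element_set_state(pipeline, GST_STATE_PLAYING);
  }

}
```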
4 changes: 3 additions & 1 deletion src/engine/gstenginepipeline.h
@@ -85,6 +85,7 @@ class GstEnginePipeline : public QObject {
void RemoveAllBufferConsumers();

// Control the music playback
QFuture<void> FlushPipeline();
QFuture<GstStateChangeReturn> SetState(const GstState state);
Q_INVOKABLE bool Seek(const qint64 nanosec);
void SetEBUR128LoudnessNormalizingGain_dB(const double ebur128_loudness_normalizing_gain_db);
@@ -301,7 +302,8 @@ class GstEnginePipeline : public QObject {
GstElement *pipeline_;
GstElement *audiobin_;
GstElement *audiosink_;
GstElement *audioqueue_;
GstElement *inputaudioqueue_;
GstElement *outputaudioqueue_;
GstElement *audioqueueconverter_;
GstElement *ebur128_volume_;
GstElement *volume_;
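Regarding the FIXME in flush_pipeline(): a flushing seek to the current position is a common way to drop data already sitting in downstream queues, so that volume, equalizer, and balance changes become audible sooner instead of only after the buffered audio has played out. Below is a slightly more defensive sketch of the same idea, using the full gst_element_seek() call and checking both the position query and the seek result. The GStreamer calls are standard 1.x API; the error-handling choices and the function name are assumptions, not the patch's final behaviour.

```cpp
#include <gst/gst.h>

// Sketch: flush already-queued (pre-processed) audio by seeking to the current
// position with GST_SEEK_FLAG_FLUSH. Error handling here is an assumption.
static bool FlushAtCurrentPosition(GstElement *pipeline) {

  gint64 position_ns = 0;
  if (!gst_element_query_position(pipeline, GST_FORMAT_TIME, &position_ns)) {
    return false;  // Position unknown (e.g. not yet prerolled): nothing to flush.
  }

  return gst_element_seek(pipeline,
                          1.0,  // Keep the normal playback rate.
                          GST_FORMAT_TIME,
                          static_cast<GstSeekFlags>(GST_SEEK_FLAG_FLUSH | GST_SEEK_FLAG_ACCURATE),
                          GST_SEEK_TYPE_SET, position_ns,
                          GST_SEEK_TYPE_NONE, GST_CLOCK_TIME_NONE);

}
```

Whether flushing is acceptable here is still an open question, as the FIXMEs in the diff note: a flushing seek also discards audio the sink has not played yet and can produce a small audible gap.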