Qt
Internal/Contributor docs for the Qt SDK. Note: These are NOT official API docs; those are found at https://doc.qt.io/
Loading...
Searching...
No Matches
qffmpegaudiorenderer.cpp
Go to the documentation of this file.
1// Copyright (C) 2021 The Qt Company Ltd.
2// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only
3
4#include "playbackengine/qffmpegaudiorenderer_p.h"
5
6#include <QtMultimedia/qaudiosink.h>
7#include <QtMultimedia/qaudiooutput.h>
8#include <QtMultimedia/qaudiobufferoutput.h>
9#include <QtMultimedia/private/qaudiobuffer_support_p.h>
10#include <QtMultimedia/private/qplatformaudiooutput_p.h>
11
12#include <QtCore/qloggingcategory.h>
13
17
19
20Q_STATIC_LOGGING_CATEGORY(qLcAudioRenderer, "qt.multimedia.ffmpeg.audiorenderer");
21
22namespace QFFmpeg {
23
24using namespace std::chrono_literals;
25using namespace std::chrono;
26
27namespace {
// Target amount of audio the QAudioSink's internal buffer is sized to hold
// (passed to setBufferSize() via bytesForDuration()).
28constexpr auto DesiredBufferTime = 110000us;
// Lower/upper bounds for the acceptable "sound delay" window used by the
// synchronization logic, and the minimum free space kept in the sink buffer.
29constexpr auto MinDesiredBufferTime = 22000us;
30constexpr auto MaxDesiredBufferTime = 64000us;
31constexpr auto MinDesiredFreeBufferTime = 10000us;
32
33// It might be changed with #ifdef, as on Linux, QPulseAudioSink has quite unstable timings,
34// and it needs much more time to make sure that the buffer is overloaded.
// How long a non-moderate buffer-loading state must persist before the
// renderer's clock is corrected (see updateSynchronization()).
35constexpr auto BufferLoadingMeasureTime = 400ms;
36
37constexpr auto DurationBias = 2ms; // avoids extra timer events
38
39QAudioFormat audioFormatFromFrame(const Frame &frame)
40{
41 return QFFmpegMediaFormatInfo::audioFormatFromCodecParameters(
42 *frame.codecContext()->stream()->codecpar);
43}
44
45} // namespace
46
47AudioRenderer::AudioRenderer(const PlaybackEngineObjectID &id, const TimeController &tc,
48 QAudioOutput *output, QAudioBufferOutput *bufferOutput,
49 bool pitchCompensation)
50 : Renderer(id, tc),
51 m_output(output),
52 m_bufferOutput(bufferOutput),
53 m_pitchCompensation(pitchCompensation)
54{
55 if (output) {
56 // TODO: implement the signals in QPlatformAudioOutput and connect to them, QTBUG-112294
57 connect(output, &QAudioOutput::deviceChanged, this, &AudioRenderer::onDeviceChanged);
58 connect(output, &QAudioOutput::volumeChanged, this, &AudioRenderer::updateVolume);
59 connect(output, &QAudioOutput::mutedChanged, this, &AudioRenderer::updateVolume);
60 }
61}
62
63void AudioRenderer::setOutput(QAudioOutput *output)
64{
65 setOutputInternal(m_output, output, [this](QAudioOutput *) { onDeviceChanged(); });
66}
67
68void AudioRenderer::setOutput(QAudioBufferOutput *bufferOutput)
69{
70 setOutputInternal(m_bufferOutput, bufferOutput,
71 [this](QAudioBufferOutput *) { m_bufferOutputChanged = true; });
72}
73
// NOTE(review): the declaration line for setPitchCompensation(bool enabled)
// (line 74 in the original numbering) is missing from this capture.
75{
// Runs on the renderer's own thread via invokePriorityMethod.
76    invokePriorityMethod([this, enabled] {
// No-op when the requested mode is already active.
77        if (m_pitchCompensation == enabled)
78            return;
79
80        m_pitchCompensation = enabled;
// Drop the converter so it is rebuilt with the new mode on the next frame.
81        m_audioFrameConverter.reset();
82    });
83}
84
89
// NOTE(review): the updateVolume() declaration line (90 in the original
// numbering) is missing from this capture; the constructor routes
// volumeChanged/mutedChanged here.
91{
// Mute maps to volume 0; otherwise forward the output's volume to the sink.
92    if (m_sink)
93        m_sink->setVolume(m_output->isMuted() ? 0.f : m_output->volume());
94}
95
// NOTE(review): the onDeviceChanged() declaration line (96 in the original
// numbering) is missing from this capture.
97{
// Deferred handling: updateOutputs() checks this flag on the next frame and
// rebuilds the sink pipeline for the new device.
98    m_deviceChanged = true;
99}
100
// NOTE(review): the declaration line for this function (101 in the original
// numbering) is missing from this capture. Judging by the body, it resets
// per-session state: sink buffers, converters and m_sessionCtx.
102{
103    // TODO: play with what to clean: we may find better config.
104    // If we reset sink and converters, move m_ioDevice,
105    // m_audioFrameConverter, m_bufferOutputResampler to m_sessionCtx.
// Compile-time switches kept for experimentation with what must be cleared.
106    constexpr bool shouldResetSink = true;
107    constexpr bool shouldResetConverters = true;
108
109    if constexpr (shouldResetSink) {
110        if (m_sink)
111            m_sink->reset();
// The QIODevice belongs to the sink; it is re-obtained from m_sink->start().
112        m_ioDevice = nullptr;
113        m_bufferLoadingInfo = {};
114    }
115
116    if constexpr (shouldResetConverters) {
117        m_audioFrameConverter.reset();
118        m_bufferOutputResampler.reset();
119    }
120
121    // change AudioRenderer caches
122    m_sessionCtx = {};
123    // don't touch m_deviceChanged here
124}
125
126Renderer::RenderingResult AudioRenderer::renderInternal(Frame frame)
127{
128 if (frame.isValid())
129 updateOutputs(frame);
130
131 // push to sink first in order not to waste time on resampling
132 // for QAudioBufferOutput
133 const RenderingResult result = pushFrameToOutput(frame);
134
135 if (m_sessionCtx.lastFramePushDone)
136 pushFrameToBufferOutput(frame);
137 // else // skip pushing the same data to QAudioBufferOutput
138
139 m_sessionCtx.lastFramePushDone = result.done;
140
141 return result;
142}
143
// Pushes as much converted frame data as the sink currently accepts and
// returns a RenderingResult whose `done` flag / delay tell the caller when
// to retry with the remainder.
144AudioRenderer::RenderingResult AudioRenderer::pushFrameToOutput(const Frame &frame)
145{
// No sink pipeline yet (updateOutputs() hasn't run, or there is no output).
146    if (!m_ioDevice || !m_audioFrameConverter)
147        return {};
148
149    Q_ASSERT(m_sink);
150
// Whatever path we leave by, the next call is no longer "first frame".
151    auto firstFrameFlagGuard = qScopeGuard([&]() { m_firstFrameToSink = false; });
152
// Snapshot sink state/free space *before* writing, together with the current
// staging-buffer offset and a steady-clock timestamp, for the sync math.
153    const SynchronizationStamp syncStamp{ m_sink->state(), m_sink->bytesFree(),
154                                          m_sessionCtx.bufferedData.offset, SteadyClock::now() };
155
156    if (!m_sessionCtx.bufferedData.isValid()) {
// An invalid frame signals end of stream: report the drain-out time once;
// std::exchange makes subsequent calls answer "nothing to do".
157        if (!frame.isValid()) {
158            if (std::exchange(m_sessionCtx.drained, true))
159                return {};
160
161            const auto time = bufferLoadingTime(syncStamp);
162
163            qCDebug(qLcAudioRenderer) << "Draining AudioRenderer, time:" << time;
164
165            return { time.count() == 0, time };
166        }
167
// Convert the new frame into sink-format bytes staged for writing below.
168        m_sessionCtx.bufferedData = {
169            m_audioFrameConverter->convert(frame.avFrame()),
170        };
171    }
172
173    if (m_sessionCtx.bufferedData.isValid()) {
174        // synchronize after "QIODevice::write" to deliver audio data to the sink ASAP.
175        auto syncGuard = qScopeGuard([&]() { updateSynchronization(syncStamp, frame); });
176
177        const auto bytesWritten = m_ioDevice->write(m_sessionCtx.bufferedData.data(),
178                                                    m_sessionCtx.bufferedData.size());
179
180        m_sessionCtx.bufferedData.offset += bytesWritten;
181
// All staged bytes consumed (size() apparently shrinks as offset advances):
// clear the staging buffer and report completion.
182        if (m_sessionCtx.bufferedData.size() <= 0) {
183            m_sessionCtx.bufferedData = {};
184
185            return {};
186        }
187
// Sink is full; retry roughly when the remaining data — capped at half the
// sink buffer — has played out. DurationBias avoids an extra timer event.
188        const auto remainingDuration = durationForBytes(m_sessionCtx.bufferedData.size());
189
190        return { false,
191                 std::min(remainingDuration + DurationBias, m_timings.actualBufferDuration / 2) };
192    }
193
194    return {};
195}
196
197void AudioRenderer::pushFrameToBufferOutput(const Frame &frame)
198{
199 if (!m_bufferOutput)
200 return;
201
202 if (frame.isValid()) {
203 Q_ASSERT(m_bufferOutputResampler);
204
205 // TODO: get buffer from m_bufferedData if resample formats are equal
206 QAudioBuffer buffer = m_bufferOutputResampler->resample(frame.avFrame());
207 emit m_bufferOutput->audioBufferReceived(buffer);
208 } else {
209 emit m_bufferOutput->audioBufferReceived({});
210 }
211}
212
// NOTE(review): the declaration line for this function (213 in the original
// numbering) is missing from this capture. Presumably a playback-rate (or
// similar) change handler: dropping the converter makes updateOutputs()
// rebuild it via initAudioFrameConverter() on the next frame — TODO confirm.
214{
215    m_audioFrameConverter.reset();
216}
217
// NOTE(review): the declaration line (218 in the original numbering) is
// missing from this capture; per the class reference this is
// TimePoint nextTimePoint() const override.
219{
220    const TimePoint timePoint = Renderer::nextTimePoint();
221
222    // if the first frame is expected, don't force the immediate job
223    if (m_firstFrameToSink)
224        return timePoint;
225
226    // if the sink is active, don't force the immediate job
227    if (!m_sink || m_sink->state() != QAudio::IdleState)
228        return timePoint;
229
230    // if the waiting interval is out of a heuristic fixable range,
231    // don't force the immediate job.
232    constexpr auto MaxFixableInterval = 50ms;
233    if (timePoint == TimePoint::min() ||
234        timePoint - std::chrono::steady_clock::now() > MaxFixableInterval)
235        return timePoint;
236
237    return TimePoint::min(); // do the job now
238}
239
// NOTE(review): the onPauseChanged() declaration line (240 in the original
// numbering) is missing from this capture.
241{
// After a pause/resume, treat the next pushed frame as the first one so the
// first-frame synchronization heuristics apply again.
242    m_firstFrameToSink = true;
243    Renderer::onPauseChanged();
244}
245
246void AudioRenderer::initAudioFrameConverter(const Frame &frame)
247{
248 // We recreate the frame converter whenever format or playback rate is changed
249 if (!m_pitchCompensation || QtPrivate::fuzzyCompare(playbackRate(), 1.0f)) {
250 m_audioFrameConverter = makeTrivialAudioFrameConverter(frame, m_sinkFormat, playbackRate());
251 } else {
252 m_audioFrameConverter =
253 makePitchShiftingAudioFrameConverter(frame, m_sinkFormat, playbackRate());
254 }
255}
256
// NOTE(review): the declaration line for this function (257 in the original
// numbering) is missing from this capture; per the debug message it frees
// the audio-output state (sink, io device, cached formats and timings).
258{
259    qCDebug(qLcAudioRenderer) << "Free audio output";
260    if (m_sink) {
261        m_sink->reset();
262
263        // TODO: investigate if it's enough to reset the sink without deleting
264        m_sink.reset();
265    }
266
// The io device is owned by the (now destroyed) sink.
267    m_ioDevice = nullptr;
268
269    m_sessionCtx.bufferedData = {};
270    m_deviceChanged = false;
271    m_sinkFormat = {};
272    m_timings = {};
273    m_bufferLoadingInfo = {};
274}
275
// Lazily (re)creates the output pipeline for the current frame: the
// buffer-output resampler, the sink format, the QAudioSink and its io
// device, and the audio frame converter.
276void AudioRenderer::updateOutputs(const Frame &frame)
277{
// NOTE(review): line 279 of the original file is missing from this capture —
// the body that handles the device change (likely freeing the old sink)
// before the converter reset is not fully visible here.
278    if (m_deviceChanged) {
280        m_audioFrameConverter.reset();
281    }
282
283    if (m_bufferOutput) {
// A new QAudioBufferOutput invalidates the previously built resampler.
284        if (m_bufferOutputChanged) {
285            m_bufferOutputChanged = false;
286            m_bufferOutputResampler.reset();
287        }
288
// Resample to the output's requested format, falling back to the frame's own.
289        if (!m_bufferOutputResampler) {
290            QAudioFormat outputFormat = m_bufferOutput->format();
291            if (!outputFormat.isValid())
292                outputFormat = audioFormatFromFrame(frame);
293            m_bufferOutputResampler = createResampler(frame, outputFormat);
294        }
295    }
296
297    if (!m_output)
298        return;
299
300    if (!m_sinkFormat.isValid()) {
301        m_sinkFormat = audioFormatFromFrame(frame);
302        m_sinkFormat.setChannelConfig(m_output->device().channelConfiguration());
303    }
304
305    if (!m_sink) {
306        // Insert a delay here to test time offset synchronization, e.g. QThread::sleep(1)
307        m_sink = std::make_unique<QAudioSink>(m_output->device(), m_sinkFormat);
// NOTE(review): line 308 of the original file is missing from this capture
// (possibly an initial volume update on the new sink — confirm upstream).
309        m_sink->setBufferSize(m_sinkFormat.bytesForDuration(DesiredBufferTime.count()));
310
311        connect(m_sink.get(), &QAudioSink::stateChanged, this,
312                &AudioRenderer::onAudioSinkStateChanged);
313
// Derive the sound-delay window from the buffer size the sink actually got.
314        m_timings.actualBufferDuration = durationForBytes(m_sink->bufferSize());
315        m_timings.maxSoundDelay = qMin(MaxDesiredBufferTime,
316                                       m_timings.actualBufferDuration - MinDesiredFreeBufferTime);
317        m_timings.minSoundDelay = MinDesiredBufferTime;
318
319        Q_ASSERT(DurationBias < m_timings.minSoundDelay
320                 && m_timings.maxSoundDelay < m_timings.actualBufferDuration);
321    }
322
323    if (!m_ioDevice) {
324        m_ioDevice = m_sink->start();
325        m_firstFrameToSink = true;
326    }
327
328    if (!m_audioFrameConverter)
329        initAudioFrameConverter(frame);
330}
331
// Classifies the current audible delay (frame delay + unplayed sink buffer -
// bytes just written) against the [minSoundDelay, maxSoundDelay] window and,
// once the deviation has persisted long enough (or immediately on the first
// frame / an idle sink), nudges the renderer clock back toward the target.
332void AudioRenderer::updateSynchronization(const SynchronizationStamp &stamp, const Frame &frame)
333{
334    if (!frame.isValid())
335        return;
336
337    Q_ASSERT(m_sink);
338
// Components of the effective sound delay, all as durations.
339    const auto bufferLoadingTime = this->bufferLoadingTime(stamp);
340    const auto currentFrameDelay = frameDelay(frame, stamp.timePoint);
341    const auto writtenTime = durationForBytes(stamp.bufferBytesWritten);
342    const auto soundDelay = currentFrameDelay + bufferLoadingTime - writtenTime;
343
344    auto synchronize = [&](microseconds fixedDelay, microseconds targetSoundDelay) {
345        // TODO: investigate if we need sample compensation here
346
347        changeRendererTime(fixedDelay - targetSoundDelay);
348        if (qLcAudioRenderer().isDebugEnabled()) {
349            // clang-format off
350            qCDebug(qLcAudioRenderer)
351                    << "Change rendering time:"
352                    << "\n First frame:" << m_firstFrameToSink
353                    << "\n Delay (frame+buffer-written):" << currentFrameDelay << "+"
354                    << bufferLoadingTime << "-"
355                    << writtenTime << "="
356                    << soundDelay
357                    << "\n Fixed delay:" << fixedDelay
358                    << "\n Target delay:" << targetSoundDelay
359                    << "\n Buffer durations (min/max/limit):" << m_timings.minSoundDelay
360                    << m_timings.maxSoundDelay
361                    << m_timings.actualBufferDuration
362                    << "\n Audio sink state:" << stamp.audioSinkState;
            // clang-format on
        }
    };

// High = too much buffered audio (picture lags), Low = risk of underrun.
367    const auto loadingType = soundDelay > m_timings.maxSoundDelay ? BufferLoadingInfo::High
368                           : soundDelay < m_timings.minSoundDelay ? BufferLoadingInfo::Low
369                                                                  : BufferLoadingInfo::Moderate;
370
// Restart the measurement window whenever the classification flips.
371    if (loadingType != m_bufferLoadingInfo.type) {
372        // qCDebug(qLcAudioRenderer) << "Change buffer loading type:" <<
373        // m_bufferLoadingInfo.type
374        //                           << "->" << loadingType << "soundDelay:" << soundDelay;
375        m_bufferLoadingInfo = { loadingType, stamp.timePoint, soundDelay };
376    }
377
378    if (loadingType != BufferLoadingInfo::Moderate) {
379        const auto isHigh = loadingType == BufferLoadingInfo::High;
380        const auto shouldHandleIdle = stamp.audioSinkState == QAudio::IdleState && !isHigh;
381
382        auto &fixedDelay = m_bufferLoadingInfo.delay;
383
// Track the least-extreme delay seen during the measurement window (min for
// High, max for Low); an idle sink uses the current value immediately.
384        fixedDelay = shouldHandleIdle ? soundDelay
385                     : isHigh         ? qMin(soundDelay, fixedDelay)
386                                      : qMax(soundDelay, fixedDelay);
387
// Correct the clock once the deviation has lasted BufferLoadingMeasureTime,
// or right away for the first frame (High) or an idle (starving) sink.
388        if (stamp.timePoint - m_bufferLoadingInfo.timePoint > BufferLoadingMeasureTime
389            || (m_firstFrameToSink && isHigh) || shouldHandleIdle) {
390            const auto targetDelay = isHigh
391                    ? (m_timings.maxSoundDelay + m_timings.minSoundDelay) / 2
392                    : m_timings.minSoundDelay + DurationBias;
393
394            synchronize(fixedDelay, targetDelay);
395            m_bufferLoadingInfo = { BufferLoadingInfo::Moderate, stamp.timePoint, targetDelay };
396        }
397    }
398}
399
// NOTE(review): the declaration line (400 in the original numbering) is
// missing from this capture; per the class reference this is
// Microseconds bufferLoadingTime(const SynchronizationStamp &syncStamp) const.
// Returns the playout duration of the data currently queued in the sink.
401{
402    Q_ASSERT(m_sink);
403
// An idle sink has drained its buffer: nothing is queued.
404    if (syncStamp.audioSinkState == QAudio::IdleState)
405        return microseconds(0);
406
// Occupied bytes = buffer size minus free space, clamped at zero.
407    const auto bytes = qMax(m_sink->bufferSize() - syncStamp.audioSinkBytesFree, 0);
408    return durationForBytes(bytes);
409}
410
411void AudioRenderer::onAudioSinkStateChanged(QAudio::State state)
412{
413 if (state == QAudio::IdleState && !m_firstFrameToSink && !m_deviceChanged)
414 scheduleNextStep();
415}
416
// NOTE(review): the declaration line (417 in the original numbering) is
// missing from this capture; per the class reference this is
// Microseconds durationForBytes(qsizetype bytes) const.
// Converts a sink-format byte count to its playout duration.
418{
// NOTE(review): the qint32 cast truncates very large qsizetype values —
// presumably byte counts here never exceed the sink buffer size; confirm.
419    return microseconds(m_sinkFormat.durationForBytes(static_cast<qint32>(bytes)));
420}
421
422} // namespace QFFmpeg
423
424QT_END_NAMESPACE
425
426#include "moc_qffmpegaudiorenderer_p.cpp"
\inmodule QtMultimedia
The QAudioFormat class stores audio stream parameter information.
constexpr bool isValid() const noexcept
Returns true if all of the parameters are valid.
Microseconds durationForBytes(qsizetype bytes) const
void setPitchCompensation(bool enabled)
TimePoint nextTimePoint() const override
void setOutput(QAudioBufferOutput *bufferOutput)
void onAudioSinkStateChanged(QAudio::State state)
void setOutput(QAudioOutput *output)
Microseconds bufferLoadingTime(const SynchronizationStamp &syncStamp) const
std::conditional_t< QT_FFMPEG_AVIO_WRITE_CONST, const uint8_t *, uint8_t * > AvioWriteBufferType
Combined button and popup list for selecting options.