Qt
Internal/Contributor docs for the Qt SDK. Note: These are NOT official API docs; those are found at https://doc.qt.io/
qffmpegaudiorenderer.cpp
// Copyright (C) 2021 The Qt Company Ltd.
// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only

#include "playbackengine/qffmpegaudiorenderer_p.h"

#include <QtMultimedia/qaudiosink.h>
#include <QtMultimedia/qaudiooutput.h>
#include <QtMultimedia/qaudiobufferoutput.h>
#include <QtMultimedia/private/qaudiobuffer_support_p.h>
#include <QtMultimedia/private/qplatformaudiooutput_p.h>

#include <QtCore/qloggingcategory.h>

#include "qffmpegmediaformatinfo_p.h"
#include "qffmpegresampler_p.h"

QT_BEGIN_NAMESPACE

Q_STATIC_LOGGING_CATEGORY(qLcAudioRenderer, "qt.multimedia.ffmpeg.audiorenderer");

namespace QFFmpeg {

using namespace std::chrono_literals;
using namespace std::chrono;

namespace {
constexpr auto DesiredBufferTime = 110000us;
constexpr auto MinDesiredBufferTime = 22000us;
constexpr auto MaxDesiredBufferTime = 64000us;
constexpr auto MinDesiredFreeBufferTime = 10000us;

// This may have to become platform-dependent (#ifdef): on Linux, QPulseAudioSink has quite
// unstable timings and needs much more time to make sure that the buffer is overloaded.
constexpr auto BufferLoadingMeasureTime = 400ms;

constexpr auto DurationBias = 2ms; // avoids extra timer events

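// Putting the numbers above together (assuming the sink grants the requested buffer size):
// the sink buffer is created for DesiredBufferTime (110 ms), and updateOutputs() then derives
//     maxSoundDelay = min(MaxDesiredBufferTime, 110 ms - MinDesiredFreeBufferTime) = 64 ms
//     minSoundDelay = MinDesiredBufferTime = 22 ms
// so the renderer aims to keep between 22 ms and 64 ms of audio queued in the sink while
// leaving at least 10 ms of the device buffer free.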
QAudioFormat audioFormatFromFrame(const Frame &frame)
{
    return QFFmpegMediaFormatInfo::audioFormatFromCodecParameters(
            *frame.codecContext()->stream()->codecpar);
}

} // namespace

AudioRenderer::AudioRenderer(const PlaybackEngineObjectID &id, const TimeController &tc,
                             QAudioOutput *output, QAudioBufferOutput *bufferOutput,
                             bool pitchCompensation)
    : Renderer(id, tc),
      m_output(output),
      m_bufferOutput(bufferOutput),
      m_pitchCompensation(pitchCompensation)
{
    if (output) {
        // TODO: implement the signals in QPlatformAudioOutput and connect to them, QTBUG-112294
        connect(output, &QAudioOutput::deviceChanged, this, &AudioRenderer::onDeviceChanged);
        connect(output, &QAudioOutput::volumeChanged, this, &AudioRenderer::updateVolume);
        connect(output, &QAudioOutput::mutedChanged, this, &AudioRenderer::updateVolume);
    }
}

void AudioRenderer::setOutput(QAudioOutput *output)
{
    setOutputInternal(m_output, output, [this](QAudioOutput *) { onDeviceChanged(); });
}

void AudioRenderer::setOutput(QAudioBufferOutput *bufferOutput)
{
    setOutputInternal(m_bufferOutput, bufferOutput,
                      [this](QAudioBufferOutput *) { m_bufferOutputChanged = true; });
}

void AudioRenderer::setPitchCompensation(bool enabled)
{
    invokePriorityMethod([this, enabled] {
        if (m_pitchCompensation == enabled)
            return;

        m_pitchCompensation = enabled;
        m_audioFrameConverter.reset();
    });
}

void AudioRenderer::updateVolume()
{
    if (m_sink)
        m_sink->setVolume(m_output->isMuted() ? 0.f : m_output->volume());
}

void AudioRenderer::onDeviceChanged()
{
    m_deviceChanged = true;
}

Renderer::RenderingResult AudioRenderer::renderInternal(Frame frame)
{
    if (frame.isValid())
        updateOutputs(frame);

    // Push to the sink first so that the resampling for QAudioBufferOutput does not delay it.
    const RenderingResult result = pushFrameToOutput(frame);

    if (m_lastFramePushDone)
        pushFrameToBufferOutput(frame);
    // else // skip pushing the same data to QAudioBufferOutput

    m_lastFramePushDone = result.done;

    return result;
}

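// A sketch of pushFrameToOutput() as written below (not a spec of RenderingResult): with no
// buffered data and an invalid frame the stream is draining, so the result reports how long the
// sink still needs to play out its queued bytes; otherwise the frame is converted once, written
// to the sink's QIODevice, and any unwritten remainder stays in m_bufferedData with a request to
// retry after roughly the remainder's duration (plus DurationBias, capped at half the sink
// buffer duration).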
AudioRenderer::RenderingResult AudioRenderer::pushFrameToOutput(const Frame &frame)
{
    if (!m_ioDevice || !m_audioFrameConverter)
        return {};

    Q_ASSERT(m_sink);

    auto firstFrameFlagGuard = qScopeGuard([&]() { m_firstFrameToSink = false; });

    const SynchronizationStamp syncStamp{ m_sink->state(), m_sink->bytesFree(),
                                          m_bufferedData.offset, SteadyClock::now() };

    if (!m_bufferedData.isValid()) {
        if (!frame.isValid()) {
            if (std::exchange(m_drained, true))
                return {};

            const auto time = bufferLoadingTime(syncStamp);

            qCDebug(qLcAudioRenderer) << "Draining AudioRenderer, time:" << time;

            return { time.count() == 0, time };
        }

        m_bufferedData = {
            m_audioFrameConverter->convert(frame.avFrame()),
        };
    }

    if (m_bufferedData.isValid()) {
        // synchronize after "QIODevice::write" to deliver audio data to the sink ASAP.
        auto syncGuard = qScopeGuard([&]() { updateSynchronization(syncStamp, frame); });

        const auto bytesWritten = m_ioDevice->write(m_bufferedData.data(), m_bufferedData.size());

        m_bufferedData.offset += bytesWritten;

        if (m_bufferedData.size() <= 0) {
            m_bufferedData = {};

            return {};
        }

        const auto remainingDuration = durationForBytes(m_bufferedData.size());

        return { false,
                 std::min(remainingDuration + DurationBias, m_timings.actualBufferDuration / 2) };
    }

    return {};
}

void AudioRenderer::pushFrameToBufferOutput(const Frame &frame)
{
    if (!m_bufferOutput)
        return;

    if (frame.isValid()) {
        Q_ASSERT(m_bufferOutputResampler);

        // TODO: get buffer from m_bufferedData if resample formats are equal
        QAudioBuffer buffer = m_bufferOutputResampler->resample(frame.avFrame());
        emit m_bufferOutput->audioBufferReceived(buffer);
    } else {
        emit m_bufferOutput->audioBufferReceived({});
    }
}

void AudioRenderer::onPlaybackRateChanged()
{
    m_audioFrameConverter.reset();
}

TimePoint AudioRenderer::nextTimePoint() const
{
    const TimePoint timePoint = Renderer::nextTimePoint();

    // if the first frame is expected, don't force the immediate job
    if (m_firstFrameToSink)
        return timePoint;

    // if the sink is active, don't force the immediate job
    if (!m_sink || m_sink->state() != QAudio::IdleState)
        return timePoint;

    // if the waiting interval is out of a heuristically fixable range,
    // don't force the immediate job
    constexpr auto MaxFixableInterval = 50ms;
    if (timePoint == TimePoint::min() ||
        timePoint - std::chrono::steady_clock::now() > MaxFixableInterval)
        return timePoint;

    return TimePoint::min(); // do the job now
}

void AudioRenderer::onPauseChanged()
{
    m_firstFrameToSink = true;
    Renderer::onPauseChanged();
}

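// Why two converter flavours: plain resampling to a different playback rate also shifts the
// pitch, so when pitch compensation is enabled and the rate is not (fuzzily) 1.0 a
// pitch-shifting converter is used instead of the trivial one, keeping the perceived pitch
// stable at non-1x rates.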
void AudioRenderer::initAudioFrameConverter(const Frame &frame)
{
    // We recreate the frame converter whenever the format or the playback rate changes
    if (!m_pitchCompensation || qFuzzyCompare(playbackRate(), 1.0f)) {
        m_audioFrameConverter = makeTrivialAudioFrameConverter(frame, m_sinkFormat, playbackRate());
    } else {
        m_audioFrameConverter =
                makePitchShiftingAudioFrameConverter(frame, m_sinkFormat, playbackRate());
    }
}

void AudioRenderer::freeOutput()
{
    qCDebug(qLcAudioRenderer) << "Free audio output";
    if (m_sink) {
        m_sink->reset();

        // TODO: investigate if it's enough to reset the sink without deleting
        m_sink.reset();
    }

    m_ioDevice = nullptr;

    m_bufferedData = {};
    m_deviceChanged = false;
    m_sinkFormat = {};
    m_timings = {};
    m_bufferLoadingInfo = {};
}

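// updateOutputs() lazily (re)creates whatever the current frame needs: the resampler feeding
// QAudioBufferOutput, the QAudioSink (sized for DesiredBufferTime) and the audio frame
// converter. The m_timings window computed below is the 22-64 ms target described next to the
// constants at the top of the file.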
void AudioRenderer::updateOutputs(const Frame &frame)
{
    if (m_deviceChanged) {
        freeOutput();
        m_audioFrameConverter.reset();
    }

    if (m_bufferOutput) {
        if (m_bufferOutputChanged) {
            m_bufferOutputChanged = false;
            m_bufferOutputResampler.reset();
        }

        if (!m_bufferOutputResampler) {
            QAudioFormat outputFormat = m_bufferOutput->format();
            if (!outputFormat.isValid())
                outputFormat = audioFormatFromFrame(frame);
            m_bufferOutputResampler = createResampler(frame, outputFormat);
        }
    }

    if (!m_output)
        return;

    if (!m_sinkFormat.isValid()) {
        m_sinkFormat = audioFormatFromFrame(frame);
        m_sinkFormat.setChannelConfig(m_output->device().channelConfiguration());
    }

    if (!m_sink) {
        // Insert a delay here to test time offset synchronization, e.g. QThread::sleep(1)
        m_sink = std::make_unique<QAudioSink>(m_output->device(), m_sinkFormat);
        updateVolume();
        m_sink->setBufferSize(m_sinkFormat.bytesForDuration(DesiredBufferTime.count()));
        m_ioDevice = m_sink->start();
        m_firstFrameToSink = true;

        connect(m_sink.get(), &QAudioSink::stateChanged, this,
                &AudioRenderer::onAudioSinkStateChanged);

        m_timings.actualBufferDuration = durationForBytes(m_sink->bufferSize());
        m_timings.maxSoundDelay = qMin(MaxDesiredBufferTime,
                                       m_timings.actualBufferDuration - MinDesiredFreeBufferTime);
        m_timings.minSoundDelay = MinDesiredBufferTime;

        Q_ASSERT(DurationBias < m_timings.minSoundDelay
                 && m_timings.maxSoundDelay < m_timings.actualBufferDuration);
    }

    if (!m_audioFrameConverter)
        initAudioFrameConverter(frame);
}

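// Synchronization sketch, as implemented below: the current sound delay is estimated as
//     soundDelay = currentFrameDelay + bufferLoadingTime - writtenTime
// (the delay of the frame being rendered, plus the audio already queued in the sink, minus the
// bytes written for this stamp). The delay is classified as Low / Moderate / High against
// m_timings; a non-moderate state has to persist for BufferLoadingMeasureTime (or hit the
// first-frame / idle-sink shortcuts) before changeRendererTime() pulls the delay back towards
// the target window, which filters out the jitter of individual bytesFree() readings.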
void AudioRenderer::updateSynchronization(const SynchronizationStamp &stamp, const Frame &frame)
{
    if (!frame.isValid())
        return;

    Q_ASSERT(m_sink);

    const auto bufferLoadingTime = this->bufferLoadingTime(stamp);
    const auto currentFrameDelay = frameDelay(frame, stamp.timePoint);
    const auto writtenTime = durationForBytes(stamp.bufferBytesWritten);
    const auto soundDelay = currentFrameDelay + bufferLoadingTime - writtenTime;

    auto synchronize = [&](microseconds fixedDelay, microseconds targetSoundDelay) {
        // TODO: investigate if we need sample compensation here

        changeRendererTime(fixedDelay - targetSoundDelay);
        if (qLcAudioRenderer().isDebugEnabled()) {
            // clang-format off
            qCDebug(qLcAudioRenderer)
                    << "Change rendering time:"
                    << "\n  First frame:" << m_firstFrameToSink
                    << "\n  Delay (frame+buffer-written):" << currentFrameDelay << "+"
                                                           << bufferLoadingTime << "-"
                                                           << writtenTime << "="
                                                           << soundDelay
                    << "\n  Fixed delay:" << fixedDelay
                    << "\n  Target delay:" << targetSoundDelay
                    << "\n  Buffer durations (min/max/limit):" << m_timings.minSoundDelay
                                                               << m_timings.maxSoundDelay
                                                               << m_timings.actualBufferDuration
                    << "\n  Audio sink state:" << stamp.audioSinkState;
            // clang-format on
        }
    };

    const auto loadingType = soundDelay > m_timings.maxSoundDelay ? BufferLoadingInfo::High
                           : soundDelay < m_timings.minSoundDelay ? BufferLoadingInfo::Low
                                                                  : BufferLoadingInfo::Moderate;

    if (loadingType != m_bufferLoadingInfo.type) {
        // qCDebug(qLcAudioRenderer) << "Change buffer loading type:" << m_bufferLoadingInfo.type
        //                           << "->" << loadingType << "soundDelay:" << soundDelay;
        m_bufferLoadingInfo = { loadingType, stamp.timePoint, soundDelay };
    }

    if (loadingType != BufferLoadingInfo::Moderate) {
        const auto isHigh = loadingType == BufferLoadingInfo::High;
        const auto shouldHandleIdle = stamp.audioSinkState == QAudio::IdleState && !isHigh;

        auto &fixedDelay = m_bufferLoadingInfo.delay;

        fixedDelay = shouldHandleIdle ? soundDelay
                   : isHigh           ? qMin(soundDelay, fixedDelay)
                                      : qMax(soundDelay, fixedDelay);

        if (stamp.timePoint - m_bufferLoadingInfo.timePoint > BufferLoadingMeasureTime
            || (m_firstFrameToSink && isHigh) || shouldHandleIdle) {
            const auto targetDelay = isHigh
                    ? (m_timings.maxSoundDelay + m_timings.minSoundDelay) / 2
                    : m_timings.minSoundDelay + DurationBias;

            synchronize(fixedDelay, targetDelay);
            m_bufferLoadingInfo = { BufferLoadingInfo::Moderate, stamp.timePoint, targetDelay };
        }
    }
}

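// bufferLoadingTime() converts the sink buffer's fill level (bufferSize - bytesFree) into the
// time the sink still needs to drain what is already queued; an idle sink has nothing queued.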
Microseconds AudioRenderer::bufferLoadingTime(const SynchronizationStamp &syncStamp) const
{
    Q_ASSERT(m_sink);

    if (syncStamp.audioSinkState == QAudio::IdleState)
        return microseconds(0);

    const auto bytes = qMax(m_sink->bufferSize() - syncStamp.audioSinkBytesFree, 0);

#ifdef Q_OS_ANDROID
    // This hack works around QAndroidAudioSink issues (QTBUG-118609):
    // QAndroidAudioSink::bytesFree currently returns only 0 or bufferSize, never an
    // intermediate value; to be fixed.
    if (bytes == 0)
        return m_timings.minSoundDelay + MinDesiredBufferTime;
#endif

    return durationForBytes(bytes);
}

void AudioRenderer::onAudioSinkStateChanged(QAudio::State state)
{
    if (state == QAudio::IdleState && !m_firstFrameToSink && !m_deviceChanged)
        scheduleNextStep();
}

Microseconds AudioRenderer::durationForBytes(qsizetype bytes) const
{
    return microseconds(m_sinkFormat.durationForBytes(static_cast<qint32>(bytes)));
}

} // namespace QFFmpeg

QT_END_NAMESPACE

#include "moc_qffmpegaudiorenderer_p.cpp"