Qt
Internal/Contributor docs for the Qt SDK. Note: These are NOT official API docs; those are found at https://doc.qt.io/
Loading...
Searching...
No Matches
qffmpegdemuxer.cpp
Go to the documentation of this file.
1// Copyright (C) 2021 The Qt Company Ltd.
2// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only
3
4#include "playbackengine/qffmpegdemuxer_p.h"
5#include <qloggingcategory.h>
6#include <chrono>
7
8QT_BEGIN_NAMESPACE
9
10namespace QFFmpeg {
11
// Per-stream buffering limit expressed as media time: 4 s (in microseconds).
// TODO: maybe move to env var customization
static constexpr TrackDuration MaxBufferedDurationUs{ 4'000'000 };

// Per-stream buffering limit expressed in bytes; around 4 sec of hdr video.
// Acts as a fallback when duration-based accounting is not meaningful.
static constexpr qint64 MaxBufferedSize = 32 * 1024 * 1024;

// Logging category for this file; enable with QT_LOGGING_RULES="qt.multimedia.ffmpeg.demuxer=true".
Q_STATIC_LOGGING_CATEGORY(qLcDemuxer, "qt.multimedia.ffmpeg.demuxer");
19
20static TrackPosition packetEndPos(const Packet &packet, const AVStream *stream,
21 const AVFormatContext *context)
22{
23 const AVPacket &avPacket = *packet.avPacket();
24 return packet.loopOffset().loopStartTimeUs.asDuration()
25 + toTrackPosition(AVStreamPosition(avPacket.pts + avPacket.duration), stream, context);
26}
27
28static bool isPacketWithinStreamDuration(const AVFormatContext *context, const Packet &packet)
29{
30 const AVPacket &avPacket = *packet.avPacket();
31 const AVStream &avStream = *context->streams[avPacket.stream_index];
32 const AVStreamDuration streamDuration(avStream.duration);
33 if (streamDuration.get() <= 0
34 || context->duration_estimation_method != AVFMT_DURATION_FROM_STREAM)
35 return true; // Stream duration shouldn't or doesn't need to be compared to pts
36
37 if (avPacket.pts == AV_NOPTS_VALUE) { // Unexpected situation
38 qWarning() << "QFFmpeg::Demuxer received AVPacket with pts == AV_NOPTS_VALUE";
39 return true;
40 }
41
42 if (avStream.start_time != AV_NOPTS_VALUE)
43 return AVStreamDuration(avPacket.pts - avStream.start_time) <= streamDuration;
44
45 const TrackPosition trackPos = toTrackPosition(AVStreamPosition(avPacket.pts), &avStream, context);
46 const TrackPosition trackPosOfStreamEnd = toTrackDuration(streamDuration, &avStream).asTimePoint();
47 return trackPos <= trackPosOfStreamEnd;
48
49 // TODO: If there is a packet that starts before the canonical end of the stream but has a
50 // malformed duration, rework doNextStep to check for eof after that packet.
51}
52
// Constructs a demuxer bound to one playback session.
//
// id            - playback-engine object id, forwarded to the base class.
// context       - FFmpeg demuxing context; must be non-null (asserted below).
// initialPosUs  - position within the first loop to start demuxing from.
// seekPending   - together with a zero initial position this initializes the
//                 third session-context member (presumably the "seeked" flag,
//                 meaning no initial av_seek_frame is needed - confirm against
//                 the SessionContext declaration in the header).
// loopOffset    - time offset and index of the loop this session starts in.
// streamIndexes - one stream index per track type; negative entries inactive.
// loops         - loop count; a negative value appears to mean "infinite",
//                 judging by the loadAcquire check in doNextStep.
Demuxer::Demuxer(const PlaybackEngineObjectID &id, AVFormatContext *context,
                 TrackPosition initialPosUs, bool seekPending, const LoopOffset &loopOffset,
                 const StreamIndexes &streamIndexes, int loops)
    : PlaybackEngineObject(id),
      m_context(context),
      m_sessionCtx{ initialPosUs, loopOffset, !seekPending && initialPosUs == TrackPosition{ 0 } },
      m_loops(loops)
{
    qCDebug(qLcDemuxer) << "Create demuxer."
                        << "pos:" << m_sessionCtx.posInLoopUs.get()
                        << "loop offset:" << m_sessionCtx.loopOffset.loopStartTimeUs.get()
                        << "loop index:" << m_sessionCtx.loopOffset.loopIndex << "loops:" << loops;

    Q_ASSERT(m_context);

    // Register only the streams selected for playback; packets from any other
    // stream are not dispatched by doNextStep.
    for (auto i = 0; i < QPlatformMediaPlayer::NTrackTypes; ++i) {
        if (streamIndexes[i] >= 0) {
            const auto trackType = static_cast<QPlatformMediaPlayer::TrackType>(i);
            qCDebug(qLcDemuxer) << "Activate demuxing stream" << i << ", trackType:" << trackType;
            m_streams[streamIndexes[i]] = { trackType };
        }
    }
}
76
77void Demuxer::seek(quint64 sessionId, TrackPosition initialPosUs, const LoopOffset &loopOffset)
78{
79 updateSession(sessionId, [this, initialPosUs, loopOffset]() {
80 m_sessionCtx = { initialPosUs, loopOffset };
81
82 for (auto &[id, streamData] : m_streams)
83 streamData = StreamData{ streamData.trackType };
84
85 scheduleNextStep();
86 });
87}
88
90{
91 ensureSeeked();
92
93 Packet packet(m_sessionCtx.loopOffset, AVPacketUPtr{ av_packet_alloc() }, id());
94 AVPacket &avPacket = *packet.avPacket();
95
96 const int demuxStatus = av_read_frame(m_context, &avPacket);
97 if (demuxStatus == AVERROR_EXIT)
98 return;
99
100 const int streamIndex = avPacket.stream_index;
101 auto streamIterator = m_streams.find(streamIndex);
102 const bool streamIsRelevant = streamIterator != m_streams.end();
103
104 if (demuxStatus == AVERROR_EOF
105 || (streamIsRelevant && !isPacketWithinStreamDuration(m_context, packet))) {
106 ++m_sessionCtx.loopOffset.loopIndex;
107
108 const auto loops = m_loops.loadAcquire();
109 if (loops >= 0 && m_sessionCtx.loopOffset.loopIndex >= loops) {
110 qCDebug(qLcDemuxer) << "finish demuxing";
111
112 if (!std::exchange(m_sessionCtx.buffered, true))
113 emit packetsBuffered();
114
115 setAtEnd(true);
116 } else {
117 // start next loop
118 m_sessionCtx.seeked = false;
119 m_sessionCtx.posInLoopUs = TrackPosition(0);
120 m_sessionCtx.loopOffset.loopStartTimeUs = m_sessionCtx.maxPacketsEndPos;
121 m_sessionCtx.maxPacketsEndPos = TrackPosition(0);
122
123 ensureSeeked();
124
125 qCDebug(qLcDemuxer) << "Demuxer loops changed. Index:"
126 << m_sessionCtx.loopOffset.loopIndex
127 << "Offset:" << m_sessionCtx.loopOffset.loopStartTimeUs.get();
128
129 scheduleNextStep();
130 }
131
132 return;
133 }
134
135 if (demuxStatus < 0) {
136 qCWarning(qLcDemuxer) << "Demuxing failed" << demuxStatus << AVError(demuxStatus);
137
138 if (demuxStatus == AVERROR(EAGAIN)
139 && m_sessionCtx.demuxerRetryCount != s_maxDemuxerRetries) {
140 // When demuxer reports EAGAIN, we can try to recover by calling av_read_frame again.
141 // The documentation for av_read_frame does not mention this, but FFmpeg command line
142 // tool does this, see input_thread() function in ffmpeg_demux.c. There, the response
143 // is to sleep for 10 ms before trying again. NOTE: We do not have any known way of
144 // reproducing this in our tests.
145 m_sessionCtx.failTimePoint = std::chrono::steady_clock::now();
146 ++m_sessionCtx.demuxerRetryCount;
147
148 qCDebug(qLcDemuxer) << "Retrying";
149 scheduleNextStep();
150 } else {
151 // av_read_frame reports another error. This could for example happen if network is
152 // disconnected while playing a network stream, where av_read_frame may return
153 // ETIMEDOUT.
154 // TODO: Demuxer errors should likely stop playback in media player examples.
155 emit error(QMediaPlayer::ResourceError,
156 QLatin1StringView("Demuxing failed"));
157 }
158
159 return;
160 }
161
162 m_sessionCtx.demuxerRetryCount = 0;
163 m_sessionCtx.failTimePoint.reset();
164
165 if (streamIsRelevant) {
166 auto &streamData = streamIterator->second;
167 const AVStream *stream = m_context->streams[streamIndex];
168
169 const TrackPosition endPos = packetEndPos(packet, stream, m_context);
170 m_sessionCtx.maxPacketsEndPos = qMax(m_sessionCtx.maxPacketsEndPos, endPos);
171
172 // Increase buffered metrics as the packet has been processed.
173
174 streamData.bufferedDuration += toTrackDuration(AVStreamDuration(avPacket.duration), stream);
175 streamData.bufferedSize += avPacket.size;
176 streamData.maxSentPacketsPos = qMax(streamData.maxSentPacketsPos, endPos);
177 updateStreamDataLimitFlag(streamData);
178
179 if (!m_sessionCtx.buffered && streamData.isDataLimitReached) {
180 m_sessionCtx.buffered = true;
181 emit packetsBuffered();
182 }
183
184 if (!m_sessionCtx.firstPacketFound) {
185 m_sessionCtx.firstPacketFound = true;
186 emit firstPacketFound(id(),
187 m_sessionCtx.posInLoopUs
188 + m_sessionCtx.loopOffset.loopStartTimeUs.asDuration());
189 }
190
191 auto signal = signalByTrackType(streamData.trackType);
192 emit (this->*signal)(std::move(packet));
193 }
194
195 scheduleNextStep();
196}
197
198void Demuxer::onPacketProcessed(Packet packet)
199{
200 Q_ASSERT(packet.isValid());
201
202 if (!checkID(packet.sourceID()))
203 return;
204
205 auto &avPacket = *packet.avPacket();
206
207 const auto streamIndex = avPacket.stream_index;
208 const auto stream = m_context->streams[streamIndex];
209 auto it = m_streams.find(streamIndex);
210
211 if (it != m_streams.end()) {
212 auto &streamData = it->second;
213
214 // Decrease buffered metrics as new data (the packet) has been received (buffered)
215
216 streamData.bufferedDuration -= toTrackDuration(AVStreamDuration(avPacket.duration), stream);
217 streamData.bufferedSize -= avPacket.size;
218 streamData.maxProcessedPacketPos =
219 qMax(streamData.maxProcessedPacketPos, packetEndPos(packet, stream, m_context));
220
221 Q_ASSERT(it->second.bufferedDuration >= TrackDuration(0));
222 Q_ASSERT(it->second.bufferedSize >= 0);
223
224 updateStreamDataLimitFlag(streamData);
225 }
226
227 scheduleNextStep();
228}
229
231{
232 Q_ASSERT(m_sessionCtx.failTimePoint.has_value() == !!m_sessionCtx.demuxerRetryCount);
233 return m_sessionCtx.failTimePoint ? *m_sessionCtx.failTimePoint + s_demuxerRetryInterval
234 : PlaybackEngineObject::nextTimePoint();
235}
236
238{
239 auto isDataLimitReached = [](const auto &streamIndexToData) {
240 return streamIndexToData.second.isDataLimitReached;
241 };
242
243 // Demuxer waits:
244 // - if it's paused
245 // - if the end has been reached
246 // - if streams are empty (probably, should be handled on the initialization)
247 // - if at least one of the streams has reached the data limit (duration or size)
248
249 return PlaybackEngineObject::canDoNextStep() && !isAtEnd() && !m_streams.empty()
250 && std::none_of(m_streams.begin(), m_streams.end(), isDataLimitReached);
251}
252
// Performs the session's initial av_seek_frame lazily, exactly once per
// session: the std::exchange flips m_sessionCtx.seeked on first entry, so
// later calls are no-ops. Invoked from doNextStep before each read and again
// when a new loop starts.
void Demuxer::ensureSeeked()
{
    if (std::exchange(m_sessionCtx.seeked, true))
        return;

    if ((m_context->ctx_flags & AVFMTCTX_UNSEEKABLE) == 0) {

        // m_posInLoopUs is intended to be the number of microseconds since playback start, and is
        // in the range [0, duration()]. av_seek_frame seeks to a position relative to the start of
        // the media timeline, which may be non-zero. We adjust for this by adding the
        // AVFormatContext's start_time.
        //
        // NOTE: m_posInLoop is not calculated correctly if the start_time is non-zero, but
        // this must be fixed separately.
        const AVContextPosition seekPos = toContextPosition(m_sessionCtx.posInLoopUs, m_context);

        qCDebug(qLcDemuxer).nospace()
                << "Seeking to offset " << m_sessionCtx.posInLoopUs.get() << "us from media start.";

        // AVSEEK_FLAG_BACKWARD: seek to a point at or before the requested
        // position so decoding can resume from a keyframe.
        auto err = av_seek_frame(m_context, -1, seekPos.get(), AVSEEK_FLAG_BACKWARD);

        if (err < 0) {
            qCWarning(qLcDemuxer) << "Failed to seek, pos" << seekPos.get();

            // Drop an error of seeking to initial position of streams with undefined duration.
            // This needs improvements.
            if (m_sessionCtx.posInLoopUs != TrackPosition{ 0 } || m_context->duration > 0)
                emit error(QMediaPlayer::ResourceError,
                           QLatin1StringView("Failed to seek: ") + err2str(err));
        }
    }

    setAtEnd(false);
}
287
288Demuxer::RequestingSignal Demuxer::signalByTrackType(QPlatformMediaPlayer::TrackType trackType)
289{
290 switch (trackType) {
291 case QPlatformMediaPlayer::TrackType::VideoStream:
292 return &Demuxer::requestProcessVideoPacket;
293 case QPlatformMediaPlayer::TrackType::AudioStream:
294 return &Demuxer::requestProcessAudioPacket;
295 case QPlatformMediaPlayer::TrackType::SubtitleStream:
296 return &Demuxer::requestProcessSubtitlePacket;
297 default:
298 Q_ASSERT(!"Unknown track type");
299 }
300
301 return nullptr;
302}
303
// Updates the loop count used for end-of-stream decisions. The storeRelease
// pairs with the loadAcquire in doNextStep, so this appears intended to be
// callable from a different thread than the demuxing one.
void Demuxer::setLoops(int loopsCount)
{
    qCDebug(qLcDemuxer) << "setLoops to demuxer" << loopsCount;
    m_loops.storeRelease(loopsCount);
}
309
310void Demuxer::updateStreamDataLimitFlag(StreamData &streamData)
311{
312 const TrackDuration packetsPosDiff =
313 streamData.maxSentPacketsPos - streamData.maxProcessedPacketPos;
314 streamData.isDataLimitReached = streamData.bufferedDuration >= MaxBufferedDurationUs
315 || (streamData.bufferedDuration == TrackDuration(0)
316 && packetsPosDiff >= MaxBufferedDurationUs)
317 || streamData.bufferedSize >= MaxBufferedSize;
318}
319
320} // namespace QFFmpeg
321
322QT_END_NAMESPACE
323
324#include "moc_qffmpegdemuxer_p.cpp"
void setLoops(int loopsCount)
void doNextStep() override
void seek(quint64 sessionId, TrackPosition initialPosUs, const LoopOffset &loopOffset)
void(Demuxer::*)(Packet) RequestingSignal
bool canDoNextStep() const override
TimePoint nextTimePoint() const override
static constexpr TrackDuration MaxBufferedDurationUs
static constexpr qint64 MaxBufferedSize
std::conditional_t< QT_FFMPEG_AVIO_WRITE_CONST, const uint8_t *, uint8_t * > AvioWriteBufferType
static TrackPosition packetEndPos(const Packet &packet, const AVStream *stream, const AVFormatContext *context)
static bool isPacketWithinStreamDuration(const AVFormatContext *context, const Packet &packet)
#define qCWarning(category,...)
#define qCDebug(category,...)
#define Q_STATIC_LOGGING_CATEGORY(name,...)