Qt
Internal/Contributor docs for the Qt SDK. Note: These are NOT official API docs; those are found at https://doc.qt.io/
qv4mm.cpp
// Copyright (C) 2021 The Qt Company Ltd.
// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only

#include "PageAllocation.h"
#include "PageReservation.h"

#include <private/qnumeric_p.h>
#include <private/qv4alloca_p.h>
#include <private/qv4engine_p.h>
#include <private/qv4identifiertable_p.h>
#include <private/qv4mapobject_p.h>
#include <private/qv4mm_p.h>
#include <private/qv4object_p.h>
#include <private/qv4profiling_p.h>
#include <private/qv4qobjectwrapper_p.h>
#include <private/qv4setobject_p.h>
#include <private/qv4stackframe_p.h>

#include <QtQml/qqmlengine.h>

#include <QtCore/qalgorithms.h>
#include <QtCore/qelapsedtimer.h>
#include <QtCore/qloggingcategory.h>
#include <QtCore/qmap.h>
#include <QtCore/qscopedvaluerollback.h>

#include <algorithm>
#include <chrono>
#include <cstdlib>

//#define MM_STATS

#if !defined(MM_STATS) && !defined(QT_NO_DEBUG)
#define MM_STATS
#endif

#if MM_DEBUG
#define DEBUG qDebug() << "MM:"
#else
#define DEBUG if (1) ; else qDebug() << "MM:"
#endif

#ifdef V4_USE_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif

#ifdef V4_USE_HEAPTRACK
#include <heaptrack_api.h>
#endif

#if OS(QNX)
#include <sys/storage.h> // __tls()
#endif

#if USE(PTHREADS) && HAVE(PTHREAD_NP_H)
#include <pthread_np.h>
#endif

Q_STATIC_LOGGING_CATEGORY(lcGcStats, "qt.qml.gc.statistics")
Q_STATIC_LOGGING_CATEGORY(lcGcAllocatorStats, "qt.qml.gc.allocatorStats")
Q_STATIC_LOGGING_CATEGORY(lcGcStateTransitions, "qt.qml.gc.stateTransitions")
Q_STATIC_LOGGING_CATEGORY(lcGcForcedRuns, "qt.qml.gc.forcedRuns")
Q_STATIC_LOGGING_CATEGORY(lcGcStepExecution, "qt.qml.gc.stepExecution")
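
// Contributor note: these logging categories can be enabled at run time through the
// standard logging machinery, e.g. QT_LOGGING_RULES="qt.qml.gc.statistics.debug=true".
// qt.qml.gc.statistics and qt.qml.gc.allocatorStats additionally switch the collector
// into its (more expensive) statistics-gathering mode; see how gcStats and
// gcCollectorStats are derived from them further down.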

using namespace WTF;

QT_BEGIN_NAMESPACE

namespace QV4 {

enum {
    MinSlotsGCLimit = QV4::Chunk::AvailableSlots*16,
    GCOverallocation = 200 /* Max overallocation by the GC in % */
};
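
// A MemorySegment is one contiguous reservation of address space, carved into up
// to NumChunks chunk-sized blocks. allocatedMap holds one bit per chunk; a set bit
// means that chunk is currently committed and handed out.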

struct MemorySegment {
    enum {
#ifdef Q_OS_RTEMS
        NumChunks = sizeof(quint64),
#else
        NumChunks = 8*sizeof(quint64),
#endif
        SegmentSize = NumChunks*Chunk::ChunkSize,
    };

    MemorySegment(size_t size)
    {
        size += Chunk::ChunkSize; // make sure we can get enough 64k alignment memory
        if (size < SegmentSize)
            size = SegmentSize;

        pageReservation = PageReservation::reserve(size, OSAllocator::JSGCHeapPages);
        base = reinterpret_cast<Chunk *>((reinterpret_cast<quintptr>(pageReservation.base()) + Chunk::ChunkSize - 1) & ~(Chunk::ChunkSize - 1));
        nChunks = NumChunks;
        availableBytes = size - (reinterpret_cast<quintptr>(base) - reinterpret_cast<quintptr>(pageReservation.base()));
        if (availableBytes < SegmentSize)
            --nChunks;
    }
    MemorySegment(MemorySegment &&other) {
        qSwap(pageReservation, other.pageReservation);
        qSwap(base, other.base);
        qSwap(allocatedMap, other.allocatedMap);
        qSwap(availableBytes, other.availableBytes);
        qSwap(nChunks, other.nChunks);
    }

    ~MemorySegment() {
        if (base)
            pageReservation.deallocate();
    }

    void setBit(size_t index) {
        Q_ASSERT(index < nChunks);
        quint64 bit = static_cast<quint64>(1) << index;
        allocatedMap |= bit;
    }
    void clearBit(size_t index) {
        Q_ASSERT(index < nChunks);
        quint64 bit = static_cast<quint64>(1) << index;
        allocatedMap &= ~bit;
    }
    bool testBit(size_t index) const {
        Q_ASSERT(index < nChunks);
        quint64 bit = static_cast<quint64>(1) << index;
        return (allocatedMap & bit);
    }

    Chunk *allocate(size_t size);
    void free(Chunk *chunk, size_t size) {
        DEBUG << "freeing chunk" << chunk;
        size_t index = static_cast<size_t>(chunk - base);
        size_t end = qMin(static_cast<size_t>(NumChunks), index + (size - 1)/Chunk::ChunkSize + 1);
        while (index < end) {
            Q_ASSERT(testBit(index));
            clearBit(index);
            ++index;
        }

        size_t pageSize = WTF::pageSize();
        size = (size + pageSize - 1) & ~(pageSize - 1);
#if !defined(Q_OS_LINUX) && !defined(Q_OS_WIN)
        // Linux and Windows zero out pages that have been decommitted and get committed again.
        // Unfortunately that's not true on other OSes (e.g. BSD based ones), so zero out the
        // memory before decommit, so that we can be sure that all chunks we allocate will be
        // zero initialized.
        memset(chunk, 0, size);
#endif
        pageReservation.decommit(chunk, size);
    }

    bool contains(Chunk *c) const {
        return c >= base && c < base + nChunks;
    }

    PageReservation pageReservation;
    Chunk *base = nullptr;
    quint64 allocatedMap = 0;
    size_t availableBytes = 0;
    uint nChunks = 0;
};
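
// Finds a run of adjacent free chunks large enough for the requested size, commits
// those pages and marks their bits in allocatedMap. For example, a request needing
// three chunks with allocatedMap = 0b10001 is served at chunk 1, since bits 1..3
// are clear; if no long-enough run exists in this segment, nullptr is returned and
// the ChunkAllocator moves on to the next segment.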

Chunk *MemorySegment::allocate(size_t size)
{
    if (!allocatedMap && size >= SegmentSize) {
        // chunk allocated for one huge allocation
        Q_ASSERT(availableBytes >= size);
        pageReservation.commit(base, size);
        allocatedMap = ~static_cast<quint64>(0);
        return base;
    }
    size_t requiredChunks = (size + sizeof(Chunk) - 1)/sizeof(Chunk);
    uint sequence = 0;
    Chunk *candidate = nullptr;
    for (uint i = 0; i < nChunks; ++i) {
        if (!testBit(i)) {
            if (!candidate)
                candidate = base + i;
            ++sequence;
        } else {
            candidate = nullptr;
            sequence = 0;
        }
        if (sequence == requiredChunks) {
            pageReservation.commit(candidate, size);
            for (uint i = 0; i < requiredChunks; ++i)
                setBit(candidate - base + i);
            DEBUG << "allocated chunk " << candidate << Qt::hex << size;

            return candidate;
        }
    }
    return nullptr;
}

struct ChunkAllocator {
    ChunkAllocator() {}

    size_t requiredChunkSize(size_t size) {
        size += Chunk::HeaderSize; // space required for the Chunk header
        size_t pageSize = WTF::pageSize();
        size = (size + pageSize - 1) & ~(pageSize - 1); // align to page sizes
        if (size < Chunk::ChunkSize)
            size = Chunk::ChunkSize;
        return size;
    }

    Chunk *allocate(size_t size = 0);
    void free(Chunk *chunk, size_t size = 0);

    std::vector<MemorySegment> memorySegments;
};
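
// requiredChunkSize() worked example, assuming 4 KiB pages and 64 KiB chunks: a
// request of 1000 bytes grows by Chunk::HeaderSize, rounds up to one 4 KiB page,
// and is then clamped up to a single 64 KiB chunk; a 100000 byte request instead
// rounds up to the next page boundary above 100000 + HeaderSize and keeps that size.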

Chunk *ChunkAllocator::allocate(size_t size)
{
    size = requiredChunkSize(size);
    for (auto &m : memorySegments) {
        if (~m.allocatedMap) {
            Chunk *c = m.allocate(size);
            if (c)
                return c;
        }
    }

    // allocate a new segment
    memorySegments.push_back(MemorySegment(size));
    Chunk *c = memorySegments.back().allocate(size);
    Q_ASSERT(c);
    return c;
}

void ChunkAllocator::free(Chunk *chunk, size_t size)
{
    size = requiredChunkSize(size);
    for (auto &m : memorySegments) {
        if (m.contains(chunk)) {
            m.free(chunk, size);
            return;
        }
    }
    Q_ASSERT(false);
}

#ifdef DUMP_SWEEP
QString binary(quintptr n) {
    QString s = QString::number(n, 2);
    while (s.length() < 64)
        s.prepend(QChar::fromLatin1('0'));
    return s;
}
#define SDUMP qDebug
#else
QString binary(quintptr) { return QString(); }
#define SDUMP if (1) ; else qDebug
#endif

// Stores a classname -> freed count mapping.
typedef QHash<const char*, int> MMStatsHash;
Q_GLOBAL_STATIC(MMStatsHash, freedObjectStatsGlobal)

// This indirection avoids sticking QHash code in each of the call sites, which
// shaves off some instructions in the case that it's unused.
static void increaseFreedCountForClass(const char *className)
{
    (*freedObjectStatsGlobal())[className]++;
}

//bool Chunk::sweep(ClassDestroyStatsCallback classCountPtr)
bool Chunk::sweep(ExecutionEngine *engine)
{
    bool hasUsedSlots = false;
    SDUMP() << "sweeping chunk" << this;
    HeapItem *o = realBase();
    bool lastSlotFree = false;
    for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) {
        quintptr toFree = objectBitmap[i] ^ blackBitmap[i];
        Q_ASSERT((toFree & objectBitmap[i]) == toFree); // check all black objects are marked as being used
        quintptr e = extendsBitmap[i];
        SDUMP() << "   index=" << i;
        SDUMP() << "   toFree  =" << binary(toFree);
        SDUMP() << "   black   =" << binary(blackBitmap[i]);
        SDUMP() << "   object  =" << binary(objectBitmap[i]);
        SDUMP() << "   extends =" << binary(e);
        if (lastSlotFree)
            e &= (e + 1); // clear all lowest extent bits
        while (toFree) {
            uint index = qCountTrailingZeroBits(toFree);
            quintptr bit = (static_cast<quintptr>(1) << index);

            toFree ^= bit; // mask out freed slot

            // remove all extends slots that have been freed
            // this is a bit of bit trickery.
            quintptr mask = (bit << 1) - 1; // create a mask of 1's to the right of and up to the current bit
            quintptr objmask = e | mask; // or'ing mask with e gives all ones until the end of the current object
            quintptr result = objmask + 1;
            Q_ASSERT(qCountTrailingZeroBits(result) - index != 0); // ensure we freed something
            result |= mask; // ensure we don't clear stuff to the right of the current object
            e &= result;
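
            // Worked example on an 8-bit mask: take e = 0b00110000 (slots 4 and 5
            // extend the object at slot 3) and a freed object at index 3, so
            // bit = 0b00001000. Then mask = 0b00001111, objmask = 0b00111111 and
            // result = 0b01000000 | mask = 0b01001111; e &= result clears exactly
            // the extent bits 4 and 5 while keeping everything right of the object.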

            HeapItem *itemToFree = o + index;
            Heap::Base *b = *itemToFree;
            const VTable *v = b->internalClass->vtable;
//            if (Q_UNLIKELY(classCountPtr))
//                classCountPtr(v->className);
            if (v->destroy) {
                v->destroy(b);
                b->_checkIsDestroyed();
            }
#ifdef V4_USE_HEAPTRACK
            heaptrack_report_free(itemToFree);
#endif
        }
        Q_V4_PROFILE_DEALLOC(engine, qPopulationCount((objectBitmap[i] | extendsBitmap[i])
                                                      - (blackBitmap[i] | e)) * Chunk::SlotSize,
                             Profiling::SmallItem);
        objectBitmap[i] = blackBitmap[i];
        hasUsedSlots |= (blackBitmap[i] != 0);
        extendsBitmap[i] = e;
        lastSlotFree = !((objectBitmap[i]|extendsBitmap[i]) >> (sizeof(quintptr)*8 - 1));
        SDUMP() << "   new extends =" << binary(e);
        SDUMP() << "   lastSlotFree" << lastSlotFree;
        o += Chunk::Bits;
    }
    return hasUsedSlots;
}

void Chunk::freeAll(ExecutionEngine *engine)
{
    HeapItem *o = realBase();
    for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) {
        quintptr toFree = objectBitmap[i];
        quintptr e = extendsBitmap[i];
        while (toFree) {
            uint index = qCountTrailingZeroBits(toFree);
            quintptr bit = (static_cast<quintptr>(1) << index);

            toFree ^= bit; // mask out freed slot

            // remove all extends slots that have been freed
            // this is a bit of bit trickery.
            quintptr mask = (bit << 1) - 1; // create a mask of 1's to the right of and up to the current bit
            quintptr objmask = e | mask; // or'ing mask with e gives all ones until the end of the current object
            quintptr result = objmask + 1;
            Q_ASSERT(qCountTrailingZeroBits(result) - index != 0); // ensure we freed something
            result |= mask; // ensure we don't clear stuff to the right of the current object
            e &= result;

            HeapItem *itemToFree = o + index;
            Heap::Base *b = *itemToFree;
            if (b->internalClass->vtable->destroy) {
                b->internalClass->vtable->destroy(b);
                b->_checkIsDestroyed();
            }
#ifdef V4_USE_HEAPTRACK
            heaptrack_report_free(itemToFree);
#endif
        }
        Q_V4_PROFILE_DEALLOC(engine, (qPopulationCount(objectBitmap[i]|extendsBitmap[i])
                                      - qPopulationCount(e)) * Chunk::SlotSize, Profiling::SmallItem);
        objectBitmap[i] = 0;
        extendsBitmap[i] = e;
        o += Chunk::Bits;
    }
}

void Chunk::resetBlackBits()
{
    memset(blackBitmap, 0, sizeof(blackBitmap));
}
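
// After a sweep, each chunk's free slots are chained into size-class bins: bin n
// (for n < nBins - 1) collects runs of exactly n free slots, the last bin collects
// all larger runs. The bitmap walk below finds maximal runs of clear bits in
// objectBitmap|extendsBitmap and prepends each run to the matching bin.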

void Chunk::sortIntoBins(HeapItem **bins, uint nBins)
{
#if QT_POINTER_SIZE == 8
    const int start = 0;
#else
    const int start = 1;
#endif
    uint freeSlots = 0;

    for (int i = start; i < EntriesInBitmap; ++i) {
        quintptr usedSlots = (objectBitmap[i]|extendsBitmap[i]);
#if QT_POINTER_SIZE == 8
        if (!i)
            usedSlots |= (static_cast<quintptr>(1) << (HeaderSize/SlotSize)) - 1;
#endif
        while (1) {
            uint index = qCountTrailingZeroBits(usedSlots + 1);
            if (index == Bits)
                break;
            uint freeStart = i*Bits + index;
            usedSlots &= ~((static_cast<quintptr>(1) << index) - 1);
            while (!usedSlots) {
                if (++i < EntriesInBitmap) {
                    usedSlots = (objectBitmap[i]|extendsBitmap[i]);
                } else {
                    Q_ASSERT(i == EntriesInBitmap);
                    // Overflows to 0 when counting trailing zeroes above in next iteration.
                    // Then, all the bits are zeroes and we break.
                    usedSlots = std::numeric_limits<quintptr>::max();
                    break;
                }
            }
            HeapItem *freeItem = realBase() + freeStart;

            index = qCountTrailingZeroBits(usedSlots);
            usedSlots |= (quintptr(1) << index) - 1;
            uint freeEnd = i*Bits + index;
            uint nSlots = freeEnd - freeStart;
            freeSlots += nSlots;
            freeItem->freeData.availableSlots = nSlots;
            uint bin = qMin(nBins - 1, nSlots);
            freeItem->freeData.next = bins[bin];
            bins[bin] = freeItem;
        }
    }
    Q_UNUSED(freeSlots);
}

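// Allocation strategy, in order of preference:
//  1. an exact-size bin for the requested slot count,
//  2. bump allocation from the tail of the current chunk (nextFree/nFree),
//  3. the first large-enough entry of the catch-all bin, splitting off the remainder,
//  4. splitting an entry from a larger exact-size bin,
//  5. a fresh chunk - unless forceAllocation is false, in which case we return nullptr.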
HeapItem *BlockAllocator::allocate(size_t size, bool forceAllocation) {
    Q_ASSERT((size % Chunk::SlotSize) == 0);
    size_t slotsRequired = size >> Chunk::SlotSizeShift;

    if (allocationStats)
        ++allocationStats[binForSlots(slotsRequired)];

    HeapItem **last;

    HeapItem *m;

    if (slotsRequired < NumBins - 1) {
        m = freeBins[slotsRequired];
        if (m) {
            freeBins[slotsRequired] = m->freeData.next;
            goto done;
        }
    }

    if (nFree >= slotsRequired) {
        // use bump allocation
        Q_ASSERT(nextFree);
        m = nextFree;
        nextFree += slotsRequired;
        nFree -= slotsRequired;
        goto done;
    }

    // search last bin for a large enough item
    last = &freeBins[NumBins - 1];
    while ((m = *last)) {
        if (m->freeData.availableSlots >= slotsRequired) {
            *last = m->freeData.next; // take it out of the list

            size_t remainingSlots = m->freeData.availableSlots - slotsRequired;
            if (remainingSlots == 0)
                goto done;

            HeapItem *remainder = m + slotsRequired;
            if (remainingSlots > nFree) {
                if (nFree) {
                    size_t bin = binForSlots(nFree);
                    nextFree->freeData.next = freeBins[bin];
                    nextFree->freeData.availableSlots = nFree;
                    freeBins[bin] = nextFree;
                }
                nextFree = remainder;
                nFree = remainingSlots;
            } else {
                remainder->freeData.availableSlots = remainingSlots;
                size_t binForRemainder = binForSlots(remainingSlots);
                remainder->freeData.next = freeBins[binForRemainder];
                freeBins[binForRemainder] = remainder;
            }
            goto done;
        }
        last = &m->freeData.next;
    }

    if (slotsRequired < NumBins - 1) {
        // check if we can split up another slot
        for (size_t i = slotsRequired + 1; i < NumBins - 1; ++i) {
            m = freeBins[i];
            if (m) {
                freeBins[i] = m->freeData.next; // take it out of the list
                size_t remainingSlots = i - slotsRequired;
                HeapItem *remainder = m + slotsRequired;
                remainder->freeData.availableSlots = remainingSlots;
                remainder->freeData.next = freeBins[remainingSlots];
                freeBins[remainingSlots] = remainder;
                goto done;
            }
        }
    }

    if (!m) {
        if (!forceAllocation)
            return nullptr;
        if (nFree) {
            // Save any remaining slots of the current chunk
            // for later, smaller allocations.
            size_t bin = binForSlots(nFree);
            nextFree->freeData.next = freeBins[bin];
            nextFree->freeData.availableSlots = nFree;
            freeBins[bin] = nextFree;
        }
        Chunk *newChunk = chunkAllocator->allocate();
        Q_V4_PROFILE_ALLOC(engine, Chunk::DataSize, Profiling::HeapPage);
        chunks.push_back(newChunk);
        nextFree = newChunk->first();
        nFree = Chunk::AvailableSlots;
        m = nextFree;
        nextFree += slotsRequired;
        nFree -= slotsRequired;
    }

done:
    m->setAllocatedSlots(slotsRequired);
    Q_V4_PROFILE_ALLOC(engine, slotsRequired * Chunk::SlotSize, Profiling::SmallItem);
#ifdef V4_USE_HEAPTRACK
    heaptrack_report_alloc(m, slotsRequired * Chunk::SlotSize);
#endif
    return m;
}

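// Sweeps all chunks and partitions the vector so chunks that still contain live
// objects come first. Empty chunks end up at the tail and are only returned to the
// chunk allocator after the loop, because destroy() calls run during the sweep may
// still touch neighbouring heap memory.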
void BlockAllocator::sweep()
{
    nextFree = nullptr;
    nFree = 0;
    memset(freeBins, 0, sizeof(freeBins));

    usedSlotsAfterLastSweep = 0;

    auto firstEmptyChunk = std::partition(chunks.begin(), chunks.end(), [this](Chunk *c) {
        return c->sweep(engine);
    });

    std::for_each(chunks.begin(), firstEmptyChunk, [this](Chunk *c) {
        c->sortIntoBins(freeBins, NumBins);
    });

    // only free the chunks at the end to avoid that the sweep() calls indirectly
    // access freed memory
    std::for_each(firstEmptyChunk, chunks.end(), [this](Chunk *c) {
        Q_V4_PROFILE_DEALLOC(engine, Chunk::DataSize, Profiling::HeapPage);
        chunkAllocator->free(c);
    });

    chunks.erase(firstEmptyChunk, chunks.end());
}

void BlockAllocator::freeAll(ExecutionEngine *engine)
{
    for (auto c : chunks)
        c->freeAll(engine);
    for (auto c : chunks) {
        Q_V4_PROFILE_DEALLOC(engine, Chunk::DataSize, Profiling::HeapPage);
        chunkAllocator->free(c);
    }
}

void BlockAllocator::resetBlackBits()
{
    for (auto c : chunks)
        c->resetBlackBits();
}

HeapItem *HugeItemAllocator::allocate(size_t size) {
    MemorySegment *m = nullptr;
    Chunk *c = nullptr;
    if (size >= MemorySegment::SegmentSize/2) {
        // too large to handle through the ChunkAllocator, let's get our own memory segment
        size += Chunk::HeaderSize; // space required for the Chunk header
        size_t pageSize = WTF::pageSize();
        size = (size + pageSize - 1) & ~(pageSize - 1); // align to page sizes
        m = new MemorySegment(size);
        c = m->allocate(size);
    } else {
        c = chunkAllocator->allocate(size);
    }
    Q_ASSERT(c);
    chunks.push_back(HugeChunk{m, c, size});
    Chunk::setBit(c->objectBitmap, c->first() - c->realBase());
    Q_V4_PROFILE_ALLOC(engine, size, Profiling::LargeItem);
#ifdef V4_USE_HEAPTRACK
    heaptrack_report_alloc(c, size);
#endif
    return c->first();
}

static void freeHugeChunk(ChunkAllocator *chunkAllocator, const HugeItemAllocator::HugeChunk &c, ClassDestroyStatsCallback classCountPtr)
{
    HeapItem *itemToFree = c.chunk->first();
    Heap::Base *b = *itemToFree;
    const VTable *v = b->internalClass->vtable;
    if (Q_UNLIKELY(classCountPtr))
        classCountPtr(v->className);

    if (v->destroy) {
        v->destroy(b);
        b->_checkIsDestroyed();
    }
    if (c.segment) {
        // own memory segment
        c.segment->free(c.chunk, c.size);
        delete c.segment;
    } else {
        chunkAllocator->free(c.chunk, c.size);
    }
#ifdef V4_USE_HEAPTRACK
    heaptrack_report_free(c.chunk);
#endif
}

void HugeItemAllocator::resetBlackBits()
{
    for (auto c : chunks)
        Chunk::clearBit(c.chunk->blackBitmap, c.chunk->first() - c.chunk->realBase());
}

void HugeItemAllocator::freeAll()
{
    for (auto &c : chunks) {
        Q_V4_PROFILE_DEALLOC(engine, c.size, Profiling::LargeItem);
        freeHugeChunk(chunkAllocator, c, nullptr);
    }
}

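// The incremental GC cycle is modelled as a state machine. A full run proceeds
// through mark setup (MarkStart .. InitMarkWeakValues), the interruptible mark and
// drain states, MarkReady (optionally detouring through the cross-validation state),
// destruction of unmarked QObject wrappers, pruning of weak maps and sets, and
// finally DoSweep. Each handler returns the next state; the driver further down
// (GCStateMachine::transition()) runs the loop and honours the per-slice deadline.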
namespace {
using ExtraData = GCStateInfo::ExtraData;
GCState markStart(GCStateMachine *that, ExtraData &)
{
    // Initialize the mark stack
    that->mm->m_markStack = std::make_unique<MarkStack>(that->mm->engine);
    that->mm->engine->isGCOngoing = true;
    return GCState::MarkGlobalObject;
}

GCState markGlobalObject(GCStateMachine *that, ExtraData &)
{
    that->mm->engine->markObjects(that->mm->m_markStack.get());
    return GCState::MarkJSStack;
}

GCState markJSStack(GCStateMachine *that, ExtraData &)
{
    that->mm->collectFromJSStack(that->mm->markStack());
    return GCState::InitMarkPersistentValues;
}

GCState initMarkPersistentValues(GCStateMachine *that, ExtraData &stateData)
{
    if (!that->mm->m_persistentValues)
        return GCState::InitMarkWeakValues; // no persistent values to mark
    stateData = GCIteratorStorage { that->mm->m_persistentValues->begin() };
    return GCState::MarkPersistentValues;
}

static constexpr int markLoopIterationCount = 1024;

bool wasDrainNecessary(MarkStack *ms, QDeadlineTimer deadline)
{
    if (ms->remainingBeforeSoftLimit() > markLoopIterationCount)
        return false;
    // drain
    ms->drain(deadline);
    return true;
}

GCState markPersistentValues(GCStateMachine *that, ExtraData &stateData) {
    auto markStack = that->mm->markStack();
    if (wasDrainNecessary(markStack, that->deadline) && that->deadline.hasExpired())
        return GCState::MarkPersistentValues;
    PersistentValueStorage::Iterator& it = get<GCIteratorStorage>(stateData).it;
    // avoid hitting the timer check on every iteration by batching them
    for (int i = 0; i < markLoopIterationCount; ++i) {
        if (!it.p)
            return GCState::InitMarkWeakValues;
        if (Managed *m = (*it).as<Managed>())
            m->mark(markStack);
        ++it;
    }
    return GCState::MarkPersistentValues;
}

GCState initMarkWeakValues(GCStateMachine *that, ExtraData &stateData)
{
    stateData = GCIteratorStorage { that->mm->m_weakValues->begin() };
    return GCState::MarkWeakValues;
}

GCState markWeakValues(GCStateMachine *that, ExtraData &stateData)
{
    auto markStack = that->mm->markStack();
    if (wasDrainNecessary(markStack, that->deadline) && that->deadline.hasExpired())
        return GCState::MarkWeakValues;
    PersistentValueStorage::Iterator& it = get<GCIteratorStorage>(stateData).it;
    // avoid hitting the timer check on every iteration by batching them
    for (int i = 0; i < markLoopIterationCount; ++i) {
        if (!it.p)
            return GCState::MarkDrain;
        QObjectWrapper *qobjectWrapper = (*it).as<QObjectWrapper>();
        ++it;
        if (!qobjectWrapper)
            continue;
        QObject *qobject = qobjectWrapper->object();
        if (!qobject)
            continue;
        bool keepAlive = QQmlData::keepAliveDuringGarbageCollection(qobject);

        if (!keepAlive) {
            if (QObject *parent = qobject->parent()) {
                while (parent->parent())
                    parent = parent->parent();
                keepAlive = QQmlData::keepAliveDuringGarbageCollection(parent);
            }
        }

        if (keepAlive)
            qobjectWrapper->mark(that->mm->markStack());
    }
    return GCState::MarkWeakValues;
}

GCState markDrain(GCStateMachine *that, ExtraData &)
{
    if (that->deadline.isForever()) {
        that->mm->markStack()->drain();
        return GCState::MarkReady;
    }
    auto drainState = that->mm->m_markStack->drain(that->deadline);
    return drainState == MarkStack::DrainState::Complete
            ? GCState::MarkReady
            : GCState::MarkDrain;
}

GCState markReady(GCStateMachine *that, ExtraData &)
{
    auto isIncrementalRun = [](GCStateMachine* that){
        return !that->mm->aggressiveGC && that->timeLimit.count() > 0;
    };

    if (that->mm->crossValidateIncrementalGC && isIncrementalRun(that))
        return GCState::CrossValidateIncrementalMarkPhase;
    return GCState::InitCallDestroyObjects;
}

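// Cross-validation (opt-in via QV4_MM_CROSS_VALIDATE_INCREMENTAL_GC): snapshot and
// clear the black bitmaps produced by the incremental mark phase, re-run the whole
// mark phase in one go, and report every bit that the fresh run set but the
// incremental run missed - any such difference points at a write-barrier bug.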
GCState crossValidateIncrementalMarkPhase(GCStateMachine *that, ExtraData &)
{
    struct {
        Chunk* operator()(Chunk* chunk) { return chunk; }
        Chunk* operator()(const HugeItemAllocator::HugeChunk& chunk) { return chunk.chunk; }
    } getChunk{};

    auto takeBlackBitmap = [&getChunk](auto& allocator, std::vector<quintptr>& storage){
        for (auto chunk : allocator.chunks) {
            for (auto& bitmap : getChunk(chunk)->blackBitmap) {
                storage.push_back(bitmap);
            }
            getChunk(chunk)->resetBlackBits();
        }
    };

    auto runMarkPhase = [](GCStateMachine* that) {
        that->reset();
        that->mm->m_markStack.reset();

        while (that->state != GCStateMachine::MarkReady) {
            GCStateInfo& stateInfo = that->stateInfoMap[int(that->state)];
            that->state = stateInfo.execute(that, that->stateData);
        }
    };

    auto checkBlackBitmap = [&that, &getChunk](auto& allocator, const std::vector<quintptr>& storedBitmap) {
        auto reportError = [&allocator, &getChunk, &that](std::size_t chunk_index, std::size_t bitmap_index, uint bit_index){
            Q_UNUSED(that);
            auto object = reinterpret_cast<Heap::Base*>(getChunk(allocator.chunks[chunk_index])->realBase() + (bit_index + (bitmap_index*Chunk::Bits)));
            qDebug() << "Cross Validation Error on chunk" << chunk_index
                     << "on bitmap piece" << bitmap_index << "and bit" << bit_index
                     << ((object->internalClass) ? "With type" : "")
                     << ((object->internalClass) ?
                             Managed::typeToString(Managed::Type(object->internalClass->vtable->type)) : QString());

            #ifdef QT_BUILD_INTERNAL
            that->bitmapErrors.emplace_back(chunk_index, bitmap_index, bit_index);
            #endif
        };

        auto original = storedBitmap.begin();
        for (std::size_t chunk_index = 0; original != storedBitmap.end() && chunk_index < allocator.chunks.size(); ++chunk_index) {
            for (std::size_t bitmap_index = 0; bitmap_index < Chunk::EntriesInBitmap; ++bitmap_index) {
                if (auto differences = (~(*original)) & getChunk(allocator.chunks[chunk_index])->blackBitmap[bitmap_index]) {
                    while (differences != 0) {
                        uint bit_index = qCountTrailingZeroBits(differences);
                        reportError(chunk_index, bitmap_index, bit_index);
                        differences ^= quintptr{1} << bit_index;
                    }
                }
                ++original;
            }
        }
    };

    #ifdef QT_BUILD_INTERNAL
    that->bitmapErrors.clear();
    #endif

    std::vector<quintptr> blockBitmap{};
    blockBitmap.reserve(Chunk::EntriesInBitmap * that->mm->blockAllocator.chunks.size());
    takeBlackBitmap(that->mm->blockAllocator, blockBitmap);

    std::vector<quintptr> hugeItemBitmap{};
    hugeItemBitmap.reserve(Chunk::EntriesInBitmap * that->mm->hugeItemAllocator.chunks.size());
    takeBlackBitmap(that->mm->hugeItemAllocator, hugeItemBitmap);

    std::vector<quintptr> internalClassBitmap{};
    internalClassBitmap.reserve(Chunk::EntriesInBitmap * that->mm->icAllocator.chunks.size());
    takeBlackBitmap(that->mm->icAllocator, internalClassBitmap);

    runMarkPhase(that);

    checkBlackBitmap(that->mm->blockAllocator, blockBitmap);
    checkBlackBitmap(that->mm->hugeItemAllocator, hugeItemBitmap);
    checkBlackBitmap(that->mm->icAllocator, internalClassBitmap);

    return GCState::InitCallDestroyObjects;
}

/*!
    \internal
    Collects new references from the stack, then drains the mark stack again.
*/
void redrain(GCStateMachine *that)
{
    that->mm->collectFromJSStack(that->mm->markStack());
    that->mm->m_markStack->drain();
}

GCState initCallDestroyObjects(GCStateMachine *that, ExtraData &stateData)
{
    // as we don't have a deletion barrier, we need to rescan the stack
    redrain(that);
    if (!that->mm->m_weakValues)
        return GCState::FreeWeakMaps; // no need to call destroy objects
    stateData = GCIteratorStorage { that->mm->m_weakValues->begin() };
    return GCState::CallDestroyObjects;
}
GCState callDestroyObject(GCStateMachine *that, ExtraData &stateData)
{
    PersistentValueStorage::Iterator& it = get<GCIteratorStorage>(stateData).it;
    // destroyObject might call user code, which really shouldn't call back into the gc
    auto oldState = std::exchange(that->mm->gcBlocked, QV4::MemoryManager::Blockness::InCriticalSection);
    auto cleanup = qScopeGuard([&]() {
        that->mm->gcBlocked = oldState;
    });
    // avoid hitting the timer check on every iteration by batching them
    for (int i = 0; i < markLoopIterationCount; ++i) {
        if (!it.p)
            return GCState::FreeWeakMaps;
        Managed *m = (*it).managed();
        ++it;
        if (!m || m->markBit())
            continue;
        // we need to call destroyObject on qobjectwrappers now, so that they can emit the destroyed
        // signal before we start sweeping the heap
        if (QObjectWrapper *qobjectWrapper = m->as<QObjectWrapper>())
            qobjectWrapper->destroyObject(/*lastSweep =*/false);
    }
    return GCState::CallDestroyObjects;
}

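// Unlinks every unmarked weak map from the intrusive nextWeakMap list and prunes
// unmarked keys from the surviving ones. lastMap trails behind the iteration, so
// dead list nodes are simply skipped over instead of being relinked.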
void freeWeakMaps(MemoryManager *mm)
{
    for (auto [map, lastMap] = std::tuple {mm->weakMaps, &mm->weakMaps }; map; map = map->nextWeakMap) {
        if (!map->isMarked())
            continue;
        map->removeUnmarkedKeys();
        *lastMap = map;
        lastMap = &map->nextWeakMap;
    }
}

GCState freeWeakMaps(GCStateMachine *that, ExtraData &)
{
    freeWeakMaps(that->mm);
    return GCState::FreeWeakSets;
}

void freeWeakSets(MemoryManager *mm)
{
    for (auto [set, lastSet] = std::tuple {mm->weakSets, &mm->weakSets}; set; set = set->nextWeakSet) {
        if (!set->isMarked())
            continue;
        set->removeUnmarkedKeys();
        *lastSet = set;
        lastSet = &set->nextWeakSet;
    }
}

GCState freeWeakSets(GCStateMachine *that, ExtraData &)
{
    freeWeakSets(that->mm);
    return GCState::HandleQObjectWrappers;
}

GCState handleQObjectWrappers(GCStateMachine *that, ExtraData &)
{
    that->mm->cleanupDeletedQObjectWrappersInSweep();
    return GCState::DoSweep;
}

GCState doSweep(GCStateMachine *that, ExtraData &)
{
    auto mm = that->mm;

    mm->engine->identifierTable->sweep();
    mm->blockAllocator.sweep();
    mm->hugeItemAllocator.sweep(that->mm->gcCollectorStats ? increaseFreedCountForClass : nullptr);
    mm->icAllocator.sweep();

    // reset all black bits
    mm->blockAllocator.resetBlackBits();
    mm->hugeItemAllocator.resetBlackBits();
    mm->icAllocator.resetBlackBits();

    mm->usedSlotsAfterLastFullSweep = mm->blockAllocator.usedSlotsAfterLastSweep + mm->icAllocator.usedSlotsAfterLastSweep;
    mm->gcBlocked = MemoryManager::Unblocked;
    mm->m_markStack.reset();
    mm->engine->isGCOngoing = false;

    mm->updateUnmanagedHeapSizeGCLimit();

    return GCState::Invalid;
}

}

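// The constructor below wires one GCStateInfo entry per GCState into stateInfoMap.
// Each entry names one of the handlers above plus a breakAfter flag that makes the
// state machine yield back to the event loop right after that state has run.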

MemoryManager::MemoryManager(ExecutionEngine *engine)
    : engine(engine)
    , chunkAllocator(new ChunkAllocator)
    , blockAllocator(chunkAllocator, engine)
    , icAllocator(chunkAllocator, engine)
    , hugeItemAllocator(chunkAllocator, engine)
    , m_persistentValues(new PersistentValueStorage(engine))
    , m_weakValues(new PersistentValueStorage(engine))
    , unmanagedHeapSizeGCLimit(MinUnmanagedHeapSizeGCLimit)
    , aggressiveGC(!qEnvironmentVariableIsEmpty("QV4_MM_AGGRESSIVE_GC"))
    , crossValidateIncrementalGC(qEnvironmentVariableIsSet("QV4_MM_CROSS_VALIDATE_INCREMENTAL_GC"))
    , gcStats(lcGcStats().isDebugEnabled())
    , gcCollectorStats(lcGcAllocatorStats().isDebugEnabled())
{
#ifdef V4_USE_VALGRIND
    VALGRIND_CREATE_MEMPOOL(this, 0, true);
#endif

    if (gcStats)
        blockAllocator.allocationStats = statistics.allocations;

    gcStateMachine = std::make_unique<GCStateMachine>();
    gcStateMachine->mm = this;

    gcStateMachine->stateInfoMap[int(GCState::MarkStart)] = {
        markStart,
        false,
    };
    gcStateMachine->stateInfoMap[int(GCState::MarkGlobalObject)] = {
        markGlobalObject,
        false,
    };
    gcStateMachine->stateInfoMap[int(GCState::MarkJSStack)] = {
        markJSStack,
        false,
    };
    gcStateMachine->stateInfoMap[int(GCState::InitMarkPersistentValues)] = {
        initMarkPersistentValues,
        false,
    };
    gcStateMachine->stateInfoMap[int(GCState::MarkPersistentValues)] = {
        markPersistentValues,
        false,
    };
    gcStateMachine->stateInfoMap[int(GCState::InitMarkWeakValues)] = {
        initMarkWeakValues,
        false,
    };
    gcStateMachine->stateInfoMap[int(GCState::MarkWeakValues)] = {
        markWeakValues,
        false,
    };
    gcStateMachine->stateInfoMap[int(GCState::MarkDrain)] = {
        markDrain,
        false,
    };
    gcStateMachine->stateInfoMap[int(GCState::MarkReady)] = {
        markReady,
        false,
    };
    gcStateMachine->stateInfoMap[int(GCState::CrossValidateIncrementalMarkPhase)] = {
        crossValidateIncrementalMarkPhase,
        false,
    };
    gcStateMachine->stateInfoMap[int(GCState::InitCallDestroyObjects)] = {
        initCallDestroyObjects,
        false,
    };
    gcStateMachine->stateInfoMap[int(GCState::CallDestroyObjects)] = {
        callDestroyObject,
        false,
    };
    gcStateMachine->stateInfoMap[int(GCState::FreeWeakMaps)] = {
        freeWeakMaps,
        false,
    };
    gcStateMachine->stateInfoMap[int(GCState::FreeWeakSets)] = {
        freeWeakSets,
        true, // ensure that handleQObjectWrappers runs in isolation
    };
    gcStateMachine->stateInfoMap[int(GCState::HandleQObjectWrappers)] = {
        handleQObjectWrappers,
        false,
    };
    gcStateMachine->stateInfoMap[int(GCState::DoSweep)] = {
        doSweep,
        false,
    };
}

Heap::Base *MemoryManager::allocString(std::size_t unmanagedSize)
{
    const size_t stringSize = align(sizeof(Heap::String));
#ifdef MM_STATS
    lastAllocRequestedSlots = stringSize >> Chunk::SlotSizeShift;
    ++allocationCount;
#endif
    unmanagedHeapSize += unmanagedSize;

    HeapItem *m = allocate(&blockAllocator, stringSize);
    memset(m, 0, stringSize);
    return *m;
}

Heap::Base *MemoryManager::allocData(std::size_t size)
{
#ifdef MM_STATS
    lastAllocRequestedSlots = size >> Chunk::SlotSizeShift;
    ++allocationCount;
#endif

    Q_ASSERT(size >= Chunk::SlotSize);
    Q_ASSERT(size % Chunk::SlotSize == 0);

    HeapItem *m = allocate(&blockAllocator, size);
    memset(m, 0, size);
    return *m;
}
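
// Objects that need out-of-line member storage get the object and its
// Heap::MemberData co-allocated in a single block-allocator request when the pair
// still fits into one chunk; the member data is then carved out of that allocation
// and marked as a separate heap item in the chunk's bitmaps. Oversized member data
// falls back to the huge item allocator instead.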

Heap::Object *MemoryManager::allocObjectWithMemberData(const QV4::VTable *vtable, uint nMembers)
{
    uint size = (vtable->nInlineProperties + vtable->structureSize/sizeof(Value))*sizeof(Value);
    Q_ASSERT(!(size % sizeof(HeapItem)));

    Heap::Object *o;
    if (nMembers <= vtable->nInlineProperties) {
        o = static_cast<Heap::Object *>(allocData(size));
    } else {
        // Allocate both in one go through the block allocator
        nMembers -= vtable->nInlineProperties;
        std::size_t memberSize = align(sizeof(Heap::MemberData) + (nMembers - 1)*sizeof(Value));
        size_t totalSize = size + memberSize;
        Heap::MemberData *m;
        if (totalSize > Chunk::DataSize) {
            o = static_cast<Heap::Object *>(allocData(size));
            m = hugeItemAllocator.allocate(memberSize)->as<Heap::MemberData>();
        } else {
            HeapItem *mh = reinterpret_cast<HeapItem *>(allocData(totalSize));
            Heap::Base *b = *mh;
            o = static_cast<Heap::Object *>(b);
            mh += (size >> Chunk::SlotSizeShift);
            m = mh->as<Heap::MemberData>();
            Chunk *c = mh->chunk();
            size_t index = mh - c->realBase();
            Chunk::setBit(c->objectBitmap, index);
            Chunk::clearBit(c->extendsBitmap, index);
        }
        o->memberData.set(engine, m);
        m->internalClass.set(engine, engine->internalClasses(EngineBase::Class_MemberData));
        m->values.alloc = static_cast<uint>((memberSize - sizeof(Heap::MemberData) + sizeof(Value))/sizeof(Value));
        m->values.size = o->memberData->values.alloc;
        m->init();
    }

    return o;
}

static uint markStackSize = 0;

MarkStack::MarkStack(ExecutionEngine *engine)
    : m_engine(engine)
{
    m_base = (Heap::Base **)engine->gcStack->base();
    m_top = m_base;
    const size_t size = engine->maxGCStackSize() / sizeof(Heap::Base);
    m_hardLimit = m_base + size;
    m_softLimit = m_base + size * 3 / 4;
}

void MarkStack::drain()
{
    // we're not calling drain(QDeadlineTimer::Forever) as that has higher overhead
    while (m_top > m_base) {
        Heap::Base *h = pop();
        ++markStackSize;
        Q_ASSERT(h); // at this point we should only have Heap::Base objects in this area on the stack. If not, weird things might happen.
        h->internalClass->vtable->markObjects(h, this);
    }
}

MarkStack::DrainState MarkStack::drain(QDeadlineTimer deadline)
{
    do {
        for (int i = 0; i <= markLoopIterationCount * 10; ++i) {
            if (m_top == m_base)
                return DrainState::Complete;
            Heap::Base *h = pop();
            ++markStackSize;
            Q_ASSERT(h); // at this point we should only have Heap::Base objects in this area on the stack. If not, weird things might happen.
            h->internalClass->vtable->markObjects(h, this);
        }
    } while (!deadline.hasExpired());
    return DrainState::Ongoing;
}

void MemoryManager::onEventLoop()
{
    if (engine->inShutdown)
        return;
    if (gcBlocked == InCriticalSection) {
        QMetaObject::invokeMethod(engine->publicEngine, [this]{
            onEventLoop();
        }, Qt::QueuedConnection);
        return;
    }
    if (gcStateMachine->inProgress()) {
        gcStateMachine->step();
    }
}

void MemoryManager::sweep(bool lastSweep, ClassDestroyStatsCallback classCountPtr)
{
    if (m_weakValues) {
        for (PersistentValueStorage::Iterator it = m_weakValues->begin(); it != m_weakValues->end(); ++it) {
            Managed *m = (*it).managed();
            if (!m || m->markBit())
                continue;
            // we need to call destroyObject on qobjectwrappers now, so that they can emit the destroyed
            // signal before we start sweeping the heap
            if (QObjectWrapper *qobjectWrapper = (*it).as<QObjectWrapper>())
                qobjectWrapper->destroyObject(lastSweep);
        }
    }

    freeWeakMaps(this);
    freeWeakSets(this);

    cleanupDeletedQObjectWrappersInSweep();

    if (!lastSweep) {
        engine->identifierTable->sweep();
        blockAllocator.sweep(/*classCountPtr*/);
        hugeItemAllocator.sweep(classCountPtr);
        icAllocator.sweep(/*classCountPtr*/);
    }

    // reset all black bits
    blockAllocator.resetBlackBits();
    hugeItemAllocator.resetBlackBits();
    icAllocator.resetBlackBits();
}

/*
   \internal
   Helper function used in sweep to clean up the (to-be-freed) QObjectWrapper.
   Used both in MemoryManager::sweep, and the corresponding gc statemachine phase.
*/
void MemoryManager::cleanupDeletedQObjectWrappersInSweep()
{
    // onDestruction handlers may have accessed other QObject wrappers and reset their value, so ensure
    // that they are all set to undefined.
    for (PersistentValueStorage::Iterator it = m_weakValues->begin(); it != m_weakValues->end(); ++it) {
        Managed *m = (*it).managed();
        if (!m || m->markBit())
            continue;
        (*it) = Value::undefinedValue();
    }

    // Now it is time to free the QV4::QObjectWrapper Values; we must check each Value's tag to make sure its object has been destroyed
    const int pendingCount = m_pendingFreedObjectWrapperValue.size();
    if (pendingCount) {
        QVector<Value *> remainingWeakQObjectWrappers;
        remainingWeakQObjectWrappers.reserve(pendingCount);
        for (int i = 0; i < pendingCount; ++i) {
            Value *v = m_pendingFreedObjectWrapperValue[i];
            if (v->isUndefined() || v->isEmpty())
                PersistentValueStorage::free(v);
            else
                remainingWeakQObjectWrappers.append(v);
        }
        m_pendingFreedObjectWrapperValue = remainingWeakQObjectWrappers;
    }

    if (MultiplyWrappedQObjectMap *multiplyWrappedQObjects = engine->m_multiplyWrappedQObjects) {
        for (MultiplyWrappedQObjectMap::Iterator it = multiplyWrappedQObjects->begin(); it != multiplyWrappedQObjects->end();) {
            if (it.value().isNullOrUndefined())
                it = multiplyWrappedQObjects->erase(it);
            else
                ++it;
        }
    }
}

bool MemoryManager::shouldRunGC() const
{
    size_t total = blockAllocator.totalSlots() + icAllocator.totalSlots();
    if (total > MinSlotsGCLimit && usedSlotsAfterLastFullSweep * GCOverallocation < total * 100)
        return true;
    return false;
}
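
// Worked example: with GCOverallocation = 200 the test reads "run the GC once the
// slots still in use after the last full sweep are less than half of all currently
// allocated slots". E.g. 10000 used vs. 25000 total: 10000 * 200 < 25000 * 100
// (2,000,000 < 2,500,000), so a run is due - provided total exceeds MinSlotsGCLimit.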

static size_t dumpBins(BlockAllocator *b, const char *title)
{
    const QLoggingCategory &stats = lcGcAllocatorStats();
    size_t totalSlotMem = 0;
    if (title)
        qDebug(stats) << "Slot map for" << title << "allocator:";
    for (uint i = 0; i < BlockAllocator::NumBins; ++i) {
        uint nEntries = 0;
        HeapItem *h = b->freeBins[i];
        while (h) {
            ++nEntries;
            totalSlotMem += h->freeData.availableSlots;
            h = h->freeData.next;
        }
        if (title)
            qDebug(stats) << "    number of entries in slot" << i << ":" << nEntries;
    }
    SDUMP() << "    large slot map";
    HeapItem *h = b->freeBins[BlockAllocator::NumBins - 1];
    while (h) {
        SDUMP() << "        " << Qt::hex << (quintptr(h)/32) << h->freeData.availableSlots;
        h = h->freeData.next;
    }

    if (title)
        qDebug(stats) << "  total mem in bins" << totalSlotMem*Chunk::SlotSize;
    return totalSlotMem*Chunk::SlotSize;
}

/*!
    \internal
    Precondition: Incremental garbage collection must be currently active.
    Finishes incremental garbage collection, unless in a critical section.
    Code entering a critical section is expected to check if we need to
    force a gc completion, and to trigger the gc again if necessary
    when exiting the critical section.
    Returns \c true if the gc cycle completed, false otherwise.
 */
bool MemoryManager::tryForceGCCompletion()
{
    if (gcBlocked == InCriticalSection) {
        qCDebug(lcGcForcedRuns)
                << "Tried to force the GC to complete a run but failed due to being in a critical section.";
        return false;
    }

    const bool incrementalGCIsAlreadyRunning = m_markStack != nullptr;
    Q_ASSERT(incrementalGCIsAlreadyRunning);

    qCDebug(lcGcForcedRuns) << "Forcing the GC to complete a run.";

    auto oldTimeLimit = std::exchange(gcStateMachine->timeLimit, std::chrono::milliseconds::max());
    while (gcStateMachine->inProgress()) {
        gcStateMachine->step();
    }
    gcStateMachine->timeLimit = oldTimeLimit;
    return true;
}

void MemoryManager::runFullGC()
{
    runGC();
    const bool incrementalGCStillRunning = m_markStack != nullptr;
    if (incrementalGCStillRunning)
        tryForceGCCompletion();
}

void MemoryManager::runGC()
{
    if (gcBlocked != Unblocked) {
        return;
    }

    gcBlocked = MemoryManager::NormalBlocked;

    if (gcStats) {
        statistics.maxReservedMem = qMax(statistics.maxReservedMem, getAllocatedMem());
        statistics.maxAllocatedMem = qMax(statistics.maxAllocatedMem, getUsedMem() + getLargeItemsMem());
    }

    if (!gcCollectorStats) {
        gcStateMachine->step();
    } else {
        bool triggeredByUnmanagedHeap = (unmanagedHeapSize > unmanagedHeapSizeGCLimit);
        size_t oldUnmanagedSize = unmanagedHeapSize;

        const size_t totalMem = getAllocatedMem();
        const size_t usedBefore = getUsedMem();
        const size_t largeItemsBefore = getLargeItemsMem();

        const QLoggingCategory &stats = lcGcAllocatorStats();
        qDebug(stats) << "========== GC ==========";
#ifdef MM_STATS
        qDebug(stats) << "    Triggered by alloc request of" << lastAllocRequestedSlots << "slots.";
        qDebug(stats) << "    Allocations since last GC" << allocationCount;
        allocationCount = 0;
#endif
        size_t oldChunks = blockAllocator.chunks.size();
        qDebug(stats) << "Allocated" << totalMem << "bytes in" << oldChunks << "chunks";
        qDebug(stats) << "Fragmented memory before GC" << (totalMem - usedBefore);
        dumpBins(&blockAllocator, "Block");
        dumpBins(&icAllocator, "InternalClass");

        QElapsedTimer t;
        t.start();
        gcStateMachine->step();
        qint64 markTime = t.nsecsElapsed()/1000;
        t.start();
        const size_t usedAfter = getUsedMem();
        const size_t largeItemsAfter = getLargeItemsMem();

        if (triggeredByUnmanagedHeap) {
            qDebug(stats) << "triggered by unmanaged heap:";
            qDebug(stats) << "   old unmanaged heap size:" << oldUnmanagedSize;
            qDebug(stats) << "   new unmanaged heap:" << unmanagedHeapSize;
            qDebug(stats) << "   unmanaged heap limit:" << unmanagedHeapSizeGCLimit;
        }
        size_t memInBins = dumpBins(&blockAllocator, "Block")
                + dumpBins(&icAllocator, "InternalClass");
        qDebug(stats) << "Marked objects in" << markTime << "us.";
        qDebug(stats) << "   " << markStackSize << "objects marked";

        // sort our object types by number of freed instances
        MMStatsHash freedObjectStats;
        std::swap(freedObjectStats, *freedObjectStatsGlobal());
        typedef std::pair<const char*, int> ObjectStatInfo;
        std::vector<ObjectStatInfo> freedObjectsSorted;
        freedObjectsSorted.reserve(freedObjectStats.size());
        for (auto it = freedObjectStats.constBegin(); it != freedObjectStats.constEnd(); ++it) {
            freedObjectsSorted.push_back(std::make_pair(it.key(), it.value()));
        }
        std::sort(freedObjectsSorted.begin(), freedObjectsSorted.end(), [](const ObjectStatInfo &a, const ObjectStatInfo &b) {
            return a.second > b.second && strcmp(a.first, b.first) < 0;
        });

        qDebug(stats) << "Used memory before GC:" << usedBefore;
        qDebug(stats) << "Used memory after GC:" << usedAfter;
        qDebug(stats) << "Freed up bytes      :" << (usedBefore - usedAfter);
        qDebug(stats) << "Freed up chunks     :" << (oldChunks - blockAllocator.chunks.size());
        size_t lost = blockAllocator.allocatedMem() + icAllocator.allocatedMem()
                - memInBins - usedAfter;
        if (lost)
            qDebug(stats) << "!!!!!!!!!!!!!!!!!!!!! LOST MEM:" << lost << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!";
        if (largeItemsBefore || largeItemsAfter) {
            qDebug(stats) << "Large item memory before GC:" << largeItemsBefore;
            qDebug(stats) << "Large item memory after GC:" << largeItemsAfter;
            qDebug(stats) << "Large item memory freed up:" << (largeItemsBefore - largeItemsAfter);
        }

        for (auto it = freedObjectsSorted.cbegin(); it != freedObjectsSorted.cend(); ++it) {
            qDebug(stats).noquote() << QString::fromLatin1("Freed JS type: %1 (%2 instances)").arg(QString::fromLatin1(it->first), QString::number(it->second));
        }

        qDebug(stats) << "======== End GC ========";
    }

    if (gcStats)
        statistics.maxUsedMem = qMax(statistics.maxUsedMem, getUsedMem() + getLargeItemsMem());
}

size_t MemoryManager::getUsedMem() const
{
    return blockAllocator.usedMem() + icAllocator.usedMem();
}

size_t MemoryManager::getAllocatedMem() const
{
    return blockAllocator.allocatedMem() + icAllocator.allocatedMem() + hugeItemAllocator.usedMem();
}

size_t MemoryManager::getLargeItemsMem() const
{
    return hugeItemAllocator.usedMem();
}

void MemoryManager::updateUnmanagedHeapSizeGCLimit()
{
    if (3*unmanagedHeapSizeGCLimit <= 4 * unmanagedHeapSize) {
        // more than 75% full, raise limit
        unmanagedHeapSizeGCLimit = std::max(unmanagedHeapSizeGCLimit,
                                            unmanagedHeapSize) * 2;
    } else if (unmanagedHeapSize * 4 <= unmanagedHeapSizeGCLimit) {
        // less than 25% full, lower limit
        unmanagedHeapSizeGCLimit = qMax(std::size_t(MinUnmanagedHeapSizeGCLimit),
                                        unmanagedHeapSizeGCLimit/2);
    }

    if (aggressiveGC && !engine->inShutdown) {
        // ensure we don't 'lose' any memory,
        // but not during shutdown, because then we skip parts of sweep
        // and use freeAll instead
        Q_ASSERT(blockAllocator.allocatedMem()
                 == blockAllocator.usedMem() + dumpBins(&blockAllocator, nullptr));
        Q_ASSERT(icAllocator.allocatedMem()
                 == icAllocator.usedMem() + dumpBins(&icAllocator, nullptr));
    }
}

void MemoryManager::registerWeakMap(Heap::MapObject *map)
{
    map->nextWeakMap = weakMaps;
    weakMaps = map;
}

void MemoryManager::registerWeakSet(Heap::SetObject *set)
{
    set->nextWeakSet = weakSets;
    weakSets = set;
}

MemoryManager::~MemoryManager()
{
    delete m_persistentValues;
    dumpStats();

    // do one last non-incremental sweep to clean up C++ objects
    // first, abort any on-going incremental gc operation
    setGCTimeLimit(-1);
    if (engine->isGCOngoing) {
        engine->isGCOngoing = false;
        m_markStack.reset();
        gcStateMachine->state = GCState::Invalid;
        blockAllocator.resetBlackBits();
        hugeItemAllocator.resetBlackBits();
        icAllocator.resetBlackBits();
    }
    // then sweep
    sweep(/*lastSweep*/true);

    blockAllocator.freeAll(engine);
    hugeItemAllocator.freeAll();
    icAllocator.freeAll(engine);

    delete m_weakValues;
#ifdef V4_USE_VALGRIND
    VALGRIND_DESTROY_MEMPOOL(this);
#endif
    delete chunkAllocator;
}

void MemoryManager::dumpStats() const
{
    if (!gcStats)
        return;

    const QLoggingCategory &stats = lcGcStats();
    qDebug(stats) << "Qml GC memory allocation statistics:";
    qDebug(stats) << "Total memory allocated:" << statistics.maxReservedMem;
    qDebug(stats) << "Max memory used before a GC run:" << statistics.maxAllocatedMem;
    qDebug(stats) << "Max memory used after a GC run:" << statistics.maxUsedMem;
    qDebug(stats) << "Requests for different item sizes:";
    for (int i = 1; i < BlockAllocator::NumBins - 1; ++i)
        qDebug(stats) << "     <" << (i << Chunk::SlotSizeShift) << " bytes: " << statistics.allocations[i];
    qDebug(stats) << "     >=" << ((BlockAllocator::NumBins - 1) << Chunk::SlotSizeShift) << " bytes: " << statistics.allocations[BlockAllocator::NumBins - 1];
}

void MemoryManager::collectFromJSStack(MarkStack *markStack) const
{
    Value *v = engine->jsStackBase;
    Value *top = engine->jsStackTop;
    while (v < top) {
        Managed *m = v->managed();
        if (m) {
            Q_ASSERT(m->inUse());
            // Skip pointers to already freed objects, they are bogus as well
            m->mark(markStack);
        }
        ++v;
    }

    for (auto *frame = engine->currentStackFrame; frame; frame = frame->parentFrame()) {
        if (!frame->isMetaTypesFrame())
            continue;

        if (const Value *locals
                = static_cast<const MetaTypesStackFrame *>(frame)->locals()) {
            // Actual AOT-compiled functions initialize the locals first thing when they
            // are called. However, the ScopedStackFrame has no locals, but still uses a
            // MetaTypesStackFrame.
            locals->mark(markStack);
        }
    }
}

GCStateMachine::GCStateMachine()
    : collectTimings(lcGcStepExecution().isDebugEnabled())
{
    // base assumption: target 60fps, use at most 1/3 of time for gc
    // unless overridden by env variable
    bool ok = false;
    auto envTimeLimit = qEnvironmentVariableIntValue("QV4_GC_TIMELIMIT", &ok);
    if (!ok)
        envTimeLimit = (1000 / 60) / 3;
    if (envTimeLimit > 0)
        timeLimit = std::chrono::milliseconds { envTimeLimit };
    else
        timeLimit = std::chrono::milliseconds { 0 };
}

static void logStepTiming(GCStateMachine* that, quint64 timing) {
    auto registerTimingWithResetOnOverflow = [](
        GCStateMachine::StepTiming& storage, quint64 timing, GCState state
    ) {
        auto wouldOverflow = [](quint64 lhs, quint64 rhs) {
            return rhs > 0 && lhs > std::numeric_limits<quint64>::max() - rhs;
        };

        if (wouldOverflow(storage.rolling_sum, timing) || wouldOverflow(storage.count, 1)) {
            qDebug(lcGcStepExecution) << "Resetting timings storage for"
                                      << QMetaEnum::fromType<GCState>().key(state) << "due to overflow.";
            storage.rolling_sum = timing;
            storage.count = 1;
        } else {
            storage.rolling_sum += timing;
            storage.count += 1;
        }
    };

    GCStateMachine::StepTiming& storage = that->executionTiming[that->state];
    registerTimingWithResetOnOverflow(storage, timing, that->state);

    qDebug(lcGcStepExecution) << "Performed" << QMetaEnum::fromType<GCState>().key(that->state)
                              << "in" << timing << "microseconds";
    qDebug(lcGcStepExecution) << "This step was performed" << storage.count << "time(s), executing in"
                              << (storage.rolling_sum / storage.count) << "microseconds on average.";
}

static GCState executeWithLoggingIfEnabled(GCStateMachine* that, GCStateInfo& stateInfo) {
    if (!that->collectTimings)
        return stateInfo.execute(that, that->stateData);

    QElapsedTimer timer;
    timer.start();
    GCState next = stateInfo.execute(that, that->stateData);
    logStepTiming(that, timer.nsecsElapsed()/1000);
    return next;
}

void GCStateMachine::transition() {
    if (timeLimit.count() > 0) {
        deadline = QDeadlineTimer(timeLimit);
        bool deadlineExpired = false;
        while (!(deadlineExpired = deadline.hasExpired()) && state != GCState::Invalid) {
            if (state == GCState::InitCallDestroyObjects) {
                /* initCallDestroyObjects is the last action which drains the mark
                   stack by default. But our write-barrier might end up putting
                   objects on the markStack which still reference other objects,
                   especially when we call user code triggered by Component.onDestruction,
                   but also when we run into a timeout.
                   We don't redrain before InitCallDestroyObjects, as that would
                   potentially lead to useless busy-work (e.g., if the last references
                   to objects are removed while the mark phase is running) */
                redrain(this);
            }
            qCDebug(lcGcStateTransitions) << "Preparing to execute the"
                                          << QMetaEnum::fromType<GCState>().key(state) << "state";
            GCStateInfo& stateInfo = stateInfoMap[int(state)];
            state = executeWithLoggingIfEnabled(this, stateInfo);
            qCDebug(lcGcStateTransitions) << "Transitioning to the"
                                          << QMetaEnum::fromType<GCState>().key(state) << "state";
            if (stateInfo.breakAfter)
                break;
        }
        if (deadlineExpired)
            handleTimeout(state);
        if (state != GCState::Invalid)
            QMetaObject::invokeMethod(mm->engine->publicEngine, [this]{
                mm->onEventLoop();
            }, Qt::QueuedConnection);
    } else {
        deadline = QDeadlineTimer::Forever;
        while (state != GCState::Invalid) {
            qCDebug(lcGcStateTransitions) << "Preparing to execute the"
                                          << QMetaEnum::fromType<GCState>().key(state) << "state";
            GCStateInfo& stateInfo = stateInfoMap[int(state)];
            state = executeWithLoggingIfEnabled(this, stateInfo);
            qCDebug(lcGcStateTransitions) << "Transitioning to the"
                                          << QMetaEnum::fromType<GCState>().key(state) << "state";
        }
    }
}

} // namespace QV4

QT_END_NAMESPACE

#include "moc_qv4mm_p.cpp"