Qt
Internal/Contributor docs for the Qt SDK. Note: These are NOT official API docs; those are found at https://doc.qt.io/
Loading...
Searching...
No Matches
qv4mm.cpp
Go to the documentation of this file.
1// Copyright (C) 2021 The Qt Company Ltd.
2// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only
3// Qt-Security score:critical reason:low-level-memory-management
4
5#include "PageAllocation.h"
6#include "PageReservation.h"
7
8#include <private/qnumeric_p.h>
9#include <private/qv4alloca_p.h>
10#include <private/qv4engine_p.h>
11#include <private/qv4identifiertable_p.h>
12#include <private/qv4mapobject_p.h>
13#include <private/qv4mm_p.h>
14#include <private/qv4object_p.h>
15#include <private/qv4profiling_p.h>
16#include <private/qv4qobjectwrapper_p.h>
17#include <private/qv4setobject_p.h>
18#include <private/qv4stackframe_p.h>
19
20#include <QtQml/qqmlengine.h>
21
22#include <QtCore/qalgorithms.h>
23#include <QtCore/qelapsedtimer.h>
24#include <QtCore/qloggingcategory.h>
25#include <QtCore/qmap.h>
26#include <QtCore/qscopedvaluerollback.h>
27
28#include <algorithm>
29#include <chrono>
30#include <cstdlib>
31
32//#define MM_STATS
33
34#if !defined(MM_STATS) && !defined(QT_NO_DEBUG)
35#define MM_STATS
36#endif
37
38#if MM_DEBUG
39#define DEBUG qDebug() << "MM:"
40#else
41#define DEBUG if (1) ; else qDebug() << "MM:"
42#endif
43
44#ifdef V4_USE_VALGRIND
45#include <valgrind/valgrind.h>
46#include <valgrind/memcheck.h>
47#endif
48
49#ifdef V4_USE_HEAPTRACK
50#include <heaptrack_api.h>
51#endif
52
53#if OS(QNX)
54#include <sys/storage.h> // __tls()
55#endif
56
57#if USE(PTHREADS) && HAVE(PTHREAD_NP_H)
58#include <pthread_np.h>
59#endif
60
61Q_STATIC_LOGGING_CATEGORY(lcGcStats, "qt.qml.gc.statistics")
62Q_STATIC_LOGGING_CATEGORY(lcGcAllocatorStats, "qt.qml.gc.allocatorStats")
63Q_STATIC_LOGGING_CATEGORY(lcGcStateTransitions, "qt.qml.gc.stateTransitions")
64Q_STATIC_LOGGING_CATEGORY(lcGcForcedRuns, "qt.qml.gc.forcedRuns")
65Q_STATIC_LOGGING_CATEGORY(lcGcStepExecution, "qt.qml.gc.stepExecution")
66
67using namespace WTF;
68
69QT_BEGIN_NAMESPACE
70
71namespace QV4 {
72
// Global GC tuning constants:
// - MinSlotsGCLimit: minimum number of slots before a GC run is considered
//   worthwhile (16 chunks' worth of available slots).
// - GCOverallocation: maximum overallocation tolerated by the GC, in percent.
73 enum {
74 MinSlotsGCLimit = QV4::Chunk::AvailableSlots*16,
75 GCOverallocation = 200 /* Max overallocation by the GC in % */
76 };
77
79 enum {
80#ifdef Q_OS_RTEMS
81 NumChunks = sizeof(quint64),
82#else
83 NumChunks = 8*sizeof(quint64),
84#endif
85 SegmentSize = NumChunks*Chunk::ChunkSize,
86 };
87
88 MemorySegment(size_t size)
89 {
90 size += Chunk::ChunkSize; // make sure we can get enough 64k alignment memory
91 if (size < SegmentSize)
92 size = SegmentSize;
93
94 pageReservation = PageReservation::reserve(size, OSAllocator::JSGCHeapPages);
95 base = reinterpret_cast<Chunk *>((reinterpret_cast<quintptr>(pageReservation.base()) + Chunk::ChunkSize - 1) & ~(Chunk::ChunkSize - 1));
97 availableBytes = size - (reinterpret_cast<quintptr>(base) - reinterpret_cast<quintptr>(pageReservation.base()));
98 if (availableBytes < SegmentSize)
99 --nChunks;
100 }
102 qSwap(pageReservation, other.pageReservation);
103 qSwap(base, other.base);
104 qSwap(allocatedMap, other.allocatedMap);
105 qSwap(availableBytes, other.availableBytes);
106 qSwap(nChunks, other.nChunks);
107 }
108
110 if (base)
111 pageReservation.deallocate();
112 }
113
114 void setBit(size_t index) {
115 Q_ASSERT(index < nChunks);
116 quint64 bit = static_cast<quint64>(1) << index;
117 allocatedMap |= bit;
118 }
119 void clearBit(size_t index) {
120 Q_ASSERT(index < nChunks);
121 quint64 bit = static_cast<quint64>(1) << index;
122 allocatedMap &= ~bit;
123 }
124 bool testBit(size_t index) const {
125 Q_ASSERT(index < nChunks);
126 quint64 bit = static_cast<quint64>(1) << index;
127 return (allocatedMap & bit);
128 }
129
130 Chunk *allocate(size_t size);
131 void free(Chunk *chunk, size_t size) {
132 DEBUG << "freeing chunk" << chunk;
133 size_t index = static_cast<size_t>(chunk - base);
134 size_t end = qMin(static_cast<size_t>(NumChunks), index + (size - 1)/Chunk::ChunkSize + 1);
135 while (index < end) {
136 Q_ASSERT(testBit(index));
137 clearBit(index);
138 ++index;
139 }
140
141 size_t pageSize = WTF::pageSize();
142 size = (size + pageSize - 1) & ~(pageSize - 1);
143#if !defined(Q_OS_LINUX) && !defined(Q_OS_WIN)
144 // Linux and Windows zero out pages that have been decommitted and get committed again.
145 // unfortunately that's not true on other OSes (e.g. BSD based ones), so zero out the
146 // memory before decommit, so that we can be sure that all chunks we allocate will be
147 // zero initialized.
148 memset(chunk, 0, size);
149#endif
150 pageReservation.decommit(chunk, size);
151 }
152
// Returns whether @p c points into this segment's reserved chunk
// range [base, base + nChunks).
153 bool contains(Chunk *c) const {
154 return c >= base && c < base + nChunks;
155 }
156
158 Chunk *base = nullptr;
161 uint nChunks = 0;
162};
163
165{
166 if (!allocatedMap && size >= SegmentSize) {
167 // chunk allocated for one huge allocation
168 Q_ASSERT(availableBytes >= size);
169 pageReservation.commit(base, size);
170 allocatedMap = ~static_cast<quint64>(0);
171 return base;
172 }
173 size_t requiredChunks = (size + sizeof(Chunk) - 1)/sizeof(Chunk);
174 uint sequence = 0;
175 Chunk *candidate = nullptr;
176 for (uint i = 0; i < nChunks; ++i) {
177 if (!testBit(i)) {
178 if (!candidate)
179 candidate = base + i;
180 ++sequence;
181 } else {
182 candidate = nullptr;
183 sequence = 0;
184 }
185 if (sequence == requiredChunks) {
186 pageReservation.commit(candidate, size);
187 for (uint i = 0; i < requiredChunks; ++i)
188 setBit(candidate - base + i);
189 DEBUG << "allocated chunk " << candidate << Qt::hex << size;
190
191 return candidate;
192 }
193 }
194 return nullptr;
195}
196
199
201 size += Chunk::HeaderSize; // space required for the Chunk header
202 size_t pageSize = WTF::pageSize();
203 size = (size + pageSize - 1) & ~(pageSize - 1); // align to page sizes
204 if (size < Chunk::ChunkSize)
205 size = Chunk::ChunkSize;
206 return size;
207 }
208
209 Chunk *allocate(size_t size = 0);
210 void free(Chunk *chunk, size_t size = 0);
211
213};
214
216{
217 size = requiredChunkSize(size);
218 for (auto &m : memorySegments) {
219 if (~m.allocatedMap) {
220 Chunk *c = m.allocate(size);
221 if (c)
222 return c;
223 }
224 }
225
226 // allocate a new segment
227 memorySegments.push_back(MemorySegment(size));
228 Chunk *c = memorySegments.back().allocate(size);
229 Q_ASSERT(c);
230 return c;
231}
232
233void ChunkAllocator::free(Chunk *chunk, size_t size)
234{
235 size = requiredChunkSize(size);
236 for (auto &m : memorySegments) {
237 if (m.contains(chunk)) {
238 m.free(chunk, size);
239 return;
240 }
241 }
242 Q_ASSERT(false);
243}
244
245#ifdef DUMP_SWEEP
247 QString s = QString::number(n, 2);
248 while (s.length() < 64)
249 s.prepend(QChar::fromLatin1('0'));
250 return s;
251}
252#define SDUMP qDebug
253#else
254QString binary(quintptr) { return QString(); }
255#define SDUMP if (1) ; else qDebug
256#endif
257
259{
260 bool hasUsedSlots = false;
261 SDUMP() << "sweeping chunk" << this;
262 HeapItem *o = realBase();
263 bool lastSlotFree = false;
264 for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) {
266 Q_ASSERT((toFree & objectBitmap[i]) == toFree); // check all black objects are marked as being used
268 SDUMP() << " index=" << i;
269 SDUMP() << " toFree =" << binary(toFree);
270 SDUMP() << " black =" << binary(blackBitmap[i]);
271 SDUMP() << " object =" << binary(objectBitmap[i]);
272 SDUMP() << " extends =" << binary(e);
273 if (lastSlotFree)
274 e &= (e + 1); // clear all lowest extent bits
275 while (toFree) {
277 quintptr bit = (static_cast<quintptr>(1) << index);
278
279 toFree ^= bit; // mask out freed slot
280
281 // remove all extends slots that have been freed
282 // this is a bit of bit trickery.
283 quintptr mask = (bit << 1) - 1; // create a mask of 1's to the right of and up to the current bit
284 quintptr objmask = e | mask; // or'ing mask with e gives all ones until the end of the current object
285 quintptr result = objmask + 1;
286 Q_ASSERT(qCountTrailingZeroBits(result) - index != 0); // ensure we freed something
287 result |= mask; // ensure we don't clear stuff to the right of the current object
288 e &= result;
289
291 Heap::Base *b = *itemToFree;
292 const VTable *v = b->internalClass->vtable;
293// if (Q_UNLIKELY(classCountPtr))
294// classCountPtr(v->className);
295 if (v->destroy) {
296 v->destroy(b);
298 }
299#ifdef V4_USE_HEAPTRACK
301#endif
302 }
304 - (blackBitmap[i] | e)) * Chunk::SlotSize,
307 hasUsedSlots |= (blackBitmap[i] != 0);
308 extendsBitmap[i] = e;
309 lastSlotFree = !((objectBitmap[i]|extendsBitmap[i]) >> (sizeof(quintptr)*8 - 1));
310 SDUMP() << " new extends =" << binary(e);
311 SDUMP() << " lastSlotFree" << lastSlotFree;
313 o += Chunk::Bits;
314 }
315 return hasUsedSlots;
316}
317
319{
320 HeapItem *o = realBase();
321 for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) {
324 while (toFree) {
326 quintptr bit = (static_cast<quintptr>(1) << index);
327
328 toFree ^= bit; // mask out freed slot
329
330 // remove all extends slots that have been freed
331 // this is a bit of bit trickery.
332 quintptr mask = (bit << 1) - 1; // create a mask of 1's to the right of and up to the current bit
333 quintptr objmask = e | mask; // or'ing mask with e gives all ones until the end of the current object
334 quintptr result = objmask + 1;
335 Q_ASSERT(qCountTrailingZeroBits(result) - index != 0); // ensure we freed something
336 result |= mask; // ensure we don't clear stuff to the right of the current object
337 e &= result;
338
340 Heap::Base *b = *itemToFree;
341 if (b->internalClass->vtable->destroy) {
344 }
345#ifdef V4_USE_HEAPTRACK
347#endif
348 }
351 objectBitmap[i] = 0;
352 extendsBitmap[i] = e;
353 o += Chunk::Bits;
354 }
355}
356
358{
359 memset(blackBitmap, 0, sizeof(blackBitmap));
360}
361
363{
365#if QT_POINTER_SIZE == 8
366 const int start = 0;
367#else
368 const int start = 1;
369#endif
370 uint freeSlots = 0;
372
373 for (int i = start; i < EntriesInBitmap; ++i) {
375#if QT_POINTER_SIZE == 8
376 if (!i)
377 usedSlots |= (static_cast<quintptr>(1) << (HeaderSize/SlotSize)) - 1;
378#endif
380 while (1) {
382 if (index == Bits)
383 break;
385 usedSlots &= ~((static_cast<quintptr>(1) << index) - 1);
386 while (!usedSlots) {
387 if (++i < EntriesInBitmap) {
389 } else {
391 // Overflows to 0 when counting trailing zeroes above in next iteration.
392 // Then, all the bits are zeroes and we break.
394 break;
395 }
397 }
399
401 usedSlots |= (quintptr(1) << index) - 1;
402 uint freeEnd = i*Bits + index;
404 freeSlots += nSlots;
407 uint bin = qMin(nBins - 1, nSlots);
409 bins[bin] = freeItem;
410 }
411 }
413}
414
416 Q_ASSERT((size % Chunk::SlotSize) == 0);
418
419 if (allocationStats)
421
422 HeapItem **last;
423
424 HeapItem *m;
425
426 if (slotsRequired < NumBins - 1) {
428 if (m) {
430 goto done;
431 }
432 }
433
434 if (nFree >= slotsRequired) {
435 // use bump allocation
437 m = nextFree;
440 goto done;
441 }
442
443 // search last bin for a large enough item
444 last = &freeBins[NumBins - 1];
445 while ((m = *last)) {
447 *last = m->freeData.next; // take it out of the list
448
450 if (remainingSlots == 0)
451 goto done;
452
454 if (remainingSlots > nFree) {
455 if (nFree) {
460 }
463 } else {
468 }
469 goto done;
470 }
471 last = &m->freeData.next;
472 }
473
474 if (slotsRequired < NumBins - 1) {
475 // check if we can split up another slot
476 for (size_t i = slotsRequired + 1; i < NumBins - 1; ++i) {
477 m = freeBins[i];
478 if (m) {
479 freeBins[i] = m->freeData.next; // take it out of the list
486 goto done;
487 }
488 }
489 }
490
491 if (!m) {
492 if (!forceAllocation)
493 return nullptr;
494 if (nFree) {
495 // Save any remaining slots of the current chunk
496 // for later, smaller allocations.
501 }
507 m = nextFree;
510 }
511
512done:
515#ifdef V4_USE_HEAPTRACK
517#endif
518 return m;
519}
520
522{
523 const auto firstEmptyChunkPos = partition(chunks, [this](const std::size_t i) {
524 return chunks.at(i)->sweep(engine);
525 });
527
528 nextFree = nullptr;
529 nFree = 0;
530 memset(freeBins, 0, sizeof(freeBins));
531
533
537 });
538
539 // only free the chunks at the end to avoid that the sweep() calls indirectly
540 // access freed memory
544 });
545
547}
548
550{
551 for (auto c : chunks)
552 c->freeAll(engine);
553 for (auto c : chunks) {
556 }
557}
558
560{
561 for (auto c : chunks)
562 c->resetBlackBits();
563}
564
566 MemorySegment *m = nullptr;
567 Chunk *c = nullptr;
568 if (size >= MemorySegment::SegmentSize/2) {
569 // too large to handle through the ChunkAllocator, let's get our own memory segement
570 size += Chunk::HeaderSize; // space required for the Chunk header
572 size = (size + pageSize - 1) & ~(pageSize - 1); // align to page sizes
573 m = new MemorySegment(size);
574 c = m->allocate(size);
575 } else {
577 }
578 Q_ASSERT(c);
582#ifdef V4_USE_HEAPTRACK
584#endif
585 return c->first();
586}
587
588static void freeHugeChunk(ChunkAllocator *chunkAllocator, const HugeItemAllocator::HugeChunk &c)
589{
590 HeapItem *itemToFree = c.chunk->first();
591 Heap::Base *b = *itemToFree;
592 const VTable *v = b->internalClass->vtable;
593
594 if (v->destroy) {
595 v->destroy(b);
596 b->_checkIsDestroyed();
597 }
598 if (c.segment) {
599 // own memory segment
600 c.segment->free(c.chunk, c.size);
601 delete c.segment;
602 } else {
603 chunkAllocator->free(c.chunk, c.size);
604 }
605#ifdef V4_USE_HEAPTRACK
606 heaptrack_report_free(c.chunk);
607#endif
608}
609
611{
612 auto isBlack = [this] (const HugeChunk &c) {
613 bool b = c.chunk->first()->isBlack();
615 if (!b) {
618 }
619 return !b;
620 };
621
624}
625
627{
628 for (auto c : chunks)
630}
631
639
640namespace {
641using ExtraData = GCStateInfo::ExtraData;
// First state of the GC state machine: allocate a fresh mark stack and flag
// the engine so other code can see that a collection cycle is in progress
// (the flag is cleared again in doSweep).
642 GCState markStart(GCStateMachine *that, ExtraData &)
643 {
644 // Initialize the mark stack used by all following mark states.
645 that->mm->m_markStack = std::make_unique<MarkStack>(that->mm->engine);
646 that->mm->engine->isGCOngoing = true;
647 return GCState::MarkGlobalObject;
648 }
649
// Asks the engine to push its root objects onto the mark stack.
650 GCState markGlobalObject(GCStateMachine *that, ExtraData &)
651 {
652 that->mm->engine->markObjects(that->mm->m_markStack.get());
653 return GCState::MarkJSStack;
654 }
655
// Scans the JS stack for live references and marks them.
656 GCState markJSStack(GCStateMachine *that, ExtraData &)
657 {
658 that->mm->collectFromJSStack(that->mm->markStack());
659 return GCState::InitMarkPersistentValues;
660 }
661
662GCState initMarkPersistentValues(GCStateMachine *that, ExtraData &stateData)
663{
664 if (!that->mm->m_persistentValues)
665 return GCState::InitMarkWeakValues; // no persistent values to mark
666 stateData = GCIteratorStorage { that->mm->m_persistentValues->begin() };
667 return GCState::MarkPersistentValues;
668}
669
// Batch sizes for the incremental mark loops: the deadline timer is only
// consulted once per batch instead of on every single iteration.
670 enum: int {
671 MarkLoopIterationCount = 1024,
672 MarkLoopIterationCountForDrain = 10240,
673 };
674
675bool wasDrainNecessary(MarkStack *ms, QDeadlineTimer deadline)
676{
677 if (ms->remainingBeforeSoftLimit() > MarkLoopIterationCount)
678 return false;
679 // drain
680 ms->drain(deadline);
681 return true;
682}
683
684GCState markPersistentValues(GCStateMachine *that, ExtraData &stateData) {
685 auto markStack = that->mm->markStack();
686 if (wasDrainNecessary(markStack, that->deadline) && that->deadline.hasExpired())
687 return GCState::MarkPersistentValues;
688 PersistentValueStorage::Iterator& it = get<GCIteratorStorage>(stateData).it;
689 // avoid repeatedly hitting the timer constantly by batching iterations
690 for (int i = 0; i < MarkLoopIterationCount; ++i) {
691 if (!it.p)
692 return GCState::InitMarkWeakValues;
693 if (Managed *m = (*it).as<Managed>())
694 m->mark(markStack);
695 ++it;
696 }
697 return GCState::MarkPersistentValues;
698}
699
700GCState initMarkWeakValues(GCStateMachine *that, ExtraData &stateData)
701{
702 stateData = GCIteratorStorage { that->mm->m_weakValues->begin() };
703 return GCState::MarkWeakValues;
704}
705
706GCState markWeakValues(GCStateMachine *that, ExtraData &stateData)
707{
708 auto markStack = that->mm->markStack();
709 if (wasDrainNecessary(markStack, that->deadline) && that->deadline.hasExpired())
710 return GCState::MarkWeakValues;
711 PersistentValueStorage::Iterator& it = get<GCIteratorStorage>(stateData).it;
712 // avoid repeatedly hitting the timer constantly by batching iterations
713 for (int i = 0; i < MarkLoopIterationCount; ++i) {
714 if (!it.p)
715 return GCState::MarkDrain;
716 QObjectWrapper *qobjectWrapper = (*it).as<QObjectWrapper>();
717 ++it;
718 if (!qobjectWrapper)
719 continue;
720 QObject *qobject = qobjectWrapper->object();
721 if (!qobject)
722 continue;
723 bool keepAlive = QQmlData::keepAliveDuringGarbageCollection(qobject);
724
725 if (!keepAlive) {
726 if (QObject *parent = qobject->parent()) {
727 while (parent->parent())
728 parent = parent->parent();
729 keepAlive = QQmlData::keepAliveDuringGarbageCollection(parent);
730 }
731 }
732
733 if (keepAlive)
734 qobjectWrapper->mark(that->mm->markStack());
735 }
736 return GCState::MarkWeakValues;
737}
738
739GCState markDrain(GCStateMachine *that, ExtraData &)
740{
741 if (that->deadline.isForever()) {
742 that->mm->markStack()->drain();
743 return GCState::MarkReady;
744 }
745 auto drainState = that->mm->m_markStack->drain(that->deadline);
746 return drainState == MarkStack::DrainState::Complete
747 ? GCState::MarkReady
748 : GCState::MarkDrain;
749}
750
751GCState markReady(GCStateMachine *that, ExtraData &)
752{
753 auto isIncrementalRun = [](GCStateMachine* that){
754 return !that->mm->aggressiveGC && that->timeLimit.count() > 0;
755 };
756
757 if (that->mm->crossValidateIncrementalGC && isIncrementalRun(that))
758 return GCState::CrossValidateIncrementalMarkPhase;
759 return GCState::InitCallDestroyObjects;
760}
761
// Debug/validation state (enabled via QV4_MM_CROSS_VALIDATE_INCREMENTAL_GC):
// snapshots the black bitmaps produced by the incremental mark phase for all
// three allocators, clears them, reruns the whole mark phase from scratch,
// and then reports every object that the rerun marked but the incremental
// run did not — such a difference indicates a write-barrier/marking bug.
762 GCState crossValidateIncrementalMarkPhase(GCStateMachine *that, ExtraData &)
763 {
// Uniform accessor: block/ic allocators store Chunk*, the huge-item
// allocator stores a HugeChunk wrapper.
764 struct {
765 Chunk* operator()(Chunk* chunk) { return chunk; }
766 Chunk* operator()(const HugeItemAllocator::HugeChunk& chunk) { return chunk.chunk; }
767 } getChunk{};
768
// Copies every chunk's black bitmap into @p storage, then clears the bits
// so the rerun starts from a clean slate.
769 auto takeBlackBitmap = [&getChunk](auto& allocator, std::vector<quintptr>& storage){
770 for (auto chunk : allocator.chunks) {
771 for (auto& bitmap : getChunk(chunk)->blackBitmap) {
772 storage.push_back(bitmap);
773 }
774 getChunk(chunk)->resetBlackBits();
775 }
776 };
777
// Re-executes the GC state machine from the start up to (but excluding)
// MarkReady, i.e. a full, fresh mark phase.
778 auto runMarkPhase = [](GCStateMachine* that) {
779 that->reset();
780 that->mm->m_markStack.reset();
781
782 while (that->state != GCStateMachine::MarkReady) {
783 GCStateInfo& stateInfo = that->stateInfoMap[int(that->state)];
784 that->state = stateInfo.execute(that, that->stateData);
785 }
786 };
787
// Compares the rerun's bitmaps against @p storedBitmap and reports every
// bit set by the rerun but missing from the incremental run.
788 auto checkBlackBitmap = [&that, &getChunk](auto& allocator, const std::vector<quintptr>& storedBitmap) {
789 auto reportError = [&allocator, &getChunk, &that](std::size_t chunk_index, std::size_t bitmap_index, uint bit_index){
790 #ifdef QT_BUILD_INTERNAL
791 // If we're collecting errors, don't output the debug message.
792 if (auto errors = that->bitmapErrors) {
793 errors->emplace_back(chunk_index, bitmap_index, bit_index);
794 return;
795 }
796 #endif
797
798 Q_UNUSED(that);
799 auto object = reinterpret_cast<Heap::Base*>(getChunk(allocator.chunks[chunk_index])->realBase() + (bit_index + (bitmap_index*Chunk::Bits)));
800 qDebug() << "Cross Validation Error on chunk" << chunk_index
801 << "on bitmap piece" << bitmap_index << "and bit" << bit_index
802 << ((object->internalClass) ? "With type" : "")
803 << ((object->internalClass) ?
804 Managed::typeToString(Managed::Type(object->internalClass->vtable->type)) : QString());
805 };
806
807 auto original = storedBitmap.begin();
808 for (std::size_t chunk_index = 0; original != storedBitmap.end() && chunk_index < allocator.chunks.size(); ++chunk_index) {
809 for (std::size_t bitmap_index = 0; bitmap_index < Chunk::EntriesInBitmap; ++bitmap_index) {
// A bit set in the rerun but clear in the snapshot is an error.
810 if (auto differences = (~(*original)) & getChunk(allocator.chunks[chunk_index])->blackBitmap[bitmap_index]) {
811 while (differences != 0) {
812 uint bit_index = qCountTrailingZeroBits(differences);
813 reportError(chunk_index, bitmap_index, bit_index);
814 differences ^= quintptr{1} << bit_index;
815 }
816 }
817 ++original;
818 }
819 }
820 };
821
822 #ifdef QT_BUILD_INTERNAL
823 if (auto *errors = that->bitmapErrors)
824 errors->clear();
825 #endif
826
// Snapshot and clear the black bits of all three allocators.
827 std::vector<quintptr> blockBitmap{};
828 blockBitmap.reserve(Chunk::EntriesInBitmap * that->mm->blockAllocator.chunks.size());
829 takeBlackBitmap(that->mm->blockAllocator, blockBitmap);
830
831 std::vector<quintptr> hugeItemBitmap{};
832 hugeItemBitmap.reserve(Chunk::EntriesInBitmap * that->mm->hugeItemAllocator.chunks.size());
833 takeBlackBitmap(that->mm->hugeItemAllocator, hugeItemBitmap);
834
835 std::vector<quintptr> internalClassBitmap{};
836 internalClassBitmap.reserve(Chunk::EntriesInBitmap * that->mm->icAllocator.chunks.size());
837 takeBlackBitmap(that->mm->icAllocator, internalClassBitmap);
838
839 runMarkPhase(that);
840
841 checkBlackBitmap(that->mm->blockAllocator, blockBitmap);
842 checkBlackBitmap(that->mm->hugeItemAllocator, hugeItemBitmap);
843 checkBlackBitmap(that->mm->icAllocator, internalClassBitmap);
844
845 return GCState::InitCallDestroyObjects;
846 }
847
/*! \internal
    Collects new references from the JS stack, then drains the mark stack
    again. There is no deletion barrier, so anything that became reachable
    only via the stack since the last drain must be marked before object
    destruction starts.
*/
851 void redrain(GCStateMachine *that)
852 {
853 that->mm->collectFromJSStack(that->mm->markStack());
854 that->mm->m_markStack->drain();
855 }
856
857GCState initCallDestroyObjects(GCStateMachine *that, ExtraData &stateData)
858{
859 // as we don't have a deletion barrier, we need to rescan the stack
860 redrain(that);
861 if (!that->mm->m_weakValues)
862 return GCState::FreeWeakMaps; // no need to call destroy objects
863 stateData = GCIteratorStorage { that->mm->m_weakValues->begin() };
864 return GCState::CallDestroyObjects;
865}
// Calls destroyObject on a batch of unmarked QObject wrappers so their
// destroyed() signals fire before the heap is swept. Since destroyObject can
// run arbitrary user code, the GC is put into a critical section for the
// duration of the batch to prevent re-entrant collections.
866 GCState callDestroyObject(GCStateMachine *that, ExtraData &stateData)
867 {
868 PersistentValueStorage::Iterator& it = get<GCIteratorStorage>(stateData).it;
869 // destroyObject might call user code, which really shouldn't call back into the gc
870 auto oldState = std::exchange(that->mm->gcBlocked, QV4::MemoryManager::Blockness::InCriticalSection);
871 auto cleanup = qScopeGuard([&]() {
872 that->mm->gcBlocked = oldState;
873 });
874 // avoid repeatedly hitting the timer constantly by batching iterations
875 for (int i = 0; i < MarkLoopIterationCount; ++i) {
876 if (!it.p)
877 return GCState::FreeWeakMaps;
878 Managed *m = (*it).managed();
879 ++it;
// Marked objects survive this cycle; skip them.
880 if (!m || m->markBit())
881 continue;
882 // we need to call destroyObject on qobjectwrappers now, so that they can emit the destroyed
883 // signal before we start sweeping the heap
884 if (QObjectWrapper *qobjectWrapper = m->as<QObjectWrapper>())
885 qobjectWrapper->destroyObject(/*lastSweep =*/false);
886 }
887 return GCState::CallDestroyObjects;
888 }
889
// Walks the linked list of weak maps, dropping unmarked (dead) maps from the
// chain and pruning unmarked keys from the surviving ones. lastMap always
// points at the link field that should receive the next surviving map, so
// dead maps are unlinked implicitly.
890 void freeWeakMaps(MemoryManager *mm)
891 {
892 for (auto [map, lastMap] = std::tuple {mm->weakMaps, &mm->weakMaps }; map; map = map->nextWeakMap) {
893 if (!map->isMarked())
894 continue;
895 map->removeUnmarkedKeys();
896 *lastMap = map;
897 lastMap = &map->nextWeakMap;
898 }
899 }
900
// State-machine wrapper around freeWeakMaps(MemoryManager *).
901 GCState freeWeakMaps(GCStateMachine *that, ExtraData &)
902 {
903 freeWeakMaps(that->mm);
904 return GCState::FreeWeakSets;
905 }
906
// Same scheme as freeWeakMaps: unlink unmarked weak sets from the chain and
// prune unmarked keys from the marked ones.
907 void freeWeakSets(MemoryManager *mm)
908 {
909 for (auto [set, lastSet] = std::tuple {mm->weakSets, &mm->weakSets}; set; set = set->nextWeakSet) {
910
911 if (!set->isMarked())
912 continue;
913 set->removeUnmarkedKeys();
914 *lastSet = set;
915 lastSet = &set->nextWeakSet;
916 }
917 }
918
// State-machine wrapper around freeWeakSets(MemoryManager *).
919 GCState freeWeakSets(GCStateMachine *that, ExtraData &)
920 {
921 freeWeakSets(that->mm);
922 return GCState::HandleQObjectWrappers;
923 }
924
// Cleans up dead QObject wrapper values before the sweep proper starts.
925 GCState handleQObjectWrappers(GCStateMachine *that, ExtraData &)
926 {
927 that->mm->cleanupDeletedQObjectWrappersInSweep();
928 return GCState::DoSweep;
929 }
930
// Final GC state: sweeps all allocators (and the identifier table), resets
// the mark bits, updates bookkeeping, and unblocks the GC. Returns Invalid
// to signal that the state machine has finished its cycle.
931 GCState doSweep(GCStateMachine *that, ExtraData &)
932 {
933 auto mm = that->mm;
934
935 mm->engine->identifierTable->sweep();
936 mm->blockAllocator.sweep();
937 mm->hugeItemAllocator.sweep();
938 mm->icAllocator.sweep();
939
940 // reset all black bits
941 mm->blockAllocator.resetBlackBits();
942 mm->hugeItemAllocator.resetBlackBits();
943 mm->icAllocator.resetBlackBits();
944
// Record post-sweep usage and clear all in-progress GC state so the next
// cycle can start cleanly.
945 mm->usedSlotsAfterLastFullSweep = mm->blockAllocator.usedSlotsAfterLastSweep + mm->icAllocator.usedSlotsAfterLastSweep;
946 mm->gcBlocked = MemoryManager::Unblocked;
947 mm->m_markStack.reset();
948 mm->engine->isGCOngoing = false;
949
950 mm->updateUnmanagedHeapSizeGCLimit();
951
952 return GCState::Invalid;
953 }
954
955}
956
957
959 : engine(engine)
967 , aggressiveGC(!qEnvironmentVariableIsEmpty("QV4_MM_AGGRESSIVE_GC"))
968 , crossValidateIncrementalGC(qEnvironmentVariableIsSet("QV4_MM_CROSS_VALIDATE_INCREMENTAL_GC"))
973{
974#ifdef V4_USE_VALGRIND
975 VALGRIND_CREATE_MEMPOOL(this, 0, true);
976#endif
977 if (statistics) {
980 }
981
983 gcStateMachine->mm = this;
984
986 markStart,
987 false,
988 };
991 false,
992 };
995 false,
996 };
999 false,
1000 };
1003 false,
1004 };
1007 false,
1008 };
1011 false,
1012 };
1014 markDrain,
1015 false,
1016 };
1018 markReady,
1019 false,
1020 };
1023 false,
1024 };
1027 false,
1028 };
1031 false,
1032 };
1035 false,
1036 };
1039 true, // ensure that handleQObjectWrappers runs in isolation
1040 };
1043 false,
1044 };
1046 doSweep,
1047 false,
1048 };
1049}
1050
1064
1066{
1067#ifdef MM_STATS
1070#endif
1071
1073 Q_ASSERT(size % Chunk::SlotSize == 0);
1074
1076 memset(m, 0, size);
1077 return *m;
1078}
1079
1081{
1083 Q_ASSERT(!(size % sizeof(HeapItem)));
1084
1085 Heap::Object *o;
1087 o = static_cast<Heap::Object *>(allocData(size));
1088 } else {
1089 // Allocate both in one go through the block allocator
1091 std::size_t memberSize = align(sizeof(Heap::MemberData) + (nMembers - 1)*sizeof(Value));
1093 Heap::MemberData *m;
1094 if (totalSize > Chunk::DataSize) {
1095 o = static_cast<Heap::Object *>(allocData(size));
1097 } else {
1098 HeapItem *mh = reinterpret_cast<HeapItem *>(allocData(totalSize));
1099 Heap::Base *b = *mh;
1100 o = static_cast<Heap::Object *>(b);
1101 mh += (size >> Chunk::SlotSizeShift);
1102 m = mh->as<Heap::MemberData>();
1103 Chunk *c = mh->chunk();
1104 size_t index = mh - c->realBase();
1107 }
1109 o->memberData.set(engine, m);
1111 m->values.alloc = static_cast<uint>((memberSize - sizeof(Heap::MemberData) + sizeof(Value))/sizeof(Value));
1113 m->init();
1114 }
1115
1116 return o;
1117}
1118
1120 : m_engine(engine)
1121{
1122 m_base = (Heap::Base **)engine->gcStack->base();
1123 m_top = m_base;
1124 const size_t size = engine->maxGCStackSize() / sizeof(Heap::Base);
1126 m_softLimit = m_base + size * 3 / 4;
1127}
1128
1130{
1131 // we're not calling drain(QDeadlineTimer::Forever) as that has higher overhead
1132 while (m_top > m_base) {
1133 Heap::Base *h = pop();
1134 Q_ASSERT(h); // at this point we should only have Heap::Base objects in this area on the stack. If not, weird things might happen.
1137 }
1138}
1139
1141{
1142 do {
1143 for (int i = 0; i <= MarkLoopIterationCountForDrain; ++i) {
1144 if (m_top == m_base)
1145 return DrainState::Complete;
1146 Heap::Base *h = pop();
1147 Q_ASSERT(h); // at this point we should only have Heap::Base objects in this area on the stack. If not, weird things might happen.
1150 }
1151 } while (!deadline.hasExpired());
1152 return DrainState::Ongoing;
1153}
1154
1160
1162{
1163 if (engine->inShutdown)
1164 return;
1167 onEventLoop();
1168 }, Qt::QueuedConnection);
1169 return;
1170 }
1171 if (!gcStateMachine->inProgress())
1172 return;
1173
1174 if (collectorStatistics) {
1176 if (!gcStateMachine->inProgress())
1177 collectorStatistics->end(this);
1178 } else {
1180 }
1181}
1182
1183
1188
1190{
1191
1193 Managed *m = (*it).managed();
1194 if (!m || m->markBit())
1195 continue;
1196 // we need to call destroyObject on qobjectwrappers now, so that they can emit the destroyed
1197 // signal before we start sweeping the heap
1200 }
1201 }
1202
1203 freeWeakMaps(this);
1204 freeWeakSets(this);
1205
1207
1208 if (!lastSweep) {
1210 blockAllocator.sweep(/*classCountPtr*/);
1212 icAllocator.sweep(/*classCountPtr*/);
1213 }
1214
1215 // reset all black bits
1219
1223}
1224
1225/*
1226 \internal
1227 Helper function used in sweep to clean up the (to-be-freed) QObjectWrapper
1228 Used both in MemoryManager::sweep, and the corresponding gc statemachine phase
1229*/
1231{
1232 // onDestruction handlers may have accessed other QObject wrappers and reset their value, so ensure
1233 // that they are all set to undefined.
1235 Managed *m = (*it).managed();
1236 if (!m || m->markBit())
1237 continue;
1238 (*it) = Value::undefinedValue();
1239 }
1240
1241 // Now it is time to free QV4::QObjectWrapper Value, we must check the Value's tag to make sure its object has been destroyed
1243 if (pendingCount) {
1246 for (int i = 0; i < pendingCount; ++i) {
1248 if (v->isUndefined() || v->isEmpty())
1250 else
1252 }
1254 }
1255
1258 if (it.value().isNullOrUndefined())
1260 else
1261 ++it;
1262 }
1263 }
1264}
1265
1266bool MemoryManager::shouldRunGC() const
1267{
1270 return true;
1271 return false;
1272}
1273
1274static size_t dumpBins(BlockAllocator *b, const char *title)
1275{
1276 const QLoggingCategory &stats = lcGcAllocatorStats();
1277 size_t totalSlotMem = 0;
1278 if (title)
1279 qDebug(stats) << "Slot map for" << title << "allocator:";
1280 for (uint i = 0; i < BlockAllocator::NumBins; ++i) {
1281 uint nEntries = 0;
1282 HeapItem *h = b->freeBins[i];
1283 while (h) {
1284 ++nEntries;
1285 totalSlotMem += h->freeData.availableSlots;
1286 h = h->freeData.next;
1287 }
1288 if (title)
1289 qDebug(stats) << " number of entries in slot" << i << ":" << nEntries;
1290 }
1291 SDUMP() << " large slot map";
1292 HeapItem *h = b->freeBins[BlockAllocator::NumBins - 1];
1293 while (h) {
1294 SDUMP() << " " << Qt::hex << (quintptr(h)/32) << h->freeData.availableSlots;
1295 h = h->freeData.next;
1296 }
1297
1298 if (title)
1299 qDebug(stats) << " total mem in bins" << totalSlotMem*Chunk::SlotSize;
1300 return totalSlotMem*Chunk::SlotSize;
1301}
1302
1303/*!
1304 \internal
1305 Precondition: Incremental garbage collection must be currently active
1306 Finishes incremental garbage collection, unless in a critical section
1307 Code entering a critical section is expected to check if we need to
1308 force a gc completion, and to trigger the gc again if necessary
1309 when exiting the critcial section.
1310 Returns \c true if the gc cycle completed, false otherwise.
1311 */
1313{
1316 << "Tried to force the GC to complete a run but failed due to being in a critical section.";
1317 return false;
1318 }
1319
1320 const bool incrementalGCIsAlreadyRunning = m_markStack != nullptr;
1322
1323 qCDebug(lcGcForcedRuns) << "Forcing the GC to complete a run.";
1324
1326 if (collectorStatistics) {
1327 while (gcStateMachine->inProgress())
1329 collectorStatistics->end(this);
1330 } else {
1331 while (gcStateMachine->inProgress())
1333 }
1334
1336 return true;
1337}
1338
1340{
1341 runGC();
1342 if (m_markStack != nullptr)
1344}
1345
1347{
1348 if (gcBlocked != Unblocked) {
1349 return;
1350 }
1351
1353
1354 if (statistics) {
1359 }
1360
1361 if (collectorStatistics) {
1362 if (!gcStateMachine->inProgress())
1365 if (!gcStateMachine->inProgress())
1366 collectorStatistics->end(this);
1367 } else {
1369 }
1370
1371
1372 if (statistics) {
1375 }
1376}
1377
1382
1387
1392
1394{
1396 // more than 75% full, raise limit
1398 unmanagedHeapSize) * 2;
1399 } else if (unmanagedHeapSize * 4 <= unmanagedHeapSizeGCLimit) {
1400 // less than 25% full, lower limit
1403 }
1404
1405 if (aggressiveGC && !engine->inShutdown) {
1406 // ensure we don't 'loose' any memory
1407 // but not during shutdown, because than we skip parts of sweep
1408 // and use freeAll instead
1410 == blockAllocator.usedMem() + dumpBins(&blockAllocator, nullptr));
1412 == icAllocator.usedMem() + dumpBins(&icAllocator, nullptr));
1413 }
1414}
1415
1421
1427
1429{
1430 delete m_persistentValues;
1431 dumpStats();
1432
1433 // do one last non-incremental sweep to clean up C++ objects
1434 // first, abort any on-going incremental gc operation
1435 setGCTimeLimit(-1);
1436 if (engine->isGCOngoing) {
1437 engine->isGCOngoing = false;
1443 }
1444 // then sweep
1445 sweep(/*lastSweep*/true);
1446
1450
1451 delete m_weakValues;
1452#ifdef V4_USE_VALGRIND
1454#endif
1455 delete chunkAllocator;
1456}
1457
1458
1460{
1461 if (!statistics)
1462 return;
1463
1464 const QLoggingCategory &stats = lcGcStats();
1465 qDebug(stats) << "Qml GC memory allocation statistics:";
1466 qDebug(stats) << "Total memory allocated:" << statistics->maxAllocatedMem;
1467 qDebug(stats) << "Max memory used before a GC run:" << statistics->maxUsedBeforeGC;
1468 qDebug(stats) << "Max memory used after a GC run:" << statistics->maxUsedAfterGC;
1469 qDebug(stats) << "Requests for different item sizes:";
1470 for (int i = 1; i < BlockAllocator::NumBins - 1; ++i)
1471 qDebug(stats) << " <" << (i << Chunk::SlotSizeShift) << " bytes: " << statistics->allocations[i];
1472 qDebug(stats) << " >=" << ((BlockAllocator::NumBins - 1) << Chunk::SlotSizeShift) << " bytes: " << statistics->allocations[BlockAllocator::NumBins - 1];
1473}
1474
1476{
1479 while (v < top) {
1480 Managed *m = v->managed();
1481 if (m) {
1482 Q_ASSERT(m->inUse());
1483 // Skip pointers to already freed objects, they are bogus as well
1484 m->mark(markStack);
1485 }
1486 ++v;
1487 }
1488
1489 for (auto *frame = engine->currentStackFrame; frame; frame = frame->parentFrame()) {
1490 if (!frame->isMetaTypesFrame())
1491 continue;
1492
1494 = static_cast<const MetaTypesStackFrame *>(frame)->locals()) {
1495 // Actual AOT-compiled functions initialize the locals first thing when they
1496 // are called. However, the ScopedStackFrame has no locals, but still uses a
1497 // MetaTypesStackFrame.
1499 }
1500 }
1501}
1502
1505{
1506 // base assumption: target 60fps, use at most 1/3 of time for gc
1507 // unless overridden by env variable
1508 bool ok = false;
1509 auto envTimeLimit = qEnvironmentVariableIntValue("QV4_GC_TIMELIMIT", &ok );
1510 if (!ok)
1511 envTimeLimit = (1000 / 60) / 3;
1512 if (envTimeLimit > 0)
1514 else
1515 timeLimit = std::chrono::milliseconds { 0 };
1516}
1517
1518static void logStepTiming(GCStateMachine* that, quint64 timing) {
1519 auto registerTimingWithResetOnOverflow = [](
1520 GCStateMachine::StepTiming& storage, quint64 timing, GCState state
1521 ) {
1522 auto wouldOverflow = [](quint64 lhs, quint64 rhs) {
1523 return rhs > 0 && lhs > std::numeric_limits<quint64>::max() - rhs;
1524 };
1525
1526 if (wouldOverflow(storage.rolling_sum, timing) || wouldOverflow(storage.count, 1)) {
1527 qDebug(lcGcStepExecution) << "Resetting timings storage for"
1528 << QMetaEnum::fromType<GCState>().key(state) << "due to overflow.";
1529 storage.rolling_sum = timing;
1530 storage.count = 1;
1531 } else {
1532 storage.rolling_sum += timing;
1533 storage.count += 1;
1534 }
1535 };
1536
1537 GCStateMachine::StepTiming& storage = that->executionTiming[that->state];
1538 registerTimingWithResetOnOverflow(storage, timing, that->state);
1539
1540 qDebug(lcGcStepExecution) << "Performed" << QMetaEnum::fromType<GCState>().key(that->state)
1541 << "in" << timing << "microseconds";
1542 qDebug(lcGcStepExecution) << "This step was performed" << storage.count << " time(s), executing in"
1543 << (storage.rolling_sum / storage.count) << "microseconds on average.";
1544}
1545
1546static GCState executeWithLoggingIfEnabled(GCStateMachine* that, GCStateInfo& stateInfo) {
1547 if (!that->collectTimings)
1548 return stateInfo.execute(that, that->stateData);
1549
1550 QElapsedTimer timer;
1551 timer.start();
1552 GCState next = stateInfo.execute(that, that->stateData);
1553 logStepTiming(that, timer.nsecsElapsed()/1000);
1554 return next;
1555}
1556
1557static void redrainDuringSweep(GCStateMachine *that)
1558{
1559 if (that->state > GCState::InitCallDestroyObjects) {
1560 /* initCallDestroyObjects is the last action which drains the mark
1561 stack by default. But as our write-barrier might end up putting
1562 objects on the markStack which still reference other objects.
1563 Especially when we call user code triggered by Component.onDestruction,
1564 but also when we run into a timeout.
1565 We don't redrain before InitCallDestroyObjects, as that would
1566 potentially lead to useless busy-work (e.g., if the last referencs
1567 to objects are removed while the mark phase is running)
1568 */
1569 redrain(that);
1570 }
1571}
1572
1574 if (timeLimit.count() > 0) {
1576 bool deadlineExpired = false;
1577 do {
1578 redrainDuringSweep(this);
1579 qCDebug(lcGcStateTransitions) << "Preparing to execute the"
1580 << QMetaEnum::fromType<GCState>().key(state) << "state";
1583 qCDebug(lcGcStateTransitions) << "Transitioning to the"
1584 << QMetaEnum::fromType<GCState>().key(state) << "state";
1586 break;
1587 } while (!(deadlineExpired = deadline.hasExpired()) && state != GCState::Invalid);
1588 if (deadlineExpired)
1590 if (state != GCState::Invalid)
1592 mm->onEventLoop();
1593 }, Qt::QueuedConnection);
1594 } else {
1596 while (state != GCState::Invalid) {
1597 redrainDuringSweep(this);
1598 qCDebug(lcGcStateTransitions) << "Preparing to execute the"
1599 << QMetaEnum::fromType<GCState>().key(state) << "state";
1602 qCDebug(lcGcStateTransitions) << "Transitioning to the"
1603 << QMetaEnum::fromType<GCState>().key(state) << "state";
1604 }
1605 }
1606}
1607
1620
1622{
1624
1630
1631 qDebug(stats) << "========== GC ==========";
1632#ifdef MM_STATS
1633 qDebug(stats) << " Triggered by alloc request of" << mm->lastAllocRequestedSlots << "slots.";
1634 qDebug(stats) << " Allocations since last GC" << mm->allocationCount;
1635 mm->allocationCount = 0;
1636#endif
1638 qDebug(stats) << "Allocated" << allocatedMem << "bytes in" << oldChunks << "chunks";
1639 qDebug(stats) << "Fragmented memory before GC" << (allocatedMem - regularItemsBefore);
1640 dumpBins(&mm->blockAllocator, "Block");
1641 dumpBins(&mm->icAllocator, "InternalClass");
1642}
1643
1651
1653{
1655
1658
1660 qDebug(stats) << "triggered by unmanaged heap:";
1661 qDebug(stats) << " old unmanaged heap size:" << oldUnmanagedSize;
1662 qDebug(stats) << " new unmanaged heap:" << mm->unmanagedHeapSize;
1663 qDebug(stats) << " unmanaged heap limit:" << mm->unmanagedHeapSizeGCLimit;
1664 }
1665 const size_t memInBins = dumpBins(&mm->blockAllocator, "Block")
1666 + dumpBins(&mm->icAllocator, "InternalClasss");
1667 qDebug(stats) << "Garbage collection took" << (gcTime / 1000) << "us.";
1668
1669 qDebug(stats) << "Regular item memory before GC:" << regularItemsBefore;
1670 qDebug(stats) << "Regular item memory after GC:" << regularItemsAfter;
1671 qDebug(stats) << "Freed up bytes :" << (regularItemsBefore - regularItemsAfter);
1672 qDebug(stats) << "Freed up chunks :" << (oldChunks - mm->blockAllocator.chunks.size());
1675 if (lost)
1676 qDebug(stats) << "!!!!!!!!!!!!!!!!!!!!! LOST MEM:" << lost << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!";
1678 qDebug(stats) << "Large item memory before GC:" << largeItemsBefore;
1679 qDebug(stats) << "Large item memory after GC:" << largeItemsAfter;
1680 qDebug(stats) << "Large item memory freed up:" << (largeItemsBefore - largeItemsAfter);
1681 }
1682
1683 qDebug(stats) << "======== End GC ========";
1684}
1685
1686} // namespace QV4
1687
1688QT_END_NAMESPACE
1689
1690#include "moc_qv4mm_p.cpp"
Definition qjsvalue.h:24
static void logStepTiming(GCStateMachine *that, quint64 timing)
Definition qv4mm.cpp:1518
static size_t dumpBins(BlockAllocator *b, const char *title)
Definition qv4mm.cpp:1274
QString binary(quintptr)
Definition qv4mm.cpp:254
@ MinSlotsGCLimit
Definition qv4mm.cpp:74
@ GCOverallocation
Definition qv4mm.cpp:75
static void freeHugeChunk(ChunkAllocator *chunkAllocator, const HugeItemAllocator::HugeChunk &c)
Definition qv4mm.cpp:588
static GCState executeWithLoggingIfEnabled(GCStateMachine *that, GCStateInfo &stateInfo)
Definition qv4mm.cpp:1546
static void redrainDuringSweep(GCStateMachine *that)
Definition qv4mm.cpp:1557
QT_BEGIN_NAMESPACE Q_STATIC_LOGGING_CATEGORY(lcSynthesizedIterableAccess, "qt.iterable.synthesized", QtWarningMsg)
#define SDUMP
Definition qv4mm.cpp:255
#define MM_STATS
Definition qv4mm.cpp:35
void free(Chunk *chunk, size_t size=0)
Definition qv4mm.cpp:233
size_t requiredChunkSize(size_t size)
Definition qv4mm.cpp:200
Chunk * allocate(size_t size=0)
Definition qv4mm.cpp:215
std::vector< MemorySegment > memorySegments
Definition qv4mm.cpp:212
void free(Chunk *chunk, size_t size)
Definition qv4mm.cpp:131
Chunk * allocate(size_t size)
Definition qv4mm.cpp:164
MemorySegment(size_t size)
Definition qv4mm.cpp:88
PageReservation pageReservation
Definition qv4mm.cpp:157
void setBit(size_t index)
Definition qv4mm.cpp:114
size_t availableBytes
Definition qv4mm.cpp:160
bool testBit(size_t index) const
Definition qv4mm.cpp:124
quint64 allocatedMap
Definition qv4mm.cpp:159
void clearBit(size_t index)
Definition qv4mm.cpp:119
bool contains(Chunk *c) const
Definition qv4mm.cpp:153
MemorySegment(MemorySegment &&other)
Definition qv4mm.cpp:101