Qt
Internal/Contributor docs for the Qt SDK. Note: These are NOT official API docs; those are found at https://doc.qt.io/
qv4mm.cpp
1// Copyright (C) 2021 The Qt Company Ltd.
2// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only
3// Qt-Security score:critical reason:low-level-memory-management
4
5#include "PageAllocation.h"
6#include "PageReservation.h"
7
8#include <private/qnumeric_p.h>
9#include <private/qv4alloca_p.h>
10#include <private/qv4engine_p.h>
11#include <private/qv4identifiertable_p.h>
12#include <private/qv4mapobject_p.h>
13#include <private/qv4mm_p.h>
14#include <private/qv4object_p.h>
15#include <private/qv4profiling_p.h>
16#include <private/qv4qobjectwrapper_p.h>
17#include <private/qv4setobject_p.h>
18#include <private/qv4stackframe_p.h>
19
20#include <QtQml/qqmlengine.h>
21
22#include <QtCore/qalgorithms.h>
23#include <QtCore/qelapsedtimer.h>
24#include <QtCore/qloggingcategory.h>
25#include <QtCore/qmap.h>
26#include <QtCore/qscopedvaluerollback.h>
27
28#include <algorithm>
29#include <chrono>
30#include <cstdlib>
31
32//#define MM_STATS
33
34#if !defined(MM_STATS) && !defined(QT_NO_DEBUG)
35#define MM_STATS
36#endif
37
38#if MM_DEBUG
39#define DEBUG qDebug() << "MM:"
40#else
41#define DEBUG if (1) ; else qDebug() << "MM:"
42#endif
43
44#ifdef V4_USE_VALGRIND
45#include <valgrind/valgrind.h>
46#include <valgrind/memcheck.h>
47#endif
48
49#ifdef V4_USE_HEAPTRACK
50#include <heaptrack_api.h>
51#endif
52
53#if OS(QNX)
54#include <sys/storage.h> // __tls()
55#endif
56
57#if USE(PTHREADS) && HAVE(PTHREAD_NP_H)
58#include <pthread_np.h>
59#endif
60
61Q_STATIC_LOGGING_CATEGORY(lcGcStats, "qt.qml.gc.statistics")
62Q_STATIC_LOGGING_CATEGORY(lcGcAllocatorStats, "qt.qml.gc.allocatorStats")
63Q_STATIC_LOGGING_CATEGORY(lcGcStateTransitions, "qt.qml.gc.stateTransitions")
64Q_STATIC_LOGGING_CATEGORY(lcGcForcedRuns, "qt.qml.gc.forcedRuns")
65Q_STATIC_LOGGING_CATEGORY(lcGcStepExecution, "qt.qml.gc.stepExecution")
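// These categories can be enabled at run time through logging rules, e.g.
// QT_LOGGING_RULES="qt.qml.gc.statistics.debug=true;qt.qml.gc.allocatorStats.debug=true"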
66
67using namespace WTF;
68
69QT_BEGIN_NAMESPACE
70
71namespace QV4 {
72
73enum {
74 MinSlotsGCLimit = QV4::Chunk::AvailableSlots*16,
75 GCOverallocation = 200 /* Max overallocation by the GC in % */
76};
77
78struct MemorySegment {
 79 enum {
80#ifdef Q_OS_RTEMS
81 NumChunks = sizeof(quint64),
82#else
83 NumChunks = 8*sizeof(quint64),
84#endif
85 SegmentSize = NumChunks*Chunk::ChunkSize,
86 };
87
88 MemorySegment(size_t size)
89 {
90 size += Chunk::ChunkSize; // make sure we can get enough 64k alignment memory
91 if (size < SegmentSize)
92 size = SegmentSize;
93
94 pageReservation = PageReservation::reserve(size, OSAllocator::JSGCHeapPages);
95 base = reinterpret_cast<Chunk *>((reinterpret_cast<quintptr>(pageReservation.base()) + Chunk::ChunkSize - 1) & ~(Chunk::ChunkSize - 1));
96 nChunks = NumChunks;
 97 availableBytes = size - (reinterpret_cast<quintptr>(base) - reinterpret_cast<quintptr>(pageReservation.base()));
98 if (availableBytes < SegmentSize)
99 --nChunks;
100 }
101 MemorySegment(MemorySegment &&other) {
 102 qSwap(pageReservation, other.pageReservation);
103 qSwap(base, other.base);
104 qSwap(allocatedMap, other.allocatedMap);
105 qSwap(availableBytes, other.availableBytes);
106 qSwap(nChunks, other.nChunks);
107 }
108
109 ~MemorySegment() {
 110 if (base)
111 pageReservation.deallocate();
112 }
113
114 void setBit(size_t index) {
115 Q_ASSERT(index < nChunks);
116 quint64 bit = static_cast<quint64>(1) << index;
117 allocatedMap |= bit;
118 }
119 void clearBit(size_t index) {
120 Q_ASSERT(index < nChunks);
121 quint64 bit = static_cast<quint64>(1) << index;
122 allocatedMap &= ~bit;
123 }
124 bool testBit(size_t index) const {
125 Q_ASSERT(index < nChunks);
126 quint64 bit = static_cast<quint64>(1) << index;
127 return (allocatedMap & bit);
128 }
129
130 Chunk *allocate(size_t size);
131 void free(Chunk *chunk, size_t size) {
132 DEBUG << "freeing chunk" << chunk;
133 size_t index = static_cast<size_t>(chunk - base);
134 size_t end = qMin(static_cast<size_t>(NumChunks), index + (size - 1)/Chunk::ChunkSize + 1);
135 while (index < end) {
136 Q_ASSERT(testBit(index));
137 clearBit(index);
138 ++index;
139 }
140
141 size_t pageSize = WTF::pageSize();
142 size = (size + pageSize - 1) & ~(pageSize - 1);
143#if !defined(Q_OS_LINUX) && !defined(Q_OS_WIN)
144 // Linux and Windows zero out pages that have been decommitted and get committed again.
145 // unfortunately that's not true on other OSes (e.g. BSD based ones), so zero out the
146 // memory before decommit, so that we can be sure that all chunks we allocate will be
147 // zero initialized.
148 memset(chunk, 0, size);
149#endif
150 pageReservation.decommit(chunk, size);
151 }
152
153 bool contains(Chunk *c) const {
154 return c >= base && c < base + nChunks;
155 }
156
157 PageReservation pageReservation;
 158 Chunk *base = nullptr;
 159 quint64 allocatedMap = 0;
 160 size_t availableBytes = 0;
 161 uint nChunks = 0;
162};
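// A MemorySegment reserves address space for up to NumChunks chunks (64 on most
// platforms, 8 on RTEMS, see the enum above) and commits/decommits them individually;
// allocatedMap holds one bit per chunk, so a single quint64 covers the whole segment.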
163
164Chunk *MemorySegment::allocate(size_t size)
165{
166 if (!allocatedMap && size >= SegmentSize) {
167 // chunk allocated for one huge allocation
168 Q_ASSERT(availableBytes >= size);
169 pageReservation.commit(base, size);
170 allocatedMap = ~static_cast<quint64>(0);
171 return base;
172 }
173 size_t requiredChunks = (size + sizeof(Chunk) - 1)/sizeof(Chunk);
174 uint sequence = 0;
175 Chunk *candidate = nullptr;
176 for (uint i = 0; i < nChunks; ++i) {
177 if (!testBit(i)) {
178 if (!candidate)
179 candidate = base + i;
180 ++sequence;
181 } else {
182 candidate = nullptr;
183 sequence = 0;
184 }
185 if (sequence == requiredChunks) {
186 pageReservation.commit(candidate, size);
187 for (uint i = 0; i < requiredChunks; ++i)
188 setBit(candidate - base + i);
189 DEBUG << "allocated chunk " << candidate << Qt::hex << size;
190
191 return candidate;
192 }
193 }
194 return nullptr;
195}
196
197struct ChunkAllocator {
 198 ChunkAllocator() {}
 199
 200 size_t requiredChunkSize(size_t size) {
201 size += Chunk::HeaderSize; // space required for the Chunk header
202 size_t pageSize = WTF::pageSize();
203 size = (size + pageSize - 1) & ~(pageSize - 1); // align to page sizes
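 // rounds up to the next multiple of the page size, e.g. 5000 -> 8192 with 4 KiB pages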
204 if (size < Chunk::ChunkSize)
205 size = Chunk::ChunkSize;
206 return size;
207 }
208
209 Chunk *allocate(size_t size = 0);
210 void free(Chunk *chunk, size_t size = 0);
211
212 std::vector<MemorySegment> memorySegments;
 213};
214
215Chunk *ChunkAllocator::allocate(size_t size)
216{
217 size = requiredChunkSize(size);
218 for (auto &m : memorySegments) {
219 if (~m.allocatedMap) {
220 Chunk *c = m.allocate(size);
221 if (c)
222 return c;
223 }
224 }
225
226 // allocate a new segment
227 memorySegments.push_back(MemorySegment(size));
228 Chunk *c = memorySegments.back().allocate(size);
229 Q_ASSERT(c);
230 return c;
231}
232
233void ChunkAllocator::free(Chunk *chunk, size_t size)
234{
235 size = requiredChunkSize(size);
236 for (auto &m : memorySegments) {
237 if (m.contains(chunk)) {
238 m.free(chunk, size);
239 return;
240 }
241 }
242 Q_ASSERT(false);
243}
244
245#ifdef DUMP_SWEEP
246QString binary(quintptr n) {
 247 QString s = QString::number(n, 2);
248 while (s.length() < 64)
249 s.prepend(QChar::fromLatin1('0'));
250 return s;
251}
252#define SDUMP qDebug
253#else
254QString binary(quintptr) { return QString(); }
255#define SDUMP if (1) ; else qDebug
256#endif
257
258bool Chunk::sweep(ExecutionEngine *engine)
259{
260 bool hasUsedSlots = false;
261 SDUMP() << "sweeping chunk" << this;
262 HeapItem *o = realBase();
263 bool lastSlotFree = false;
264 for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) {
265 quintptr toFree = objectBitmap[i] ^ blackBitmap[i];
 266 Q_ASSERT((toFree & objectBitmap[i]) == toFree); // check all black objects are marked as being used
 267 quintptr e = extendsBitmap[i];
268 SDUMP() << " index=" << i;
269 SDUMP() << " toFree =" << binary(toFree);
270 SDUMP() << " black =" << binary(blackBitmap[i]);
271 SDUMP() << " object =" << binary(objectBitmap[i]);
272 SDUMP() << " extends =" << binary(e);
273 if (lastSlotFree)
274 e &= (e + 1); // clear all lowest extent bits
275 while (toFree) {
276 uint index = qCountTrailingZeroBits(toFree);
 277 quintptr bit = (static_cast<quintptr>(1) << index);
278
279 toFree ^= bit; // mask out freed slot
280
281 // remove all extends slots that have been freed
282 // this is a bit of bit trickery.
283 quintptr mask = (bit << 1) - 1; // create a mask of 1's to the right of and up to the current bit
284 quintptr objmask = e | mask; // or'ing mask with e gives all ones until the end of the current object
285 quintptr result = objmask + 1;
286 Q_ASSERT(qCountTrailingZeroBits(result) - index != 0); // ensure we freed something
287 result |= mask; // ensure we don't clear stuff to the right of the current object
288 e &= result;
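 // For example, with index == 2 and an object occupying slots 2..4, plus a
 // foreign extent bit at slot 7:
 //   e       = 0b10011000  (extent bits 3 and 4 belong to the freed object)
 //   bit     = 0b00000100
 //   mask    = 0b00000111
 //   objmask = 0b10011111
 //   result  = 0b10100000, then result |= mask -> 0b10100111
 //   e &= result -> 0b10000000  (only the foreign extent bit survives)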
289
290 HeapItem *itemToFree = o + index;
 291 Heap::Base *b = *itemToFree;
292 const VTable *v = b->internalClass->vtable;
293// if (Q_UNLIKELY(classCountPtr))
294// classCountPtr(v->className);
295 if (v->destroy) {
296 v->destroy(b);
297 b->_checkIsDestroyed();
 298 }
299#ifdef V4_USE_HEAPTRACK
300 heaptrack_report_free(itemToFree);
 301#endif
302 }
303 Q_V4_PROFILE_DEALLOC(engine, qPopulationCount((objectBitmap[i] | extendsBitmap[i])
 304 - (blackBitmap[i] | e)) * Chunk::SlotSize,
 305 Profiling::SmallItem);
 306 objectBitmap[i] = blackBitmap[i];
 307 hasUsedSlots |= (blackBitmap[i] != 0);
308 extendsBitmap[i] = e;
309 lastSlotFree = !((objectBitmap[i]|extendsBitmap[i]) >> (sizeof(quintptr)*8 - 1));
310 SDUMP() << " new extends =" << binary(e);
311 SDUMP() << " lastSlotFree" << lastSlotFree;
313 o += Chunk::Bits;
314 }
315 return hasUsedSlots;
316}
317
318void Chunk::freeAll(ExecutionEngine *engine)
319{
320 HeapItem *o = realBase();
321 for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) {
322 quintptr toFree = objectBitmap[i];
 323 quintptr e = extendsBitmap[i];
 324 while (toFree) {
325 uint index = qCountTrailingZeroBits(toFree);
 326 quintptr bit = (static_cast<quintptr>(1) << index);
327
328 toFree ^= bit; // mask out freed slot
329
330 // remove all extends slots that have been freed
331 // this is a bit of bit trickery.
332 quintptr mask = (bit << 1) - 1; // create a mask of 1's to the right of and up to the current bit
333 quintptr objmask = e | mask; // or'ing mask with e gives all ones until the end of the current object
334 quintptr result = objmask + 1;
335 Q_ASSERT(qCountTrailingZeroBits(result) - index != 0); // ensure we freed something
336 result |= mask; // ensure we don't clear stuff to the right of the current object
337 e &= result;
338
339 HeapItem *itemToFree = o + index;
 340 Heap::Base *b = *itemToFree;
341 if (b->internalClass->vtable->destroy) {
342 b->internalClass->vtable->destroy(b);
 343 b->_checkIsDestroyed();
 344 }
345#ifdef V4_USE_HEAPTRACK
346 heaptrack_report_free(itemToFree);
 347#endif
348 }
351 objectBitmap[i] = 0;
352 extendsBitmap[i] = e;
353 o += Chunk::Bits;
354 }
355}
356
357void Chunk::resetBlackBits()
358{
359 memset(blackBitmap, 0, sizeof(blackBitmap));
360}
361
362void Chunk::sortIntoBins(HeapItem **bins, uint nBins)
363{
365#if QT_POINTER_SIZE == 8
366 const int start = 0;
367#else
368 const int start = 1;
369#endif
370 uint freeSlots = 0;
372
373 for (int i = start; i < EntriesInBitmap; ++i) {
375#if QT_POINTER_SIZE == 8
376 if (!i)
377 usedSlots |= (static_cast<quintptr>(1) << (HeaderSize/SlotSize)) - 1;
378#endif
380 while (1) {
382 if (index == Bits)
383 break;
385 usedSlots &= ~((static_cast<quintptr>(1) << index) - 1);
386 while (!usedSlots) {
387 if (++i < EntriesInBitmap) {
389 } else {
391 // Overflows to 0 when counting trailing zeroes above in next iteration.
392 // Then, all the bits are zeroes and we break.
394 break;
395 }
397 }
399
401 usedSlots |= (quintptr(1) << index) - 1;
402 uint freeEnd = i*Bits + index;
404 freeSlots += nSlots;
407 uint bin = qMin(nBins - 1, nSlots);
409 bins[bin] = freeItem;
410 }
411 }
413}
414
415HeapItem *BlockAllocator::allocate(size_t size, bool forceAllocation) {
 416 Q_ASSERT((size % Chunk::SlotSize) == 0);
 417 size_t slotsRequired = size >> Chunk::SlotSizeShift;
418
419 if (allocationStats)
421
422 HeapItem **last;
423
424 HeapItem *m;
425
426 if (slotsRequired < NumBins - 1) {
428 if (m) {
430 goto done;
431 }
432 }
433
434 if (nFree >= slotsRequired) {
435 // use bump allocation
437 m = nextFree;
440 goto done;
441 }
442
443 // search last bin for a large enough item
444 last = &freeBins[NumBins - 1];
445 while ((m = *last)) {
447 *last = m->freeData.next; // take it out of the list
448
450 if (remainingSlots == 0)
451 goto done;
452
454 if (remainingSlots > nFree) {
455 if (nFree) {
460 }
463 } else {
468 }
469 goto done;
470 }
471 last = &m->freeData.next;
472 }
473
474 if (slotsRequired < NumBins - 1) {
475 // check if we can split up another slot
476 for (size_t i = slotsRequired + 1; i < NumBins - 1; ++i) {
477 m = freeBins[i];
478 if (m) {
479 freeBins[i] = m->freeData.next; // take it out of the list
486 goto done;
487 }
488 }
489 }
490
491 if (!m) {
492 if (!forceAllocation)
493 return nullptr;
494 if (nFree) {
495 // Save any remaining slots of the current chunk
496 // for later, smaller allocations.
501 }
507 m = nextFree;
510 }
511
512done:
515#ifdef V4_USE_HEAPTRACK
517#endif
518 return m;
519}
520
521void BlockAllocator::sweep()
522{
523 const auto firstEmptyChunkPos = partition(chunks, [this](const std::size_t i) {
524 return chunks.at(i)->sweep(engine);
525 });
527
528 nextFree = nullptr;
529 nFree = 0;
530 memset(freeBins, 0, sizeof(freeBins));
531
533
537 });
538
539 // only free the chunks at the end, so that the sweep() calls cannot
 540 // indirectly access freed memory
544 });
545
547}
548
549void BlockAllocator::freeAll()
550{
551 for (auto c : chunks)
552 c->freeAll(engine);
553 for (auto c : chunks) {
556 }
557}
558
559void BlockAllocator::resetBlackBits()
560{
561 for (auto c : chunks)
562 c->resetBlackBits();
563}
564
565HeapItem *HugeItemAllocator::allocate(size_t size) {
 566 MemorySegment *m = nullptr;
567 Chunk *c = nullptr;
568 if (size >= MemorySegment::SegmentSize/2) {
569 // too large to handle through the ChunkAllocator, let's get our own memory segment
570 size += Chunk::HeaderSize; // space required for the Chunk header
572 size = (size + pageSize - 1) & ~(pageSize - 1); // align to page sizes
573 m = new MemorySegment(size);
574 c = m->allocate(size);
575 } else {
577 }
578 Q_ASSERT(c);
582#ifdef V4_USE_HEAPTRACK
584#endif
585 return c->first();
586}
587
588static void freeHugeChunk(ChunkAllocator *chunkAllocator, const HugeItemAllocator::HugeChunk &c)
589{
590 HeapItem *itemToFree = c.chunk->first();
591 Heap::Base *b = *itemToFree;
592 const VTable *v = b->internalClass->vtable;
593
594 if (v->destroy) {
595 v->destroy(b);
596 b->_checkIsDestroyed();
597 }
598 if (c.segment) {
599 // own memory segment
600 c.segment->free(c.chunk, c.size);
601 delete c.segment;
602 } else {
603 chunkAllocator->free(c.chunk, c.size);
604 }
605#ifdef V4_USE_HEAPTRACK
606 heaptrack_report_free(c.chunk);
607#endif
608}
609
611{
612 auto isBlack = [this] (const HugeChunk &c) {
613 bool b = c.chunk->first()->isBlack();
615 if (!b) {
618 }
619 return !b;
620 };
621
624}
625
627{
628 for (auto c : chunks)
630}
631
639
640namespace {
641using ExtraData = GCStateInfo::ExtraData;
642GCState markStart(GCStateMachine *that, ExtraData &)
643{
644 //Initialize the mark stack
645 that->mm->m_markStack = std::make_unique<MarkStack>(that->mm->engine);
646 that->mm->engine->isGCOngoing = true;
647 return GCState::MarkGlobalObject;
648}
649
650GCState markGlobalObject(GCStateMachine *that, ExtraData &)
651{
652 that->mm->engine->markObjects(that->mm->m_markStack.get());
653 return GCState::MarkJSStack;
654}
655
656GCState markJSStack(GCStateMachine *that, ExtraData &)
657{
658 that->mm->collectFromJSStack(that->mm->markStack());
659 return GCState::InitMarkPersistentValues;
660}
661
662GCState initMarkPersistentValues(GCStateMachine *that, ExtraData &stateData)
663{
664 if (!that->mm->m_persistentValues)
665 return GCState::InitMarkWeakValues; // no persistent values to mark
666 stateData = GCIteratorStorage { that->mm->m_persistentValues->begin() };
667 return GCState::MarkPersistentValues;
668}
669
670enum: int {
671 MarkLoopIterationCount = 1024,
672 MarkLoopIterationCountForDrain = 10240,
673};
674
675bool wasDrainNecessary(MarkStack *ms, QDeadlineTimer deadline)
676{
677 if (ms->remainingBeforeSoftLimit() > MarkLoopIterationCount)
678 return false;
679 // drain
680 ms->drain(deadline);
681 return true;
682}
683
684GCState markPersistentValues(GCStateMachine *that, ExtraData &stateData) {
685 auto markStack = that->mm->markStack();
686 if (wasDrainNecessary(markStack, that->deadline) && that->deadline.hasExpired())
687 return GCState::MarkPersistentValues;
688 PersistentValueStorage::Iterator& it = get<GCIteratorStorage>(stateData).it;
689 // avoid hitting the deadline timer on every item by batching iterations
690 for (int i = 0; i < MarkLoopIterationCount; ++i) {
691 if (!it.p)
692 return GCState::InitMarkWeakValues;
693 if (Managed *m = (*it).as<Managed>())
694 m->mark(markStack);
695 ++it;
696 }
697 return GCState::MarkPersistentValues;
698}
699
700GCState initMarkWeakValues(GCStateMachine *that, ExtraData &stateData)
701{
702 stateData = GCIteratorStorage { that->mm->m_weakValues->begin() };
703 return GCState::MarkWeakValues;
704}
705
706GCState markWeakValues(GCStateMachine *that, ExtraData &stateData)
707{
708 auto markStack = that->mm->markStack();
709 if (wasDrainNecessary(markStack, that->deadline) && that->deadline.hasExpired())
710 return GCState::MarkWeakValues;
711 PersistentValueStorage::Iterator& it = get<GCIteratorStorage>(stateData).it;
712 // avoid hitting the deadline timer on every item by batching iterations
713 for (int i = 0; i < MarkLoopIterationCount; ++i) {
714 if (!it.p)
715 return GCState::MarkDrain;
716 QObjectWrapper *qobjectWrapper = (*it).as<QObjectWrapper>();
717 ++it;
718 if (!qobjectWrapper)
719 continue;
720 QObject *qobject = qobjectWrapper->object();
721 if (!qobject)
722 continue;
723 bool keepAlive = QQmlData::keepAliveDuringGarbageCollection(qobject);
724
725 if (!keepAlive) {
726 if (QObject *parent = qobject->parent()) {
727 while (parent->parent())
728 parent = parent->parent();
729 keepAlive = QQmlData::keepAliveDuringGarbageCollection(parent);
730 }
731 }
732
733 if (keepAlive)
734 qobjectWrapper->mark(that->mm->markStack());
735 }
736 return GCState::MarkWeakValues;
737}
738
739GCState markDrain(GCStateMachine *that, ExtraData &)
740{
741 if (that->deadline.isForever()) {
742 that->mm->markStack()->drain();
743 return GCState::MarkReady;
744 }
745 auto drainState = that->mm->m_markStack->drain(that->deadline);
746 return drainState == MarkStack::DrainState::Complete
747 ? GCState::MarkReady
748 : GCState::MarkDrain;
749}
750
751GCState markReady(GCStateMachine *that, ExtraData &)
752{
753 auto isIncrementalRun = [](GCStateMachine* that){
754 return !that->mm->aggressiveGC && that->timeLimit.count() > 0;
755 };
756
757 if (that->mm->crossValidateIncrementalGC && isIncrementalRun(that))
758 return GCState::CrossValidateIncrementalMarkPhase;
759 return GCState::InitCallDestroyObjects;
760}
761
762GCState crossValidateIncrementalMarkPhase(GCStateMachine *that, ExtraData &)
763{
764 struct {
765 Chunk* operator()(Chunk* chunk) { return chunk; }
766 Chunk* operator()(const HugeItemAllocator::HugeChunk& chunk) { return chunk.chunk; }
767 } getChunk{};
768
769 auto takeBlackBitmap = [&getChunk](auto& allocator, std::vector<quintptr>& storage){
770 for (auto chunk : allocator.chunks) {
771 for (auto& bitmap : getChunk(chunk)->blackBitmap) {
772 storage.push_back(bitmap);
773 }
774 getChunk(chunk)->resetBlackBits();
775 }
776 };
777
778 auto runMarkPhase = [](GCStateMachine* that) {
779 that->reset();
780 that->mm->m_markStack.reset();
781
782 while (that->state != GCStateMachine::MarkReady) {
783 GCStateInfo& stateInfo = that->stateInfoMap[int(that->state)];
784 that->state = stateInfo.execute(that, that->stateData);
785 }
786 };
787
788 auto checkBlackBitmap = [&that, &getChunk](auto& allocator, const std::vector<quintptr>& storedBitmap) {
789 auto reportError = [&allocator, &getChunk, &that](std::size_t chunk_index, std::size_t bitmap_index, uint bit_index){
790 #ifdef QT_BUILD_INTERNAL
791 // If we're collecting errors, don't output the debug message.
792 if (auto errors = that->bitmapErrors) {
793 errors->emplace_back(chunk_index, bitmap_index, bit_index);
794 return;
795 }
796 #endif
797
798 Q_UNUSED(that);
799 auto object = reinterpret_cast<Heap::Base*>(getChunk(allocator.chunks[chunk_index])->realBase() + (bit_index + (bitmap_index*Chunk::Bits)));
800 qDebug() << "Cross Validation Error on chunk" << chunk_index
801 << "on bitmap piece" << bitmap_index << "and bit" << bit_index
802 << ((object->internalClass) ? "With type" : "")
803 << ((object->internalClass) ?
804 Managed::typeToString(Managed::Type(object->internalClass->vtable->type)) : QString());
805 };
806
807 auto original = storedBitmap.begin();
808 for (std::size_t chunk_index = 0; original != storedBitmap.end() && chunk_index < allocator.chunks.size(); ++chunk_index) {
809 for (std::size_t bitmap_index = 0; bitmap_index < Chunk::EntriesInBitmap; ++bitmap_index) {
810 if (auto differences = (~(*original)) & getChunk(allocator.chunks[chunk_index])->blackBitmap[bitmap_index]) {
811 while (differences != 0) {
812 uint bit_index = qCountTrailingZeroBits(differences);
813 reportError(chunk_index, bitmap_index, bit_index);
814 differences ^= quintptr{1} << bit_index;
815 }
816 }
817 ++original;
818 }
819 }
820 };
821
822 #ifdef QT_BUILD_INTERNAL
823 if (auto *errors = that->bitmapErrors)
824 errors->clear();
825 #endif
826
827 std::vector<quintptr> blockBitmap{};
828 blockBitmap.reserve(Chunk::EntriesInBitmap * that->mm->blockAllocator.chunks.size());
829 takeBlackBitmap(that->mm->blockAllocator, blockBitmap);
830
831 std::vector<quintptr> hugeItemBitmap{};
832 hugeItemBitmap.reserve(Chunk::EntriesInBitmap * that->mm->hugeItemAllocator.chunks.size());
833 takeBlackBitmap(that->mm->hugeItemAllocator, hugeItemBitmap);
834
835 std::vector<quintptr> internalClassBitmap{};
836 internalClassBitmap.reserve(Chunk::EntriesInBitmap * that->mm->icAllocator.chunks.size());
837 takeBlackBitmap(that->mm->icAllocator, internalClassBitmap);
838
839 runMarkPhase(that);
840
841 checkBlackBitmap(that->mm->blockAllocator, blockBitmap);
842 checkBlackBitmap(that->mm->hugeItemAllocator, hugeItemBitmap);
843 checkBlackBitmap(that->mm->icAllocator, internalClassBitmap);
844
845 return GCState::InitCallDestroyObjects;
846}
847
848/*! \internal
849 Collects new references from the stack, then drains the mark stack again.
850*/
851void redrain(GCStateMachine *that)
852{
853 that->mm->collectFromJSStack(that->mm->markStack());
854 that->mm->m_markStack->drain();
855}
856
857GCState initCallDestroyObjects(GCStateMachine *that, ExtraData &stateData)
858{
859 // as we don't have a deletion barrier, we need to rescan the stack
860 redrain(that);
861 if (!that->mm->m_weakValues)
862 return GCState::FreeWeakMaps; // no need to call destroy objects
863 stateData = GCIteratorStorage { that->mm->m_weakValues->begin() };
864 return GCState::CallDestroyObjects;
865}
866GCState callDestroyObject(GCStateMachine *that, ExtraData &stateData)
867{
868 PersistentValueStorage::Iterator& it = get<GCIteratorStorage>(stateData).it;
869 // destroyObject might call user code, which really shouldn't call back into the gc
870 auto oldState = std::exchange(that->mm->gcBlocked, QV4::MemoryManager::Blockness::InCriticalSection);
871 auto cleanup = qScopeGuard([&]() {
872 that->mm->gcBlocked = oldState;
873 });
874 // avoid hitting the deadline timer on every item by batching iterations
875 for (int i = 0; i < MarkLoopIterationCount; ++i) {
876 if (!it.p)
877 return GCState::FreeWeakMaps;
878 Managed *m = (*it).managed();
879 ++it;
880 if (!m || m->markBit())
881 continue;
882 // we need to call destroyObject on qobjectwrappers now, so that they can emit the destroyed
883 // signal before we start sweeping the heap
884 if (QObjectWrapper *qobjectWrapper = m->as<QObjectWrapper>())
885 qobjectWrapper->destroyObject(/*lastSweep =*/false);
886 }
887 return GCState::CallDestroyObjects;
888}
889
890void freeWeakMaps(MemoryManager *mm)
891{
892 for (auto [map, lastMap] = std::tuple {mm->weakMaps, &mm->weakMaps }; map; map = map->nextWeakMap) {
893 if (!map->isMarked())
894 continue;
895 map->removeUnmarkedKeys();
896 *lastMap = map;
897 lastMap = &map->nextWeakMap;
898 }
899}
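// Note: the loop above relinks mm->weakMaps so that only maps that survived
// marking remain in the list, and prunes unmarked keys from each surviving map.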
900
901GCState freeWeakMaps(GCStateMachine *that, ExtraData &)
902{
903 freeWeakMaps(that->mm);
904 return GCState::FreeWeakSets;
905}
906
907void freeWeakSets(MemoryManager *mm)
908{
909 for (auto [set, lastSet] = std::tuple {mm->weakSets, &mm->weakSets}; set; set = set->nextWeakSet) {
910
911 if (!set->isMarked())
912 continue;
913 set->removeUnmarkedKeys();
914 *lastSet = set;
915 lastSet = &set->nextWeakSet;
916 }
917}
918
919GCState freeWeakSets(GCStateMachine *that, ExtraData &)
920{
921 freeWeakSets(that->mm);
922 return GCState::HandleQObjectWrappers;
923}
924
925GCState handleQObjectWrappers(GCStateMachine *that, ExtraData &)
926{
927 that->mm->cleanupDeletedQObjectWrappersInSweep();
928 return GCState::DoSweep;
929}
930
931GCState doSweep(GCStateMachine *that, ExtraData &)
932{
933 auto mm = that->mm;
934
935 mm->engine->identifierTable->sweep();
936 mm->blockAllocator.sweep();
937 mm->hugeItemAllocator.sweep();
938 mm->icAllocator.sweep();
939
940 // reset all black bits
941 mm->blockAllocator.resetBlackBits();
942 mm->hugeItemAllocator.resetBlackBits();
943 mm->icAllocator.resetBlackBits();
944
945 mm->usedSlotsAfterLastFullSweep = mm->blockAllocator.usedSlotsAfterLastSweep + mm->icAllocator.usedSlotsAfterLastSweep;
946 mm->gcBlocked = MemoryManager::Unblocked;
947 mm->m_markStack.reset();
948 mm->engine->isGCOngoing = false;
949
950 mm->updateUnmanagedHeapSizeGCLimit();
951
952 return GCState::Invalid;
953}
954
955}
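/* For reference, the handlers above chain as follows (via their return values):
   markStart -> MarkGlobalObject -> MarkJSStack -> InitMarkPersistentValues
   -> MarkPersistentValues -> InitMarkWeakValues -> MarkWeakValues -> MarkDrain
   -> MarkReady -> (CrossValidateIncrementalMarkPhase, only for cross-validated
   incremental runs) -> InitCallDestroyObjects -> CallDestroyObjects -> FreeWeakMaps
   -> FreeWeakSets -> HandleQObjectWrappers -> DoSweep -> Invalid (done). */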
956
957
958MemoryManager::MemoryManager(ExecutionEngine *engine)
 959 : engine(engine)
967 , aggressiveGC(!qEnvironmentVariableIsEmpty("QV4_MM_AGGRESSIVE_GC"))
968 , crossValidateIncrementalGC(qEnvironmentVariableIsSet("QV4_MM_CROSS_VALIDATE_INCREMENTAL_GC"))
971{
972#ifdef V4_USE_VALGRIND
973 VALGRIND_CREATE_MEMPOOL(this, 0, true);
974#endif
976 if (gcStats)
978
980 gcStateMachine->mm = this;
981
983 markStart,
984 false,
985 };
988 false,
989 };
992 false,
993 };
996 false,
997 };
1000 false,
1001 };
1004 false,
1005 };
1008 false,
1009 };
1011 markDrain,
1012 false,
1013 };
1015 markReady,
1016 false,
1017 };
1020 false,
1021 };
1024 false,
1025 };
1028 false,
1029 };
1032 false,
1033 };
1036 true, // ensure that handleQObjectWrappers runs in isolation
1037 };
1040 false,
1041 };
1043 doSweep,
1044 false,
1045 };
1046}
1047
1061
1062Heap::Base *MemoryManager::allocData(std::size_t size)
1063{
1064#ifdef MM_STATS
1065 lastAllocRequestedSlots = size >> Chunk::SlotSizeShift;
 1066 ++allocationCount;
1067#endif
1068
1070 Q_ASSERT(size % Chunk::SlotSize == 0);
1071
1072 HeapItem *m = allocate(&blockAllocator, size);
 1073 memset(m, 0, size);
1074 return *m;
1075}
1076
1077Heap::Object *MemoryManager::allocObjectWithMemberData(const QV4::VTable *vtable, uint nMembers)
1078{
1080 Q_ASSERT(!(size % sizeof(HeapItem)));
1081
1082 Heap::Object *o;
1084 o = static_cast<Heap::Object *>(allocData(size));
1085 } else {
1086 // Allocate both in one go through the block allocator
1088 std::size_t memberSize = align(sizeof(Heap::MemberData) + (nMembers - 1)*sizeof(Value));
1090 Heap::MemberData *m;
1091 if (totalSize > Chunk::DataSize) {
1092 o = static_cast<Heap::Object *>(allocData(size));
1094 } else {
1095 HeapItem *mh = reinterpret_cast<HeapItem *>(allocData(totalSize));
1096 Heap::Base *b = *mh;
1097 o = static_cast<Heap::Object *>(b);
1098 mh += (size >> Chunk::SlotSizeShift);
1099 m = mh->as<Heap::MemberData>();
1100 Chunk *c = mh->chunk();
1101 size_t index = mh - c->realBase();
1104 }
1106 o->memberData.set(engine, m);
1108 m->values.alloc = static_cast<uint>((memberSize - sizeof(Heap::MemberData) + sizeof(Value))/sizeof(Value));
1110 m->init();
1111 }
1112
1113 return o;
1114}
1115
1116MarkStack::MarkStack(ExecutionEngine *engine)
 1117 : m_engine(engine)
1118{
1119 m_base = (Heap::Base **)engine->gcStack->base();
1120 m_top = m_base;
1121 const size_t size = engine->maxGCStackSize() / sizeof(Heap::Base);
1122 m_hardLimit = m_base + size;
 1123 m_softLimit = m_base + size * 3 / 4;
1124}
1125
1126void MarkStack::drain()
1127{
1128 // we're not calling drain(QDeadlineTimer::Forever) as that has higher overhead
1129 while (m_top > m_base) {
1130 Heap::Base *h = pop();
1131 Q_ASSERT(h); // at this point we should only have Heap::Base objects in this area on the stack. If not, weird things might happen.
1134 }
1135}
1136
1137MarkStack::DrainState MarkStack::drain(QDeadlineTimer deadline)
1138{
1139 do {
1140 for (int i = 0; i <= MarkLoopIterationCountForDrain; ++i) {
1141 if (m_top == m_base)
1142 return DrainState::Complete;
1143 Heap::Base *h = pop();
1144 Q_ASSERT(h); // at this point we should only have Heap::Base objects in this area on the stack. If not, weird things might happen.
1147 }
1148 } while (!deadline.hasExpired());
1149 return DrainState::Ongoing;
1150}
1151
1157
1158void MemoryManager::onEventLoop()
1159{
1160 if (engine->inShutdown)
1161 return;
1164 onEventLoop();
1165 }, Qt::QueuedConnection);
1166 return;
1167 }
1168 if (gcStateMachine->inProgress()) {
1170 }
1171}
1172
1173
1178
1179void MemoryManager::sweep(bool lastSweep, ClassDestroyStatsCallback classCountPtr)
1180{
1181
1183 Managed *m = (*it).managed();
1184 if (!m || m->markBit())
1185 continue;
1186 // we need to call destroyObject on qobjectwrappers now, so that they can emit the destroyed
1187 // signal before we start sweeping the heap
1190 }
1191 }
1192
1193 freeWeakMaps(this);
1194 freeWeakSets(this);
1195
1197
1198 if (!lastSweep) {
1200 blockAllocator.sweep(/*classCountPtr*/);
1202 icAllocator.sweep(/*classCountPtr*/);
1203 }
1204
1205 // reset all black bits
1209
1213}
1214
1215/*
1216 \internal
1217 Helper function used in sweep to clean up the (to-be-freed) QObjectWrappers.
 1218 Used both in MemoryManager::sweep and in the corresponding gc statemachine phase.
1219*/
1220void MemoryManager::cleanupDeletedQObjectWrappersInSweep()
1221{
1222 // onDestruction handlers may have accessed other QObject wrappers and reset their value, so ensure
1223 // that they are all set to undefined.
1225 Managed *m = (*it).managed();
1226 if (!m || m->markBit())
1227 continue;
1228 (*it) = Value::undefinedValue();
1229 }
1230
1231 // Now it is time to free the QV4::QObjectWrapper Values; we must check each Value's tag to make sure its object has been destroyed
1233 if (pendingCount) {
1236 for (int i = 0; i < pendingCount; ++i) {
1238 if (v->isUndefined() || v->isEmpty())
1240 else
1242 }
1244 }
1245
1248 if (it.value().isNullOrUndefined())
1250 else
1251 ++it;
1252 }
1253 }
1254}
1255
1256bool MemoryManager::shouldRunGC() const
1257{
1260 return true;
1261 return false;
1262}
1263
1264static size_t dumpBins(BlockAllocator *b, const char *title)
1265{
1266 const QLoggingCategory &stats = lcGcAllocatorStats();
1267 size_t totalSlotMem = 0;
1268 if (title)
1269 qDebug(stats) << "Slot map for" << title << "allocator:";
1270 for (uint i = 0; i < BlockAllocator::NumBins; ++i) {
1271 uint nEntries = 0;
1272 HeapItem *h = b->freeBins[i];
1273 while (h) {
1274 ++nEntries;
1275 totalSlotMem += h->freeData.availableSlots;
1276 h = h->freeData.next;
1277 }
1278 if (title)
1279 qDebug(stats) << " number of entries in slot" << i << ":" << nEntries;
1280 }
1281 SDUMP() << " large slot map";
1282 HeapItem *h = b->freeBins[BlockAllocator::NumBins - 1];
1283 while (h) {
1284 SDUMP() << " " << Qt::hex << (quintptr(h)/32) << h->freeData.availableSlots;
1285 h = h->freeData.next;
1286 }
1287
1288 if (title)
1289 qDebug(stats) << " total mem in bins" << totalSlotMem*Chunk::SlotSize;
1290 return totalSlotMem*Chunk::SlotSize;
1291}
1292
1293/*!
1294 \internal
1295 Precondition: Incremental garbage collection must be currently active
1296 Finishes incremental garbage collection, unless in a critical section
1297 Code entering a critical section is expected to check if we need to
1298 force a gc completion, and to trigger the gc again if necessary
1299 when exiting the critical section.
1300 Returns \c true if the gc cycle completed, false otherwise.
1301 */
1302bool MemoryManager::tryForceGCCompletion()
1303{
1306 << "Tried to force the GC to complete a run but failed due to being in a critical section.";
1307 return false;
1308 }
1309
1310 const bool incrementalGCIsAlreadyRunning = m_markStack != nullptr;
1312
1313 qCDebug(lcGcForcedRuns) << "Forcing the GC to complete a run.";
1314
1316 while (gcStateMachine->inProgress()) {
1318 }
1320 return true;
1321}
1322
1324{
1325 runGC();
1326 const bool incrementalGCStillRunning = m_markStack != nullptr;
1329}
1330
1331void MemoryManager::runGC()
1332{
1333 if (gcBlocked != Unblocked) {
1334 return;
1335 }
1336
1338
1339 if (gcStats) {
1344 }
1345
1346 if (!gcCollectorStats) {
1348 } else {
1351
1355
1357 qDebug(stats) << "========== GC ==========";
1358#ifdef MM_STATS
1359 qDebug(stats) << " Triggered by alloc request of" << lastAllocRequestedSlots << "slots.";
1360 qDebug(stats) << " Allocations since last GC" << allocationCount;
1361 allocationCount = 0;
1362#endif
1364 qDebug(stats) << "Allocated" << allocatedMem << "bytes in" << oldChunks << "chunks";
1365 qDebug(stats) << "Fragmented memory before GC" << (allocatedMem - regularItemsBefore);
1366 dumpBins(&blockAllocator, "Block");
1367 dumpBins(&icAllocator, "InternalClass");
1368
1370 t.start();
1372 qint64 markTime = t.nsecsElapsed()/1000;
1373 t.start();
1376
1378 qDebug(stats) << "triggered by unmanaged heap:";
1379 qDebug(stats) << " old unmanaged heap size:" << oldUnmanagedSize;
1380 qDebug(stats) << " new unmanaged heap:" << unmanagedHeapSize;
1381 qDebug(stats) << " unmanaged heap limit:" << unmanagedHeapSizeGCLimit;
1382 }
1384 + dumpBins(&icAllocator, "InternalClass");
1385 qDebug(stats) << "Marked objects in" << markTime << "us.";
1386
1387 qDebug(stats) << "Regular item memory before GC:" << regularItemsBefore;
1388 qDebug(stats) << "Regular item memory after GC:" << regularItemsAfter;
1389 qDebug(stats) << "Freed up bytes :" << (regularItemsBefore - regularItemsAfter);
1390 qDebug(stats) << "Freed up chunks :" << (oldChunks - blockAllocator.chunks.size());
1393 if (lost)
1394 qDebug(stats) << "!!!!!!!!!!!!!!!!!!!!! LOST MEM:" << lost << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!";
1396 qDebug(stats) << "Large item memory before GC:" << largeItemsBefore;
1397 qDebug(stats) << "Large item memory after GC:" << largeItemsAfter;
1398 qDebug(stats) << "Large item memory freed up:" << (largeItemsBefore - largeItemsAfter);
1399 }
1400
1401 qDebug(stats) << "======== End GC ========";
1402 }
1403
1404 if (gcStats) {
1407 }
1408}
1409
1414
1419
1424
1425void MemoryManager::updateUnmanagedHeapSizeGCLimit()
1426{
1428 // more than 75% full, raise limit
1430 unmanagedHeapSize) * 2;
1431 } else if (unmanagedHeapSize * 4 <= unmanagedHeapSizeGCLimit) {
1432 // less than 25% full, lower limit
1435 }
1436
1437 if (aggressiveGC && !engine->inShutdown) {
1438 // ensure we don't 'lose' any memory,
 1439 // but not during shutdown, because then we skip parts of sweep
1440 // and use freeAll instead
1442 == blockAllocator.usedMem() + dumpBins(&blockAllocator, nullptr));
1444 == icAllocator.usedMem() + dumpBins(&icAllocator, nullptr));
1445 }
1446}
1447
1453
1459
1460MemoryManager::~MemoryManager()
1461{
1462 delete m_persistentValues;
1463 dumpStats();
1464
1465 // do one last non-incremental sweep to clean up C++ objects
1466 // first, abort any on-going incremental gc operation
1467 setGCTimeLimit(-1);
1468 if (engine->isGCOngoing) {
1469 engine->isGCOngoing = false;
1475 }
1476 // then sweep
1477 sweep(/*lastSweep*/true);
1478
1482
1483 delete m_weakValues;
1484#ifdef V4_USE_VALGRIND
1486#endif
1487 delete chunkAllocator;
1488}
1489
1490
1491void MemoryManager::dumpStats() const
1492{
1493 if (!gcStats)
1494 return;
1495
1496 const QLoggingCategory &stats = lcGcStats();
1497 qDebug(stats) << "Qml GC memory allocation statistics:";
1498 qDebug(stats) << "Total memory allocated:" << statistics.maxAllocatedMem;
1499 qDebug(stats) << "Max memory used before a GC run:" << statistics.maxUsedBeforeGC;
1500 qDebug(stats) << "Max memory used after a GC run:" << statistics.maxUsedAfterGC;
1501 qDebug(stats) << "Requests for different item sizes:";
1502 for (int i = 1; i < BlockAllocator::NumBins - 1; ++i)
1503 qDebug(stats) << " <" << (i << Chunk::SlotSizeShift) << " bytes: " << statistics.allocations[i];
1504 qDebug(stats) << " >=" << ((BlockAllocator::NumBins - 1) << Chunk::SlotSizeShift) << " bytes: " << statistics.allocations[BlockAllocator::NumBins - 1];
1505}
1506
1507void MemoryManager::collectFromJSStack(MarkStack *markStack) const
1508{
1511 while (v < top) {
1512 Managed *m = v->managed();
1513 if (m) {
1514 Q_ASSERT(m->inUse());
1515 // Skip pointers to already freed objects, they are bogus as well
1516 m->mark(markStack);
1517 }
1518 ++v;
1519 }
1520
1521 for (auto *frame = engine->currentStackFrame; frame; frame = frame->parentFrame()) {
1522 if (!frame->isMetaTypesFrame())
1523 continue;
1524
1526 = static_cast<const MetaTypesStackFrame *>(frame)->locals()) {
1527 // Actual AOT-compiled functions initialize the locals first thing when they
1528 // are called. However, the ScopedStackFrame has no locals, but still uses a
1529 // MetaTypesStackFrame.
1531 }
1532 }
1533}
1534
1537{
1538 // base assumption: target 60fps, use at most 1/3 of time for gc
1539 // unless overridden by env variable
1540 bool ok = false;
1541 auto envTimeLimit = qEnvironmentVariableIntValue("QV4_GC_TIMELIMIT", &ok );
1542 if (!ok)
1543 envTimeLimit = (1000 / 60) / 3;
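 // i.e. (1000 ms / 60 frames) / 3 = 5 ms of GC work per increment by default (integer division)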
1544 if (envTimeLimit > 0)
1546 else
1547 timeLimit = std::chrono::milliseconds { 0 };
1548}
1549
1550static void logStepTiming(GCStateMachine* that, quint64 timing) {
1551 auto registerTimingWithResetOnOverflow = [](
1552 GCStateMachine::StepTiming& storage, quint64 timing, GCState state
1553 ) {
1554 auto wouldOverflow = [](quint64 lhs, quint64 rhs) {
1555 return rhs > 0 && lhs > std::numeric_limits<quint64>::max() - rhs;
1556 };
1557
1558 if (wouldOverflow(storage.rolling_sum, timing) || wouldOverflow(storage.count, 1)) {
1559 qDebug(lcGcStepExecution) << "Resetting timings storage for"
1560 << QMetaEnum::fromType<GCState>().key(state) << "due to overflow.";
1561 storage.rolling_sum = timing;
1562 storage.count = 1;
1563 } else {
1564 storage.rolling_sum += timing;
1565 storage.count += 1;
1566 }
1567 };
1568
1569 GCStateMachine::StepTiming& storage = that->executionTiming[that->state];
1570 registerTimingWithResetOnOverflow(storage, timing, that->state);
1571
1572 qDebug(lcGcStepExecution) << "Performed" << QMetaEnum::fromType<GCState>().key(that->state)
1573 << "in" << timing << "microseconds";
1574 qDebug(lcGcStepExecution) << "This step was performed" << storage.count << " time(s), executing in"
1575 << (storage.rolling_sum / storage.count) << "microseconds on average.";
1576}
1577
1578static GCState executeWithLoggingIfEnabled(GCStateMachine* that, GCStateInfo& stateInfo) {
1579 if (!that->collectTimings)
1580 return stateInfo.execute(that, that->stateData);
1581
1582 QElapsedTimer timer;
1583 timer.start();
1584 GCState next = stateInfo.execute(that, that->stateData);
1585 logStepTiming(that, timer.nsecsElapsed()/1000);
1586 return next;
1587}
1588
1589void GCStateMachine::transition() {
 1590 if (timeLimit.count() > 0) {
1592 bool deadlineExpired = false;
1593 while (!(deadlineExpired = deadline.hasExpired()) && state != GCState::Invalid) {
1595 /* initCallDestroyObjects is the last action that drains the mark
 1596 stack by default. However, our write barrier might still put
 1597 objects on the markStack which reference other objects,
 1598 especially when we call user code triggered by Component.onDestruction,
 1599 but also when we run into a timeout.
 1600 We don't redrain before InitCallDestroyObjects, as that would
 1601 potentially lead to useless busy-work (e.g., if the last references
 1602 to objects are removed while the mark phase is running).
 1603 */
1604 redrain(this);
1605 }
1606 qCDebug(lcGcStateTransitions) << "Preparing to execute the"
1607 << QMetaEnum::fromType<GCState>().key(state) << "state";
1610 qCDebug(lcGcStateTransitions) << "Transitioning to the"
1611 << QMetaEnum::fromType<GCState>().key(state) << "state";
1613 break;
1614 }
1615 if (deadlineExpired)
1617 if (state != GCState::Invalid)
1619 mm->onEventLoop();
1620 }, Qt::QueuedConnection);
1621 } else {
1623 while (state != GCState::Invalid) {
1624 qCDebug(lcGcStateTransitions) << "Preparing to execute the"
1625 << QMetaEnum::fromType<GCState>().key(state) << "state";
1628 qCDebug(lcGcStateTransitions) << "Transitioning to the"
1629 << QMetaEnum::fromType<GCState>().key(state) << "state";
1630 }
1631 }
1632}
1633
1646
1647} // namespace QV4
1648
1649QT_END_NAMESPACE
1650
1651#include "moc_qv4mm_p.cpp"