Qt
Internal/contributor docs for the Qt SDK. Note: these are NOT the official API docs; those can be found at https://doc.qt.io/.
qv4mm.cpp
// Copyright (C) 2021 The Qt Company Ltd.
// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only

#include "qv4engine_p.h"
#include "qv4object_p.h"
#include "qv4mm_p.h"
#include <QtCore/qalgorithms.h>
#include <QtCore/private/qnumeric_p.h>
#include <QtCore/qloggingcategory.h>
#include <private/qv4alloca_p.h>
#include <qqmlengine.h>
#include "PageReservation.h"
#include "PageAllocation.h"

#include <QElapsedTimer>
#include <QMap>
#include <QScopedValueRollback>

#include <iostream>
#include <cstdlib>
#include <algorithm>
#include "qv4profiling_p.h"
#include "qv4mapobject_p.h"
#include "qv4setobject_p.h"

#include <chrono>

//#define MM_STATS

#if !defined(MM_STATS) && !defined(QT_NO_DEBUG)
#define MM_STATS
#endif

#if MM_DEBUG
#define DEBUG qDebug() << "MM:"
#else
#define DEBUG if (1) ; else qDebug() << "MM:"
#endif

#ifdef V4_USE_VALGRIND
#include <valgrind/valgrind.h>
#include <valgrind/memcheck.h>
#endif

#ifdef V4_USE_HEAPTRACK
#include <heaptrack_api.h>
#endif

#if OS(QNX)
#include <sys/storage.h> // __tls()
#endif

#if USE(PTHREADS) && HAVE(PTHREAD_NP_H)
#include <pthread_np.h>
#endif

Q_LOGGING_CATEGORY(lcGcStats, "qt.qml.gc.statistics")
Q_DECLARE_LOGGING_CATEGORY(lcGcStats)
Q_LOGGING_CATEGORY(lcGcAllocatorStats, "qt.qml.gc.allocatorStats")
Q_DECLARE_LOGGING_CATEGORY(lcGcAllocatorStats)

using namespace WTF;

namespace QV4 {

enum {
    MinSlotsGCLimit = 8192*5,
    GCOverallocation = 200 /* Max overallocation by the GC in % */
};

struct MemorySegment {
    enum {
#ifdef Q_OS_RTEMS
        NumChunks = sizeof(quint64),
#else
        NumChunks = 8*sizeof(quint64),
#endif
        SegmentSize = NumChunks*Chunk::ChunkSize,
    };

    MemorySegment(size_t size)
    {
        size += Chunk::ChunkSize; // make sure we can get enough 64k alignment memory
        if (size < SegmentSize)
            size = SegmentSize;

        pageReservation = PageReservation::reserve(size, OSAllocator::JSGCHeapPages);
        base = reinterpret_cast<Chunk *>((reinterpret_cast<quintptr>(pageReservation.base()) + Chunk::ChunkSize - 1) & ~(Chunk::ChunkSize - 1));
        nChunks = NumChunks;
        availableBytes = size - (reinterpret_cast<quintptr>(base) - reinterpret_cast<quintptr>(pageReservation.base()));
        if (availableBytes < SegmentSize)
            --nChunks;
    }
    MemorySegment(MemorySegment &&other) {
        qSwap(pageReservation, other.pageReservation);
        qSwap(base, other.base);
        qSwap(allocatedMap, other.allocatedMap);
        qSwap(availableBytes, other.availableBytes);
        qSwap(nChunks, other.nChunks);
    }

    ~MemorySegment() {
        if (base)
            pageReservation.deallocate();
    }

    void setBit(size_t index) {
        Q_ASSERT(index < nChunks);
        quint64 bit = static_cast<quint64>(1) << index;
//        qDebug() << "    setBit" << hex << index << (index & (Bits - 1)) << bit;
        allocatedMap |= bit;
    }
    void clearBit(size_t index) {
        Q_ASSERT(index < nChunks);
        quint64 bit = static_cast<quint64>(1) << index;
//        qDebug() << "    clearBit" << hex << index << (index & (Bits - 1)) << bit;
        allocatedMap &= ~bit;
    }
    bool testBit(size_t index) const {
        Q_ASSERT(index < nChunks);
        quint64 bit = static_cast<quint64>(1) << index;
        return (allocatedMap & bit);
    }

    Chunk *allocate(size_t size);
    void free(Chunk *chunk, size_t size) {
        DEBUG << "freeing chunk" << chunk;
        size_t index = static_cast<size_t>(chunk - base);
        size_t end = qMin(static_cast<size_t>(NumChunks), index + (size - 1)/Chunk::ChunkSize + 1);
        while (index < end) {
            Q_ASSERT(testBit(index));
            clearBit(index);
            ++index;
        }

        size_t pageSize = WTF::pageSize();
        size = (size + pageSize - 1) & ~(pageSize - 1);
#if !defined(Q_OS_LINUX) && !defined(Q_OS_WIN)
        // Linux and Windows zero out pages that have been decommitted and get committed again.
        // Unfortunately that's not true on other OSes (e.g. BSD based ones), so zero out the
        // memory before decommit, so that we can be sure that all chunks we allocate will be
        // zero initialized.
        memset(chunk, 0, size);
#endif
        pageReservation.decommit(chunk, size);
    }

    bool contains(Chunk *c) const {
        return c >= base && c < base + nChunks;
    }

    PageReservation pageReservation;
    Chunk *base = nullptr;
    quint64 allocatedMap = 0;
    size_t availableBytes = 0;
    uint nChunks = 0;
};
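
// Illustration: a segment reserves address space for up to NumChunks chunks
// (64 on most platforms, 8 on RTEMS) and tracks which of them are committed
// in the single quint64 allocatedMap, one bit per chunk. A request spanning
// three chunks looks for three clear bits in a row, commits those pages and
// sets the bits; free() clears the bits and decommits the pages again, so
// the address space stays reserved while the memory is returned to the OS.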

Chunk *MemorySegment::allocate(size_t size)
{
    if (!allocatedMap && size >= SegmentSize) {
        // chunk allocated for one huge allocation
        Q_ASSERT(availableBytes >= size);
        pageReservation.commit(base, size);
        allocatedMap = ~static_cast<quint64>(0);
        return base;
    }
    size_t requiredChunks = (size + sizeof(Chunk) - 1)/sizeof(Chunk);
    uint sequence = 0;
    Chunk *candidate = nullptr;
    for (uint i = 0; i < nChunks; ++i) {
        if (!testBit(i)) {
            if (!candidate)
                candidate = base + i;
            ++sequence;
        } else {
            candidate = nullptr;
            sequence = 0;
        }
        if (sequence == requiredChunks) {
            pageReservation.commit(candidate, size);
            for (uint i = 0; i < requiredChunks; ++i)
                setBit(candidate - base + i);
            DEBUG << "allocated chunk " << candidate << Qt::hex << size;

            return candidate;
        }
    }
    return nullptr;
}

struct ChunkAllocator {
    size_t requiredChunkSize(size_t size) {
        size += Chunk::HeaderSize; // space required for the Chunk header
        size_t pageSize = WTF::pageSize();
        size = (size + pageSize - 1) & ~(pageSize - 1); // align to page sizes
        if (size < Chunk::ChunkSize)
            size = Chunk::ChunkSize;
        return size;
    }
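
    // Example: with 4 KiB pages, a request for 60000 bytes becomes
    // 60000 + Chunk::HeaderSize, is rounded up to the next page boundary, and
    // is then raised to Chunk::ChunkSize (64 KiB), so no chunk allocation is
    // ever smaller than one full chunk; only huge items exceed that size.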

    Chunk *allocate(size_t size = 0);
    void free(Chunk *chunk, size_t size = 0);

    std::vector<MemorySegment> memorySegments;
};

Chunk *ChunkAllocator::allocate(size_t size)
{
    size = requiredChunkSize(size);
    for (auto &m : memorySegments) {
        if (~m.allocatedMap) {
            Chunk *c = m.allocate(size);
            if (c)
                return c;
        }
    }

    // allocate a new segment
    memorySegments.push_back(MemorySegment(size));
    Chunk *c = memorySegments.back().allocate(size);
    Q_ASSERT(c);
    return c;
}

void ChunkAllocator::free(Chunk *chunk, size_t size)
{
    size = requiredChunkSize(size);
    for (auto &m : memorySegments) {
        if (m.contains(chunk)) {
            m.free(chunk, size);
            return;
        }
    }
    Q_ASSERT(false);
}

#ifdef DUMP_SWEEP
QString binary(quintptr n) {
    QString s = QString::number(n, 2);
    while (s.length() < 64)
        s.prepend(QChar::fromLatin1('0'));
    return s;
}
#define SDUMP qDebug
#else
QString binary(quintptr) { return QString(); }
#define SDUMP if (1) ; else qDebug
#endif

// Stores a classname -> freed count mapping.
typedef QHash<const char*, int> MMStatsHash;
Q_GLOBAL_STATIC(MMStatsHash, freedObjectStatsGlobal)

// This indirection avoids sticking QHash code in each of the call sites, which
// shaves off some instructions in the case that it's unused.
static void increaseFreedCountForClass(const char *className)
{
    (*freedObjectStatsGlobal())[className]++;
}

//bool Chunk::sweep(ClassDestroyStatsCallback classCountPtr)
bool Chunk::sweep(ExecutionEngine *engine)
{
    bool hasUsedSlots = false;
    SDUMP() << "sweeping chunk" << this;
    HeapItem *o = realBase();
    bool lastSlotFree = false;
    for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) {
        quintptr toFree = objectBitmap[i] ^ blackBitmap[i];
        Q_ASSERT((toFree & objectBitmap[i]) == toFree); // check all black objects are marked as being used
        quintptr e = extendsBitmap[i];
        SDUMP() << "   index=" << i;
        SDUMP() << "      toFree  =" << binary(toFree);
        SDUMP() << "      black   =" << binary(blackBitmap[i]);
        SDUMP() << "      object  =" << binary(objectBitmap[i]);
        SDUMP() << "      extends =" << binary(e);
        if (lastSlotFree)
            e &= (e + 1); // clear all lowest extent bits
        while (toFree) {
            uint index = qCountTrailingZeroBits(toFree);
            quintptr bit = (static_cast<quintptr>(1) << index);

            toFree ^= bit; // mask out freed slot
            // DEBUG << "       index" << hex << index << toFree;

            // remove all extends slots that have been freed
            // this is a bit of bit trickery.
            quintptr mask = (bit << 1) - 1; // create a mask of 1's to the right of and up to the current bit
            quintptr objmask = e | mask; // or'ing mask with e gives all ones until the end of the current object
            quintptr result = objmask + 1;
            Q_ASSERT(qCountTrailingZeroBits(result) - index != 0); // ensure we freed something
            result |= mask; // ensure we don't clear stuff to the right of the current object
            e &= result;

            HeapItem *itemToFree = o + index;
            Heap::Base *b = *itemToFree;
            const VTable *v = b->internalClass->vtable;
//            if (Q_UNLIKELY(classCountPtr))
//                classCountPtr(v->className);
            if (v->destroy) {
                v->destroy(b);
                b->_checkIsDestroyed();
            }
#ifdef V4_USE_HEAPTRACK
            heaptrack_report_free(itemToFree);
#endif
        }
        Q_V4_PROFILE_DEALLOC(engine, qPopulationCount((objectBitmap[i] | extendsBitmap[i])
                                                      - (blackBitmap[i] | e)) * Chunk::SlotSize,
                             Profiling::SmallItem);
        objectBitmap[i] = blackBitmap[i];
        hasUsedSlots |= (blackBitmap[i] != 0);
        extendsBitmap[i] = e;
        lastSlotFree = !((objectBitmap[i]|extendsBitmap[i]) >> (sizeof(quintptr)*8 - 1));
        SDUMP() << "   new extends =" << binary(e);
        SDUMP() << "   lastSlotFree" << lastSlotFree;
        Q_ASSERT((objectBitmap[i] & extendsBitmap[i]) == 0);
        o += Chunk::Bits;
    }
    // DEBUG << "swept chunk" << this << "freed" << slotsFreed << "slots.";
    return hasUsedSlots;
}
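
// Worked example of the extent-clearing bit trick used in sweep() above and
// in freeAll() below, on an illustrative 8-bit bitmap (the real bitmaps are
// quintptr-wide). Suppose the freed object sits in slot 2 and extends over
// slots 3-4, while another object's extents occupy slots 6-7:
//   e       = 0b11011000   extents bitmap
//   bit     = 0b00000100   slot of the freed object
//   mask    = 0b00000111   ones up to and including the freed slot
//   objmask = 0b11011111   e | mask: ones to the end of the freed object
//   result  = 0b11100000   objmask + 1: the carry wipes the freed extents
//   result |= mask      -> 0b11100111  keep everything right of the object
//   e &= result         -> 0b11000000  only the other object's extents remain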

void Chunk::freeAll(ExecutionEngine *engine)
{
    // DEBUG << "sweeping chunk" << this << (*freeList);
    HeapItem *o = realBase();
    for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) {
        quintptr toFree = objectBitmap[i];
        quintptr e = extendsBitmap[i];
        // DEBUG << hex << "   index=" << i << toFree;
        while (toFree) {
            uint index = qCountTrailingZeroBits(toFree);
            quintptr bit = (static_cast<quintptr>(1) << index);

            toFree ^= bit; // mask out freed slot
            // DEBUG << "       index" << hex << index << toFree;

            // remove all extends slots that have been freed
            // this is a bit of bit trickery.
            quintptr mask = (bit << 1) - 1; // create a mask of 1's to the right of and up to the current bit
            quintptr objmask = e | mask; // or'ing mask with e gives all ones until the end of the current object
            quintptr result = objmask + 1;
            Q_ASSERT(qCountTrailingZeroBits(result) - index != 0); // ensure we freed something
            result |= mask; // ensure we don't clear stuff to the right of the current object
            e &= result;

            HeapItem *itemToFree = o + index;
            Heap::Base *b = *itemToFree;
            if (b->internalClass->vtable->destroy) {
                b->internalClass->vtable->destroy(b);
                b->_checkIsDestroyed();
            }
#ifdef V4_USE_HEAPTRACK
            heaptrack_report_free(itemToFree);
#endif
        }
        Q_V4_PROFILE_DEALLOC(engine, (qPopulationCount(objectBitmap[i]|extendsBitmap[i])
                                      - qPopulationCount(e)) * Chunk::SlotSize, Profiling::SmallItem);
        objectBitmap[i] = 0;
        extendsBitmap[i] = e;
        o += Chunk::Bits;
    }
    // DEBUG << "swept chunk" << this << "freed" << slotsFreed << "slots.";
}

void Chunk::resetBlackBits()
{
    memset(blackBitmap, 0, sizeof(blackBitmap));
}

void Chunk::sortIntoBins(HeapItem **bins, uint nBins)
{
//    qDebug() << "sortIntoBins:";
    HeapItem *base = realBase();
#if QT_POINTER_SIZE == 8
    const int start = 0;
#else
    const int start = 1;
#endif
#ifndef QT_NO_DEBUG
    uint freeSlots = 0;
    uint allocatedSlots = 0;
#endif
    for (int i = start; i < EntriesInBitmap; ++i) {
        quintptr usedSlots = (objectBitmap[i]|extendsBitmap[i]);
#if QT_POINTER_SIZE == 8
        if (!i)
            usedSlots |= (static_cast<quintptr>(1) << (HeaderSize/SlotSize)) - 1;
#endif
#ifndef QT_NO_DEBUG
        allocatedSlots += qPopulationCount(usedSlots);
//        qDebug() << hex << "   i=" << i << "used=" << usedSlots;
#endif
        while (1) {
            uint index = qCountTrailingZeroBits(usedSlots + 1);
            if (index == Bits)
                break;
            uint freeStart = i*Bits + index;
            usedSlots &= ~((static_cast<quintptr>(1) << index) - 1);
            while (!usedSlots) {
                if (++i < EntriesInBitmap) {
                    usedSlots = (objectBitmap[i]|extendsBitmap[i]);
                } else {
                    Q_ASSERT(i == EntriesInBitmap);
                    // Overflows to 0 when counting trailing zeroes above in next iteration.
                    // Then, all the bits are zeroes and we break.
                    usedSlots = std::numeric_limits<quintptr>::max();
                    break;
                }
#ifndef QT_NO_DEBUG
                allocatedSlots += qPopulationCount(usedSlots);
//                qDebug() << hex << "   i=" << i << "used=" << usedSlots;
#endif
            }
            HeapItem *freeItem = base + freeStart;

            index = qCountTrailingZeroBits(usedSlots);
            usedSlots |= (quintptr(1) << index) - 1;
            uint freeEnd = i*Bits + index;
            uint nSlots = freeEnd - freeStart;
#ifndef QT_NO_DEBUG
//            qDebug() << hex << "   got free slots from" << freeStart << "to" << freeEnd << "n=" << nSlots << "usedSlots=" << usedSlots;
            freeSlots += nSlots;
#endif
            Q_ASSERT(freeEnd > freeStart && freeEnd <= NumSlots);
            freeItem->freeData.availableSlots = nSlots;
            uint bin = qMin(nBins - 1, nSlots);
            freeItem->freeData.next = bins[bin];
            bins[bin] = freeItem;
        }
    }
#ifndef QT_NO_DEBUG
    Q_ASSERT(freeSlots + allocatedSlots == (EntriesInBitmap - start) * 8 * sizeof(quintptr));
#endif
}
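
// Example: a run of 6 consecutive free slots is linked into bins[6], while
// any run of nBins - 1 slots or longer ends up in the last bin. allocate()
// below can therefore serve small requests from an exactly matching bin and
// only needs a linear search within the last, unsorted bin.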

HeapItem *BlockAllocator::allocate(size_t size, bool forceAllocation) {
    Q_ASSERT((size % Chunk::SlotSize) == 0);
    size_t slotsRequired = size >> Chunk::SlotSizeShift;

    if (allocationStats)
        ++allocationStats[binForSlots(slotsRequired)];

    HeapItem **last;

    HeapItem *m;

    if (slotsRequired < NumBins - 1) {
        m = freeBins[slotsRequired];
        if (m) {
            freeBins[slotsRequired] = m->freeData.next;
            goto done;
        }
    }

    if (nFree >= slotsRequired) {
        // use bump allocation
        Q_ASSERT(nextFree);
        m = nextFree;
        nextFree += slotsRequired;
        nFree -= slotsRequired;
        goto done;
    }

    // DEBUG << "No matching bin found for item" << size << bin;
    // search last bin for a large enough item
    last = &freeBins[NumBins - 1];
    while ((m = *last)) {
        if (m->freeData.availableSlots >= slotsRequired) {
            *last = m->freeData.next; // take it out of the list

            size_t remainingSlots = m->freeData.availableSlots - slotsRequired;
            // DEBUG << "found large free slots of size" << m->freeData.availableSlots << m << "remaining" << remainingSlots;
            if (remainingSlots == 0)
                goto done;

            HeapItem *remainder = m + slotsRequired;
            if (remainingSlots > nFree) {
                if (nFree) {
                    size_t bin = binForSlots(nFree);
                    nextFree->freeData.next = freeBins[bin];
                    nextFree->freeData.availableSlots = nFree;
                    freeBins[bin] = nextFree;
                }
                nextFree = remainder;
                nFree = remainingSlots;
            } else {
                remainder->freeData.availableSlots = remainingSlots;
                size_t binForRemainder = binForSlots(remainingSlots);
                remainder->freeData.next = freeBins[binForRemainder];
                freeBins[binForRemainder] = remainder;
            }
            goto done;
        }
        last = &m->freeData.next;
    }

    if (slotsRequired < NumBins - 1) {
        // check if we can split up another slot
        for (size_t i = slotsRequired + 1; i < NumBins - 1; ++i) {
            m = freeBins[i];
            if (m) {
                freeBins[i] = m->freeData.next; // take it out of the list
//                qDebug() << "got item" << slotsRequired << "from slot" << i;
                size_t remainingSlots = i - slotsRequired;
                Q_ASSERT(remainingSlots < NumBins - 1);
                HeapItem *remainder = m + slotsRequired;
                remainder->freeData.availableSlots = remainingSlots;
                remainder->freeData.next = freeBins[remainingSlots];
                freeBins[remainingSlots] = remainder;
                goto done;
            }
        }
    }

    if (!m) {
        if (!forceAllocation)
            return nullptr;
        if (nFree) {
            // Save any remaining slots of the current chunk
            // for later, smaller allocations.
            size_t bin = binForSlots(nFree);
            nextFree->freeData.next = freeBins[bin];
            nextFree->freeData.availableSlots = nFree;
            freeBins[bin] = nextFree;
        }
        Chunk *newChunk = chunkAllocator->allocate();
        Q_V4_PROFILE_ALLOC(engine, Chunk::DataSize, Profiling::HeapPage);
        chunks.push_back(newChunk);
        nextFree = newChunk->first();
        nFree = Chunk::AvailableSlots;
        m = nextFree;
        nextFree += slotsRequired;
        nFree -= slotsRequired;
    }

done:
    m->setAllocatedSlots(slotsRequired);
    Q_V4_PROFILE_ALLOC(engine, slotsRequired * Chunk::SlotSize, Profiling::SmallItem);
#ifdef V4_USE_HEAPTRACK
    heaptrack_report_alloc(m, slotsRequired * Chunk::SlotSize);
#endif
    // DEBUG << "   " << hex << m->chunk() << m->chunk()->objectBitmap[0] << m->chunk()->extendsBitmap[0] << (m - m->chunk()->realBase());
    return m;
}
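
// The fallback order above, roughly: (1) the exact-size free bin, (2) bump
// allocation from the tail of the current chunk, (3) the first large-enough
// entry in the last bin, splitting off and re-filing the remainder, (4)
// splitting a larger small bin, and only then (5) a fresh chunk from the
// ChunkAllocator (if forceAllocation allows it). For instance, a 4-slot
// request served from a 10-slot entry returns 4 slots and files the
// remaining 6 back into the matching smaller bin.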

void BlockAllocator::sweep()
{
    nextFree = nullptr;
    nFree = 0;
    memset(freeBins, 0, sizeof(freeBins));

//    qDebug() << "BlockAlloc: sweep";
    usedSlotsAfterLastSweep = 0;

    auto firstEmptyChunk = std::partition(chunks.begin(), chunks.end(), [this](Chunk *c) {
        return c->sweep(engine);
    });

    std::for_each(chunks.begin(), firstEmptyChunk, [this](Chunk *c) {
        c->sortIntoBins(freeBins, NumBins);
        usedSlotsAfterLastSweep += c->nUsedSlots();
    });

    // only free the chunks at the end to avoid that the sweep() calls indirectly
    // access freed memory
    std::for_each(firstEmptyChunk, chunks.end(), [this](Chunk *c) {
        Q_V4_PROFILE_DEALLOC(engine, Chunk::DataSize, Profiling::HeapPage);
        chunkAllocator->free(c);
    });

    chunks.erase(firstEmptyChunk, chunks.end());
}

void BlockAllocator::freeAll()
{
    for (auto c : chunks)
        c->freeAll(engine);
    for (auto c : chunks) {
        Q_V4_PROFILE_DEALLOC(engine, Chunk::DataSize, Profiling::HeapPage);
        chunkAllocator->free(c);
    }
}

void BlockAllocator::resetBlackBits()
{
    for (auto c : chunks)
        c->resetBlackBits();
}

HeapItem *HugeItemAllocator::allocate(size_t size) {
    MemorySegment *m = nullptr;
    Chunk *c = nullptr;
    if (size >= MemorySegment::SegmentSize/2) {
        // too large to handle through the ChunkAllocator, let's get our own memory segment
        size += Chunk::HeaderSize; // space required for the Chunk header
        size_t pageSize = WTF::pageSize();
        size = (size + pageSize - 1) & ~(pageSize - 1); // align to page sizes
        m = new MemorySegment(size);
        c = m->allocate(size);
    } else {
        c = chunkAllocator->allocate(size);
    }
    Q_ASSERT(c);
    chunks.push_back(HugeChunk{m, c, size});
    Chunk::setBit(c->objectBitmap, c->first() - c->realBase());
    Q_V4_PROFILE_ALLOC(engine, size, Profiling::LargeItem);
#ifdef V4_USE_HEAPTRACK
    heaptrack_report_alloc(c, size);
#endif
    return c->first();
}
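
// Example: with the usual 64-chunk segments of 64 KiB chunks (4 MiB), any
// allocation of SegmentSize / 2 = 2 MiB or more gets a dedicated
// MemorySegment of its own; a 100 KiB allocation still goes through the
// shared ChunkAllocator and merely spans two chunks.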

static void freeHugeChunk(ChunkAllocator *chunkAllocator, const HugeItemAllocator::HugeChunk &c, ClassDestroyStatsCallback classCountPtr)
{
    HeapItem *itemToFree = c.chunk->first();
    Heap::Base *b = *itemToFree;
    const VTable *v = b->internalClass->vtable;
    if (Q_UNLIKELY(classCountPtr))
        classCountPtr(v->className);

    if (v->destroy) {
        v->destroy(b);
        b->_checkIsDestroyed();
    }
    if (c.segment) {
        // own memory segment
        c.segment->free(c.chunk, c.size);
        delete c.segment;
    } else {
        chunkAllocator->free(c.chunk, c.size);
    }
#ifdef V4_USE_HEAPTRACK
    heaptrack_report_free(c.chunk);
#endif
}

void HugeItemAllocator::sweep(ClassDestroyStatsCallback classCountPtr)
{
    auto isBlack = [this, classCountPtr] (const HugeChunk &c) {
        bool b = c.chunk->first()->isBlack();
        Chunk::clearBit(c.chunk->blackBitmap, c.chunk->first() - c.chunk->realBase());
        if (!b) {
            Q_V4_PROFILE_DEALLOC(engine, c.size, Profiling::LargeItem);
            freeHugeChunk(chunkAllocator, c, classCountPtr);
        }
        return !b;
    };

    auto newEnd = std::remove_if(chunks.begin(), chunks.end(), isBlack);
    chunks.erase(newEnd, chunks.end());
}

void HugeItemAllocator::resetBlackBits()
{
    for (auto c : chunks)
        Chunk::clearBit(c.chunk->blackBitmap, c.chunk->first() - c.chunk->realBase());
}

void HugeItemAllocator::freeAll()
{
    for (auto &c : chunks) {
        Q_V4_PROFILE_DEALLOC(engine, c.size, Profiling::LargeItem);
        freeHugeChunk(chunkAllocator, c, nullptr);
    }
}

namespace {
using ExtraData = GCStateInfo::ExtraData;
GCState markStart(GCStateMachine *that, ExtraData &)
{
    // Initialize the mark stack
    that->mm->m_markStack = std::make_unique<MarkStack>(that->mm->engine);
    that->mm->engine->isGCOngoing = true;
    return MarkGlobalObject;
}

GCState markGlobalObject(GCStateMachine *that, ExtraData &)
{
    that->mm->engine->markObjects(that->mm->m_markStack.get());
    return MarkJSStack;
}

GCState markJSStack(GCStateMachine *that, ExtraData &)
{
    that->mm->collectFromJSStack(that->mm->markStack());
    return InitMarkPersistentValues;
}

GCState initMarkPersistentValues(GCStateMachine *that, ExtraData &stateData)
{
    if (!that->mm->m_persistentValues)
        return InitMarkWeakValues; // no persistent values to mark
    stateData = GCIteratorStorage { that->mm->m_persistentValues->begin() };
    return MarkPersistentValues;
}

static constexpr int markLoopIterationCount = 1024;

bool wasDrainNecessary(MarkStack *ms, QDeadlineTimer deadline)
{
    if (ms->remainingBeforeSoftLimit() > markLoopIterationCount)
        return false;
    // drain
    ms->drain(deadline);
    return true;
}

GCState markPersistentValues(GCStateMachine *that, ExtraData &stateData) {
    auto markStack = that->mm->markStack();
    if (wasDrainNecessary(markStack, that->deadline) && that->deadline.hasExpired())
        return MarkPersistentValues;
    PersistentValueStorage::Iterator& it = get<GCIteratorStorage>(stateData).it;
    // avoid repeatedly hitting the timer by batching iterations
    for (int i = 0; i < markLoopIterationCount; ++i) {
        if (!it.p)
            return InitMarkWeakValues;
        if (Managed *m = (*it).as<Managed>())
            m->mark(markStack);
        ++it;
    }
    return MarkPersistentValues;
}
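
// Batching example: with markLoopIterationCount = 1024, the deadline is
// re-checked once per 1024 persistent values instead of once per value,
// which keeps timer overhead low while still bounding how long a single
// incremental step can run.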

GCState initMarkWeakValues(GCStateMachine *that, ExtraData &stateData)
{
    stateData = GCIteratorStorage { that->mm->m_weakValues->begin() };
    return MarkWeakValues;
}

GCState markWeakValues(GCStateMachine *that, ExtraData &stateData)
{
    auto markStack = that->mm->markStack();
    if (wasDrainNecessary(markStack, that->deadline) && that->deadline.hasExpired())
        return MarkWeakValues;
    PersistentValueStorage::Iterator& it = get<GCIteratorStorage>(stateData).it;
    // avoid repeatedly hitting the timer by batching iterations
    for (int i = 0; i < markLoopIterationCount; ++i) {
        if (!it.p)
            return MarkDrain;
        QObjectWrapper *qobjectWrapper = (*it).as<QObjectWrapper>();
        ++it;
        if (!qobjectWrapper)
            continue;
        QObject *qobject = qobjectWrapper->object();
        if (!qobject)
            continue;
        bool keepAlive = QQmlData::keepAliveDuringGarbageCollection(qobject);

        if (!keepAlive) {
            if (QObject *parent = qobject->parent()) {
                while (parent->parent())
                    parent = parent->parent();
                keepAlive = QQmlData::keepAliveDuringGarbageCollection(parent);
            }
        }

        if (keepAlive)
            qobjectWrapper->mark(that->mm->markStack());
    }
    return MarkWeakValues;
}

GCState markDrain(GCStateMachine *that, ExtraData &)
{
    if (that->deadline.isForever()) {
        that->mm->markStack()->drain();
        return MarkReady;
    }
    auto drainState = that->mm->m_markStack->drain(that->deadline);
    return drainState == MarkStack::DrainState::Complete
            ? MarkReady
            : MarkDrain;
}

GCState markReady(GCStateMachine *, ExtraData &)
{
    // Possibility to do some clean up, stat printing, etc...
    return InitCallDestroyObjects;
}

void redrain(GCStateMachine *that)
{
    that->mm->collectFromJSStack(that->mm->markStack());
    that->mm->m_markStack->drain();
}

GCState initCallDestroyObjects(GCStateMachine *that, ExtraData &stateData)
{
    // as we don't have a deletion barrier, we need to rescan the stack
    redrain(that);
    if (!that->mm->m_weakValues)
        return FreeWeakMaps; // no need to call destroy objects
    stateData = GCIteratorStorage { that->mm->m_weakValues->begin() };
    return CallDestroyObjects;
}
GCState callDestroyObject(GCStateMachine *that, ExtraData &stateData)
{
    PersistentValueStorage::Iterator& it = get<GCIteratorStorage>(stateData).it;
    // destroyObject might call user code, which really shouldn't call back into the gc
    auto oldState = std::exchange(that->mm->gcBlocked, QV4::MemoryManager::Blockness::InCriticalSection);
    auto cleanup = qScopeGuard([&]() {
        that->mm->gcBlocked = oldState;
    });
    // avoid repeatedly hitting the timer by batching iterations
    for (int i = 0; i < markLoopIterationCount; ++i) {
        if (!it.p)
            return FreeWeakMaps;
        Managed *m = (*it).managed();
        ++it;
        if (!m || m->markBit())
            continue;
        // we need to call destroyObject on qobjectwrappers now, so that they can emit the destroyed
        // signal before we start sweeping the heap
        if (QObjectWrapper *qobjectWrapper = m->as<QObjectWrapper>())
            qobjectWrapper->destroyObject(/*lastSweep =*/false);
    }
    return CallDestroyObjects;
}

void freeWeakMaps(MemoryManager *mm)
{
    for (auto [map, lastMap] = std::tuple {mm->weakMaps, &mm->weakMaps}; map; map = map->nextWeakMap) {
        if (!map->isMarked())
            continue;
        map->removeUnmarkedKeys();
        *lastMap = map;
        lastMap = &map->nextWeakMap;
    }
}

GCState freeWeakMaps(GCStateMachine *that, ExtraData &)
{
    freeWeakMaps(that->mm);
    return FreeWeakSets;
}

void freeWeakSets(MemoryManager *mm)
{
    for (auto [set, lastSet] = std::tuple {mm->weakSets, &mm->weakSets}; set; set = set->nextWeakSet) {
        if (!set->isMarked())
            continue;
        set->removeUnmarkedKeys();
        *lastSet = set;
        lastSet = &set->nextWeakSet;
    }
}

GCState freeWeakSets(GCStateMachine *that, ExtraData &)
{
    freeWeakSets(that->mm);
    return HandleQObjectWrappers;
}

GCState handleQObjectWrappers(GCStateMachine *that, ExtraData &)
{
    that->mm->cleanupDeletedQObjectWrappersInSweep();
    return DoSweep;
}

GCState doSweep(GCStateMachine *that, ExtraData &)
{
    auto mm = that->mm;

    mm->engine->identifierTable->sweep();
    mm->blockAllocator.sweep();
    mm->hugeItemAllocator.sweep(that->mm->gcCollectorStats ? increaseFreedCountForClass : nullptr);
    mm->icAllocator.sweep();

    // reset all black bits
    mm->blockAllocator.resetBlackBits();
    mm->hugeItemAllocator.resetBlackBits();
    mm->icAllocator.resetBlackBits();

    mm->usedSlotsAfterLastFullSweep = mm->blockAllocator.usedSlotsAfterLastSweep + mm->icAllocator.usedSlotsAfterLastSweep;
    mm->gcBlocked = MemoryManager::Unblocked;
    mm->m_markStack.reset();
    mm->engine->isGCOngoing = false;

    mm->updateUnmanagedHeapSizeGCLimit();

    return Invalid;
}

} // namespace

MemoryManager::MemoryManager(ExecutionEngine *engine)
    : engine(engine)
    , chunkAllocator(new ChunkAllocator)
    , blockAllocator(chunkAllocator, engine)
    , icAllocator(chunkAllocator, engine)
    , hugeItemAllocator(chunkAllocator, engine)
    , m_persistentValues(new PersistentValueStorage(engine))
    , m_weakValues(new PersistentValueStorage(engine))
    , unmanagedHeapSizeGCLimit(MinUnmanagedHeapSizeGCLimit)
    , aggressiveGC(!qEnvironmentVariableIsEmpty("QV4_MM_AGGRESSIVE_GC"))
    , gcStats(lcGcStats().isDebugEnabled())
    , gcCollectorStats(lcGcAllocatorStats().isDebugEnabled())
{
#ifdef V4_USE_VALGRIND
    VALGRIND_CREATE_MEMPOOL(this, 0, true);
#endif
    memset(statistics.allocations, 0, sizeof(statistics.allocations));
    if (gcStats)
        blockAllocator.allocationStats = statistics.allocations;

    gcStateMachine = std::make_unique<GCStateMachine>();
    gcStateMachine->mm = this;

    gcStateMachine->stateInfoMap[GCState::MarkStart] = {
        markStart,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::MarkGlobalObject] = {
        markGlobalObject,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::MarkJSStack] = {
        markJSStack,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::InitMarkPersistentValues] = {
        initMarkPersistentValues,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::MarkPersistentValues] = {
        markPersistentValues,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::InitMarkWeakValues] = {
        initMarkWeakValues,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::MarkWeakValues] = {
        markWeakValues,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::MarkDrain] = {
        markDrain,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::MarkReady] = {
        markReady,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::InitCallDestroyObjects] = {
        initCallDestroyObjects,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::CallDestroyObjects] = {
        callDestroyObject,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::FreeWeakMaps] = {
        freeWeakMaps,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::FreeWeakSets] = {
        freeWeakSets,
        true, // ensure that handleQObjectWrappers runs in isolation
    };
    gcStateMachine->stateInfoMap[GCState::HandleQObjectWrappers] = {
        handleQObjectWrappers,
        false,
    };
    gcStateMachine->stateInfoMap[GCState::DoSweep] = {
        doSweep,
        false,
    };
}

Heap::Base *MemoryManager::allocString(std::size_t unmanagedSize)
{
    const size_t stringSize = align(sizeof(Heap::String));
#ifdef MM_STATS
    lastAllocRequestedSlots = stringSize >> Chunk::SlotSizeShift;
    ++allocationCount;
#endif
    unmanagedHeapSize += unmanagedSize;

    HeapItem *m = allocate(&blockAllocator, stringSize);
    memset(m, 0, stringSize);
    return *m;
}

Heap::Base *MemoryManager::allocData(std::size_t size)
{
#ifdef MM_STATS
    lastAllocRequestedSlots = size >> Chunk::SlotSizeShift;
    ++allocationCount;
#endif

    Q_ASSERT(size >= Chunk::SlotSize);
    Q_ASSERT(size % Chunk::SlotSize == 0);

    HeapItem *m = allocate(&blockAllocator, size);
    memset(m, 0, size);
    return *m;
}

Heap::Object *MemoryManager::allocObjectWithMemberData(const QV4::VTable *vtable, uint nMembers)
{
    uint size = (vtable->nInlineProperties + vtable->inlinePropertyOffset)*sizeof(Value);
    Q_ASSERT(!(size % sizeof(HeapItem)));

    Heap::Object *o;
    if (nMembers <= vtable->nInlineProperties) {
        o = static_cast<Heap::Object *>(allocData(size));
    } else {
        // Allocate both in one go through the block allocator
        nMembers -= vtable->nInlineProperties;
        std::size_t memberSize = align(sizeof(Heap::MemberData) + (nMembers - 1)*sizeof(Value));
        size_t totalSize = size + memberSize;
        Heap::MemberData *m;
        if (totalSize > Chunk::DataSize) {
            o = static_cast<Heap::Object *>(allocData(size));
            m = hugeItemAllocator.allocate(memberSize)->as<Heap::MemberData>();
        } else {
            HeapItem *mh = reinterpret_cast<HeapItem *>(allocData(totalSize));
            Heap::Base *b = *mh;
            o = static_cast<Heap::Object *>(b);
            mh += (size >> Chunk::SlotSizeShift);
            m = mh->as<Heap::MemberData>();
            Chunk *c = mh->chunk();
            size_t index = mh - c->realBase();
            Chunk::setBit(c->objectBitmap, index);
            Chunk::clearBit(c->extendsBitmap, index);
        }
        o->memberData.set(engine, m);
        m->internalClass.set(engine, engine->internalClasses(EngineBase::Class_MemberData));
        Q_ASSERT(o->memberData->internalClass);
        m->values.alloc = static_cast<uint>((memberSize - sizeof(Heap::MemberData) + sizeof(Value))/sizeof(Value));
        m->values.size = o->memberData->values.alloc;
        m->init();
//        qDebug() << "    got" << o->memberData << o->memberData->size;
    }
//    qDebug() << "allocating object with memberData" << o << o->memberData.operator->();
    return o;
}
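
// Example: for a vtable with 2 inline properties and a request for 10
// members, the 8 overflow members are normally carved out of the same
// block-allocator item as the object itself; the member data's first slot is
// then flagged as an object of its own in the chunk bitmaps, so the sweep
// treats object and member data as two separate items.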

static uint markStackSize = 0;

MarkStack::MarkStack(ExecutionEngine *engine)
    : m_engine(engine)
{
    m_base = (Heap::Base **)engine->gcStack->base();
    m_top = m_base;
    const size_t size = engine->maxGCStackSize() / sizeof(Heap::Base);
    m_hardLimit = m_base + size;
    m_softLimit = m_base + size * 3 / 4;
}

void MarkStack::drain()
{
    // we're not calling drain(QDeadlineTimer::Forever) as that has higher overhead
    while (m_top > m_base) {
        Heap::Base *h = pop();
        ++markStackSize;
        Q_ASSERT(h); // at this point we should only have Heap::Base objects in this area on the stack. If not, weird things might happen.
        h->internalClass->vtable->markObjects(h, this);
    }
}

MarkStack::DrainState MarkStack::drain(QDeadlineTimer deadline)
{
    do {
        for (int i = 0; i <= markLoopIterationCount * 10; ++i) {
            if (m_top == m_base)
                return DrainState::Complete;
            Heap::Base *h = pop();
            ++markStackSize;
            Q_ASSERT(h); // at this point we should only have Heap::Base objects in this area on the stack. If not, weird things might happen.
            h->internalClass->vtable->markObjects(h, this);
        }
    } while (!deadline.hasExpired());
    return DrainState::Ongoing;
}

void MemoryManager::onEventLoop()
{
    if (engine->inShutdown)
        return;
    if (gcBlocked == InCriticalSection) {
        QMetaObject::invokeMethod(engine->publicEngine, [this]{
            onEventLoop();
        }, Qt::QueuedConnection);
        return;
    }
    if (gcStateMachine->inProgress()) {
        gcStateMachine->step();
    }
}

void MemoryManager::setGCTimeLimit(int timeMs)
{
    gcStateMachine->timeLimit = std::chrono::milliseconds(timeMs);
}

void MemoryManager::sweep(bool lastSweep, ClassDestroyStatsCallback classCountPtr)
{
    for (PersistentValueStorage::Iterator it = m_weakValues->begin(); it != m_weakValues->end(); ++it) {
        Managed *m = (*it).managed();
        if (!m || m->markBit())
            continue;
        // we need to call destroyObject on qobjectwrappers now, so that they can emit the destroyed
        // signal before we start sweeping the heap
        if (QObjectWrapper *qobjectWrapper = (*it).as<QObjectWrapper>()) {
            qobjectWrapper->destroyObject(lastSweep);
        }
    }

    freeWeakMaps(this);
    freeWeakSets(this);

    cleanupDeletedQObjectWrappersInSweep();

    if (!lastSweep) {
        engine->identifierTable->sweep();
        blockAllocator.sweep(/*classCountPtr*/);
        hugeItemAllocator.sweep(classCountPtr);
        icAllocator.sweep(/*classCountPtr*/);
    }

    // reset all black bits
    blockAllocator.resetBlackBits();
    hugeItemAllocator.resetBlackBits();
    icAllocator.resetBlackBits();

    usedSlotsAfterLastFullSweep = blockAllocator.usedSlotsAfterLastSweep + icAllocator.usedSlotsAfterLastSweep;
    updateUnmanagedHeapSizeGCLimit();
}

/*
    \internal
    Helper function used in sweep to clean up the (to-be-freed) QObjectWrapper
    Used both in MemoryManager::sweep, and the corresponding gc statemachine phase
*/
void MemoryManager::cleanupDeletedQObjectWrappersInSweep()
{
    // onDestruction handlers may have accessed other QObject wrappers and reset their value, so ensure
    // that they are all set to undefined.
    for (PersistentValueStorage::Iterator it = m_weakValues->begin(); it != m_weakValues->end(); ++it) {
        Managed *m = (*it).managed();
        if (!m || m->markBit())
            continue;
        (*it) = Value::undefinedValue();
    }

    // Now it is time to free the QV4::QObjectWrapper values; check each Value's tag to make sure its object has been destroyed
    const int pendingCount = m_pendingFreedObjectWrapperValue.size();
    if (pendingCount) {
        QVector<Value *> remainingWeakQObjectWrappers;
        remainingWeakQObjectWrappers.reserve(pendingCount);
        for (int i = 0; i < pendingCount; ++i) {
            Value *v = m_pendingFreedObjectWrapperValue.at(i);
            if (v->isUndefined() || v->isEmpty())
                PersistentValueStorage::free(v);
            else
                remainingWeakQObjectWrappers.append(v);
        }
        m_pendingFreedObjectWrapperValue = remainingWeakQObjectWrappers;
    }

    if (MultiplyWrappedQObjectMap *multiplyWrappedQObjects = engine->m_multiplyWrappedQObjects) {
        for (MultiplyWrappedQObjectMap::Iterator it = multiplyWrappedQObjects->begin(); it != multiplyWrappedQObjects->end();) {
            if (it.value().isNullOrUndefined())
                it = multiplyWrappedQObjects->erase(it);
            else
                ++it;
        }
    }
}

bool MemoryManager::shouldRunGC() const
{
    size_t total = blockAllocator.totalSlots() + icAllocator.totalSlots();
    if (total > MinSlotsGCLimit && usedSlotsAfterLastFullSweep * GCOverallocation < total * 100)
        return true;
    return false;
}
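
// Example: with GCOverallocation = 200, a collection becomes due once the
// heap holds more than twice the slots that were still in use after the last
// full sweep, e.g. 10000 surviving slots allow the heap to grow to 20000
// slots before the next run; heaps below MinSlotsGCLimit never trigger.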

static size_t dumpBins(BlockAllocator *b, const char *title)
{
    const QLoggingCategory &stats = lcGcAllocatorStats();
    size_t totalSlotMem = 0;
    if (title)
        qDebug(stats) << "Slot map for" << title << "allocator:";
    for (uint i = 0; i < BlockAllocator::NumBins; ++i) {
        uint nEntries = 0;
        HeapItem *h = b->freeBins[i];
        while (h) {
            ++nEntries;
            totalSlotMem += h->freeData.availableSlots;
            h = h->freeData.next;
        }
        if (title)
            qDebug(stats) << "    number of entries in slot" << i << ":" << nEntries;
    }
    SDUMP() << "    large slot map";
    HeapItem *h = b->freeBins[BlockAllocator::NumBins - 1];
    while (h) {
        SDUMP() << "        " << Qt::hex << (quintptr(h)/32) << h->freeData.availableSlots;
        h = h->freeData.next;
    }

    if (title)
        qDebug(stats) << "    total mem in bins" << totalSlotMem*Chunk::SlotSize;
    return totalSlotMem*Chunk::SlotSize;
}

bool MemoryManager::tryForceGCCompletion()
{
    if (gcBlocked == InCriticalSection)
        return false;
    const bool incrementalGCIsAlreadyRunning = m_markStack != nullptr;
    Q_ASSERT(incrementalGCIsAlreadyRunning);
    auto oldTimeLimit = std::exchange(gcStateMachine->timeLimit, std::chrono::microseconds::max());
    while (gcStateMachine->inProgress()) {
        gcStateMachine->step();
    }
    gcStateMachine->timeLimit = oldTimeLimit;
    return true;
}

void MemoryManager::runFullGC()
{
    runGC();
    const bool incrementalGCStillRunning = m_markStack != nullptr;
    if (incrementalGCStillRunning)
        tryForceGCCompletion();
}

void MemoryManager::runGC()
{
    if (gcBlocked != Unblocked) {
        return;
    }

    gcBlocked = MemoryManager::NormalBlocked;

    if (gcStats) {
        statistics.maxReservedMem = qMax(statistics.maxReservedMem, getAllocatedMem());
        statistics.maxAllocatedMem = qMax(statistics.maxAllocatedMem, getUsedMem() + getLargeItemsMem());
    }

    if (!gcCollectorStats) {
        gcStateMachine->step();
    } else {
        bool triggeredByUnmanagedHeap = (unmanagedHeapSize > unmanagedHeapSizeGCLimit);
        size_t oldUnmanagedSize = unmanagedHeapSize;

        const size_t totalMem = getAllocatedMem();
        const size_t usedBefore = getUsedMem();
        const size_t largeItemsBefore = getLargeItemsMem();

        const QLoggingCategory &stats = lcGcAllocatorStats();
        qDebug(stats) << "========== GC ==========";
#ifdef MM_STATS
        qDebug(stats) << "    Triggered by alloc request of" << lastAllocRequestedSlots << "slots.";
        qDebug(stats) << "    Allocations since last GC" << allocationCount;
        allocationCount = 0;
#endif
        size_t oldChunks = blockAllocator.chunks.size();
        qDebug(stats) << "Allocated" << totalMem << "bytes in" << oldChunks << "chunks";
        qDebug(stats) << "Fragmented memory before GC" << (totalMem - usedBefore);
        dumpBins(&blockAllocator, "Block");
        dumpBins(&icAllocator, "InternalClass");

        QElapsedTimer t;
        t.start();
        gcStateMachine->step();
        qint64 markTime = t.nsecsElapsed()/1000;
        t.restart();
        const size_t usedAfter = getUsedMem();
        const size_t largeItemsAfter = getLargeItemsMem();

        if (triggeredByUnmanagedHeap) {
            qDebug(stats) << "triggered by unmanaged heap:";
            qDebug(stats) << "   old unmanaged heap size:" << oldUnmanagedSize;
            qDebug(stats) << "   new unmanaged heap:" << unmanagedHeapSize;
            qDebug(stats) << "   unmanaged heap limit:" << unmanagedHeapSizeGCLimit;
        }
        size_t memInBins = dumpBins(&blockAllocator, "Block")
                + dumpBins(&icAllocator, "InternalClass");
        qDebug(stats) << "Marked objects in" << markTime << "us.";
        qDebug(stats) << "   " << markStackSize << "objects marked";

        // sort our object types by number of freed instances
        MMStatsHash freedObjectStats;
        std::swap(freedObjectStats, *freedObjectStatsGlobal());
        typedef std::pair<const char*, int> ObjectStatInfo;
        std::vector<ObjectStatInfo> freedObjectsSorted;
        freedObjectsSorted.reserve(freedObjectStats.size());
        for (auto it = freedObjectStats.constBegin(); it != freedObjectStats.constEnd(); ++it) {
            freedObjectsSorted.push_back(std::make_pair(it.key(), it.value()));
        }
        std::sort(freedObjectsSorted.begin(), freedObjectsSorted.end(), [](const ObjectStatInfo &a, const ObjectStatInfo &b) {
            return a.second != b.second ? a.second > b.second : strcmp(a.first, b.first) < 0;
        });

        qDebug(stats) << "Used memory before GC:" << usedBefore;
        qDebug(stats) << "Used memory after GC:" << usedAfter;
        qDebug(stats) << "Freed up bytes      :" << (usedBefore - usedAfter);
        qDebug(stats) << "Freed up chunks     :" << (oldChunks - blockAllocator.chunks.size());
        size_t lost = blockAllocator.allocatedMem() + icAllocator.allocatedMem()
                - memInBins - usedAfter;
        if (lost)
            qDebug(stats) << "!!!!!!!!!!!!!!!!!!!!! LOST MEM:" << lost << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!";
        if (largeItemsBefore || largeItemsAfter) {
            qDebug(stats) << "Large item memory before GC:" << largeItemsBefore;
            qDebug(stats) << "Large item memory after GC:" << largeItemsAfter;
            qDebug(stats) << "Large item memory freed up:" << (largeItemsBefore - largeItemsAfter);
        }

        for (auto it = freedObjectsSorted.cbegin(); it != freedObjectsSorted.cend(); ++it) {
            qDebug(stats).noquote() << QString::fromLatin1("Freed JS type: %1 (%2 instances)").arg(QString::fromLatin1(it->first), QString::number(it->second));
        }

        qDebug(stats) << "======== End GC ========";
    }

    if (gcStats)
        statistics.maxUsedMem = qMax(statistics.maxUsedMem, getUsedMem() + getLargeItemsMem());
}

size_t MemoryManager::getUsedMem() const
{
    return blockAllocator.usedMem() + icAllocator.usedMem();
}

size_t MemoryManager::getAllocatedMem() const
{
    return blockAllocator.allocatedMem() + icAllocator.allocatedMem() + hugeItemAllocator.usedMem();
}

size_t MemoryManager::getLargeItemsMem() const
{
    return hugeItemAllocator.usedMem();
}

void MemoryManager::updateUnmanagedHeapSizeGCLimit()
{
    if (3*unmanagedHeapSizeGCLimit <= 4 * unmanagedHeapSize) {
        // more than 75% full, raise limit
        unmanagedHeapSizeGCLimit = std::max(unmanagedHeapSizeGCLimit,
                                            unmanagedHeapSize) * 2;
    } else if (unmanagedHeapSize * 4 <= unmanagedHeapSizeGCLimit) {
        // less than 25% full, lower limit
        unmanagedHeapSizeGCLimit = qMax(std::size_t(MinUnmanagedHeapSizeGCLimit),
                                        unmanagedHeapSizeGCLimit/2);
    }

    if (aggressiveGC && !engine->inShutdown) {
        // ensure we don't 'lose' any memory,
        // but not during shutdown, because then we skip parts of sweep
        // and use freeAll instead
        Q_ASSERT(blockAllocator.allocatedMem()
                 == blockAllocator.usedMem() + dumpBins(&blockAllocator, nullptr));
        Q_ASSERT(icAllocator.allocatedMem()
                 == icAllocator.usedMem() + dumpBins(&icAllocator, nullptr));
    }
}
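
// Hysteresis example: with a limit of 1 MiB, the limit is doubled once the
// unmanaged heap passes 768 KiB (3*limit <= 4*size, i.e. more than 75% full)
// and halved again only once usage falls below 256 KiB (25% full), so the
// limit cannot oscillate around a single threshold.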

void MemoryManager::registerWeakMap(Heap::MapObject *map)
{
    map->nextWeakMap = weakMaps;
    weakMaps = map;
}

void MemoryManager::registerWeakSet(Heap::SetObject *set)
{
    set->nextWeakSet = weakSets;
    weakSets = set;
}

MemoryManager::~MemoryManager()
{
    delete m_persistentValues;
    dumpStats();

    // do one last non-incremental sweep to clean up C++ objects
    // first, abort any on-going incremental gc operation
    setGCTimeLimit(-1);
    if (engine->isGCOngoing) {
        engine->isGCOngoing = false;
        m_markStack.reset();
        gcStateMachine->state = GCState::Invalid;
        blockAllocator.resetBlackBits();
        hugeItemAllocator.resetBlackBits();
        icAllocator.resetBlackBits();
    }
    // then sweep
    sweep(/*lastSweep*/true);

    blockAllocator.freeAll();
    hugeItemAllocator.freeAll();
    icAllocator.freeAll();

    delete m_weakValues;
#ifdef V4_USE_VALGRIND
    VALGRIND_DESTROY_MEMPOOL(this);
#endif
    delete chunkAllocator;
}

void MemoryManager::dumpStats() const
{
    if (!gcStats)
        return;

    const QLoggingCategory &stats = lcGcStats();
    qDebug(stats) << "Qml GC memory allocation statistics:";
    qDebug(stats) << "Total memory allocated:" << statistics.maxReservedMem;
    qDebug(stats) << "Max memory used before a GC run:" << statistics.maxAllocatedMem;
    qDebug(stats) << "Max memory used after a GC run:" << statistics.maxUsedMem;
    qDebug(stats) << "Requests for different item sizes:";
    for (int i = 1; i < BlockAllocator::NumBins - 1; ++i)
        qDebug(stats) << "     <" << (i << Chunk::SlotSizeShift) << " bytes: " << statistics.allocations[i];
    qDebug(stats) << "     >=" << ((BlockAllocator::NumBins - 1) << Chunk::SlotSizeShift) << " bytes: " << statistics.allocations[BlockAllocator::NumBins - 1];
}

void MemoryManager::collectFromJSStack(MarkStack *markStack) const
{
    Value *v = engine->jsStackBase;
    Value *top = engine->jsStackTop;
    while (v < top) {
        Managed *m = v->managed();
        if (m) {
            Q_ASSERT(m->inUse());
            // Skip pointers to already freed objects, they are bogus as well
            m->mark(markStack);
        }
        ++v;
    }
}

GCStateMachine::GCStateMachine()
{
    // base assumption: target 60fps, use at most 1/3 of time for gc
    timeLimit = std::chrono::milliseconds { (1000 / 60) / 3 };
}
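
// With this default budget of (1000 / 60) / 3 ms (about 5 ms, a third of a
// 60 fps frame), transition() below runs GC states only until the deadline
// expires and then re-schedules itself via the event loop, so a collection
// is spread over several frames instead of stalling the application.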

void GCStateMachine::transition() {
    if (timeLimit.count() > 0) {
        deadline = QDeadlineTimer(timeLimit);
        bool deadlineExpired = false;
        while (!(deadlineExpired = deadline.hasExpired()) && state != GCState::Invalid) {
            if (state == GCState::InitCallDestroyObjects) {
                /* initCallDestroyObjects is the last action which drains the mark
                   stack by default. But our write-barrier might end up putting
                   objects on the markStack which still reference other objects.
                   Especially when we call user code triggered by Component.onDestruction,
                   but also when we run into a timeout.
                   We don't redrain before InitCallDestroyObjects, as that would
                   potentially lead to useless busy-work (e.g., if the last references
                   to objects are removed while the mark phase is running)
                */
                redrain(this);
            }
            GCStateInfo& stateInfo = stateInfoMap[int(state)];
            state = stateInfo.execute(this, stateData);
            if (stateInfo.breakAfter)
                break;
        }
        if (deadlineExpired)
            handleTimeout(state);
        if (state != GCState::Invalid)
            QMetaObject::invokeMethod(mm->engine->publicEngine, [this]{
                mm->onEventLoop();
            }, Qt::QueuedConnection);
    } else {
        deadline = QDeadlineTimer::Forever;
        while (state != GCState::Invalid) {
            GCStateInfo& stateInfo = stateInfoMap[int(state)];
            state = stateInfo.execute(this, stateData);
        }
    }
}

} // namespace QV4