/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef gc_GCRuntime_h
#define gc_GCRuntime_h
#include "mozilla/Atomics.h"
#include "mozilla/EnumSet.h"
#include "jsfriendapi.h"
#include "jsgc.h"
#include "gc/Heap.h"
#include "gc/Nursery.h"
#include "gc/Statistics.h"
#include "gc/StoreBuffer.h"
#include "gc/Tracer.h"
#include "js/GCAnnotations.h"
namespace js {
class AutoLockGC;
class AutoLockHelperThreadState;
class VerifyPreTracer;
namespace gc {
typedef Vector<JS::Zone*, 4, SystemAllocPolicy> ZoneVector;
using BlackGrayEdgeVector = Vector<TenuredCell*, 0, SystemAllocPolicy>;
class AutoMaybeStartBackgroundAllocation;
class MarkingValidator;
class AutoTraceSession;
struct MovingTracer;
class ChunkPool
{
Chunk* head_;
size_t count_;
public:
ChunkPool() : head_(nullptr), count_(0) {}
size_t count() const { return count_; }
Chunk* head() { MOZ_ASSERT(head_); return head_; }
Chunk* pop();
void push(Chunk* chunk);
Chunk* remove(Chunk* chunk);
#ifdef DEBUG
bool contains(Chunk* chunk) const;
bool verify() const;
#endif
// Pool mutation does not invalidate an Iter unless the mutation
// is of the Chunk currently being visited by the Iter.
class Iter {
public:
explicit Iter(ChunkPool& pool) : current_(pool.head_) {}
bool done() const { return !current_; }
void next();
Chunk* get() const { return current_; }
operator Chunk*() const { return get(); }
Chunk* operator->() const { return get(); }
private:
Chunk* current_;
};
};
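// Illustrative sketch of the Iter contract above (not part of the interface):
// advance past a chunk before mutating it so the iterator is never
// invalidated. Chunk::unused() stands in for whatever emptiness predicate the
// caller applies.
//
//     for (ChunkPool::Iter iter(pool); !iter.done();) {
//         Chunk* chunk = iter.get();
//         iter.next();             // step off |chunk| before touching it
//         if (chunk->unused())
//             pool.remove(chunk);  // safe: not the chunk being visited
//     }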
// Performs extra allocation off the main thread so that when memory is
// required on the main thread it will already be available and waiting.
class BackgroundAllocTask : public GCParallelTaskHelper<BackgroundAllocTask>
{
// Guarded by the GC lock.
JSRuntime* runtime;
ChunkPool& chunkPool_;
const bool enabled_;
public:
BackgroundAllocTask(JSRuntime* rt, ChunkPool& pool);
bool enabled() const { return enabled_; }
void run();
};
// Search the provided Chunks for free arenas and decommit them.
class BackgroundDecommitTask : public GCParallelTaskHelper<BackgroundDecommitTask>
{
public:
using ChunkVector = mozilla::Vector<Chunk*>;
explicit BackgroundDecommitTask(JSRuntime *rt) : runtime(rt) {}
void setChunksToScan(ChunkVector &chunks);
void run();
private:
JSRuntime* runtime;
ChunkVector toDecommit;
};
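// Illustrative usage: hand the task a batch of chunks, then run it off the
// main thread. start() is assumed to come from the GCParallelTask machinery;
// the real batching policy lives in GCRuntime::startDecommit().
//
//     BackgroundDecommitTask::ChunkVector toDecommit;
//     // ... append chunks whose free arenas should be decommitted ...
//     decommitTask.setChunksToScan(toDecommit);
//     decommitTask.start();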
/*
* Encapsulates all of the GC tunables. These are effectively constant and
* should only be modified by setParameter.
*/
class GCSchedulingTunables
{
/*
* Soft limit on the number of bytes we are allowed to allocate in the GC
* heap. Attempts to allocate gcthings over this limit will return null and
* subsequently invoke the standard OOM machinery, independent of available
* physical memory.
*/
size_t gcMaxBytes_;
/*
     * The base value used to compute zone->threshold.gcBytes(). When
* usage.gcBytes() surpasses threshold.gcBytes() for a zone, the zone may
* be scheduled for a GC, depending on the exact circumstances.
*/
size_t gcZoneAllocThresholdBase_;
/* Fraction of threshold.gcBytes() which triggers an incremental GC. */
double zoneAllocThresholdFactor_;
/*
* Number of bytes to allocate between incremental slices in GCs triggered
* by the zone allocation threshold.
*/
size_t zoneAllocDelayBytes_;
/*
* Totally disables |highFrequencyGC|, the HeapGrowthFactor, and other
* tunables that make GC non-deterministic.
*/
bool dynamicHeapGrowthEnabled_;
/*
     * We enter high-frequency mode if we GC twice within this many
* microseconds. This value is stored directly in microseconds.
*/
uint64_t highFrequencyThresholdUsec_;
/*
* When in the |highFrequencyGC| mode, these parameterize the per-zone
* "HeapGrowthFactor" computation.
*/
uint64_t highFrequencyLowLimitBytes_;
uint64_t highFrequencyHighLimitBytes_;
double highFrequencyHeapGrowthMax_;
double highFrequencyHeapGrowthMin_;
/*
* When not in |highFrequencyGC| mode, this is the global (stored per-zone)
* "HeapGrowthFactor".
*/
double lowFrequencyHeapGrowth_;
/*
* Doubles the length of IGC slices when in the |highFrequencyGC| mode.
*/
bool dynamicMarkSliceEnabled_;
/*
* Controls whether painting can trigger IGC slices.
*/
bool refreshFrameSlicesEnabled_;
/*
* Controls the number of empty chunks reserved for future allocation.
*/
uint32_t minEmptyChunkCount_;
uint32_t maxEmptyChunkCount_;
public:
GCSchedulingTunables()
: gcMaxBytes_(0),
gcZoneAllocThresholdBase_(30 * 1024 * 1024),
zoneAllocThresholdFactor_(0.9),
zoneAllocDelayBytes_(1024 * 1024),
dynamicHeapGrowthEnabled_(false),
highFrequencyThresholdUsec_(1000 * 1000),
highFrequencyLowLimitBytes_(100 * 1024 * 1024),
highFrequencyHighLimitBytes_(500 * 1024 * 1024),
highFrequencyHeapGrowthMax_(3.0),
highFrequencyHeapGrowthMin_(1.5),
lowFrequencyHeapGrowth_(1.5),
dynamicMarkSliceEnabled_(false),
refreshFrameSlicesEnabled_(true),
minEmptyChunkCount_(1),
maxEmptyChunkCount_(30)
{}
size_t gcMaxBytes() const { return gcMaxBytes_; }
size_t gcZoneAllocThresholdBase() const { return gcZoneAllocThresholdBase_; }
double zoneAllocThresholdFactor() const { return zoneAllocThresholdFactor_; }
size_t zoneAllocDelayBytes() const { return zoneAllocDelayBytes_; }
bool isDynamicHeapGrowthEnabled() const { return dynamicHeapGrowthEnabled_; }
uint64_t highFrequencyThresholdUsec() const { return highFrequencyThresholdUsec_; }
uint64_t highFrequencyLowLimitBytes() const { return highFrequencyLowLimitBytes_; }
uint64_t highFrequencyHighLimitBytes() const { return highFrequencyHighLimitBytes_; }
double highFrequencyHeapGrowthMax() const { return highFrequencyHeapGrowthMax_; }
double highFrequencyHeapGrowthMin() const { return highFrequencyHeapGrowthMin_; }
double lowFrequencyHeapGrowth() const { return lowFrequencyHeapGrowth_; }
bool isDynamicMarkSliceEnabled() const { return dynamicMarkSliceEnabled_; }
bool areRefreshFrameSlicesEnabled() const { return refreshFrameSlicesEnabled_; }
unsigned minEmptyChunkCount(const AutoLockGC&) const { return minEmptyChunkCount_; }
unsigned maxEmptyChunkCount() const { return maxEmptyChunkCount_; }
MOZ_MUST_USE bool setParameter(JSGCParamKey key, uint32_t value, const AutoLockGC& lock);
};
/*
* GC Scheduling Overview
* ======================
*
 * Scheduling GCs in SpiderMonkey/Firefox is tremendously complicated because
* of the large number of subtle, cross-cutting, and widely dispersed factors
* that must be taken into account. A summary of some of the more important
* factors follows.
*
* Cost factors:
*
* * GC too soon and we'll revisit an object graph almost identical to the
* one we just visited; since we are unlikely to find new garbage, the
* traversal will be largely overhead. We rely heavily on external factors
* to signal us that we are likely to find lots of garbage: e.g. "a tab
* just got closed".
*
* * GC too late and we'll run out of memory to allocate (e.g. Out-Of-Memory,
* hereafter simply abbreviated to OOM). If this happens inside
* SpiderMonkey we may be able to recover, but most embedder allocations
* will simply crash on OOM, even if the GC has plenty of free memory it
* could surrender.
*
* * Memory fragmentation: if we fill the process with GC allocations, a
* request for a large block of contiguous memory may fail because no
* contiguous block is free, despite having enough memory available to
* service the request.
*
* * Management overhead: if our GC heap becomes large, we create extra
* overhead when managing the GC's structures, even if the allocations are
* mostly unused.
*
* Heap Management Factors:
*
* * GC memory: The GC has its own allocator that it uses to make fixed size
* allocations for GC managed things. In cases where the GC thing requires
* larger or variable sized memory to implement itself, it is responsible
* for using the system heap.
*
* * C Heap Memory: Rather than allowing for large or variable allocations,
* the SpiderMonkey GC allows GC things to hold pointers to C heap memory.
* It is the responsibility of the thing to free this memory with a custom
* finalizer (with the sole exception of NativeObject, which knows about
* slots and elements for performance reasons). C heap memory has different
* performance and overhead tradeoffs than GC internal memory, which need
* to be considered with scheduling a GC.
*
* Application Factors:
*
* * Most applications allocate heavily at startup, then enter a processing
* stage where memory utilization remains roughly fixed with a slower
* allocation rate. This is not always the case, however, so while we may
* optimize for this pattern, we must be able to handle arbitrary
* allocation patterns.
*
* Other factors:
*
* * Other memory: This is memory allocated outside the purview of the GC.
* Data mapped by the system for code libraries, data allocated by those
* libraries, data in the JSRuntime that is used to manage the engine,
* memory used by the embedding that is not attached to a GC thing, memory
* used by unrelated processes running on the hardware that use space we
* could otherwise use for allocation, etc. While we don't have to manage
* it, we do have to take it into account when scheduling since it affects
* when we will OOM.
*
* * Physical Reality: All real machines have limits on the number of bits
* that they are physically able to store. While modern operating systems
* can generally make additional space available with swapping, at some
* point there are simply no more bits to allocate. There is also the
 *   factor of address space limitations, particularly on 32-bit machines.
*
* * Platform Factors: Each OS makes use of wildly different memory
* management techniques. These differences result in different performance
* tradeoffs, different fragmentation patterns, and different hard limits
* on the amount of physical and/or virtual memory that we can use before
* OOMing.
*
*
* Reasons for scheduling GC
* -------------------------
*
* While code generally takes the above factors into account in only an ad-hoc
* fashion, the API forces the user to pick a "reason" for the GC. We have a
* bunch of JS::gcreason reasons in GCAPI.h. These fall into a few categories
* that generally coincide with one or more of the above factors.
*
* Embedding reasons:
*
* 1) Do a GC now because the embedding knows something useful about the
* zone's memory retention state. These are gcreasons like LOAD_END,
* PAGE_HIDE, SET_NEW_DOCUMENT, DOM_UTILS. Mostly, Gecko uses these to
* indicate that a significant fraction of the scheduled zone's memory is
* probably reclaimable.
*
* 2) Do some known amount of GC work now because the embedding knows now is
* a good time to do a long, unblockable operation of a known duration.
* These are INTER_SLICE_GC and REFRESH_FRAME.
*
* Correctness reasons:
*
* 3) Do a GC now because correctness depends on some GC property. For
* example, CC_WAITING is where the embedding requires the mark bits
 *     to be set correctly. Also, EVICT_NURSERY, where we need to work on
 *     the tenured heap.
*
* 4) Do a GC because we are shutting down: e.g. SHUTDOWN_CC or DESTROY_*.
*
* 5) Do a GC because a compartment was accessed between GC slices when we
* would have otherwise discarded it. We have to do a second GC to clean
* it up: e.g. COMPARTMENT_REVIVED.
*
* Emergency Reasons:
*
* 6) Do an all-zones, non-incremental GC now because the embedding knows it
* cannot wait: e.g. MEM_PRESSURE.
*
* 7) OOM when fetching a new Chunk results in a LAST_DITCH GC.
*
* Heap Size Limitation Reasons:
*
* 8) Do an incremental, zonal GC with reason MAYBEGC when we discover that
* the gc's allocated size is approaching the current trigger. This is
* called MAYBEGC because we make this check in the MaybeGC function.
* MaybeGC gets called at the top of the main event loop. Normally, it is
* expected that this callback will keep the heap size limited. It is
* relatively inexpensive, because it is invoked with no JS running and
* thus few stack roots to scan. For this reason, the GC's "trigger" bytes
* is less than the GC's "max" bytes as used by the trigger below.
*
* 9) Do an incremental, zonal GC with reason MAYBEGC when we go to allocate
* a new GC thing and find that the GC heap size has grown beyond the
* configured maximum (JSGC_MAX_BYTES). We trigger this GC by returning
* nullptr and then calling maybeGC at the top level of the allocator.
* This is then guaranteed to fail the "size greater than trigger" check
* above, since trigger is always less than max. After performing the GC,
 *      the allocator unconditionally returns nullptr so that an OOM exception
 *      is raised by the script.
*
* Note that this differs from a LAST_DITCH GC where we actually run out
* of memory (i.e., a call to a system allocator fails) when trying to
* allocate. Unlike above, LAST_DITCH GC only happens when we are really
* out of memory, not just when we cross an arbitrary trigger; despite
* this, it may still return an allocation at the end and allow the script
* to continue, if the LAST_DITCH GC was able to free up enough memory.
*
* 10) Do a GC under reason ALLOC_TRIGGER when we are over the GC heap trigger
* limit, but in the allocator rather than in a random call to maybeGC.
* This occurs if we allocate too much before returning to the event loop
* and calling maybeGC; this is extremely common in benchmarks and
* long-running Worker computations. Note that this uses a wildly
* different mechanism from the above in that it sets the interrupt flag
* and does the GC at the next loop head, before the next alloc, or
* maybeGC. The reason for this is that this check is made after the
* allocation and we cannot GC with an uninitialized thing in the heap.
*
* 11) Do an incremental, zonal GC with reason TOO_MUCH_MALLOC when we have
* malloced more than JSGC_MAX_MALLOC_BYTES in a zone since the last GC.
*
*
* Size Limitation Triggers Explanation
* ------------------------------------
*
* The GC internally is entirely unaware of the context of the execution of
* the mutator. It sees only:
*
* A) Allocated size: this is the amount of memory currently requested by the
* mutator. This quantity is monotonically increasing: i.e. the allocation
* rate is always >= 0. It is also easy for the system to track.
*
* B) Retained size: this is the amount of memory that the mutator can
* currently reach. Said another way, it is the size of the heap
* immediately after a GC (modulo background sweeping). This size is very
* costly to know exactly and also extremely hard to estimate with any
* fidelity.
*
* For reference, a common allocated vs. retained graph might look like:
*
* | ** **
* | ** ** * **
* | ** * ** * **
* | * ** * ** * **
* | ** ** * ** * **
* s| * * ** ** + + **
* i| * * * + + + + +
* z| * * * + + + + +
* e| * **+
* | * +
* | * +
* | * +
* | * +
* | * +
* |*+
* +--------------------------------------------------
* time
* *** = allocated
* +++ = retained
*
* Note that this is a bit of a simplification
* because in reality we track malloc and GC heap
* sizes separately and have a different level of
* granularity and accuracy on each heap.
*
* This presents some obvious implications for Mark-and-Sweep collectors.
* Namely:
* -> t[marking] ~= size[retained]
* -> t[sweeping] ~= size[allocated] - size[retained]
*
* In a non-incremental collector, maintaining low latency and high
* responsiveness requires that total GC times be as low as possible. Thus,
* in order to stay responsive when we did not have a fully incremental
* collector, our GC triggers were focused on minimizing collection time.
* Furthermore, since size[retained] is not under control of the GC, all the
* GC could do to control collection times was reduce sweep times by
* minimizing size[allocated], per the equation above.
*
* The result of the above is GC triggers that focus on size[allocated] to
* the exclusion of other important factors and default heuristics that are
* not optimal for a fully incremental collector. On the other hand, this is
* not all bad: minimizing size[allocated] also minimizes the chance of OOM
* and sweeping remains one of the hardest areas to further incrementalize.
*
* EAGER_ALLOC_TRIGGER
* -------------------
* Occurs when we return to the event loop and find our heap is getting
* largish, but before t[marking] OR t[sweeping] is too large for a
* responsive non-incremental GC. This is intended to be the common case
* in normal web applications: e.g. we just finished an event handler and
* the few objects we allocated when computing the new whatzitz have
* pushed us slightly over the limit. After this GC we rescale the new
* EAGER_ALLOC_TRIGGER trigger to 150% of size[retained] so that our
* non-incremental GC times will always be proportional to this size
* rather than being dominated by sweeping.
*
* As a concession to mutators that allocate heavily during their startup
* phase, we have a highFrequencyGCMode that ups the growth rate to 300%
* of the current size[retained] so that we'll do fewer longer GCs at the
* end of the mutator startup rather than more, smaller GCs.
*
* Assumptions:
* -> Responsiveness is proportional to t[marking] + t[sweeping].
* -> size[retained] is proportional only to GC allocations.
*
* ALLOC_TRIGGER (non-incremental)
* -------------------------------
* If we do not return to the event loop before getting all the way to our
* gc trigger bytes then MAYBEGC will never fire. To avoid OOMing, we
* succeed the current allocation and set the script interrupt so that we
* will (hopefully) do a GC before we overflow our max and have to raise
* an OOM exception for the script.
*
* Assumptions:
* -> Common web scripts will return to the event loop before using
* 10% of the current gcTriggerBytes worth of GC memory.
*
* ALLOC_TRIGGER (incremental)
* ---------------------------
* In practice the above trigger is rough: if a website is just on the
* cusp, sometimes it will trigger a non-incremental GC moments before
* returning to the event loop, where it could have done an incremental
* GC. Thus, we recently added an incremental version of the above with a
* substantially lower threshold, so that we have a soft limit here. If
* IGC can collect faster than the allocator generates garbage, even if
* the allocator does not return to the event loop frequently, we should
* not have to fall back to a non-incremental GC.
*
* INCREMENTAL_TOO_SLOW
* --------------------
* Do a full, non-incremental GC if we overflow ALLOC_TRIGGER during an
* incremental GC. When in the middle of an incremental GC, we suppress
* our other triggers, so we need a way to backstop the IGC if the
* mutator allocates faster than the IGC can clean things up.
*
* TOO_MUCH_MALLOC
* ---------------
* Performs a GC before size[allocated] - size[retained] gets too large
* for non-incremental sweeping to be fast in the case that we have
* significantly more malloc allocation than GC allocation. This is meant
* to complement MAYBEGC triggers. We track this by counting malloced
* bytes; the counter gets reset at every GC since we do not always have a
* size at the time we call free. Because of this, the malloc heuristic
 *      is, unfortunately, not usefully able to augment our other GC heap
* triggers and is limited to this singular heuristic.
*
* Assumptions:
* -> EITHER size[allocated_by_malloc] ~= size[allocated_by_GC]
* OR time[sweeping] ~= size[allocated_by_malloc]
* -> size[retained] @ t0 ~= size[retained] @ t1
* i.e. That the mutator is in steady-state operation.
*
* LAST_DITCH_GC
* -------------
* Does a GC because we are out of memory.
*
* Assumptions:
* -> size[retained] < size[available_memory]
*/
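/*
 * Illustrative sketch of how the tunables above feed a zone's GC trigger. The
 * real computation lives in GCRuntime::computeHeapGrowthFactor() and
 * GCRuntime::computeTriggerBytes(); LinearInterpolate is a hypothetical
 * helper and the formula below is a simplified reading, not the exact one.
 *
 *     double growth = tunables.lowFrequencyHeapGrowth();
 *     if (schedulingState.inHighFrequencyGCMode()) {
 *         // Scale from HeapGrowthMax (small heaps) down to HeapGrowthMin
 *         // (large heaps) as lastBytes moves between the two limits.
 *         growth = LinearInterpolate(lastBytes,
 *                                    tunables.highFrequencyLowLimitBytes(),
 *                                    tunables.highFrequencyHeapGrowthMax(),
 *                                    tunables.highFrequencyHighLimitBytes(),
 *                                    tunables.highFrequencyHeapGrowthMin());
 *     }
 *     size_t trigger = Max(tunables.gcZoneAllocThresholdBase(),
 *                          size_t(lastBytes * growth));
 */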
class GCSchedulingState
{
/*
     * Influences how we schedule and run GCs in several subtle ways. The most
* important factor is in how it controls the "HeapGrowthFactor". The
* growth factor is a measure of how large (as a percentage of the last GC)
* the heap is allowed to grow before we try to schedule another GC.
*/
bool inHighFrequencyGCMode_;
public:
GCSchedulingState()
: inHighFrequencyGCMode_(false)
{}
bool inHighFrequencyGCMode() const { return inHighFrequencyGCMode_; }
void updateHighFrequencyMode(uint64_t lastGCTime, uint64_t currentTime,
const GCSchedulingTunables& tunables) {
inHighFrequencyGCMode_ =
tunables.isDynamicHeapGrowthEnabled() && lastGCTime &&
lastGCTime + tunables.highFrequencyThresholdUsec() > currentTime;
}
};
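// Illustrative usage: at the end of a collection the collector refreshes the
// mode from the time since the previous GC. PRMJ_Now() stands in for the
// engine's microsecond clock.
//
//     schedulingState.updateHighFrequencyMode(lastGCTime, PRMJ_Now(), tunables);
//     if (schedulingState.inHighFrequencyGCMode()) {
//         // use the more aggressive high-frequency growth parameters
//     }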
template<typename F>
struct Callback {
F op;
void* data;
Callback()
: op(nullptr), data(nullptr)
{}
Callback(F op, void* data)
: op(op), data(data)
{}
};
template<typename F>
using CallbackVector = Vector<Callback<F>, 4, SystemAllocPolicy>;
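// Illustrative sketch: Callback pairs a raw function pointer with the
// embedder's closure data, so dispatch sites reduce to a null check plus a
// call. The argument list here is schematic, not an exact signature.
//
//     if (gcCallback.op)
//         gcCallback.op(/* context/status args */, gcCallback.data);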
template <typename T, typename Iter0, typename Iter1>
class ChainedIter
{
Iter0 iter0_;
Iter1 iter1_;
public:
ChainedIter(const Iter0& iter0, const Iter1& iter1)
: iter0_(iter0), iter1_(iter1)
{}
bool done() const { return iter0_.done() && iter1_.done(); }
void next() {
MOZ_ASSERT(!done());
if (!iter0_.done()) {
iter0_.next();
} else {
MOZ_ASSERT(!iter1_.done());
iter1_.next();
}
}
T get() const {
MOZ_ASSERT(!done());
if (!iter0_.done())
return iter0_.get();
MOZ_ASSERT(!iter1_.done());
return iter1_.get();
}
operator T() const { return get(); }
T operator->() const { return get(); }
};
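// Illustrative usage: GCRuntime chains the available and full chunk pools so
// that one loop visits every non-empty chunk (see allNonEmptyChunks() below):
//
//     for (NonEmptyChunksIter chunk(ChunkPool::Iter(availableChunks_),
//                                   ChunkPool::Iter(fullChunks_));
//          !chunk.done(); chunk.next()) {
//         // ... operate on |chunk| ...
//     }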
typedef HashMap<Value*, const char*, DefaultHasher<Value*>, SystemAllocPolicy> RootedValueMap;
using AllocKinds = mozilla::EnumSet<AllocKind>;
class GCRuntime
{
public:
explicit GCRuntime(JSRuntime* rt);
MOZ_MUST_USE bool init(uint32_t maxbytes, uint32_t maxNurseryBytes);
void finishRoots();
void finish();
MOZ_MUST_USE bool addRoot(Value* vp, const char* name);
void removeRoot(Value* vp);
void setMarkStackLimit(size_t limit, AutoLockGC& lock);
MOZ_MUST_USE bool setParameter(JSGCParamKey key, uint32_t value, AutoLockGC& lock);
uint32_t getParameter(JSGCParamKey key, const AutoLockGC& lock);
MOZ_MUST_USE bool triggerGC(JS::gcreason::Reason reason);
void maybeAllocTriggerZoneGC(Zone* zone, const AutoLockGC& lock);
// The return value indicates if we were able to do the GC.
bool triggerZoneGC(Zone* zone, JS::gcreason::Reason reason);
void maybeGC(Zone* zone);
void minorGC(JS::gcreason::Reason reason,
gcstats::Phase phase = gcstats::PHASE_MINOR_GC) JS_HAZ_GC_CALL;
void evictNursery(JS::gcreason::Reason reason = JS::gcreason::EVICT_NURSERY) {
minorGC(reason, gcstats::PHASE_EVICT_NURSERY);
}
// The return value indicates whether a major GC was performed.
bool gcIfRequested();
void gc(JSGCInvocationKind gckind, JS::gcreason::Reason reason);
void startGC(JSGCInvocationKind gckind, JS::gcreason::Reason reason, int64_t millis = 0);
void gcSlice(JS::gcreason::Reason reason, int64_t millis = 0);
void finishGC(JS::gcreason::Reason reason);
void abortGC();
void startDebugGC(JSGCInvocationKind gckind, SliceBudget& budget);
void debugGCSlice(SliceBudget& budget);
void triggerFullGCForAtoms() {
MOZ_ASSERT(fullGCForAtomsRequested_);
fullGCForAtomsRequested_ = false;
MOZ_RELEASE_ASSERT(triggerGC(JS::gcreason::ALLOC_TRIGGER));
}
inline void poke();
enum TraceOrMarkRuntime {
TraceRuntime,
MarkRuntime
};
void traceRuntime(JSTracer* trc, AutoLockForExclusiveAccess& lock);
void traceRuntimeForMinorGC(JSTracer* trc, AutoLockForExclusiveAccess& lock);
void notifyDidPaint();
void shrinkBuffers();
void onOutOfMallocMemory();
void onOutOfMallocMemory(const AutoLockGC& lock);
size_t maxMallocBytesAllocated() { return maxMallocBytes; }
uint64_t nextCellUniqueId() {
MOZ_ASSERT(nextCellUniqueId_ > 0);
uint64_t uid = ++nextCellUniqueId_;
return uid;
}
#ifdef DEBUG
bool shutdownCollectedEverything() const {
return arenasEmptyAtShutdown;
}
#endif
public:
// Internal public interface
State state() const { return incrementalState; }
bool isHeapCompacting() const { return state() == State::Compact; }
bool isForegroundSweeping() const { return state() == State::Sweep; }
bool isBackgroundSweeping() { return helperState.isBackgroundSweeping(); }
void waitBackgroundSweepEnd() { helperState.waitBackgroundSweepEnd(); }
void waitBackgroundSweepOrAllocEnd() {
helperState.waitBackgroundSweepEnd();
allocTask.cancel(GCParallelTask::CancelAndWait);
}
void requestMinorGC(JS::gcreason::Reason reason);
#ifdef DEBUG
bool onBackgroundThread() { return helperState.onBackgroundThread(); }
#endif // DEBUG
void lockGC() {
lock.lock();
}
void unlockGC() {
lock.unlock();
}
#ifdef DEBUG
bool isAllocAllowed() { return noGCOrAllocationCheck == 0; }
void disallowAlloc() { ++noGCOrAllocationCheck; }
void allowAlloc() {
MOZ_ASSERT(!isAllocAllowed());
--noGCOrAllocationCheck;
}
bool isNurseryAllocAllowed() { return noNurseryAllocationCheck == 0; }
void disallowNurseryAlloc() { ++noNurseryAllocationCheck; }
void allowNurseryAlloc() {
MOZ_ASSERT(!isNurseryAllocAllowed());
--noNurseryAllocationCheck;
}
bool isStrictProxyCheckingEnabled() { return disableStrictProxyCheckingCount == 0; }
void disableStrictProxyChecking() { ++disableStrictProxyCheckingCount; }
void enableStrictProxyChecking() {
MOZ_ASSERT(disableStrictProxyCheckingCount > 0);
--disableStrictProxyCheckingCount;
}
#endif // DEBUG
bool isInsideUnsafeRegion() { return inUnsafeRegion != 0; }
void enterUnsafeRegion() { ++inUnsafeRegion; }
void leaveUnsafeRegion() {
MOZ_ASSERT(inUnsafeRegion > 0);
--inUnsafeRegion;
}
void verifyIsSafeToGC() {
MOZ_DIAGNOSTIC_ASSERT(!isInsideUnsafeRegion(),
"[AutoAssertNoGC] possible GC in GC-unsafe region");
}
void setAlwaysPreserveCode() { alwaysPreserveCode = true; }
bool isIncrementalGCAllowed() const { return incrementalAllowed; }
void disallowIncrementalGC() { incrementalAllowed = false; }
bool isIncrementalGCEnabled() const { return mode == JSGC_MODE_INCREMENTAL && incrementalAllowed; }
bool isIncrementalGCInProgress() const { return state() != State::NotActive; }
bool isGenerationalGCEnabled() const { return generationalDisabled == 0; }
void disableGenerationalGC();
void enableGenerationalGC();
void disableCompactingGC();
void enableCompactingGC();
bool isCompactingGCEnabled() const;
void setGrayRootsTracer(JSTraceDataOp traceOp, void* data);
MOZ_MUST_USE bool addBlackRootsTracer(JSTraceDataOp traceOp, void* data);
void removeBlackRootsTracer(JSTraceDataOp traceOp, void* data);
void setMaxMallocBytes(size_t value);
int32_t getMallocBytes() const { return mallocBytesUntilGC; }
void resetMallocBytes();
bool isTooMuchMalloc() const { return mallocBytesUntilGC <= 0; }
void updateMallocCounter(JS::Zone* zone, size_t nbytes);
void onTooMuchMalloc();
void setGCCallback(JSGCCallback callback, void* data);
void callGCCallback(JSGCStatus status) const;
void setObjectsTenuredCallback(JSObjectsTenuredCallback callback,
void* data);
void callObjectsTenuredCallback();
MOZ_MUST_USE bool addFinalizeCallback(JSFinalizeCallback callback, void* data);
void removeFinalizeCallback(JSFinalizeCallback func);
MOZ_MUST_USE bool addWeakPointerZoneGroupCallback(JSWeakPointerZoneGroupCallback callback,
void* data);
void removeWeakPointerZoneGroupCallback(JSWeakPointerZoneGroupCallback callback);
MOZ_MUST_USE bool addWeakPointerCompartmentCallback(JSWeakPointerCompartmentCallback callback,
void* data);
void removeWeakPointerCompartmentCallback(JSWeakPointerCompartmentCallback callback);
JS::GCSliceCallback setSliceCallback(JS::GCSliceCallback callback);
JS::GCNurseryCollectionCallback setNurseryCollectionCallback(
JS::GCNurseryCollectionCallback callback);
JS::DoCycleCollectionCallback setDoCycleCollectionCallback(JS::DoCycleCollectionCallback callback);
void callDoCycleCollectionCallback(JSContext* cx);
void setFullCompartmentChecks(bool enable);
bool isManipulatingDeadZones() { return manipulatingDeadZones; }
void setManipulatingDeadZones(bool value) { manipulatingDeadZones = value; }
unsigned objectsMarkedInDeadZonesCount() { return objectsMarkedInDeadZones; }
void incObjectsMarkedInDeadZone() {
MOZ_ASSERT(manipulatingDeadZones);
++objectsMarkedInDeadZones;
}
JS::Zone* getCurrentZoneGroup() { return currentZoneGroup; }
void setFoundBlackGrayEdges(TenuredCell& target) {
AutoEnterOOMUnsafeRegion oomUnsafe;
if (!foundBlackGrayEdges.append(&target))
oomUnsafe.crash("OOM|small: failed to insert into foundBlackGrayEdges");
}
uint64_t gcNumber() const { return number; }
uint64_t minorGCCount() const { return minorGCNumber; }
void incMinorGcNumber() { ++minorGCNumber; ++number; }
uint64_t majorGCCount() const { return majorGCNumber; }
void incMajorGcNumber() { ++majorGCNumber; ++number; }
int64_t defaultSliceBudget() const { return defaultTimeBudget_; }
bool isIncrementalGc() const { return isIncremental; }
bool isFullGc() const { return isFull; }
bool isCompactingGc() const { return isCompacting; }
bool minorGCRequested() const { return minorGCTriggerReason != JS::gcreason::NO_REASON; }
bool majorGCRequested() const { return majorGCTriggerReason != JS::gcreason::NO_REASON; }
bool isGcNeeded() { return minorGCRequested() || majorGCRequested(); }
bool fullGCForAtomsRequested() const { return fullGCForAtomsRequested_; }
double computeHeapGrowthFactor(size_t lastBytes);
size_t computeTriggerBytes(double growthFactor, size_t lastBytes);
JSGCMode gcMode() const { return mode; }
void setGCMode(JSGCMode m) {
mode = m;
marker.setGCMode(mode);
}
inline void updateOnFreeArenaAlloc(const ChunkInfo& info);
inline void updateOnArenaFree(const ChunkInfo& info);
ChunkPool& fullChunks(const AutoLockGC& lock) { return fullChunks_; }
ChunkPool& availableChunks(const AutoLockGC& lock) { return availableChunks_; }
ChunkPool& emptyChunks(const AutoLockGC& lock) { return emptyChunks_; }
const ChunkPool& fullChunks(const AutoLockGC& lock) const { return fullChunks_; }
const ChunkPool& availableChunks(const AutoLockGC& lock) const { return availableChunks_; }
const ChunkPool& emptyChunks(const AutoLockGC& lock) const { return emptyChunks_; }
typedef ChainedIter<Chunk*, ChunkPool::Iter, ChunkPool::Iter> NonEmptyChunksIter;
NonEmptyChunksIter allNonEmptyChunks() {
return NonEmptyChunksIter(ChunkPool::Iter(availableChunks_), ChunkPool::Iter(fullChunks_));
}
Chunk* getOrAllocChunk(const AutoLockGC& lock,
AutoMaybeStartBackgroundAllocation& maybeStartBGAlloc);
void recycleChunk(Chunk* chunk, const AutoLockGC& lock);
// Free certain LifoAlloc blocks when it is safe to do so.
void freeUnusedLifoBlocksAfterSweeping(LifoAlloc* lifo);
void freeAllLifoBlocksAfterSweeping(LifoAlloc* lifo);
void freeAllLifoBlocksAfterMinorGC(LifoAlloc* lifo);
// Queue a thunk to run after the next minor GC.
void callAfterMinorGC(void (*thunk)(void* data), void* data) {
nursery.queueSweepAction(thunk, data);
}
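    // Illustrative usage: an embedder frees a buffer only after the next
    // minor GC, once no nursery thing can still point at it. FreeBuffer is a
    // hypothetical thunk supplied by the caller.
    //
    //     static void FreeBuffer(void* data) { js_free(data); }
    //     ...
    //     gc.callAfterMinorGC(FreeBuffer, buffer);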
// Public here for ReleaseArenaLists and FinalizeTypedArenas.
void releaseArena(Arena* arena, const AutoLockGC& lock);
void releaseHeldRelocatedArenas();
void releaseHeldRelocatedArenasWithoutUnlocking(const AutoLockGC& lock);
// Allocator
template <AllowGC allowGC>
MOZ_MUST_USE bool checkAllocatorState(JSContext* cx, AllocKind kind);
template <AllowGC allowGC>
JSObject* tryNewNurseryObject(JSContext* cx, size_t thingSize, size_t nDynamicSlots,
const Class* clasp);
template <AllowGC allowGC>
static JSObject* tryNewTenuredObject(ExclusiveContext* cx, AllocKind kind, size_t thingSize,
size_t nDynamicSlots);
template <typename T, AllowGC allowGC>
static T* tryNewTenuredThing(ExclusiveContext* cx, AllocKind kind, size_t thingSize);
static TenuredCell* refillFreeListInGC(Zone* zone, AllocKind thingKind);
private:
enum IncrementalProgress
{
NotFinished = 0,
Finished
};
// For ArenaLists::allocateFromArena()
friend class ArenaLists;
Chunk* pickChunk(const AutoLockGC& lock,
AutoMaybeStartBackgroundAllocation& maybeStartBGAlloc);
Arena* allocateArena(Chunk* chunk, Zone* zone, AllocKind kind,
ShouldCheckThresholds checkThresholds, const AutoLockGC& lock);
void arenaAllocatedDuringGC(JS::Zone* zone, Arena* arena);
// Allocator internals
MOZ_MUST_USE bool gcIfNeededPerAllocation(JSContext* cx);
template <typename T>
static void checkIncrementalZoneState(ExclusiveContext* cx, T* t);
static TenuredCell* refillFreeListFromAnyThread(ExclusiveContext* cx, AllocKind thingKind,
size_t thingSize);
static TenuredCell* refillFreeListFromMainThread(JSContext* cx, AllocKind thingKind,
size_t thingSize);
static TenuredCell* refillFreeListOffMainThread(ExclusiveContext* cx, AllocKind thingKind);
/*
* Return the list of chunks that can be released outside the GC lock.
* Must be called either during the GC or with the GC lock taken.
*/
friend class BackgroundDecommitTask;
ChunkPool expireEmptyChunkPool(const AutoLockGC& lock);
void freeEmptyChunks(JSRuntime* rt, const AutoLockGC& lock);
void prepareToFreeChunk(ChunkInfo& info);
friend class BackgroundAllocTask;
friend class AutoMaybeStartBackgroundAllocation;
bool wantBackgroundAllocation(const AutoLockGC& lock) const;
void startBackgroundAllocTaskIfIdle();
void requestMajorGC(JS::gcreason::Reason reason);
SliceBudget defaultBudget(JS::gcreason::Reason reason, int64_t millis);
void budgetIncrementalGC(SliceBudget& budget, AutoLockForExclusiveAccess& lock);
void resetIncrementalGC(AbortReason reason, AutoLockForExclusiveAccess& lock);
// Assert if the system state is such that we should never
// receive a request to do GC work.
void checkCanCallAPI();
    // Check if the system state is such that GC has been suppressed
// or otherwise delayed.
MOZ_MUST_USE bool checkIfGCAllowedInCurrentState(JS::gcreason::Reason reason);
gcstats::ZoneGCStats scanZonesBeforeGC();
void collect(bool nonincrementalByAPI, SliceBudget budget, JS::gcreason::Reason reason) JS_HAZ_GC_CALL;
MOZ_MUST_USE bool gcCycle(bool nonincrementalByAPI, SliceBudget& budget,
JS::gcreason::Reason reason);
void incrementalCollectSlice(SliceBudget& budget, JS::gcreason::Reason reason,
AutoLockForExclusiveAccess& lock);
void purgeRuntime(AutoLockForExclusiveAccess& lock);
MOZ_MUST_USE bool beginMarkPhase(JS::gcreason::Reason reason, AutoLockForExclusiveAccess& lock);
bool shouldPreserveJITCode(JSCompartment* comp, int64_t currentTime,
JS::gcreason::Reason reason, bool canAllocateMoreCode);
void traceRuntimeForMajorGC(JSTracer* trc, AutoLockForExclusiveAccess& lock);
void traceRuntimeAtoms(JSTracer* trc, AutoLockForExclusiveAccess& lock);
void traceRuntimeCommon(JSTracer* trc, TraceOrMarkRuntime traceOrMark,
AutoLockForExclusiveAccess& lock);
void bufferGrayRoots();
void maybeDoCycleCollection();
void markCompartments();
IncrementalProgress drainMarkStack(SliceBudget& sliceBudget, gcstats::Phase phase);
template <class CompartmentIterT> void markWeakReferences(gcstats::Phase phase);
void markWeakReferencesInCurrentGroup(gcstats::Phase phase);
template <class ZoneIterT, class CompartmentIterT> void markGrayReferences(gcstats::Phase phase);
void markBufferedGrayRoots(JS::Zone* zone);
void markGrayReferencesInCurrentGroup(gcstats::Phase phase);
void markAllWeakReferences(gcstats::Phase phase);
void markAllGrayReferences(gcstats::Phase phase);
void beginSweepPhase(bool lastGC, AutoLockForExclusiveAccess& lock);
void findZoneGroups(AutoLockForExclusiveAccess& lock);
MOZ_MUST_USE bool findInterZoneEdges();
void getNextZoneGroup();
void endMarkingZoneGroup();
void beginSweepingZoneGroup(AutoLockForExclusiveAccess& lock);
bool shouldReleaseObservedTypes();
void endSweepingZoneGroup();
IncrementalProgress sweepPhase(SliceBudget& sliceBudget, AutoLockForExclusiveAccess& lock);
void endSweepPhase(bool lastGC, AutoLockForExclusiveAccess& lock);
void sweepZones(FreeOp* fop, bool lastGC);
void decommitAllWithoutUnlocking(const AutoLockGC& lock);
void startDecommit();
void queueZonesForBackgroundSweep(ZoneList& zones);
void sweepBackgroundThings(ZoneList& zones, LifoAlloc& freeBlocks);
void assertBackgroundSweepingFinished();
bool shouldCompact();
void beginCompactPhase();
IncrementalProgress compactPhase(JS::gcreason::Reason reason, SliceBudget& sliceBudget,
AutoLockForExclusiveAccess& lock);
void endCompactPhase(JS::gcreason::Reason reason);
void sweepTypesAfterCompacting(Zone* zone);
void sweepZoneAfterCompacting(Zone* zone);
MOZ_MUST_USE bool relocateArenas(Zone* zone, JS::gcreason::Reason reason,
Arena*& relocatedListOut, SliceBudget& sliceBudget);
void updateTypeDescrObjects(MovingTracer* trc, Zone* zone);
void updateCellPointers(MovingTracer* trc, Zone* zone, AllocKinds kinds, size_t bgTaskCount);
void updateAllCellPointers(MovingTracer* trc, Zone* zone);
void updatePointersToRelocatedCells(Zone* zone, AutoLockForExclusiveAccess& lock);
void protectAndHoldArenas(Arena* arenaList);
void unprotectHeldRelocatedArenas();
void releaseRelocatedArenas(Arena* arenaList);
void releaseRelocatedArenasWithoutUnlocking(Arena* arenaList, const AutoLockGC& lock);
void finishCollection(JS::gcreason::Reason reason);
#ifdef DEBUG
void checkForCompartmentMismatches();
#endif
void callFinalizeCallbacks(FreeOp* fop, JSFinalizeStatus status) const;
void callWeakPointerZoneGroupCallbacks() const;
void callWeakPointerCompartmentCallbacks(JSCompartment* comp) const;
public:
JSRuntime* rt;
/* Embedders can use this zone however they wish. */
JS::Zone* systemZone;
/* List of compartments and zones (protected by the GC lock). */
ZoneVector zones;
Nursery nursery;
StoreBuffer storeBuffer;
gcstats::Statistics stats;
GCMarker marker;
/* Track heap usage for this runtime. */
HeapUsage usage;
/* GC scheduling state and parameters. */
GCSchedulingTunables tunables;
GCSchedulingState schedulingState;
MemProfiler mMemProfiler;
private:
// When empty, chunks reside in the emptyChunks pool and are re-used as
// needed or eventually expired if not re-used. The emptyChunks pool gets
// refilled from the background allocation task heuristically so that empty
    // chunks should always be available for immediate allocation without syscalls.
ChunkPool emptyChunks_;
// Chunks which have had some, but not all, of their arenas allocated live
// in the available chunk lists. When all available arenas in a chunk have
// been allocated, the chunk is removed from the available list and moved
// to the fullChunks pool. During a GC, if all arenas are free, the chunk
// is moved back to the emptyChunks pool and scheduled for eventual
// release.
ChunkPool availableChunks_;
// When all arenas in a chunk are used, it is moved to the fullChunks pool
// so as to reduce the cost of operations on the available lists.
ChunkPool fullChunks_;
RootedValueMap rootsHash;
size_t maxMallocBytes;
// An incrementing id used to assign unique ids to cells that require one.
mozilla::Atomic<uint64_t, mozilla::ReleaseAcquire> nextCellUniqueId_;
/*
* Number of the committed arenas in all GC chunks including empty chunks.
*/
mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> numArenasFreeCommitted;
VerifyPreTracer* verifyPreData;
private:
bool chunkAllocationSinceLastGC;
int64_t lastGCTime;
JSGCMode mode;
mozilla::Atomic<size_t, mozilla::ReleaseAcquire> numActiveZoneIters;
/* During shutdown, the GC needs to clean up every possible object. */
bool cleanUpEverything;
// Gray marking must be done after all black marking is complete. However,
// we do not have write barriers on XPConnect roots. Therefore, XPConnect
// roots must be accumulated in the first slice of incremental GC. We
// accumulate these roots in each zone's gcGrayRoots vector and then mark
// them later, after black marking is complete for each compartment. This
// accumulation can fail, but in that case we switch to non-incremental GC.
enum class GrayBufferState {
Unused,
Okay,
Failed
};
GrayBufferState grayBufferState;
bool hasBufferedGrayRoots() const { return grayBufferState == GrayBufferState::Okay; }
// Clear each zone's gray buffers, but do not change the current state.
void resetBufferedGrayRoots() const;
// Reset the gray buffering state to Unused.
void clearBufferedGrayRoots() {
grayBufferState = GrayBufferState::Unused;
resetBufferedGrayRoots();
}
mozilla::Atomic<JS::gcreason::Reason, mozilla::Relaxed> majorGCTriggerReason;
JS::gcreason::Reason minorGCTriggerReason;
/* Perform full GC if rt->keepAtoms() becomes false. */
bool fullGCForAtomsRequested_;
/* Incremented at the start of every minor GC. */
uint64_t minorGCNumber;
/* Incremented at the start of every major GC. */
uint64_t majorGCNumber;
/* The major GC number at which to release observed type information. */
uint64_t jitReleaseNumber;
/* Incremented on every GC slice. */
uint64_t number;
/* The number at the time of the most recent GC's first slice. */
uint64_t startNumber;
/* Whether the currently running GC can finish in multiple slices. */
bool isIncremental;
/* Whether all zones are being collected in first GC slice. */
bool isFull;
/* Whether the heap will be compacted at the end of GC. */
bool isCompacting;
/* The invocation kind of the current GC, taken from the first slice. */
JSGCInvocationKind invocationKind;
/* The initial GC reason, taken from the first slice. */
JS::gcreason::Reason initialReason;
#ifdef DEBUG
/*
* If this is 0, all cross-compartment proxies must be registered in the
* wrapper map. This checking must be disabled temporarily while creating
* new wrappers. When non-zero, this records the recursion depth of wrapper
* creation.
*/
uintptr_t disableStrictProxyCheckingCount;
#endif
/*
* The current incremental GC phase. This is also used internally in
* non-incremental GC.
*/
State incrementalState;
/* Indicates that the last incremental slice exhausted the mark stack. */
bool lastMarkSlice;
/* Whether any sweeping will take place in the separate GC helper thread. */
bool sweepOnBackgroundThread;
/* Whether observed type information is being released in the current GC. */
bool releaseObservedTypes;
/* Whether any black->gray edges were found during marking. */
BlackGrayEdgeVector foundBlackGrayEdges;
    /* Singly linked list of zones to be swept in the background. */
ZoneList backgroundSweepZones;
/*
* Free LIFO blocks are transferred to this allocator before being freed on
* the background GC thread after sweeping.
*/
LifoAlloc blocksToFreeAfterSweeping;
/*
* Free LIFO blocks are transferred to this allocator before being freed
* after minor GC.
*/
LifoAlloc blocksToFreeAfterMinorGC;
/* Index of current zone group (for stats). */
unsigned zoneGroupIndex;
/*
* Incremental sweep state.
*/
JS::Zone* zoneGroups;
JS::Zone* currentZoneGroup;
bool sweepingTypes;
unsigned finalizePhase;
JS::Zone* sweepZone;
AllocKind sweepKind;
bool abortSweepAfterCurrentGroup;
/*
* Concurrent sweep infrastructure.
*/
void startTask(GCParallelTask& task, gcstats::Phase phase,
AutoLockHelperThreadState& locked);
void joinTask(GCParallelTask& task, gcstats::Phase phase,
AutoLockHelperThreadState& locked);
/*
* List head of arenas allocated during the sweep phase.
*/
Arena* arenasAllocatedDuringSweep;
/*
* Incremental compacting state.
*/
bool startedCompacting;
ZoneList zonesToMaybeCompact;
Arena* relocatedArenasToRelease;
/*
* Indicates that a GC slice has taken place in the middle of an animation
* frame, rather than at the beginning. In this case, the next slice will be
* delayed so that we don't get back-to-back slices.
*/
bool interFrameGC;
/* Default budget for incremental GC slice. See js/SliceBudget.h. */
int64_t defaultTimeBudget_;
/*
* We disable incremental GC if we encounter a Class with a trace hook
* that does not implement write barriers.
*/
bool incrementalAllowed;
/*
* GGC can be enabled from the command line while testing.
*/
unsigned generationalDisabled;
/*
     * Whether compacting GC is enabled globally.
*/
bool compactingEnabled;
/*
* Some code cannot tolerate compacting GC so it can be disabled temporarily
* with AutoDisableCompactingGC which uses this counter.
*/
unsigned compactingDisabledCount;
/*
* This is true if we are in the middle of a brain transplant (e.g.,
* JS_TransplantObject) or some other operation that can manipulate
* dead zones.
*/
bool manipulatingDeadZones;
/*
* This field is incremented each time we mark an object inside a
* zone with no incoming cross-compartment pointers. Typically if
* this happens it signals that an incremental GC is marking too much
* stuff. At various times we check this counter and, if it has changed, we
* run an immediate, non-incremental GC to clean up the dead
* zones. This should happen very rarely.
*/
unsigned objectsMarkedInDeadZones;
bool poked;
bool fullCompartmentChecks;
Callback<JSGCCallback> gcCallback;
Callback<JS::DoCycleCollectionCallback> gcDoCycleCollectionCallback;
Callback<JSObjectsTenuredCallback> tenuredCallback;
CallbackVector<JSFinalizeCallback> finalizeCallbacks;
CallbackVector<JSWeakPointerZoneGroupCallback> updateWeakPointerZoneGroupCallbacks;
CallbackVector<JSWeakPointerCompartmentCallback> updateWeakPointerCompartmentCallbacks;
/*
* Malloc counter to measure memory pressure for GC scheduling. It runs
* from maxMallocBytes down to zero.
*/
mozilla::Atomic<ptrdiff_t, mozilla::ReleaseAcquire> mallocBytesUntilGC;
/*
* Whether a GC has been triggered as a result of mallocBytesUntilGC
* falling below zero.
*/
mozilla::Atomic<bool, mozilla::ReleaseAcquire> mallocGCTriggered;
/*
* The trace operations to trace embedding-specific GC roots. One is for
* tracing through black roots and the other is for tracing through gray
* roots. The black/gray distinction is only relevant to the cycle
* collector.
*/
CallbackVector<JSTraceDataOp> blackRootTracers;
Callback<JSTraceDataOp> grayRootTracer;
/* Always preserve JIT code during GCs, for testing. */
bool alwaysPreserveCode;
/*
* Some regions of code are hard for the static rooting hazard analysis to
* understand. In those cases, we trade the static analysis for a dynamic
* analysis. When this is non-zero, we should assert if we trigger, or
* might trigger, a GC.
*/
int inUnsafeRegion;
#ifdef DEBUG
size_t noGCOrAllocationCheck;
size_t noNurseryAllocationCheck;
bool arenasEmptyAtShutdown;
#endif
/* Synchronize GC heap access between main thread and GCHelperState. */
friend class js::AutoLockGC;
js::Mutex lock;
BackgroundAllocTask allocTask;
BackgroundDecommitTask decommitTask;
GCHelperState helperState;
/*
* During incremental sweeping, this field temporarily holds the arenas of
* the current AllocKind being swept in order of increasing free space.
*/
SortedArenaList incrementalSweepList;
friend class js::GCHelperState;
friend class MarkingValidator;
friend class AutoTraceSession;
friend class AutoEnterIteration;
};
/* Prevent compartments and zones from being collected during iteration. */
class MOZ_RAII AutoEnterIteration {
GCRuntime* gc;
public:
explicit AutoEnterIteration(GCRuntime* gc_) : gc(gc_) {
++gc->numActiveZoneIters;
}
~AutoEnterIteration() {
MOZ_ASSERT(gc->numActiveZoneIters);
--gc->numActiveZoneIters;
}
};
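// Illustrative usage: hold the guard across a zone walk so that zones cannot
// be collected mid-iteration. ZonesIter is the usual zone iterator; the exact
// loop shape is schematic.
//
//     {
//         AutoEnterIteration iterGuard(&rt->gc);
//         for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
//             // ... inspect zone ...
//         }
//     } // zones may be collected again once the guard is destroyed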
// After pulling a Chunk out of the empty chunks pool, we want to run the
// background allocator to refill it. The code that takes Chunks does so under
// the GC lock. We need to start the background allocation under the helper
// threads lock. To avoid lock inversion we have to delay the start until after
// we are outside the GC lock. This class handles that delay automatically.
class MOZ_RAII AutoMaybeStartBackgroundAllocation
{
GCRuntime* gc;
public:
AutoMaybeStartBackgroundAllocation()
: gc(nullptr)
{}
void tryToStartBackgroundAllocation(GCRuntime& gc) {
this->gc = &gc;
}
~AutoMaybeStartBackgroundAllocation() {
if (gc)
gc->startBackgroundAllocTaskIfIdle();
}
};
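// Illustrative usage: request the refill while holding the GC lock. The guard
// outlives the lock scope, so its destructor, which may start the background
// task, runs only after the lock has been released.
//
//     AutoMaybeStartBackgroundAllocation maybeStartBGAlloc;
//     {
//         AutoLockGC lock(rt);
//         chunk = gc.getOrAllocChunk(lock, maybeStartBGAlloc);
//     }
//     // GC lock is dropped; ~AutoMaybeStartBackgroundAllocation may now
//     // safely take the helper thread lock and start the allocation task.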
} /* namespace gc */
} /* namespace js */
#endif /* gc_GCRuntime_h */