 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelInitLogger.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMemoryPool.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psParallelCompactNew.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psVMOperations.hpp"
#include "gc/shared/fullGCForwarding.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/reservedSpace.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/cpuTimeCounters.hpp"
#include "runtime/handles.inline.hpp"

  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
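  // The adaptive size policy resizes the generations toward the pause-time
  // (max_gc_pause_sec) and throughput (GCTimeRatio) goals; the initial
  // promotion estimate is seeded conservatively with the smaller of the eden
  // and old-gen capacities.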
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             GenAlignment,
                             max_gc_pause_sec,
                             GCTimeRatio);

  assert((old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // initialize the policy counters - 2 collectors, 2 generations
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);

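  // With compact object headers the new full-GC implementation is used; in
  // either case the auxiliary compaction data must be set up before any GC.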
  if (UseCompactObjectHeaders) {
    if (!PSParallelCompactNew::initialize_aux_data()) {
      return JNI_ENOMEM;
    }
  } else {
    if (!PSParallelCompact::initialize_aux_data()) {
      return JNI_ENOMEM;
    }
  }

  // Create CPU time counter
  CPUTimeCounters::create_counter(CPUTimeGroups::CPUTimeType::gc_parallel_workers);

  ParallelInitLogger::print();

  FullGCForwarding::initialize(_reserved);

  return JNI_OK;
}

void ParallelScavengeHeap::initialize_serviceability() {
  _eden_pool = new EdenMutableSpacePool(_young_gen,
                                        _young_gen->eden_space(),
                                        "PS Eden Space",
                                        false /* support_usage_threshold */);

  _survivor_pool = new SurvivorMutableSpacePool(_young_gen,
  }
}
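// String deduplication runs on a concurrent thread that participates in the
// SuspendibleThreadSet protocol; it was suspended when the safepoint began
// and is resumed here once the safepoint ends.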
void ParallelScavengeHeap::safepoint_synchronize_end() {
  if (UseStringDeduplication) {
    SuspendibleThreadSet::desynchronize();
  }
}

class PSIsScavengable : public BoolObjectClosure {
  bool do_object_b(oop obj) {
    return ParallelScavengeHeap::heap()->is_in_young(obj);
  }
};

static PSIsScavengable _is_scavengable;

void ParallelScavengeHeap::post_initialize() {
  CollectedHeap::post_initialize();
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  if (UseCompactObjectHeaders) {
    PSParallelCompactNew::post_initialize();
  } else {
    PSParallelCompact::post_initialize();
  }
  PSPromotionManager::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
  GCLocker::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  update_parallel_worker_threads_cpu_time();
}

size_t ParallelScavengeHeap::capacity() const {
  size_t value = young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
  return value;
}

size_t ParallelScavengeHeap::used() const {
  size_t value = young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
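// Allocates directly in the old gen and, on success, reports the tenured
// allocation so the size policy can account for it when resizing.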
HeapWord* ParallelScavengeHeap::allocate_old_gen_and_record(size_t size) {
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* res = old_gen()->allocate(size);
  if (res != nullptr) {
    _size_policy->tenured_allocation(size * HeapWordSize);
  }
  return res;
}

HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size)) {
    // Size is too big for eden.
    return allocate_old_gen_and_record(size);
  }

  return nullptr;
}

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  if (UseCompactObjectHeaders) {
    PSParallelCompactNew::invoke(clear_all_soft_refs, false /* serial */);
  } else {
    PSParallelCompact::invoke(clear_all_soft_refs);
  }
}

HeapWord* ParallelScavengeHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
  HeapWord* result = nullptr;

  result = young_gen()->allocate(size);
  if (result == nullptr && !is_tlab) {
    result = old_gen()->expand_and_allocate(size);
  }
  return result; // Could be null if we are out of space.
}
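// Slow-path allocation, called at a safepoint after ordinary allocation has
// failed: run a GC (young if it can serve the request, otherwise full),
// retry, and finally escalate to a maximally-compacting full GC that clears
// soft references before giving up.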
HeapWord* ParallelScavengeHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  assert(size != 0, "precondition");

  HeapWord* result = nullptr;

  // If the young gen can satisfy this allocation, attempt a young GC first.
  bool should_run_young_gc = is_tlab || should_alloc_in_eden(size);
  collect_at_safepoint(!should_run_young_gc);

  result = expand_heap_and_allocate(size, is_tlab);
  if (result != nullptr) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    // Make sure the heap is fully compacted
    uintx old_interval = HeapMaximumCompactionInterval;
    HeapMaximumCompactionInterval = 0;

    const bool clear_all_soft_refs = true;
    if (UseCompactObjectHeaders) {
      PSParallelCompactNew::invoke(clear_all_soft_refs, false /* serial */);
    } else {
      PSParallelCompact::invoke(clear_all_soft_refs);
    }

    // Restore
    HeapMaximumCompactionInterval = old_interval;
  }

  result = expand_heap_and_allocate(size, is_tlab);
  if (result != nullptr) {
    return result;
  }

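  // Last-ditch attempt with compact object headers: a serial compaction
  // pass, which may reclaim space that the parallel compaction could not.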
  if (UseCompactObjectHeaders) {
    PSParallelCompactNew::invoke(true /* clear_soft_refs */, true /* serial */);
  }

  result = expand_heap_and_allocate(size, is_tlab);
  if (result != nullptr) {
    return result;
  }

  // What else? We might try synchronous finalization later. If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return nullptr;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
  return young_gen()->eden_space()->tlab_used(thr);
}
    }
  }
}

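// Certain GC causes demand that all soft references be cleared eagerly:
// metadata GCs issued to reclaim class metadata, and whitebox-triggered
// full GCs.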
bool ParallelScavengeHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
         _gc_cause == GCCause::_wb_full_gc;
}

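// Runs a collection at a safepoint: a young GC when 'full' is false,
// upgrading to a full GC if the young GC cannot complete successfully.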
void ParallelScavengeHeap::collect_at_safepoint(bool full) {
  assert(!GCLocker::is_active(), "precondition");
  bool clear_soft_refs = must_clear_all_soft_refs();

  if (!full) {
    bool success = PSScavenge::invoke(clear_soft_refs);
    if (success) {
      return;
    }
    // Upgrade to a full GC if the young GC fails.
  }
  if (UseCompactObjectHeaders) {
    PSParallelCompactNew::invoke(clear_soft_refs, false /* serial */);
  } else {
    PSParallelCompact::invoke(clear_soft_refs);
  }
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}

// The HeapBlockClaimer is used during parallel iteration over the heap,
// allowing workers to claim heap areas ("blocks"), gaining exclusive rights to these.
// The eden and survivor spaces are treated as single blocks as it is hard to divide
// these spaces.
// The old space is divided into fixed-size blocks.
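// Workers claim the next unvisited block by advancing _claimed_index
// (atomically, so each block is visited by exactly one worker).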
class HeapBlockClaimer : public StackObj {
  size_t _claimed_index;

public:
  static const size_t InvalidIndex = SIZE_MAX;
  static const size_t EdenIndex = 0;
  static const size_t SurvivorIndex = 1;
  static const size_t NumNonOldGenClaims = 2;
}

bool ParallelScavengeHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<ParallelScavengeHeap>::print_location(st, addr);
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  if (young_gen() != nullptr) {
    young_gen()->print_on(st);
  }
  if (old_gen() != nullptr) {
    old_gen()->print_on(st);
  }
  MetaspaceUtils::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  st->cr();
  if (UseCompactObjectHeaders) {
    PSParallelCompactNew::print_on_error(st);
  } else {
    PSParallelCompact::print_on_error(st);
  }
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  ParallelScavengeHeap::heap()->workers().threads_do(tc);
}

void ParallelScavengeHeap::print_tracing_info() const {
  AdaptiveSizePolicyOutput::print();
  log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
  if (UseCompactObjectHeaders) {
    log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompactNew::accumulated_time()->seconds());
  } else {
    log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds());
  }
}

PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
  const PSYoungGen* const young = young_gen();
  const MutableSpace* const eden = young->eden_space();
  const MutableSpace* const from = young->from_space();
  const PSOldGen* const old = old_gen();

  return PreGenGCValues(young->used_in_bytes(),
                        young->capacity_in_bytes(),
                        eden->used_in_bytes(),
                        eden->capacity_in_bytes(),
                        from->used_in_bytes(),
                        from->capacity_in_bytes(),
                        old->used_in_bytes(),
                        old->capacity_in_bytes());
}

void ParallelScavengeHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const PSYoungGen* const young = young_gen();