/*
 * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "gc/parallel/objectStartArray.inline.hpp"
#include "gc/parallel/parallelArguments.hpp"
#include "gc/parallel/parallelInitLogger.hpp"
#include "gc/parallel/parallelScavengeHeap.inline.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMemoryPool.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
#include "gc/parallel/psVMOperations.hpp"
#include "gc/shared/fullGCForwarding.inline.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/metaspaceCounters.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/reservedSpace.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/cpuTimeCounters.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryManager.hpp"
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"

PSYoungGen*  ParallelScavengeHeap::_young_gen = nullptr;
PSOldGen*    ParallelScavengeHeap::_old_gen = nullptr;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = nullptr;
PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = nullptr;

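// Reserve the maximum heap, carve it into a lower old-gen part and an upper
// young-gen part, then set up the card table, barrier set, worker threads,
// generations and the adaptive size policy.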
jint ParallelScavengeHeap::initialize() {
  const size_t reserved_heap_size = ParallelArguments::heap_reserved_size_bytes();

  ReservedHeapSpace heap_rs = Universe::reserve_heap(reserved_heap_size, HeapAlignment);

  trace_actual_reserved_page_size(reserved_heap_size, heap_rs);

  initialize_reserved_region(heap_rs);
  // Lay out the reserved space for the generations.
  ReservedSpace old_rs   = heap_rs.first_part(MaxOldSize, GenAlignment);
  ReservedSpace young_rs = heap_rs.last_part(MaxOldSize, GenAlignment);
  assert(young_rs.size() == MaxNewSize, "Didn't reserve all of the heap");

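  // The card table spans the whole reserved heap; the generation base
  // addresses let it set up its covered regions.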
  PSCardTable* card_table = new PSCardTable(_reserved);
  card_table->initialize(old_rs.base(), young_rs.base());

  CardTableBarrierSet* const barrier_set = new CardTableBarrierSet(card_table);
  barrier_set->initialize();
  BarrierSet::set_barrier_set(barrier_set);

  // Set up WorkerThreads
  _workers.initialize_workers();

  // Create and initialize the generations.
  _young_gen = new PSYoungGen(
      young_rs,
      NewSize,
      MinNewSize,
      MaxNewSize);
  _old_gen = new PSOldGen(
      old_rs,
      OldSize,
      MinOldSize,
      MaxOldSize,
      "old", 1);

  assert(young_gen()->max_gen_size() == young_rs.size(), "Consistency check");
  assert(old_gen()->max_gen_size() == old_rs.size(), "Consistency check");

  double max_gc_pause_sec = ((double) MaxGCPauseMillis) / 1000.0;

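  // Seed the adaptive size policy with the smaller of eden and old-gen
  // capacity as its initial promotion-size estimate.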
  const size_t eden_capacity = _young_gen->eden_space()->capacity_in_bytes();
  const size_t old_capacity = _old_gen->capacity_in_bytes();
  const size_t initial_promo_size = MIN2(eden_capacity, old_capacity);
  _size_policy =
    new PSAdaptiveSizePolicy(eden_capacity,
                             initial_promo_size,
                             young_gen()->to_space()->capacity_in_bytes(),
                             GenAlignment,
                             max_gc_pause_sec,
                             GCTimeRatio);

  assert((old_gen()->virtual_space()->high_boundary() ==
          young_gen()->virtual_space()->low_boundary()),
         "Boundaries must meet");
  // Initialize the policy counters - 2 collectors, 2 generations.
  _gc_policy_counters =
    new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);

  if (!PSParallelCompact::initialize_aux_data()) {
    return JNI_ENOMEM;
  }

  // Create CPU time counter
  CPUTimeCounters::create_counter(CPUTimeGroups::CPUTimeType::gc_parallel_workers);

  ParallelInitLogger::print();

  FullGCForwarding::initialize(_reserved);

  return JNI_OK;
}

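// Set up the memory pools and GC memory managers exposed through the memory
// management API. The young manager ("PS Scavenge") covers only the eden and
// survivor pools; the old manager ("PS MarkSweep") covers all three pools,
// since a full GC affects the entire heap.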
void ParallelScavengeHeap::initialize_serviceability() {
  _eden_pool = new EdenMutableSpacePool(_young_gen,
                                        _young_gen->eden_space(),
                                        "PS Eden Space",
                                        false /* support_usage_threshold */);

  _survivor_pool = new SurvivorMutableSpacePool(_young_gen,
                                                "PS Survivor Space",
                                                false /* support_usage_threshold */);

  _old_pool = new PSGenerationPool(_old_gen,
                                   "PS Old Gen",
                                   true /* support_usage_threshold */);

  _young_manager = new GCMemoryManager("PS Scavenge");
  _old_manager = new GCMemoryManager("PS MarkSweep");

  _old_manager->add_pool(_eden_pool);
  _old_manager->add_pool(_survivor_pool);
  _old_manager->add_pool(_old_pool);

  _young_manager->add_pool(_eden_pool);
  _young_manager->add_pool(_survivor_pool);
}

void ParallelScavengeHeap::safepoint_synchronize_begin() {
  if (UseStringDeduplication) {
    SuspendibleThreadSet::synchronize();
  }
}

void ParallelScavengeHeap::safepoint_synchronize_end() {
  if (UseStringDeduplication) {
    SuspendibleThreadSet::desynchronize();
  }
}

class PSIsScavengable : public BoolObjectClosure {
  bool do_object_b(oop obj) {
    return ParallelScavengeHeap::heap()->is_in_young(obj);
  }
};

static PSIsScavengable _is_scavengable;

void ParallelScavengeHeap::post_initialize() {
  CollectedHeap::post_initialize();
  // Need to init the tenuring threshold
  PSScavenge::initialize();
  PSParallelCompact::post_initialize();
  PSPromotionManager::initialize();

  ScavengableNMethods::initialize(&_is_scavengable);
  GCLocker::initialize();
}

void ParallelScavengeHeap::update_counters() {
  young_gen()->update_counters();
  old_gen()->update_counters();
  MetaspaceCounters::update_performance_counters();
  update_parallel_worker_threads_cpu_time();
}

size_t ParallelScavengeHeap::capacity() const {
  return young_gen()->capacity_in_bytes() + old_gen()->capacity_in_bytes();
}

size_t ParallelScavengeHeap::used() const {
  return young_gen()->used_in_bytes() + old_gen()->used_in_bytes();
}

bool ParallelScavengeHeap::is_maximal_no_gc() const {
  // We don't expand young-gen except at a GC.
  return old_gen()->is_maximal_no_gc();
}

size_t ParallelScavengeHeap::max_capacity() const {
  size_t estimated = reserved_region().byte_size();
  if (UseAdaptiveSizePolicy) {
    estimated -= _size_policy->max_survivor_size(young_gen()->max_gen_size());
  } else {
    estimated -= young_gen()->to_space()->capacity_in_bytes();
  }
  return MAX2(estimated, capacity());
}

bool ParallelScavengeHeap::is_in(const void* p) const {
  return young_gen()->is_in(p) || old_gen()->is_in(p);
}

bool ParallelScavengeHeap::is_in_reserved(const void* p) const {
  return young_gen()->is_in_reserved(p) || old_gen()->is_in_reserved(p);
}

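// Stack chunks outside the young generation (i.e., in the old generation)
// require GC barriers; young-gen chunks do not.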
bool ParallelScavengeHeap::requires_barriers(stackChunkOop p) const {
  return !is_in_young(p);
}

// There are two levels of allocation policy here.
//
// When an allocation request fails, the requesting thread must invoke a VM
// operation, transfer control to the VM thread, and await the results of a
// garbage collection. That is quite expensive, and we should avoid doing it
// multiple times if possible.
//
// To accomplish this, we have a basic allocation policy, and also a
// failed allocation policy.
//
// The basic allocation policy controls how memory is allocated without
// attempting garbage collection. It is okay to grab locks and
// expand the heap, if that can be done without coming to a safepoint.
// It is likely that the basic allocation policy will not be very
// aggressive.
//
// The failed allocation policy is invoked from the VM thread after
// the basic allocation policy is unable to satisfy a mem_allocate
// request. This policy needs to cover the entire range of collection,
// heap expansion, and out-of-memory conditions. It should make every
// attempt to allocate the requested memory.

// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the Java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(size_t size,
                                             bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  bool is_tlab = false;
  return mem_allocate_work(size, is_tlab, gc_overhead_limit_was_exceeded);
}

HeapWord* ParallelScavengeHeap::mem_allocate_work(size_t size,
                                                  bool is_tlab,
                                                  bool* gc_overhead_limit_was_exceeded) {
  // In general gc_overhead_limit_was_exceeded should be false, so set it to
  // false here; it is set to true only if the gc time limit is being
  // exceeded, as checked below.
  *gc_overhead_limit_was_exceeded = false;

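  // Fast path: try the young generation first, without taking the Heap_lock.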
  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;

  while (result == nullptr) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = total_collections();

      result = young_gen()->allocate(size);
      if (result != nullptr) {
        return result;
      }

      // Non-TLAB allocations that are too large for eden may be satisfied
      // directly from the old gen.
      if (!is_tlab) {
        result = mem_allocate_old_gen(size);
        if (result != nullptr) {
          return result;
        }
      }
    }

    assert(result == nullptr, "inv");
    {
      VM_ParallelCollectForAllocation op(size, is_tlab, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that
      // cannot be satisfied.
      if (op.prologue_succeeded()) {
        assert(is_in_or_null(op.result()), "result not in heap");

        // Exit the loop if the gc time limit has been exceeded. The
        // allocation must have failed above (the "result" guarding this path
        // is null) and the most recent collection has exceeded the gc
        // overhead limit (although enough may have been collected to satisfy
        // the allocation). Exit the loop so that an out-of-memory error will
        // be thrown (return null, ignoring the contents of op.result()), but
        // clear gc_overhead_limit_exceeded so that the next collection starts
        // with a clean slate (i.e., forgets about previous overhead
        // excesses). Fill op.result() with a filler object so that the heap
        // remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = soft_ref_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          log_trace(gc)("ParallelScavengeHeap::mem_allocate: return null because gc_overhead_limit_exceeded is set");
          if (op.result() != nullptr) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return nullptr;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == nullptr) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %u times", loop_count);
      log_warning(gc)("\tsize=%zu", size);
    }
  }

  return result;
}

HeapWord* ParallelScavengeHeap::allocate_old_gen_and_record(size_t size) {
  assert_locked_or_safepoint(Heap_lock);
  HeapWord* res = old_gen()->allocate(size);
  if (res != nullptr) {
    _size_policy->tenured_allocation(size * HeapWordSize);
  }
  return res;
}

HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
  if (!should_alloc_in_eden(size)) {
    // Size is too big for eden.
    return allocate_old_gen_and_record(size);
  }

  return nullptr;
}

void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
  PSParallelCompact::invoke(clear_all_soft_refs);
}

HeapWord* ParallelScavengeHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
  HeapWord* result = young_gen()->allocate(size);
  if (result == nullptr && !is_tlab) {
    result = old_gen()->expand_and_allocate(size);
  }
  return result;   // Could be null if we are out of space.
}

HeapWord* ParallelScavengeHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  assert(size != 0, "precondition");

  // If young-gen can handle this allocation, try a young GC first.
  bool should_run_young_gc = is_tlab || should_alloc_in_eden(size);
  collect_at_safepoint(!should_run_young_gc);

  HeapWord* result = expand_heap_and_allocate(size, is_tlab);
  if (result != nullptr) {
    return result;
  }

  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    // Make sure the heap is fully compacted
    uintx old_interval = HeapMaximumCompactionInterval;
    HeapMaximumCompactionInterval = 0;

    const bool clear_all_soft_refs = true;
    PSParallelCompact::invoke(clear_all_soft_refs);

    // Restore
    HeapMaximumCompactionInterval = old_interval;
  }

  result = expand_heap_and_allocate(size, is_tlab);
  if (result != nullptr) {
    return result;
  }

  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return nullptr;
}

void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  young_gen()->eden_space()->ensure_parsability();
}

size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
  return young_gen()->eden_space()->tlab_capacity(thr);
}

size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
  return young_gen()->eden_space()->tlab_used(thr);
}

size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
}

HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
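  // The gc-overhead-limit flag is not reported for TLAB allocations.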
  bool dummy;
  HeapWord* result = mem_allocate_work(requested_size /* size */,
                                       true /* is_tlab */,
                                       &dummy);
  if (result != nullptr) {
    *actual_size = requested_size;
  }

  return result;
}

void ParallelScavengeHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();
}

void ParallelScavengeHeap::prune_scavengable_nmethods() {
  ScavengableNMethods::prune_nmethods_not_into_young();
}

void ParallelScavengeHeap::prune_unlinked_nmethods() {
  ScavengableNMethods::prune_unlinked_nmethods();
}

void ParallelScavengeHeap::collect(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(),
         "this thread should not own the Heap_lock");

  uint gc_count      = 0;
  uint full_gc_count = 0;
  {
    MutexLocker ml(Heap_lock);
    // These values are guarded by the Heap_lock
    gc_count      = total_collections();
    full_gc_count = total_full_collections();
  }

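  // For explicit full-GC causes, retry until a full collection has actually
  // run (i.e., the full-gc count observed above has changed); other causes
  // return after a single attempt.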
  while (true) {
    VM_ParallelGCCollect op(gc_count, full_gc_count, cause);
    VMThread::execute(&op);

    if (!GCCause::is_explicit_full_gc(cause)) {
      return;
    }

    {
      MutexLocker ml(Heap_lock);
      if (full_gc_count != total_full_collections()) {
        return;
      }
    }
  }
}

bool ParallelScavengeHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
         _gc_cause == GCCause::_wb_full_gc;
}

void ParallelScavengeHeap::collect_at_safepoint(bool full) {
  assert(!GCLocker::is_active(), "precondition");
  bool clear_soft_refs = must_clear_all_soft_refs();

  if (!full) {
    bool success = PSScavenge::invoke(clear_soft_refs);
    if (success) {
      return;
    }
    // Upgrade to a full GC if the young GC fails.
  }
  PSParallelCompact::invoke(clear_soft_refs);
}

void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
  young_gen()->object_iterate(cl);
  old_gen()->object_iterate(cl);
}

// The HeapBlockClaimer is used during parallel iteration over the heap,
// allowing workers to claim heap areas ("blocks"), gaining exclusive access
// to them. The eden and survivor spaces are treated as single blocks, as it
// is hard to divide these spaces. The old space is divided into fixed-size
// blocks.
class HeapBlockClaimer : public StackObj {
  size_t _claimed_index;

public:
  static const size_t InvalidIndex = SIZE_MAX;
  static const size_t EdenIndex = 0;
  static const size_t SurvivorIndex = 1;
  static const size_t NumNonOldGenClaims = 2;

  HeapBlockClaimer() : _claimed_index(EdenIndex) { }
  // Claim the next block and return its index.
  size_t claim_and_get_block() {
    size_t block_index = Atomic::fetch_then_add(&_claimed_index, 1u);
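    // Indices 0 and 1 denote eden and the survivor spaces; higher indices
    // map to old-gen blocks, offset by NumNonOldGenClaims.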

    PSOldGen* old_gen = ParallelScavengeHeap::heap()->old_gen();
    size_t num_claims = old_gen->num_iterable_blocks() + NumNonOldGenClaims;

    return block_index < num_claims ? block_index : InvalidIndex;
  }
};

void ParallelScavengeHeap::object_iterate_parallel(ObjectClosure* cl,
                                                   HeapBlockClaimer* claimer) {
  size_t block_index = claimer->claim_and_get_block();
  // Iterate until all blocks are claimed
  if (block_index == HeapBlockClaimer::EdenIndex) {
    young_gen()->eden_space()->object_iterate(cl);
    block_index = claimer->claim_and_get_block();
  }
  if (block_index == HeapBlockClaimer::SurvivorIndex) {
    young_gen()->from_space()->object_iterate(cl);
    young_gen()->to_space()->object_iterate(cl);
    block_index = claimer->claim_and_get_block();
  }
  while (block_index != HeapBlockClaimer::InvalidIndex) {
    old_gen()->object_iterate_block(cl, block_index - HeapBlockClaimer::NumNonOldGenClaims);
    block_index = claimer->claim_and_get_block();
  }
}

class PSScavengeParallelObjectIterator : public ParallelObjectIteratorImpl {
private:
  ParallelScavengeHeap* _heap;
  HeapBlockClaimer      _claimer;

public:
  PSScavengeParallelObjectIterator() :
      _heap(ParallelScavengeHeap::heap()),
      _claimer() {}

  virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
    _heap->object_iterate_parallel(cl, &_claimer);
  }
};

ParallelObjectIteratorImpl* ParallelScavengeHeap::parallel_object_iterator(uint thread_num) {
  return new PSScavengeParallelObjectIterator();
}

HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
  if (young_gen()->is_in_reserved(addr)) {
    assert(young_gen()->is_in(addr),
           "addr should be in allocated part of young gen");
    // Called from os::print_location by find or VMError.
    if (DebuggingContext::is_enabled() || VMError::is_error_reported()) {
      return nullptr;
    }
    Unimplemented();
  } else if (old_gen()->is_in_reserved(addr)) {
    assert(old_gen()->is_in(addr),
           "addr should be in allocated part of old gen");
    return old_gen()->start_array()->object_start((HeapWord*)addr);
  }
  return nullptr;
}

bool ParallelScavengeHeap::block_is_obj(const HeapWord* addr) const {
  return block_start(addr) == addr;
}

void ParallelScavengeHeap::prepare_for_verify() {
  ensure_parsability(false);  // no need to retire TLABs for verification
}

PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
  PSOldGen* old = old_gen();
  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
  HeapWord* old_reserved_start = old->reserved().start();
  HeapWord* old_reserved_end = old->reserved().end();
  VirtualSpaceSummary old_summary(old_reserved_start, old_committed_end, old_reserved_end);
  SpaceSummary old_space(old_reserved_start, old_committed_end, old->used_in_bytes());

  PSYoungGen* young = young_gen();
  VirtualSpaceSummary young_summary(young->reserved().start(),
    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());

  MutableSpace* eden = young_gen()->eden_space();
  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());

  MutableSpace* from = young_gen()->from_space();
  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());

  MutableSpace* to = young_gen()->to_space();
  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());

  VirtualSpaceSummary heap_summary = create_heap_space_summary();
  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
}

bool ParallelScavengeHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<ParallelScavengeHeap>::print_location(st, addr);
}

void ParallelScavengeHeap::print_on(outputStream* st) const {
  if (young_gen() != nullptr) {
    young_gen()->print_on(st);
  }
  if (old_gen() != nullptr) {
    old_gen()->print_on(st);
  }
  MetaspaceUtils::print_on(st);
}

void ParallelScavengeHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

  st->cr();
  PSParallelCompact::print_on_error(st);
}

void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  ParallelScavengeHeap::heap()->workers().threads_do(tc);
}

void ParallelScavengeHeap::print_tracing_info() const {
  AdaptiveSizePolicyOutput::print();
  log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
  log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds());
}

PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
  const PSYoungGen* const young = young_gen();
  const MutableSpace* const eden = young->eden_space();
  const MutableSpace* const from = young->from_space();
  const PSOldGen* const old = old_gen();

  return PreGenGCValues(young->used_in_bytes(),
                        young->capacity_in_bytes(),
                        eden->used_in_bytes(),
                        eden->capacity_in_bytes(),
                        from->used_in_bytes(),
                        from->capacity_in_bytes(),
                        old->used_in_bytes(),
                        old->capacity_in_bytes());
}

void ParallelScavengeHeap::print_heap_change(const PreGenGCValues& pre_gc_values) const {
  const PSYoungGen* const young = young_gen();
  const MutableSpace* const eden = young->eden_space();
  const MutableSpace* const from = young->from_space();
  const PSOldGen* const old = old_gen();

  log_info(gc, heap)(HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT" "
                     HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(young->name(),
                                             pre_gc_values.young_gen_used(),
                                             pre_gc_values.young_gen_capacity(),
                                             young->used_in_bytes(),
                                             young->capacity_in_bytes()),
                     HEAP_CHANGE_FORMAT_ARGS("Eden",
                                             pre_gc_values.eden_used(),
                                             pre_gc_values.eden_capacity(),
                                             eden->used_in_bytes(),
                                             eden->capacity_in_bytes()),
                     HEAP_CHANGE_FORMAT_ARGS("From",
                                             pre_gc_values.from_used(),
                                             pre_gc_values.from_capacity(),
                                             from->used_in_bytes(),
                                             from->capacity_in_bytes()));
  log_info(gc, heap)(HEAP_CHANGE_FORMAT,
                     HEAP_CHANGE_FORMAT_ARGS(old->name(),
                                             pre_gc_values.old_gen_used(),
                                             pre_gc_values.old_gen_capacity(),
                                             old->used_in_bytes(),
                                             old->capacity_in_bytes()));
  MetaspaceUtils::print_metaspace_change(pre_gc_values.metaspace_sizes());
}

void ParallelScavengeHeap::verify(VerifyOption option /* ignored */) {
  // Why do we need the total_collections()-filter below?
  if (total_collections() > 0) {
    log_debug(gc, verify)("Tenured");
    old_gen()->verify();

    log_debug(gc, verify)("Eden");
    young_gen()->verify();

    log_debug(gc, verify)("CardTable");
    card_table()->verify_all_young_refs_imprecise();
  }
}

void ParallelScavengeHeap::trace_actual_reserved_page_size(const size_t reserved_heap_size, const ReservedSpace rs) {
  // Check if Info level is enabled, since os::trace_page_sizes() logs on Info level.
  if (log_is_enabled(Info, pagesize)) {
    const size_t page_size = rs.page_size();
    os::trace_page_sizes("Heap",
                         MinHeapSize,
                         reserved_heap_size,
                         rs.base(),
                         rs.size(),
                         page_size);
  }
}

void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
  const PSHeapSummary& heap_summary = create_ps_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

CardTableBarrierSet* ParallelScavengeHeap::barrier_set() {
  return barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
}

PSCardTable* ParallelScavengeHeap::card_table() {
  return static_cast<PSCardTable*>(barrier_set()->card_table());
}

void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
                                            size_t survivor_size) {
  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}

void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}

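// Objects from the loaded heap archive are allocated directly in the old
// generation.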
HeapWord* ParallelScavengeHeap::allocate_loaded_archive_space(size_t size) {
  return _old_gen->allocate(size);
}

void ParallelScavengeHeap::complete_loaded_archive_space(MemRegion archive_space) {
  assert(_old_gen->object_space()->used_region().contains(archive_space),
         "Archive space not contained in old gen");
  _old_gen->complete_loaded_archive_space(archive_space);
}

void ParallelScavengeHeap::register_nmethod(nmethod* nm) {
  ScavengableNMethods::register_nmethod(nm);
}

void ParallelScavengeHeap::unregister_nmethod(nmethod* nm) {
  ScavengableNMethods::unregister_nmethod(nm);
}

void ParallelScavengeHeap::verify_nmethod(nmethod* nm) {
  ScavengableNMethods::verify_nmethod(nm);
}

GrowableArray<GCMemoryManager*> ParallelScavengeHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(2);
  memory_managers.append(_young_manager);
  memory_managers.append(_old_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> ParallelScavengeHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(3);
  memory_pools.append(_eden_pool);
  memory_pools.append(_survivor_pool);
  memory_pools.append(_old_pool);
  return memory_pools;
}

void ParallelScavengeHeap::pin_object(JavaThread* thread, oop obj) {
  GCLocker::enter(thread);
}

void ParallelScavengeHeap::unpin_object(JavaThread* thread, oop obj) {
  GCLocker::exit(thread);
}

void ParallelScavengeHeap::update_parallel_worker_threads_cpu_time() {
  assert(Thread::current()->is_VM_thread(),
         "Must be called from VM thread to avoid races");
  if (!UsePerfData || !os::is_thread_cpu_time_supported()) {
    return;
  }

  // Ensure the ThreadTotalCPUTimeClosure destructor is called before
  // publishing the gc time.
  {
    ThreadTotalCPUTimeClosure tttc(CPUTimeGroups::CPUTimeType::gc_parallel_workers);
    // Currently parallel worker threads in GCTaskManager never terminate, so
    // it is safe for the VMThread to read their CPU times. If upstream changes
    // this behavior, we should rethink whether it is still safe.
    gc_threads_do(&tttc);
  }

  CPUTimeCounters::publish_gc_total_cpu_time();
}