1 /*
   2  * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "classfile/classLoaderDataGraph.hpp"
  27 #include "classfile/symbolTable.hpp"
  28 #include "classfile/systemDictionary.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "compiler/oopMap.hpp"
  31 #include "gc/parallel/objectStartArray.inline.hpp"
  32 #include "gc/parallel/parallelArguments.hpp"
  33 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
  34 #include "gc/parallel/parMarkBitMap.inline.hpp"
  35 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
  36 #include "gc/parallel/psCompactionManagerNew.inline.hpp"
  37 #include "gc/parallel/psOldGen.hpp"
  38 #include "gc/parallel/psParallelCompactNew.inline.hpp"
  39 #include "gc/parallel/psPromotionManager.inline.hpp"
  40 #include "gc/parallel/psScavenge.hpp"
  41 #include "gc/parallel/psYoungGen.hpp"
  42 #include "gc/shared/classUnloadingContext.hpp"
  43 #include "gc/shared/fullGCForwarding.inline.hpp"
  44 #include "gc/shared/gcCause.hpp"
  45 #include "gc/shared/gcHeapSummary.hpp"
  46 #include "gc/shared/gcId.hpp"
  47 #include "gc/shared/gcLocker.hpp"
  48 #include "gc/shared/gcTimer.hpp"
  49 #include "gc/shared/gcTrace.hpp"
  50 #include "gc/shared/gcTraceTime.inline.hpp"
  51 #include "gc/shared/gcVMOperations.hpp"
  52 #include "gc/shared/isGCActiveMark.hpp"
  53 #include "gc/shared/oopStorageSetParState.inline.hpp"
  54 #include "gc/shared/preservedMarks.inline.hpp"
  55 #include "gc/shared/referencePolicy.hpp"
  56 #include "gc/shared/referenceProcessor.hpp"
  57 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  58 #include "gc/shared/spaceDecorator.hpp"
  59 #include "gc/shared/strongRootsScope.hpp"
  60 #include "gc/shared/taskTerminator.hpp"
  61 #include "gc/shared/weakProcessor.inline.hpp"
  62 #include "gc/shared/workerPolicy.hpp"
  63 #include "gc/shared/workerThread.hpp"
  64 #include "gc/shared/workerUtils.hpp"
  65 #include "logging/log.hpp"
  66 #include "memory/iterator.inline.hpp"
  67 #include "memory/memoryReserver.hpp"
  68 #include "memory/metaspaceUtils.hpp"
  69 #include "memory/resourceArea.hpp"
  70 #include "memory/universe.hpp"
  71 #include "nmt/memTracker.hpp"
  72 #include "oops/methodData.hpp"
  73 #include "runtime/java.hpp"
  74 #include "runtime/safepoint.hpp"
  75 #include "runtime/threads.hpp"
  76 #include "runtime/vmThread.hpp"
  77 #include "services/memoryService.hpp"
  78 #include "utilities/align.hpp"
  79 #include "utilities/debug.hpp"
  80 #include "utilities/events.hpp"
  81 #include "utilities/formatBuffer.hpp"
  82 #include "utilities/macros.hpp"
  83 #if INCLUDE_JVMCI
  84 #include "jvmci/jvmci.hpp"
  85 #endif
  86 
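     // PSParallelCompactNew performs a full collection of the ParallelScavenge heap.
     // The collection runs in several phases: marking of all live objects, a summary
     // phase that sets up compaction regions, forwarding of live objects to their new
     // addresses, adjusting of all references to the new locations, and finally
     // compaction, which copies the objects into place.
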
  87 SpaceInfoNew PSParallelCompactNew::_space_info[PSParallelCompactNew::last_space_id];
  88 
  89 size_t PSParallelCompactNew::_num_regions;
  90 PCRegionData* PSParallelCompactNew::_region_data_array;
  91 size_t PSParallelCompactNew::_num_regions_serial;
  92 PCRegionData* PSParallelCompactNew::_region_data_array_serial;
  93 PCRegionData** PSParallelCompactNew::_per_worker_region_data;
  94 bool PSParallelCompactNew::_serial = false;
  95 
  96 SpanSubjectToDiscoveryClosure PSParallelCompactNew::_span_based_discoverer;
  97 ReferenceProcessor* PSParallelCompactNew::_ref_processor = nullptr;
  98 
  99 void PSParallelCompactNew::print_on_error(outputStream* st) {
 100   _mark_bitmap.print_on_error(st);
 101 }
 102 
 103 STWGCTimer          PSParallelCompactNew::_gc_timer;
 104 ParallelOldTracer   PSParallelCompactNew::_gc_tracer;
 105 elapsedTimer        PSParallelCompactNew::_accumulated_time;
 106 unsigned int        PSParallelCompactNew::_maximum_compaction_gc_num = 0;
 107 CollectorCounters*  PSParallelCompactNew::_counters = nullptr;
 108 ParMarkBitMap       PSParallelCompactNew::_mark_bitmap;
 109 
 110 PSParallelCompactNew::IsAliveClosure PSParallelCompactNew::_is_alive_closure;
 111 
 112 class PCAdjustPointerClosure: public BasicOopIterateClosure {
 113   template <typename T>
 114   void do_oop_work(T* p) { PSParallelCompactNew::adjust_pointer(p); }
 115 
 116 public:
 117   void do_oop(oop* p) final          { do_oop_work(p); }
 118   void do_oop(narrowOop* p) final    { do_oop_work(p); }
 119 
 120   ReferenceIterationMode reference_iteration_mode() final { return DO_FIELDS; }
 121 };
 122 
 123 static PCAdjustPointerClosure pc_adjust_pointer_closure;
 124 
 125 class IsAliveClosure: public BoolObjectClosure {
 126 public:
 127   bool do_object_b(oop p) final;
 128 };
 129 
 130 
 131 bool PSParallelCompactNew::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
 132 
 133 void PSParallelCompactNew::post_initialize() {
 134   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 135   _span_based_discoverer.set_span(heap->reserved_region());
 136   _ref_processor =
 137     new ReferenceProcessor(&_span_based_discoverer,
 138                            ParallelGCThreads,   // mt processing degree
 139                            ParallelGCThreads,   // mt discovery degree
 140                            false,               // concurrent_discovery
 141                            &_is_alive_closure); // non-header is alive closure
 142 
 143   _counters = new CollectorCounters("Parallel full collection pauses", 1);
 144 
 145   // Initialize static fields in ParCompactionManagerNew.
 146   ParCompactionManagerNew::initialize(mark_bitmap());
 147 }
 148 
 149 bool PSParallelCompactNew::initialize_aux_data() {
 150   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 151   MemRegion mr = heap->reserved_region();
 152   assert(mr.byte_size() != 0, "heap should be reserved");
 153 
 154   initialize_space_info();
 155 
 156   if (!_mark_bitmap.initialize(mr)) {
 157     vm_shutdown_during_initialization(
 158       err_msg("Unable to allocate %zuKB bitmaps for parallel "
 159       "garbage collection for the requested %zuKB heap.",
 160       _mark_bitmap.reserved_byte_size()/K, mr.byte_size()/K));
 161     return false;
 162   }
 163 
 164   return true;
 165 }
 166 
 167 void PSParallelCompactNew::initialize_space_info()
 168 {
 169   memset(&_space_info, 0, sizeof(_space_info));
 170 
 171   PSYoungGen* young_gen = ParallelScavengeHeap::young_gen();
 172 
 173   _space_info[old_space_id].set_space(ParallelScavengeHeap::old_gen()->object_space());
 174   _space_info[eden_space_id].set_space(young_gen->eden_space());
 175   _space_info[from_space_id].set_space(young_gen->from_space());
 176   _space_info[to_space_id].set_space(young_gen->to_space());
 177 
 178   _space_info[old_space_id].set_start_array(ParallelScavengeHeap::old_gen()->start_array());
 179 }
 180 
 181 void
 182 PSParallelCompactNew::clear_data_covering_space(SpaceId id)
 183 {
 184   // At this point, top is the value before GC, new_top() is the value that will
 185   // be set at the end of GC.  The marking bitmap is cleared to top; nothing
 186   // should be marked above top.
 187   MutableSpace* const space = _space_info[id].space();
 188   HeapWord* const bot = space->bottom();
 189   HeapWord* const top = space->top();
 190 
 191   _mark_bitmap.clear_range(bot, top);
 192 }
 193 
 194 void PSParallelCompactNew::pre_compact()
 195 {
 196   // Update the from & to space pointers in space_info, since they are swapped
 197   // at each young gen gc.  Do the update unconditionally (even though a
 198   // promotion failure does not swap spaces) because an unknown number of young
 199   // collections will have swapped the spaces an unknown number of times.
 200   GCTraceTime(Debug, gc, phases) tm("Pre Compact", &_gc_timer);
 201   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 202   _space_info[from_space_id].set_space(ParallelScavengeHeap::young_gen()->from_space());
 203   _space_info[to_space_id].set_space(ParallelScavengeHeap::young_gen()->to_space());
 204 
 205   // Increment the invocation count
 206   heap->increment_total_collections(true);
 207 
 208   CodeCache::on_gc_marking_cycle_start();
 209 
 210   heap->print_heap_before_gc();
 211   heap->trace_heap_before_gc(&_gc_tracer);
 212 
 213   // Fill in TLABs
 214   heap->ensure_parsability(true);  // retire TLABs
 215 
 216   if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
 217     Universe::verify("Before GC");
 218   }
 219 
 220   DEBUG_ONLY(mark_bitmap()->verify_clear();)
 221 }
 222 
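     // Post-compaction bookkeeping: clear the marking bitmap, fill the gaps between
     // new_top and end of the compaction regions with dummy objects, update the space
     // tops, release the region data arrays, clear or dirty the old-gen card table,
     // and purge unloaded class loader data.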
 223 void PSParallelCompactNew::post_compact()
 224 {
 225   GCTraceTime(Info, gc, phases) tm("Post Compact", &_gc_timer);
 226 
 227   CodeCache::on_gc_marking_cycle_finish();
 228   CodeCache::arm_all_nmethods();
 229 
 230   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
 231     // Clear the marking bitmap, summary data and split info.
 232     clear_data_covering_space(SpaceId(id));
 233   }
 234 
 235   {
 236     PCRegionData* last_live[last_space_id];
 237     for (uint i = old_space_id; i < last_space_id; ++i) {
 238       last_live[i] = nullptr;
 239     }
 240 
 241     // Figure out last region in each space that has live data.
 242     uint space_id = old_space_id;
 243     MutableSpace* space = _space_info[space_id].space();
 244     size_t num_regions = get_num_regions();
 245     PCRegionData* region_data_array = get_region_data_array();
 246     last_live[space_id] = &region_data_array[0];
 247     for (size_t idx = 0; idx < num_regions; idx++) {
 248       PCRegionData* rd = region_data_array + idx;
 249       if (!space->contains(rd->bottom())) {
 250         ++space_id;
 251         assert(space_id < last_space_id, "invariant");
 252         space = _space_info[space_id].space();
 253         log_develop_trace(gc, compaction)("Last live for space: %u: %zu", space_id, idx);
 254         last_live[space_id] = rd;
 255       }
 256       assert(space->contains(rd->bottom()), "next space should contain next region");
 257       log_develop_trace(gc, compaction)("post-compact region: idx: %zu, bottom: " PTR_FORMAT ", new_top: " PTR_FORMAT ", end: " PTR_FORMAT, rd->idx(), p2i(rd->bottom()), p2i(rd->new_top()), p2i(rd->end()));
 258       if (rd->new_top() > rd->bottom()) {
 259         last_live[space_id] = rd;
 260         log_develop_trace(gc, compaction)("Bump last live for space: %u", space_id);
 261       }
 262     }
 263 
 264     for (uint i = old_space_id; i < last_space_id; ++i) {
 265       PCRegionData* rd = last_live[i];
 266       log_develop_trace(gc, compaction)(
 267               "Last live region in space: %u, compaction region, " PTR_FORMAT ", #%zu: [" PTR_FORMAT ", " PTR_FORMAT "), new_top: " PTR_FORMAT,
 268               i, p2i(rd), rd->idx(),
 269               p2i(rd->bottom()), p2i(rd->end()), p2i(rd->new_top()));
 270     }
 271 
 272     // Fill all gaps and update the space boundaries.
 273     space_id = old_space_id;
 274     space = _space_info[space_id].space();
 275     size_t total_live = 0;
 276     size_t total_waste = 0;
 277     for (size_t idx = 0; idx < num_regions; idx++) {
 278       PCRegionData* rd = &region_data_array[idx];
 279       PCRegionData* last_live_in_space = last_live[space_id];
 280       assert(last_live_in_space != nullptr, "last live must not be null");
 281       if (rd != last_live_in_space) {
 282         if (rd->new_top() < rd->end()) {
 283           ObjectStartArray* sa = start_array(SpaceId(space_id));
 284           if (sa != nullptr) {
 285             sa->update_for_block(rd->new_top(), rd->end());
 286           }
 287           ParallelScavengeHeap::heap()->fill_with_dummy_object(rd->new_top(), rd->end(), false);
 288         }
 289         size_t live = pointer_delta(rd->new_top(), rd->bottom());
 290         size_t waste = pointer_delta(rd->end(), rd->new_top());
 291         total_live += live;
 292         total_waste += waste;
 293         log_develop_trace(gc, compaction)(
 294                 "Live compaction region, #%zu: [" PTR_FORMAT ", " PTR_FORMAT "), new_top: " PTR_FORMAT ", live: %zu, waste: %zu",
 295                 rd->idx(),
 296                 p2i(rd->bottom()), p2i(rd->end()), p2i(rd->new_top()), live, waste);
 297       } else {
 298         // Update top of space.
 299         space->set_top(rd->new_top());
 300         size_t live = pointer_delta(rd->new_top(), rd->bottom());
 301         total_live += live;
 302         log_develop_trace(gc, compaction)(
 303                 "Live compaction region, #%zu: [" PTR_FORMAT ", " PTR_FORMAT "), new_top: " PTR_FORMAT ", live: %zu, waste: %zu",
 304                 rd->idx(),
 305                 p2i(rd->bottom()), p2i(rd->end()), p2i(rd->new_top()), live, size_t(0));
 306 
 307         // Fast-forward to the next space.
 308         for (; idx < num_regions - 1; idx++) {
 309           rd = &region_data_array[idx + 1];
 310           if (!space->contains(rd->bottom())) {
 311             space_id++;
 312             assert(space_id < last_space_id, "must be");
 313             space = _space_info[space_id].space();
 314             assert(space->contains(rd->bottom()), "space must contain region");
 315             break;
 316           }
 317         }
 318       }
 319     }
 320     log_develop_debug(gc, compaction)("total live: %zu, total waste: %zu, ratio: %f", total_live, total_waste, ((float)total_waste)/((float)(total_live + total_waste)));
 321   }
 322   {
 323     FREE_C_HEAP_ARRAY(PCRegionData*, _per_worker_region_data);
 324     FREE_C_HEAP_ARRAY(PCRegionData, _region_data_array);
 325     FREE_C_HEAP_ARRAY(PCRegionData, _region_data_array_serial);
 326   }
 327 #ifdef ASSERT
 328   {
 329     mark_bitmap()->verify_clear();
 330   }
 331 #endif
 332 
 333   ParCompactionManagerNew::flush_all_string_dedup_requests();
 334 
 335   MutableSpace* const eden_space = _space_info[eden_space_id].space();
 336   MutableSpace* const from_space = _space_info[from_space_id].space();
 337   MutableSpace* const to_space   = _space_info[to_space_id].space();
 338 
 339   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 340   bool eden_empty = eden_space->is_empty();
 341 
 342   // Update heap occupancy information which is used as input to the soft ref
 343   // clearing policy at the next gc.
 344   Universe::heap()->update_capacity_and_used_at_gc();
 345 
 346   bool young_gen_empty = eden_empty && from_space->is_empty() &&
 347     to_space->is_empty();
 348 
 349   PSCardTable* ct = heap->card_table();
 350   MemRegion old_mr = ParallelScavengeHeap::old_gen()->committed();
 351   if (young_gen_empty) {
 352     ct->clear_MemRegion(old_mr);
 353   } else {
 354     ct->dirty_MemRegion(old_mr);
 355   }
 356 
 357   {
 358     // Delete metaspaces for unloaded class loaders and clean up loader_data graph
 359     GCTraceTime(Debug, gc, phases) t("Purge Class Loader Data", gc_timer());
 360     ClassLoaderDataGraph::purge(true /* at_safepoint */);
 361     DEBUG_ONLY(MetaspaceUtils::verify();)
 362   }
 363 
 364   // Need to clear claim bits for the next mark.
 365   ClassLoaderDataGraph::clear_claimed_marks();
 366 
 367   heap->prune_scavengable_nmethods();
 368 
 369 #if COMPILER2_OR_JVMCI
 370   DerivedPointerTable::update_pointers();
 371 #endif
 372 
 373   // Signal that we have completed a visit to all live objects.
 374   Universe::heap()->record_whole_heap_examined_timestamp();
 375 }
 376 
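     // Split each space of the heap into compaction regions of SpaceAlignment size.
     // If a live object would cross a region boundary, the region is extended to end
     // after that object, so that regions can later be forwarded and compacted
     // independently of each other.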
 377 void PSParallelCompactNew::setup_regions_parallel() {
 378   static const size_t REGION_SIZE_WORDS = (SpaceAlignment / HeapWordSize);
 379   size_t num_regions = 0;
 380   for (uint i = old_space_id; i < last_space_id; ++i) {
 381     MutableSpace* const space = _space_info[i].space();
 382     size_t const space_size_words = space->capacity_in_words();
 383     num_regions += align_up(space_size_words, REGION_SIZE_WORDS) / REGION_SIZE_WORDS;
 384   }
 385   _region_data_array = NEW_C_HEAP_ARRAY(PCRegionData, num_regions, mtGC);
 386 
 387   size_t region_idx = 0;
 388   for (uint i = old_space_id; i < last_space_id; ++i) {
 389     const MutableSpace* space = _space_info[i].space();
 390     HeapWord* addr = space->bottom();
 391     HeapWord* sp_end = space->end();
 392     HeapWord* sp_top = space->top();
 393     while (addr < sp_end) {
 394       HeapWord* end = MIN2(align_up(addr + REGION_SIZE_WORDS, REGION_SIZE_WORDS), space->end());
 395       if (addr < sp_top) {
 396         HeapWord* prev_obj_start = _mark_bitmap.find_obj_beg_reverse(addr, end);
 397         if (prev_obj_start < end) {
 398           HeapWord* prev_obj_end = prev_obj_start + cast_to_oop(prev_obj_start)->size();
 399           if (end < prev_obj_end) {
 400             // Object crosses region boundary, adjust end to be after object's last word.
 401             end = prev_obj_end;
 402           }
 403         }
 404       }
 405       assert(region_idx < num_regions, "must not exceed number of regions: region_idx: %zu, num_regions: %zu", region_idx, num_regions);
 406       HeapWord* top;
 407       if (sp_top < addr) {
 408         top = addr;
 409       } else if (sp_top >= end) {
 410         top = end;
 411       } else {
 412         top = sp_top;
 413       }
 414       assert(ParallelScavengeHeap::heap()->is_in_reserved(addr), "addr must be in heap: " PTR_FORMAT, p2i(addr));
 415       new (_region_data_array + region_idx) PCRegionData(region_idx, addr, top, end);
 416       addr = end;
 417       region_idx++;
 418     }
 419   }
 420   _num_regions = region_idx;
 421   log_info(gc)("Number of regions: %zu", _num_regions);
 422 }
 423 
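     // Serial mode uses a single region per space, covering the whole space. The
     // forwarding and compaction phases then run serially over these regions, which
     // achieves perfect compaction (see invoke_no_policy()).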
 424 void PSParallelCompactNew::setup_regions_serial() {
 425   _num_regions_serial = last_space_id;
 426   _region_data_array_serial = NEW_C_HEAP_ARRAY(PCRegionData, _num_regions_serial, mtGC);
 427   new (_region_data_array_serial + old_space_id)  PCRegionData(old_space_id, space(old_space_id)->bottom(), space(old_space_id)->top(), space(old_space_id)->end());
 428   new (_region_data_array_serial + eden_space_id) PCRegionData(eden_space_id, space(eden_space_id)->bottom(), space(eden_space_id)->top(), space(eden_space_id)->end());
 429   new (_region_data_array_serial + from_space_id) PCRegionData(from_space_id, space(from_space_id)->bottom(), space(from_space_id)->top(), space(from_space_id)->end());
 430   new (_region_data_array_serial + to_space_id)   PCRegionData(to_space_id, space(to_space_id)->bottom(), space(to_space_id)->top(), space(to_space_id)->end());
 431 }
 432 
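     // Decide whether this full GC should use maximum (serial) compaction: either a
     // user-requested GC with UseMaximumCompactionOnSystemGC enabled, or more than
     // HeapMaximumCompactionInterval full GCs have passed since the last maximum
     // compaction.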
 433 bool PSParallelCompactNew::check_maximum_compaction() {
 434 
 435   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 436 
 437   // Check System.GC
 438   bool is_max_on_system_gc = UseMaximumCompactionOnSystemGC
 439                           && GCCause::is_user_requested_gc(heap->gc_cause());
 440 
 441   // JVM flags
 442   const uint total_invocations = heap->total_full_collections();
 443   assert(total_invocations >= _maximum_compaction_gc_num, "sanity");
 444   const size_t gcs_since_max = total_invocations - _maximum_compaction_gc_num;
 445   const bool is_interval_ended = gcs_since_max > HeapMaximumCompactionInterval;
 446 
 447   if (is_max_on_system_gc || is_interval_ended) {
 448     _maximum_compaction_gc_num = total_invocations;
 449     return true;
 450   }
 451 
 452   return false;
 453 }
 454 
 455 void PSParallelCompactNew::summary_phase() {
 456   GCTraceTime(Info, gc, phases) tm("Summary Phase", &_gc_timer);
 457 
 458   setup_regions_serial();
 459   setup_regions_parallel();
 460 
 461 #ifndef PRODUCT
 462   for (size_t idx = 0; idx < _num_regions; idx++) {
 463     PCRegionData* rd = &_region_data_array[idx];
 464     log_develop_trace(gc, compaction)("Compaction region #%zu: [" PTR_FORMAT ", " PTR_FORMAT ")", rd->idx(), p2i(
 465             rd->bottom()), p2i(rd->end()));
 466   }
 467 #endif
 468 }
 469 
 470 // This method should contain all heap-specific policy for invoking a full
 471 // collection.  invoke_no_policy() will only attempt to compact the heap; it
 472 // will do nothing further.  If we need to bail out for policy reasons, scavenge
 473 // before full gc, or any other specialized behavior, it needs to be added here.
 474 //
 475 // Note that this method should only be called from the vm_thread while at a
 476 // safepoint.
 477 //
 478 // Note that the all_soft_refs_clear flag in the soft ref policy
 479 // may be true because this method can be called without intervening
 480 // activity.  For example, when the heap space is tight and full measures
 481 // are being taken to free space.
 482 bool PSParallelCompactNew::invoke(bool clear_all_soft_refs, bool serial) {
 483   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
 484   assert(Thread::current() == (Thread*)VMThread::vm_thread(),
 485          "should be in vm thread");
 486 
 487   SvcGCMarker sgcm(SvcGCMarker::FULL);
 488   IsSTWGCActiveMark mark;
 489 
 490   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 491   clear_all_soft_refs = clear_all_soft_refs
 492                      || heap->soft_ref_policy()->should_clear_all_soft_refs();
 493 
 494   return PSParallelCompactNew::invoke_no_policy(clear_all_soft_refs, serial);
 495 }
 496 
 497 // This method contains no policy. You should probably
 498 // be calling invoke() instead.
 499 bool PSParallelCompactNew::invoke_no_policy(bool clear_all_soft_refs, bool serial) {
 500   assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
 501   assert(ref_processor() != nullptr, "Sanity");
 502 
 503   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
 504 
 505   GCIdMark gc_id_mark;
 506   _gc_timer.register_gc_start();
 507   _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
 508 
 509   GCCause::Cause gc_cause = heap->gc_cause();
 510   PSYoungGen* young_gen = ParallelScavengeHeap::young_gen();
 511   PSOldGen* old_gen = ParallelScavengeHeap::old_gen();
 512   PSAdaptiveSizePolicy* size_policy = heap->size_policy();
 513 
 514   // The scope of casr should end after code that can change
 515   // SoftRefPolicy::_should_clear_all_soft_refs.
 516   ClearedAllSoftRefs casr(clear_all_soft_refs,
 517                           heap->soft_ref_policy());
 518 
 519   // Make sure data structures are sane, make the heap parsable, and do other
 520   // miscellaneous bookkeeping.
 521   pre_compact();
 522 
 523   const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();
 524 
 525   {
 526     const uint active_workers =
 527       WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().max_workers(),
 528                                         ParallelScavengeHeap::heap()->workers().active_workers(),
 529                                         Threads::number_of_non_daemon_threads());
 530     ParallelScavengeHeap::heap()->workers().set_active_workers(active_workers);
 531 
 532     if (serial || check_maximum_compaction()) {
 533       // Serial compaction executes the forwarding and compaction phases serially,
 534       // thus achieving perfect compaction.
 535       // Marking and adjusting references are still executed by parallel worker threads.
 536       _serial = true;
 537     } else {
 538       _serial = false;
 539     }
 540 
 541     GCTraceCPUTime tcpu(&_gc_tracer);
 542     GCTraceTime(Info, gc) tm("Pause Full", nullptr, gc_cause, true);
 543 
 544     heap->pre_full_gc_dump(&_gc_timer);
 545 
 546     TraceCollectorStats tcs(counters());
 547     TraceMemoryManagerStats tms(heap->old_gc_manager(), gc_cause, "end of major GC");
 548 
 549     if (log_is_enabled(Debug, gc, heap, exit)) {
 550       accumulated_time()->start();
 551     }
 552 
 553     // Let the size policy know we're starting
 554     size_policy->major_collection_begin();
 555 
 556 #if COMPILER2_OR_JVMCI
 557     DerivedPointerTable::clear();
 558 #endif
 559 
 560     ref_processor()->start_discovery(clear_all_soft_refs);
 561 
 562     ClassUnloadingContext ctx(1 /* num_nmethod_unlink_workers */,
 563                               false /* unregister_nmethods_during_purge */,
 564                               false /* lock_nmethod_free_separately */);
 565 
 566     marking_phase(&_gc_tracer);
 567 
 568     summary_phase();
 569 
 570 #if COMPILER2_OR_JVMCI
 571     assert(DerivedPointerTable::is_active(), "Sanity");
 572     DerivedPointerTable::set_active(false);
 573 #endif
 574 
 575     FullGCForwarding::begin();
 576 
 577     forward_to_new_addr();
 578 
 579     adjust_pointers();
 580 
 581     compact();
 582 
 583     FullGCForwarding::end();
 584 
 585     ParCompactionManagerNew::_preserved_marks_set->restore(&ParallelScavengeHeap::heap()->workers());
 586 
 587     // Reset the mark bitmap and do other bookkeeping.  Must be done before
 588     // resizing.
 589     post_compact();
 590 
 591     // Let the size policy know we're done
 592     size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
 593 
 594     if (UseAdaptiveSizePolicy) {
 595       log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
 596       log_trace(gc, ergo)("old_gen_capacity: %zu young_gen_capacity: %zu",
 597                           old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
 598 
 599       // Don't check if the size_policy is ready here.  Let
 600       // the size_policy check that internally.
 601       if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
 602           AdaptiveSizePolicy::should_update_promo_stats(gc_cause)) {
 603         // Swap the survivor spaces if from_space is empty. The
 604         // resize_young_gen() called below is normally used after
 605         // a successful young GC and swapping of survivor spaces;
 606         // otherwise, it will fail to resize the young gen with
 607         // the current implementation.
 608         if (young_gen->from_space()->is_empty()) {
 609           young_gen->from_space()->clear(SpaceDecorator::Mangle);
 610           young_gen->swap_spaces();
 611         }
 612 
 613         // Calculate optimal free space amounts
 614         assert(young_gen->max_gen_size() >
 615           young_gen->from_space()->capacity_in_bytes() +
 616           young_gen->to_space()->capacity_in_bytes(),
 617           "Sizes of space in young gen are out-of-bounds");
 618 
 619         size_t young_live = young_gen->used_in_bytes();
 620         size_t eden_live = young_gen->eden_space()->used_in_bytes();
 621         size_t old_live = old_gen->used_in_bytes();
 622         size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
 623         size_t max_old_gen_size = old_gen->max_gen_size();
 624         size_t max_eden_size = young_gen->max_gen_size() -
 625           young_gen->from_space()->capacity_in_bytes() -
 626           young_gen->to_space()->capacity_in_bytes();
 627 
 628         // Used for diagnostics
 629         size_policy->clear_generation_free_space_flags();
 630 
 631         size_policy->compute_generations_free_space(young_live,
 632                                                     eden_live,
 633                                                     old_live,
 634                                                     cur_eden,
 635                                                     max_old_gen_size,
 636                                                     max_eden_size,
 637                                                     true /* full gc*/);
 638 
 639         size_policy->check_gc_overhead_limit(eden_live,
 640                                              max_old_gen_size,
 641                                              max_eden_size,
 642                                              true /* full gc*/,
 643                                              gc_cause,
 644                                              heap->soft_ref_policy());
 645 
 646         size_policy->decay_supplemental_growth(true /* full gc*/);
 647 
 648         heap->resize_old_gen(
 649           size_policy->calculated_old_free_size_in_bytes());
 650 
 651         heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
 652                                size_policy->calculated_survivor_size_in_bytes());
 653       }
 654 
 655       log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
 656     }
 657 
 658     if (UsePerfData) {
 659       PSGCAdaptivePolicyCounters* const counters = ParallelScavengeHeap::gc_policy_counters();
 660       counters->update_counters();
 661       counters->update_old_capacity(old_gen->capacity_in_bytes());
 662       counters->update_young_capacity(young_gen->capacity_in_bytes());
 663     }
 664 
 665     heap->resize_all_tlabs();
 666 
 667     // Resize the metaspace capacity after a collection
 668     MetaspaceGC::compute_new_size();
 669 
 670     if (log_is_enabled(Debug, gc, heap, exit)) {
 671       accumulated_time()->stop();
 672     }
 673 
 674     heap->print_heap_change(pre_gc_values);
 675 
 676     // Track memory usage and detect low memory
 677     MemoryService::track_memory_usage();
 678     heap->update_counters();
 679 
 680     heap->post_full_gc_dump(&_gc_timer);
 681   }
 682 
 683   if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
 684     Universe::verify("After GC");
 685   }
 686 
 687   heap->print_heap_after_gc();
 688   heap->trace_heap_after_gc(&_gc_tracer);
 689 
 690   AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
 691 
 692   _gc_timer.register_gc_end();
 693 
 694   _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
 695 
 696   return true;
 697 }
 698 
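     // Marks all objects reachable from a single thread's stack (including the
     // nmethods it has on stack) and then drains the per-worker marking stack.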
 699 class PCAddThreadRootsMarkingTaskClosureNew : public ThreadClosure {
 700 private:
 701   uint _worker_id;
 702 
 703 public:
 704   explicit PCAddThreadRootsMarkingTaskClosureNew(uint worker_id) : _worker_id(worker_id) { }
 705   void do_thread(Thread* thread) final {
 706     assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");
 707 
 708     ResourceMark rm;
 709 
 710     ParCompactionManagerNew* cm = ParCompactionManagerNew::gc_thread_compaction_manager(_worker_id);
 711 
 712     MarkingNMethodClosure mark_and_push_in_blobs(&cm->_mark_and_push_closure,
 713                                                  !NMethodToOopClosure::FixRelocations,
 714                                                  true /* keepalive nmethods */);
 715 
 716     thread->oops_do(&cm->_mark_and_push_closure, &mark_and_push_in_blobs);
 717 
 718     // Do the real work
 719     cm->follow_marking_stacks();
 720   }
 721 };
 722 
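     // Steal marking work from other workers' queues and process it until all
     // workers agree to terminate.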
 723 void steal_marking_work_new(TaskTerminator& terminator, uint worker_id) {
 724   assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");
 725 
 726   ParCompactionManagerNew* cm =
 727     ParCompactionManagerNew::gc_thread_compaction_manager(worker_id);
 728 
 729   do {
 730     ScannerTask task;
 731     if (ParCompactionManagerNew::steal(worker_id, task)) {
 732       cm->follow_contents(task, true);
 733     }
 734     cm->follow_marking_stacks();
 735   } while (!terminator.offer_termination());
 736 }
 737 
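     // Parallel marking task: each worker marks from the strong CLD roots, thread
     // roots and OopStorage roots, and then (with more than one worker) steals
     // marking work until all marking stacks are empty.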
 738 class MarkFromRootsTaskNew : public WorkerTask {
 739   StrongRootsScope _strong_roots_scope; // needed for Threads::possibly_parallel_threads_do
 740   OopStorageSetStrongParState<false /* concurrent */, false /* is_const */> _oop_storage_set_par_state;
 741   TaskTerminator _terminator;
 742   uint _active_workers;
 743 
 744 public:
 745   explicit MarkFromRootsTaskNew(uint active_workers) :
 746       WorkerTask("MarkFromRootsTaskNew"),
 747       _strong_roots_scope(active_workers),
 748       _terminator(active_workers, ParCompactionManagerNew::marking_stacks()),
 749       _active_workers(active_workers) {}
 750 
 751   void work(uint worker_id) final {
 752     ParCompactionManagerNew* cm = ParCompactionManagerNew::gc_thread_compaction_manager(worker_id);
 753     {
 754       CLDToOopClosure cld_closure(&cm->_mark_and_push_closure, ClassLoaderData::_claim_stw_fullgc_mark);
 755       ClassLoaderDataGraph::always_strong_cld_do(&cld_closure);
 756 
 757       // Do the real work
 758       cm->follow_marking_stacks();
 759     }
 760 
 761     {
 762       PCAddThreadRootsMarkingTaskClosureNew closure(worker_id);
 763       Threads::possibly_parallel_threads_do(_active_workers > 1 /* is_par */, &closure);
 764     }
 765 
 766     // Mark from OopStorages
 767     {
 768       _oop_storage_set_par_state.oops_do(&cm->_mark_and_push_closure);
 769       // Do the real work
 770       cm->follow_marking_stacks();
 771     }
 772 
 773     if (_active_workers > 1) {
 774       steal_marking_work_new(_terminator, worker_id);
 775     }
 776   }
 777 };
 778 
 779 class ParallelCompactRefProcProxyTaskNew : public RefProcProxyTask {
 780   TaskTerminator _terminator;
 781 
 782 public:
 783   explicit ParallelCompactRefProcProxyTaskNew(uint max_workers)
 784     : RefProcProxyTask("ParallelCompactRefProcProxyTaskNew", max_workers),
 785       _terminator(_max_workers, ParCompactionManagerNew::marking_stacks()) {}
 786 
 787   void work(uint worker_id) final {
 788     assert(worker_id < _max_workers, "sanity");
 789     ParCompactionManagerNew* cm = (_tm == RefProcThreadModel::Single) ? ParCompactionManagerNew::get_vmthread_cm() : ParCompactionManagerNew::gc_thread_compaction_manager(worker_id);
 790     BarrierEnqueueDiscoveredFieldClosure enqueue;
 791     ParCompactionManagerNew::FollowStackClosure complete_gc(cm, (_tm == RefProcThreadModel::Single) ? nullptr : &_terminator, worker_id);
 792     _rp_task->rp_work(worker_id, PSParallelCompactNew::is_alive_closure(), &cm->_mark_and_push_closure, &enqueue, &complete_gc);
 793   }
 794 
 795   void prepare_run_task_hook() final {
 796     _terminator.reset_for_reuse(_queue_count);
 797   }
 798 };
 799 
 800 void PSParallelCompactNew::marking_phase(ParallelOldTracer *gc_tracer) {
 801   // Recursively traverse all live objects and mark them
 802   GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);
 803 
 804   uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
 805 
 806   ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_mark);
 807   {
 808     GCTraceTime(Debug, gc, phases) pm_tm("Par Mark", &_gc_timer);
 809 
 810     MarkFromRootsTaskNew task(active_gc_threads);
 811     ParallelScavengeHeap::heap()->workers().run_task(&task);
 812   }
 813 
 814   // Process reference objects found during marking
 815   {
 816     GCTraceTime(Debug, gc, phases) rp_tm("Reference Processing", &_gc_timer);
 817 
 818     ReferenceProcessorStats stats;
 819     ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->max_num_queues());
 820 
 821     ref_processor()->set_active_mt_degree(active_gc_threads);
 822     ParallelCompactRefProcProxyTaskNew task(ref_processor()->max_num_queues());
 823     stats = ref_processor()->process_discovered_references(task, pt);
 824 
 825     gc_tracer->report_gc_reference_stats(stats);
 826     pt.print_all_references();
 827   }
 828 
 829   // This is the point where the entire marking should have completed.
 830   ParCompactionManagerNew::verify_all_marking_stack_empty();
 831 
 832   {
 833     GCTraceTime(Debug, gc, phases) wp_tm("Weak Processing", &_gc_timer);
 834     WeakProcessor::weak_oops_do(&ParallelScavengeHeap::heap()->workers(),
 835                                 is_alive_closure(),
 836                                 &do_nothing_cl,
 837                                 1);
 838   }
 839 
 840   {
 841     GCTraceTime(Debug, gc, phases) tm_m("Class Unloading", &_gc_timer);
 842 
 843     ClassUnloadingContext* ctx = ClassUnloadingContext::context();
 844 
 845     bool unloading_occurred;
 846     {
 847       CodeCache::UnlinkingScope scope(is_alive_closure());
 848 
 849       // Follow system dictionary roots and unload classes.
 850       unloading_occurred = SystemDictionary::do_unloading(&_gc_timer);
 851 
 852       // Unload nmethods.
 853       CodeCache::do_unloading(unloading_occurred);
 854     }
 855 
 856     {
 857       GCTraceTime(Debug, gc, phases) t("Purge Unlinked NMethods", gc_timer());
 858       // Release unloaded nmethod's memory.
 859       ctx->purge_nmethods();
 860     }
 861     {
 862       GCTraceTime(Debug, gc, phases) ur("Unregister NMethods", &_gc_timer);
 863       ParallelScavengeHeap::heap()->prune_unlinked_nmethods();
 864     }
 865     {
 866       GCTraceTime(Debug, gc, phases) t("Free Code Blobs", gc_timer());
 867       ctx->free_nmethods();
 868     }
 869 
 870     // Prune dead klasses from subklass/sibling/implementor lists.
 871     Klass::clean_weak_klass_links(unloading_occurred);
 872 
 873     // Clean JVMCI metadata handles.
 874     JVMCI_ONLY(JVMCI::do_unloading(unloading_occurred));
 875   }
 876 
 877   {
 878     GCTraceTime(Debug, gc, phases) roc_tm("Report Object Count", &_gc_timer);
 879     _gc_tracer.report_object_count_after_gc(is_alive_closure(), &ParallelScavengeHeap::heap()->workers());
 880   }
 881 #if TASKQUEUE_STATS
 882   ParCompactionManagerNew::print_and_reset_taskqueue_stats();
 883 #endif
 884 }
 885 
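     // Called by the adjust-references workers: claim compaction regions and, for each
     // marked object in a claimed region, update its oop fields to point to the
     // objects' forwarded locations.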
 886 void PSParallelCompactNew::adjust_pointers_in_spaces(uint worker_id) {
 887   auto start_time = Ticks::now();
 888   for (size_t i = 0; i < _num_regions; i++) {
 889     PCRegionData* region = &_region_data_array[i];
 890     if (!region->claim()) {
 891       continue;
 892     }
 893     log_trace(gc, compaction)("Adjusting pointers in region: %zu (worker_id: %u)", region->idx(), worker_id);
 894     HeapWord* end = region->top();
 895     HeapWord* current = _mark_bitmap.find_obj_beg(region->bottom(), end);
 896     while (current < end) {
 897       assert(_mark_bitmap.is_marked(current), "must be marked");
 898       oop obj = cast_to_oop(current);
 899       size_t size = obj->size();
 900       obj->oop_iterate(&pc_adjust_pointer_closure);
 901       current = _mark_bitmap.find_obj_beg(current + size, end);
 902     }
 903   }
 904   log_trace(gc, phases)("adjust_pointers_in_spaces worker %u: %.3f ms", worker_id, (Ticks::now() - start_time).seconds() * 1000);
 905 }
 906 
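     // Worker task for the adjust-references phase: updates preserved marks, the oop
     // fields of objects in the heap regions, thread stacks, OopStorage roots, class
     // loader data, weak roots and nmethods in the code cache to point to the new
     // object locations.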
 907 class PSAdjustTaskNew final : public WorkerTask {
 908   SubTasksDone                               _sub_tasks;
 909   WeakProcessor::Task                        _weak_proc_task;
 910   OopStorageSetStrongParState<false, false>  _oop_storage_iter;
 911   uint                                       _nworkers;
 912 
 913   enum PSAdjustSubTask {
 914     PSAdjustSubTask_code_cache,
 915 
 916     PSAdjustSubTask_num_elements
 917   };
 918 
 919 public:
 920   explicit PSAdjustTaskNew(uint nworkers) :
 921     WorkerTask("PSAdjust task"),
 922     _sub_tasks(PSAdjustSubTask_num_elements),
 923     _weak_proc_task(nworkers),
 924     _nworkers(nworkers) {
 925 
 926     ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);
 927     if (nworkers > 1) {
 928       Threads::change_thread_claim_token();
 929     }
 930   }
 931 
 932   ~PSAdjustTaskNew() {
 933     Threads::assert_all_threads_claimed();
 934   }
 935 
 936   void work(uint worker_id) final {
 937     ParCompactionManagerNew* cm = ParCompactionManagerNew::gc_thread_compaction_manager(worker_id);
 938     cm->preserved_marks()->adjust_during_full_gc();
 939     {
 940       // adjust pointers in all spaces
 941       PSParallelCompactNew::adjust_pointers_in_spaces(worker_id);
 942     }
 943     {
 944       ResourceMark rm;
 945       Threads::possibly_parallel_oops_do(_nworkers > 1, &pc_adjust_pointer_closure, nullptr);
 946     }
 947     _oop_storage_iter.oops_do(&pc_adjust_pointer_closure);
 948     {
 949       CLDToOopClosure cld_closure(&pc_adjust_pointer_closure, ClassLoaderData::_claim_stw_fullgc_adjust);
 950       ClassLoaderDataGraph::cld_do(&cld_closure);
 951     }
 952     {
 953       AlwaysTrueClosure always_alive;
 954       _weak_proc_task.work(worker_id, &always_alive, &pc_adjust_pointer_closure);
 955     }
 956     if (_sub_tasks.try_claim_task(PSAdjustSubTask_code_cache)) {
 957       NMethodToOopClosure adjust_code(&pc_adjust_pointer_closure, NMethodToOopClosure::FixRelocations);
 958       CodeCache::nmethods_do(&adjust_code);
 959     }
 960     _sub_tasks.all_tasks_claimed();
 961   }
 962 };
 963 
 964 void PSParallelCompactNew::adjust_pointers() {
 965   // Adjust the pointers to reflect the new locations
 966   GCTraceTime(Info, gc, phases) tm("Adjust Pointers", &_gc_timer);
 967   uint num_workers = ParallelScavengeHeap::heap()->workers().active_workers();
 968   PSAdjustTaskNew task(num_workers);
 969   ParallelScavengeHeap::heap()->workers().run_task(&task);
 970 }
 971 
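     // Forwarding phase: compaction regions are distributed round-robin across the
     // workers. Each worker builds a local list of its regions and forwards the live
     // objects in those regions to their new addresses within that list, recording
     // the resulting new_top of each region.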
 972 void PSParallelCompactNew::forward_to_new_addr() {
 973   GCTraceTime(Info, gc, phases) tm("Forward", &_gc_timer);
 974   uint num_workers = get_num_workers();
 975   _per_worker_region_data = NEW_C_HEAP_ARRAY(PCRegionData*, num_workers, mtGC);
 976   for (uint i = 0; i < num_workers; i++) {
 977     _per_worker_region_data[i] = nullptr;
 978   }
 979 
 980   class ForwardState {
 981     uint _worker_id;
 982     PCRegionData* _compaction_region;
 983     HeapWord* _compaction_point;
 984 
 985     void ensure_compaction_point() {
 986       if (_compaction_point == nullptr) {
 987         assert(_compaction_region == nullptr, "invariant");
 988         _compaction_region = _per_worker_region_data[_worker_id];
 989         assert(_compaction_region != nullptr, "invariant");
 990         _compaction_point = _compaction_region->bottom();
 991       }
 992     }
 993   public:
 994     explicit ForwardState(uint worker_id) :
 995             _worker_id(worker_id),
 996             _compaction_region(nullptr),
 997             _compaction_point(nullptr) {
 998     }
 999 
1000     size_t available() const {
1001       return pointer_delta(_compaction_region->end(), _compaction_point);
1002     }
1003 
1004     void forward_objs_in_region(ParCompactionManagerNew* cm, PCRegionData* region) {
1005       ensure_compaction_point();
1006       HeapWord* end = region->end();
1007       HeapWord* current = _mark_bitmap.find_obj_beg(region->bottom(), end);
1008       while (current < end) {
1009         assert(_mark_bitmap.is_marked(current), "must be marked");
1010         oop obj = cast_to_oop(current);
1011         assert(region->contains(obj), "object must not cross region boundary: obj: " PTR_FORMAT ", obj_end: " PTR_FORMAT ", region start: " PTR_FORMAT ", region end: " PTR_FORMAT, p2i(obj), p2i(cast_from_oop<HeapWord*>(obj) + obj->size()), p2i(region->bottom()), p2i(region->end()));
1012         size_t old_size = obj->size();
1013         size_t new_size = obj->copy_size(old_size, obj->mark());
1014         size_t size = (current == _compaction_point) ? old_size : new_size;
1015         while (size > available()) {
1016           _compaction_region->set_new_top(_compaction_point);
1017           _compaction_region = _compaction_region->local_next();
1018           assert(_compaction_region != nullptr, "must find a compaction region");
1019           _compaction_point = _compaction_region->bottom();
1020           size = (current == _compaction_point) ? old_size : new_size;
1021         }
1022         //log_develop_trace(gc, compaction)("Forwarding obj: " PTR_FORMAT ", to: " PTR_FORMAT, p2i(obj), p2i(_compaction_point));
1023         if (current != _compaction_point) {
1024           cm->preserved_marks()->push_if_necessary(obj, obj->mark());
1025           FullGCForwarding::forward_to(obj, cast_to_oop(_compaction_point));
1026         }
1027         _compaction_point += size;
1028         assert(_compaction_point <= _compaction_region->end(), "object must fit in region");
1029         current += old_size;
1030         assert(current <= end, "object must not cross region boundary");
1031         current = _mark_bitmap.find_obj_beg(current, end);
1032       }
1033     }
1034     void finish() {
1035       if (_compaction_region != nullptr) {
1036         _compaction_region->set_new_top(_compaction_point);
1037       }
1038     }
1039   };
1040 
1041   struct ForwardTask final : public WorkerTask {
1042     ForwardTask() : WorkerTask("PSForward task") {}
1043 
1044     void work(uint worker_id) override {
1045       ParCompactionManagerNew* cm = ParCompactionManagerNew::gc_thread_compaction_manager(worker_id);
1046       ForwardState state(worker_id);
1047       PCRegionData** last_link = &_per_worker_region_data[worker_id];
1048       size_t idx = worker_id;
1049       uint num_workers = get_num_workers();
1050       size_t num_regions = get_num_regions();
1051       PCRegionData* region_data_array = get_region_data_array();
1052       while (idx < num_regions) {
1053         PCRegionData* region = region_data_array + idx;
1054         *last_link = region;
1055         last_link = region->local_next_addr();
1056         state.forward_objs_in_region(cm, region);
1057         idx += num_workers;
1058       }
1059       state.finish();
1060     }
1061   } task;
1062 
1063   uint par_workers = ParallelScavengeHeap::heap()->workers().active_workers();
1064   ParallelScavengeHeap::heap()->workers().set_active_workers(num_workers);
1065   ParallelScavengeHeap::heap()->workers().run_task(&task);
1066   ParallelScavengeHeap::heap()->workers().set_active_workers(par_workers);
1067 
1068 #ifndef PRODUCT
1069   for (uint wid = 0; wid < num_workers; wid++) {
1070     for (PCRegionData* rd = _per_worker_region_data[wid]; rd != nullptr; rd = rd->local_next()) {
1071       log_develop_trace(gc, compaction)("Per worker compaction region, worker: %d, #%zu: [" PTR_FORMAT ", " PTR_FORMAT "), new_top: " PTR_FORMAT, wid, rd->idx(),
1072                                         p2i(rd->bottom()), p2i(rd->end()), p2i(rd->new_top()));
1073     }
1074   }
1075 #endif
1076 }
1077 
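     // Compaction phase: each worker walks its local list of regions and copies every
     // forwarded object to its destination, re-initializing the mark word and updating
     // the object start array (which is also updated for objects that do not move).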
1078 void PSParallelCompactNew::compact() {
1079   GCTraceTime(Info, gc, phases) tm("Compaction Phase", &_gc_timer);
1080   class CompactTask final : public WorkerTask {
1081     static void compact_region(PCRegionData* region) {
1082       HeapWord* bottom = region->bottom();
1083       HeapWord* end = region->top();
1084       if (bottom == end) {
1085         return;
1086       }
1087       HeapWord* current = _mark_bitmap.find_obj_beg(bottom, end);
1088       while (current < end) {
1089         oop obj = cast_to_oop(current);
1090         size_t size = obj->size();
1091         if (FullGCForwarding::is_forwarded(obj)) {
1092           oop fwd = FullGCForwarding::forwardee(obj);
1093           auto* dst = cast_from_oop<HeapWord*>(fwd);
1094           ObjectStartArray* sa = start_array(space_id(dst));
1095           if (sa != nullptr) {
1096             assert(dst != current, "expect moving object");
1097             size_t new_words = obj->copy_size(size, obj->mark());
1098             sa->update_for_block(dst, dst + new_words);
1099           }
1100 
1101           Copy::aligned_conjoint_words(current, dst, size);
1102           fwd->init_mark();
1103           fwd->initialize_hash_if_necessary(obj);
1104         } else {
1105           // The start_array must be updated even if the object is not moving.
1106           ObjectStartArray* sa = start_array(space_id(current));
1107           if (sa != nullptr) {
1108             sa->update_for_block(current, current + size);
1109           }
1110         }
1111         current = _mark_bitmap.find_obj_beg(current + size, end);
1112       }
1113     }
1114   public:
1115     explicit CompactTask() : WorkerTask("PSCompact task") {}
1116     void work(uint worker_id) override {
1117       PCRegionData* region = _per_worker_region_data[worker_id];
1118       while (region != nullptr) {
1119         log_trace(gc)("Compact worker: %u, compacting region: %zu", worker_id, region->idx());
1120         compact_region(region);
1121         region = region->local_next();
1122       }
1123     }
1124   } task;
1125 
1126   uint num_workers = get_num_workers();
1127   uint par_workers = ParallelScavengeHeap::heap()->workers().active_workers();
1128   ParallelScavengeHeap::heap()->workers().set_active_workers(num_workers);
1129   ParallelScavengeHeap::heap()->workers().run_task(&task);
1130   ParallelScavengeHeap::heap()->workers().set_active_workers(par_workers);
1131 }
1132 
1133 // Return the SpaceId for the space containing addr.  If addr is not in the
1134 // heap, last_space_id is returned.  In debug mode it expects the address to be
1135 // in the heap and asserts that it is.
1136 PSParallelCompactNew::SpaceId PSParallelCompactNew::space_id(HeapWord* addr) {
1137   assert(ParallelScavengeHeap::heap()->is_in_reserved(addr), "addr not in the heap");
1138 
1139   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
1140     if (_space_info[id].space()->contains(addr)) {
1141       return SpaceId(id);
1142     }
1143   }
1144 
1145   assert(false, "no space contains the addr");
1146   return last_space_id;
1147 }