/*
 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2022, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "memory/universe.hpp"

#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/memAllocator.hpp"
#include "gc/shared/plab.hpp"
#include "gc/shared/tlab_globals.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahInitLogger.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "gc/shenandoah/shenandoahParallelCleaning.inline.hpp"
#include "gc/shenandoah/shenandoahReferenceProcessor.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahSTWMark.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkGroup.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/mode/shenandoahIUMode.hpp"
#include "gc/shenandoah/mode/shenandoahPassiveMode.hpp"
#include "gc/shenandoah/mode/shenandoahSATBMode.hpp"
#if INCLUDE_JFR
#include "gc/shenandoah/shenandoahJfrSupport.hpp"
#endif

#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "memory/classLoaderMetaspace.hpp"
#include "memory/metaspaceUtils.hpp"
#include "oops/compressedOops.inline.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"
#include "services/memTracker.hpp"
#include "utilities/events.hpp"
#include "utilities/powerOfTwo.hpp"

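// Pre-touches all committed heap regions in parallel. Workers claim regions
// from a shared iterator and touch every page of the committed ones, faulting
// in the backing memory ahead of use.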
class ShenandoahPretouchHeapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;
  const size_t _page_size;
public:
  ShenandoahPretouchHeapTask(size_t page_size) :
    WorkerTask("Shenandoah Pretouch Heap"),
    _page_size(page_size) {}

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != nullptr) {
      if (r->is_committed()) {
        os::pretouch_memory(r->bottom(), r->end(), _page_size);
      }
      r = _regions.next();
    }
  }
};

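// Pre-touches the parts of the mark bitmap that back the committed heap
// regions. Bitmap addresses scale as heap addresses divided by
// MarkBitMap::heap_map_factor(), so each region's bitmap slice is computed
// directly from the region index.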
class ShenandoahPretouchBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;
  char* _bitmap_base;
  const size_t _bitmap_size;
  const size_t _page_size;
public:
  ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
    WorkerTask("Shenandoah Pretouch Bitmap"),
    _bitmap_base(bitmap_base),
    _bitmap_size(bitmap_size),
    _page_size(page_size) {}

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != nullptr) {
      size_t start = r->index()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      size_t end   = (r->index() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      assert(end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);

      if (r->is_committed()) {
        os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
      }

      r = _regions.next();
    }
  }
};

jint ShenandoahHeap::initialize() {
  //
  // Figure out heap sizing
  //

  size_t init_byte_size = InitialHeapSize;
  size_t min_byte_size  = MinHeapSize;
  size_t max_byte_size  = MaxHeapSize;
  size_t heap_alignment = HeapAlignment;

  size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
  Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");

  _num_regions = ShenandoahHeapRegion::region_count();
  assert(_num_regions == (max_byte_size / reg_size_bytes),
         "Regions should cover entire heap exactly: " SIZE_FORMAT " != " SIZE_FORMAT "/" SIZE_FORMAT,
         _num_regions, max_byte_size, reg_size_bytes);

  // Now we know the number of regions, initialize the heuristics.
  initialize_heuristics();

  size_t num_committed_regions = init_byte_size / reg_size_bytes;
  num_committed_regions = MIN2(num_committed_regions, _num_regions);
  assert(num_committed_regions <= _num_regions, "sanity");
  _initial_size = num_committed_regions * reg_size_bytes;

  size_t num_min_regions = min_byte_size / reg_size_bytes;
  num_min_regions = MIN2(num_min_regions, _num_regions);
  assert(num_min_regions <= _num_regions, "sanity");
  _minimum_size = num_min_regions * reg_size_bytes;

  // Default to max heap size.
  _soft_max_size = _num_regions * reg_size_bytes;

  _committed = _initial_size;

  size_t heap_page_size   = UseLargePages ? os::large_page_size() : os::vm_page_size();
  size_t bitmap_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
  size_t region_page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();

  //
  // Reserve and commit memory for heap
  //

  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
  initialize_reserved_region(heap_rs);
  _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
  _heap_region_special = heap_rs.special();

  assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
         "Misaligned heap: " PTR_FORMAT, p2i(base()));

#if SHENANDOAH_OPTIMIZED_MARKTASK
  // The optimized ShenandoahMarkTask takes some bits away from the full object bits.
  // Fail if we ever attempt to address more than we can.
  if ((uintptr_t)heap_rs.end() >= ShenandoahMarkTask::max_addressable()) {
    FormatBuffer<512> buf("Shenandoah reserved [" PTR_FORMAT ", " PTR_FORMAT") for the heap, \n"
                          "but max object address is " PTR_FORMAT ". Try to reduce heap size, or try other \n"
                          "VM options that allocate heap at lower addresses (HeapBaseMinAddress, AllocateHeapAt, etc).",
                p2i(heap_rs.base()), p2i(heap_rs.end()), ShenandoahMarkTask::max_addressable());
    vm_exit_during_initialization("Fatal Error", buf);
  }
#endif

  ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
  if (!_heap_region_special) {
    os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
                              "Cannot commit heap memory");
  }

  //
  // Reserve and commit memory for bitmap(s)
  //

  _bitmap_size = ShenandoahMarkBitMap::compute_size(heap_rs.size());
  _bitmap_size = align_up(_bitmap_size, bitmap_page_size);

  size_t bitmap_bytes_per_region = reg_size_bytes / ShenandoahMarkBitMap::heap_map_factor();

  guarantee(bitmap_bytes_per_region != 0,
            "Bitmap bytes per region should not be zero");
  guarantee(is_power_of_2(bitmap_bytes_per_region),
            "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);

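  // Size the bitmap commit granule ("slice"). If one bitmap page covers the
  // bitmaps of several regions, commit page-by-page and let those regions
  // share a slice; otherwise commit region-by-region.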
  if (bitmap_page_size > bitmap_bytes_per_region) {
    _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
    _bitmap_bytes_per_slice = bitmap_page_size;
  } else {
    _bitmap_regions_per_slice = 1;
    _bitmap_bytes_per_slice = bitmap_bytes_per_region;
  }

  guarantee(_bitmap_regions_per_slice >= 1,
            "Should have at least one region per slice: " SIZE_FORMAT,
            _bitmap_regions_per_slice);

  guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
            "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
            _bitmap_bytes_per_slice, bitmap_page_size);

  ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
  _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
  _bitmap_region_special = bitmap.special();

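  // Commit only the bitmap slices that back the initially committed regions,
  // rounded up to whole slices and clamped at the total bitmap size.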
  size_t bitmap_init_commit = _bitmap_bytes_per_slice *
                              align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
  bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
  if (!_bitmap_region_special) {
    os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
                              "Cannot commit bitmap memory");
  }

  _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions, _max_workers);

  if (ShenandoahVerify) {
    ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
    if (!verify_bitmap.special()) {
      os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
                                "Cannot commit verification bitmap memory");
    }
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
    _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
  }

  // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
  ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
  _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
  _aux_bitmap_region_special = aux_bitmap.special();
  _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);

  //
  // Create regions and region sets
  //
  size_t region_align = align_up(sizeof(ShenandoahHeapRegion), SHENANDOAH_CACHE_LINE_SIZE);
  size_t region_storage_size = align_up(region_align * _num_regions, region_page_size);
  region_storage_size = align_up(region_storage_size, os::vm_allocation_granularity());

  ReservedSpace region_storage(region_storage_size, region_page_size);
  MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
  if (!region_storage.special()) {
    os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
                              "Cannot commit region memory");
  }

  // Try to fit the collection set bitmap at lower addresses. This optimizes code generation for cset checks.
  // Go up until a sensible limit (subject to encoding constraints) and try to reserve the space there.
  // If not successful, bite the bullet and allocate at whatever address.
  {
    size_t cset_align = MAX2<size_t>(os::vm_page_size(), os::vm_allocation_granularity());
    size_t cset_size = align_up(((size_t) sh_rs.base() + sh_rs.size()) >> ShenandoahHeapRegion::region_size_bytes_shift(), cset_align);

    uintptr_t min = round_up_power_of_2(cset_align);
    uintptr_t max = (1u << 30u);

    for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
      char* req_addr = (char*)addr;
      assert(is_aligned(req_addr, cset_align), "Should be aligned");
      ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size(), req_addr);
      if (cset_rs.is_reserved()) {
        assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
        _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
        break;
      }
    }

    if (_collection_set == nullptr) {
      ReservedSpace cset_rs(cset_size, cset_align, os::vm_page_size());
      _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
    }
  }

  _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
  _free_set = new ShenandoahFreeSet(this, _num_regions);

  {
    ShenandoahHeapLocker locker(lock());

    for (size_t i = 0; i < _num_regions; i++) {
      HeapWord* start = (HeapWord*)sh_rs.base() + ShenandoahHeapRegion::region_size_words() * i;
      bool is_committed = i < num_committed_regions;
      void* loc = region_storage.base() + i * region_align;

      ShenandoahHeapRegion* r = new (loc) ShenandoahHeapRegion(start, i, is_committed);
      assert(is_aligned(r, SHENANDOAH_CACHE_LINE_SIZE), "Sanity");

      _marking_context->initialize_top_at_mark_start(r);
      _regions[i] = r;
      assert(!collection_set()->is_in(i), "New region should not be in collection set");
    }

    // Initialize to complete
    _marking_context->mark_complete();

    _free_set->rebuild();
  }

  if (AlwaysPreTouch) {
    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.
    ShenandoahPushWorkerScope scope(workers(), _max_workers, false);

    _pretouch_heap_page_size = heap_page_size;
    _pretouch_bitmap_page_size = bitmap_page_size;

#ifdef LINUX
    // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
    // pages. But the kernel needs to know that every small page is used, in order to coalesce
    // them into huge ones. Therefore, we need to pretouch with smaller pages.
    if (UseTransparentHugePages) {
      _pretouch_heap_page_size = (size_t)os::vm_page_size();
      _pretouch_bitmap_page_size = (size_t)os::vm_page_size();
    }
#endif

    // OS memory managers may want to coalesce back-to-back pages. Make their jobs
    // simpler by pre-touching contiguous spaces (heap and bitmap) separately.

    ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, _pretouch_bitmap_page_size);
    _workers->run_task(&bcl);

    ShenandoahPretouchHeapTask hcl(_pretouch_heap_page_size);
    _workers->run_task(&hcl);
  }

  //
  // Initialize the rest of GC subsystems
  //

  _liveness_cache = NEW_C_HEAP_ARRAY(ShenandoahLiveData*, _max_workers, mtGC);
  for (uint worker = 0; worker < _max_workers; worker++) {
    _liveness_cache[worker] = NEW_C_HEAP_ARRAY(ShenandoahLiveData, _num_regions, mtGC);
    Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(ShenandoahLiveData));
  }

  // There should probably be Shenandoah-specific options for these,
  // just as there are G1-specific options.
  {
    ShenandoahSATBMarkQueueSet& satbqs = ShenandoahBarrierSet::satb_mark_queue_set();
    satbqs.set_process_completed_buffers_threshold(20); // G1SATBProcessCompletedThreshold
    satbqs.set_buffer_enqueue_threshold_percentage(60); // G1SATBBufferEnqueueingThresholdPercent
  }

  _monitoring_support = new ShenandoahMonitoringSupport(this);
  _phase_timings = new ShenandoahPhaseTimings(max_workers());
  ShenandoahCodeRoots::initialize();

  if (ShenandoahPacing) {
    _pacer = new ShenandoahPacer(this);
    _pacer->setup_for_idle();
  } else {
    _pacer = nullptr;
  }

  _control_thread = new ShenandoahControlThread();

  ShenandoahInitLogger::print();

  return JNI_OK;
}

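// Selects the GC mode from -XX:ShenandoahGCMode, and verifies that diagnostic
// and experimental modes have been explicitly unlocked.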
void ShenandoahHeap::initialize_mode() {
  if (ShenandoahGCMode != nullptr) {
    if (strcmp(ShenandoahGCMode, "satb") == 0) {
      _gc_mode = new ShenandoahSATBMode();
    } else if (strcmp(ShenandoahGCMode, "iu") == 0) {
      _gc_mode = new ShenandoahIUMode();
    } else if (strcmp(ShenandoahGCMode, "passive") == 0) {
      _gc_mode = new ShenandoahPassiveMode();
    } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option");
    }
  } else {
    vm_exit_during_initialization("Unknown -XX:ShenandoahGCMode option (null)");
  }
  _gc_mode->initialize_flags();
  if (_gc_mode->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _gc_mode->name()));
  }
  if (_gc_mode->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("GC mode \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _gc_mode->name()));
  }
}

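// Instantiates the heuristics chosen by the already-initialized GC mode, with
// the same diagnostic/experimental unlock checks as initialize_mode().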
void ShenandoahHeap::initialize_heuristics() {
  assert(_gc_mode != nullptr, "Must be initialized");
  _heuristics = _gc_mode->initialize_heuristics();

  if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                    _heuristics->name()));
  }
  if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
    vm_exit_during_initialization(
            err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                    _heuristics->name()));
  }
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _initial_size(0),
  _used(0),
  _committed(0),
  _bytes_allocated_since_gc_start(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _workers(nullptr),
  _safepoint_workers(nullptr),
  _heap_region_special(false),
  _num_regions(0),
  _regions(nullptr),
  _update_refs_iterator(this),
  _gc_state_changed(false),
  _control_thread(nullptr),
  _shenandoah_policy(policy),
  _gc_mode(nullptr),
  _heuristics(nullptr),
  _free_set(nullptr),
  _pacer(nullptr),
  _verifier(nullptr),
  _phase_timings(nullptr),
  _monitoring_support(nullptr),
  _memory_pool(nullptr),
  _stw_memory_manager("Shenandoah Pauses"),
  _cycle_memory_manager("Shenandoah Cycles"),
  _gc_timer(new ConcurrentGCTimer()),
  _soft_ref_policy(),
  _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
  _ref_processor(new ShenandoahReferenceProcessor(MAX2(_max_workers, 1U))),
  _marking_context(nullptr),
  _bitmap_size(0),
  _bitmap_regions_per_slice(0),
  _bitmap_bytes_per_slice(0),
  _bitmap_region_special(false),
  _aux_bitmap_region_special(false),
  _liveness_cache(nullptr),
  _collection_set(nullptr)
{
  // Initialize GC mode early, so we can adjust barrier support
  initialize_mode();
  BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkerThreads("Shenandoah GC Threads", _max_workers);
  if (_workers == nullptr) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }

  if (ParallelGCThreads > 1) {
    _safepoint_workers = new ShenandoahWorkerThreads("Safepoint Cleanup Thread",
                                                     ParallelGCThreads);
    _safepoint_workers->initialize_workers();
  }
}

#ifdef _MSC_VER
#pragma warning( pop )
#endif

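// Clears the mark bitmaps for all regions, in parallel. Only bitmap slices
// that are actually committed need clearing.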
class ShenandoahResetBitmapTask : public WorkerTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahResetBitmapTask() :
    WorkerTask("Shenandoah Reset Bitmap") {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->marking_context();
    while (region != nullptr) {
      if (heap->is_bitmap_slice_committed(region)) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahHeap::reset_mark_bitmap() {
  assert_gc_workers(_workers->active_workers());
  mark_incomplete_marking_context();

  ShenandoahResetBitmapTask task;
  _workers->run_task(&task);
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print_cr("Shenandoah Heap");
  st->print_cr(" " SIZE_FORMAT "%s max, " SIZE_FORMAT "%s soft max, " SIZE_FORMAT "%s committed, " SIZE_FORMAT "%s used",
               byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity()),
               byte_size_in_proper_unit(soft_max_capacity()), proper_unit_for_byte_size(soft_max_capacity()),
               byte_size_in_proper_unit(committed()),    proper_unit_for_byte_size(committed()),
               byte_size_in_proper_unit(used()),         proper_unit_for_byte_size(used()));
  st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT "%s regions",
               num_regions(),
               byte_size_in_proper_unit(ShenandoahHeapRegion::region_size_bytes()),
               proper_unit_for_byte_size(ShenandoahHeapRegion::region_size_bytes()));

  st->print("Status: ");
  if (has_forwarded_objects())                 st->print("has forwarded objects, ");
  if (is_concurrent_mark_in_progress())        st->print("marking, ");
  if (is_evacuation_in_progress())             st->print("evacuating, ");
  if (is_update_refs_in_progress())            st->print("updating refs, ");
  if (is_degenerated_gc_in_progress())         st->print("degenerated gc, ");
  if (is_full_gc_in_progress())                st->print("full gc, ");
  if (is_full_gc_move_in_progress())           st->print("full gc move, ");
  if (is_concurrent_weak_root_in_progress())   st->print("concurrent weak roots, ");
  if (is_concurrent_strong_root_in_progress() &&
      !is_concurrent_weak_root_in_progress())  st->print("concurrent strong roots, ");

  if (cancelled_gc()) {
    st->print("cancelled");
  } else {
    st->print("not cancelled");
  }
  st->cr();

  st->print_cr("Reserved region:");
  st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
               p2i(reserved_region().start()),
               p2i(reserved_region().end()));

  ShenandoahCollectionSet* cset = collection_set();
  st->print_cr("Collection set:");
  if (cset != nullptr) {
    st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
    st->print_cr(" - map (biased):  " PTR_FORMAT, p2i(cset->biased_map_address()));
  } else {
    st->print_cr(" (null)");
  }

  st->cr();
  MetaspaceUtils::print_on(st);

  if (Verbose) {
    st->cr();
    print_heap_regions_on(st);
  }
}

class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    assert(thread != nullptr, "Sanity");
    assert(thread->is_Worker_thread(), "Only worker thread expected");
    ShenandoahThreadLocalData::initialize_gclab(thread);
  }
};

void ShenandoahHeap::post_initialize() {
  CollectedHeap::post_initialize();
  MutexLocker ml(Threads_lock);

  ShenandoahInitWorkerGCLABClosure init_gclabs;
  _workers->threads_do(&init_gclabs);

  // A gclab cannot be initialized early during VM startup, because it cannot determine its max_size.
  // Instead, we let WorkerThreads initialize the gclab when a new worker is created.
  _workers->set_initialize_gclab();
  if (_safepoint_workers != nullptr) {
    _safepoint_workers->threads_do(&init_gclabs);
    _safepoint_workers->set_initialize_gclab();
  }

  _heuristics->initialize();

  JFR_ONLY(ShenandoahJFRSupport::register_jfr_type_serializers());
}

size_t ShenandoahHeap::used() const {
  return Atomic::load(&_used);
}

size_t ShenandoahHeap::committed() const {
  return Atomic::load(&_committed);
}

void ShenandoahHeap::increase_committed(size_t bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  _committed += bytes;
}

void ShenandoahHeap::decrease_committed(size_t bytes) {
  shenandoah_assert_heaplocked_or_safepoint();
  _committed -= bytes;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  Atomic::add(&_used, bytes, memory_order_relaxed);
}

void ShenandoahHeap::set_used(size_t bytes) {
  Atomic::store(&_used, bytes);
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(used() >= bytes, "never decrease heap size by more than we've left");
  Atomic::sub(&_used, bytes, memory_order_relaxed);
}

void ShenandoahHeap::increase_allocated(size_t bytes) {
  Atomic::add(&_bytes_allocated_since_gc_start, bytes, memory_order_relaxed);
}

void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
  size_t bytes = words * HeapWordSize;
  if (!waste) {
    increase_used(bytes);
  }
  increase_allocated(bytes);
  if (ShenandoahPacing) {
    control_thread()->pacing_notify_alloc(words);
    if (waste) {
      pacer()->claim_for_alloc(words, true);
    }
  }
}

size_t ShenandoahHeap::capacity() const {
  return committed();
}

size_t ShenandoahHeap::max_capacity() const {
  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahHeap::soft_max_capacity() const {
  size_t v = Atomic::load(&_soft_max_size);
  assert(min_capacity() <= v && v <= max_capacity(),
         "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
         min_capacity(), v, max_capacity());
  return v;
}

void ShenandoahHeap::set_soft_max_capacity(size_t v) {
  assert(min_capacity() <= v && v <= max_capacity(),
         "Should be in bounds: " SIZE_FORMAT " <= " SIZE_FORMAT " <= " SIZE_FORMAT,
         min_capacity(), v, max_capacity());
  Atomic::store(&_soft_max_size, v);
}

size_t ShenandoahHeap::min_capacity() const {
  return _minimum_size;
}

size_t ShenandoahHeap::initial_capacity() const {
  return _initial_size;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* heap_base = (HeapWord*) base();
  HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
  return p >= heap_base && p < last_region_end;
}

void ShenandoahHeap::op_uncommit(double shrink_before, size_t shrink_until) {
  assert(ShenandoahUncommit, "should be enabled");

  // The application allocates from the beginning of the heap, and GC allocates
  // at the end of it. It is more efficient to uncommit from the end, so that
  // the application can keep using the committed regions near the beginning.
  // GC allocations are much less frequent, and therefore can accept the
  // committing costs.

  size_t count = 0;
  for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
    ShenandoahHeapRegion* r = get_region(i - 1);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      ShenandoahHeapLocker locker(lock());
      if (r->is_empty_committed()) {
        if (committed() < shrink_until + ShenandoahHeapRegion::region_size_bytes()) {
          break;
        }

        r->make_uncommitted();
        count++;
      }
    }
    SpinPause(); // allow allocators to take the lock
  }

  if (count > 0) {
    control_thread()->notify_heap_changed();
  }
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // New object should fit the GCLAB size
  size_t min_size = MAX2(size, PLAB::min_size());

  // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
  size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
  new_size = MIN2(new_size, PLAB::max_size());
  new_size = MAX2(new_size, PLAB::min_size());

  // Record the new heuristic value even if we take a shortcut. This captures
  // the case when moderately-sized objects always take the shortcut. At some point,
  // the heuristics should catch up with them.
  ShenandoahThreadLocalData::set_gclab_size(thread, new_size);

  if (new_size < size) {
    // New size still does not fit the object. Fall back to shared allocation.
    // This avoids retiring perfectly good GCLABs, when we encounter a large object.
    return nullptr;
  }

  // Retire current GCLAB, and allocate a new one.
  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
  gclab->retire();

  size_t actual_size = 0;
  HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
  if (gclab_buf == nullptr) {
    return nullptr;
  }

  assert(size <= actual_size, "allocation should fit");

  // ...and clear or zap the just-allocated GCLAB, if needed.
  if (ZeroTLAB) {
    Copy::zero_to_words(gclab_buf, actual_size);
  } else if (ZapTLAB) {
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
  }
  gclab->set_buf(gclab_buf, actual_size);
  return gclab->allocate(size);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
                                            size_t requested_size,
                                            size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
  HeapWord* res = allocate_memory(req);
  if (res != nullptr) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
                                             size_t word_size,
                                             size_t* actual_size) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
  HeapWord* res = allocate_memory(req);
  if (res != nullptr) {
    *actual_size = req.actual_size();
  } else {
    *actual_size = 0;
  }
  return res;
}

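// Entry point for all heap allocations. Mutator allocations are paced and may
// block waiting for GC on allocation failure; GC allocations must never block
// here, and fail over to the LRB slowpath instead.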
HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
  intptr_t pacer_epoch = 0;
  bool in_new_region = false;
  HeapWord* result = nullptr;

  if (req.is_mutator_alloc()) {
    if (ShenandoahPacing) {
      pacer()->pace_for_alloc(req.size());
      pacer_epoch = pacer()->epoch();
    }

    if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
      result = allocate_memory_under_lock(req, in_new_region);
    }

    // If the allocation failed, block until the control thread has reacted, then retry.
    //
    // It might happen that a thread requesting an allocation unblocks long after the GC
    // happened, only to fail the second allocation because other threads have already
    // depleted the free storage. In this case, a better strategy is to try again, as long
    // as GC makes progress (or until at least one full GC has completed).
    size_t original_count = shenandoah_policy()->full_gc_count();
    while (result == nullptr
        && (_progress_last_gc.is_set() || original_count == shenandoah_policy()->full_gc_count())) {
      control_thread()->handle_alloc_failure(req);
      result = allocate_memory_under_lock(req, in_new_region);
    }
  } else {
    assert(req.is_gc_alloc(), "Can only accept GC allocs here");
    result = allocate_memory_under_lock(req, in_new_region);
    // Do not call handle_alloc_failure() here, because we cannot block.
    // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
  }

  if (in_new_region) {
    control_thread()->notify_heap_changed();
  }

  if (result != nullptr) {
    size_t requested = req.size();
    size_t actual = req.actual_size();

    assert(req.is_lab_alloc() || (requested == actual),
           "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
           ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);

    if (req.is_mutator_alloc()) {
      notify_mutator_alloc_words(actual, false);

      // If we requested more than we were granted, give the rest back to pacer.
      // This only matters if we are in the same pacing epoch: do not try to unpace
      // over the budget for the other phase.
      if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
        pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
      }
    } else {
      increase_used(actual * HeapWordSize);
    }
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
  // If we are dealing with mutator allocation, then we may need to block for safepoint.
  // We cannot block for safepoint for GC allocations, because there is a high chance
  // we are already running at safepoint or from stack watermark machinery, and we cannot
  // block again.
  ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
  return _free_set->allocate(req, in_new_region);
}

HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
  ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
  return allocate_memory(req);
}

MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                             size_t size,
                                                             Metaspace::MetadataType mdtype) {
  MetaWord* result;

  // Inform metaspace OOM to GC heuristics if class unloading is possible.
  if (heuristics()->can_unload_classes()) {
    ShenandoahHeuristics* h = heuristics();
    h->record_metaspace_oom();
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != nullptr) {
    return result;
  }

  // Start full GC
  collect(GCCause::_metadata_GC_clear_soft_refs);

  // Retry allocation
  result = loader_data->metaspace_non_null()->allocate(size, mdtype);
  if (result != nullptr) {
    return result;
  }

  // Expand and retry allocation
  result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
  if (result != nullptr) {
    return result;
  }

  // Out of memory
  return nullptr;
}

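// Evacuates a single marked object to its new location, unless some other
// thread has already forwarded it.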
class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  Thread* const _thread;
public:
  ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {}

  void do_object(oop p) {
    shenandoah_assert_marked(nullptr, p);
    if (!p->is_forwarded()) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

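// Parallel evacuation driver: workers claim collection set regions, evacuate
// the live objects in them, report progress to the pacer, and yield or bail
// out when GC is cancelled.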
class ShenandoahEvacuationTask : public WorkerTask {
private:
  ShenandoahHeap* const _sh;
  ShenandoahCollectionSet* const _cs;
  bool _concurrent;
public:
  ShenandoahEvacuationTask(ShenandoahHeap* sh,
                           ShenandoahCollectionSet* cs,
                           bool concurrent) :
    WorkerTask("Shenandoah Evacuation"),
    _sh(sh),
    _cs(cs),
    _concurrent(concurrent)
  {}

  void work(uint worker_id) {
    if (_concurrent) {
      ShenandoahConcurrentWorkerSession worker_session(worker_id);
      ShenandoahSuspendibleThreadSetJoiner stsj;
      ShenandoahEvacOOMScope oom_evac_scope;
      do_work();
    } else {
      ShenandoahParallelWorkerSession worker_session(worker_id);
      ShenandoahEvacOOMScope oom_evac_scope;
      do_work();
    }
  }

private:
  void do_work() {
    ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
    ShenandoahHeapRegion* r;
    while ((r = _cs->claim_next()) != nullptr) {
      assert(r->has_live(), "Region " SIZE_FORMAT " should have been reclaimed early", r->index());
      _sh->marked_object_iterate(r, &cl);

      if (ShenandoahPacing) {
        _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
      }

      if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
        break;
      }
    }
  }
};

void ShenandoahHeap::evacuate_collection_set(bool concurrent) {
  ShenandoahEvacuationTask task(this, _collection_set, concurrent);
  workers()->run_task(&task);
}

void ShenandoahHeap::trash_cset_regions() {
  ShenandoahHeapLocker locker(lock());

  ShenandoahCollectionSet* set = collection_set();
  ShenandoahHeapRegion* r;
  set->clear_current_index();
  while ((r = set->next()) != nullptr) {
    r->make_trash();
  }
  collection_set()->clear();
}

void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
  st->print_cr("Heap Regions:");
  st->print_cr("Region state: EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HP=pinned humongous start");
  st->print_cr("              HC=humongous continuation, CS=collection set, TR=trash, P=pinned, CSP=pinned collection set");
  st->print_cr("BTE=bottom/top/end, TAMS=top-at-mark-start");
  st->print_cr("UWM=update watermark, U=used");
  st->print_cr("T=TLAB allocs, G=GCLAB allocs");
  st->print_cr("S=shared allocs, L=live data");
  st->print_cr("CP=critical pins");

  for (size_t i = 0; i < num_regions(); i++) {
    get_region(i)->print_on(st);
  }
}

void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
  assert(start->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = cast_to_oop(start->bottom());
  size_t size = humongous_obj->size();
  size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
  size_t index = start->index() + required_regions - 1;

  assert(!start->has_live(), "liveness must be zero");

  for (size_t i = 0; i < required_regions; i++) {
    // Reclaim from the tail. Otherwise, the assertion fails when printing a region to the
    // trace log, as it expects that every region belongs to a humongous region starting
    // with a humongous start region.
    ShenandoahHeapRegion* region = get_region(index--);

    assert(region->is_humongous(), "expect correct humongous start or continuation");
    assert(!region->is_cset(), "Humongous region should not be in collection set");

    region->make_trash_immediate();
  }
}

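// Debug-only check that a thread's GCLAB is fully retired and needs no
// further retirement; used from the ASSERT-only block in tlabs_retire().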
class ShenandoahCheckCleanGCLABClosure : public ThreadClosure {
public:
  ShenandoahCheckCleanGCLABClosure() {}
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
    assert(gclab->words_remaining() == 0, "GCLAB should not need retirement");
  }
};

class ShenandoahRetireGCLABClosure : public ThreadClosure {
private:
  bool const _resize;
public:
  ShenandoahRetireGCLABClosure(bool resize) : _resize(resize) {}
  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != nullptr, "GCLAB should be initialized for %s", thread->name());
    gclab->retire();
    if (_resize && ShenandoahThreadLocalData::gclab_size(thread) > 0) {
      ShenandoahThreadLocalData::set_gclab_size(thread, 0);
    }
  }
};

void ShenandoahHeap::labs_make_parsable() {
  assert(UseTLAB, "Only call with UseTLAB");

  ShenandoahRetireGCLABClosure cl(false);

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread* t = jtiwh.next(); ) {
    ThreadLocalAllocBuffer& tlab = t->tlab();
    tlab.make_parsable();
    cl.do_thread(t);
  }

  workers()->threads_do(&cl);
}

void ShenandoahHeap::tlabs_retire(bool resize) {
  assert(UseTLAB, "Only call with UseTLAB");
  assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

  ThreadLocalAllocStats stats;

  for (JavaThreadIteratorWithHandle jtiwh; JavaThread* t = jtiwh.next(); ) {
    ThreadLocalAllocBuffer& tlab = t->tlab();
    tlab.retire(&stats);
    if (resize) {
      tlab.resize();
    }
  }

  stats.publish();

#ifdef ASSERT
  ShenandoahCheckCleanGCLABClosure cl;
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread* t = jtiwh.next(); ) {
    cl.do_thread(t);
  }
  workers()->threads_do(&cl);
#endif
}

void ShenandoahHeap::gclabs_retire(bool resize) {
  assert(UseTLAB, "Only call with UseTLAB");
  assert(!resize || ResizeTLAB, "Only call for resize when ResizeTLAB is enabled");

  ShenandoahRetireGCLABClosure cl(resize);
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread* t = jtiwh.next(); ) {
    cl.do_thread(t);
  }
  workers()->threads_do(&cl);

  if (safepoint_workers() != nullptr) {
    safepoint_workers()->threads_do(&cl);
  }
}

// Returns size in bytes
size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread* thread) const {
  // Return the max allowed size, and let the allocation path
  // figure out the safe size for current allocation.
  return ShenandoahHeapRegion::max_tlab_size_bytes();
}

size_t ShenandoahHeap::max_tlab_size() const {
  // Returns size in words
  return ShenandoahHeapRegion::max_tlab_size_words();
}

void ShenandoahHeap::collect(GCCause::Cause cause) {
  control_thread()->request_gc(cause);
}

void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
  //assert(false, "Shouldn't need to do full collections");
}

HeapWord* ShenandoahHeap::block_start(const void* addr) const {
  ShenandoahHeapRegion* r = heap_region_containing(addr);
  if (r != nullptr) {
    return r->block_start(addr);
  }
  return nullptr;
}

bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
  ShenandoahHeapRegion* r = heap_region_containing(addr);
  return r->block_is_obj(addr);
}

bool ShenandoahHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<ShenandoahHeap>::print_location(st, addr);
}

void ShenandoahHeap::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() && UseTLAB) {
    labs_make_parsable();
  }
}

void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
  tcl->do_thread(_control_thread);
  workers()->threads_do(tcl);
  if (_safepoint_workers != nullptr) {
    _safepoint_workers->threads_do(tcl);
  }
}

void ShenandoahHeap::print_tracing_info() const {
  LogTarget(Info, gc, stats) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);

    phase_timings()->print_global_on(&ls);

    ls.cr();
    ls.cr();

    shenandoah_policy()->print_gc_stats(&ls);

    ls.cr();
    ls.cr();
  }
}

void ShenandoahHeap::verify(VerifyOption vo) {
  if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
    if (ShenandoahVerify) {
      verifier()->verify_generic(vo);
    } else {
      // TODO: Consider allocating verification bitmaps on demand,
      // and turn this on unconditionally.
    }
  }
}

size_t ShenandoahHeap::tlab_capacity(Thread* thr) const {
  return _free_set->capacity();
}

class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
private:
  MarkBitMap* _bitmap;
  ShenandoahScanObjectStack* _oop_stack;
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _marking_context;

  template <class T>
  void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
        // There may be dead oops in weak roots in concurrent root phase, do not touch them.
        return;
      }
      obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);

      assert(oopDesc::is_oop(obj), "must be a valid oop");
      if (!_bitmap->is_marked(obj)) {
        _bitmap->mark(obj);
        _oop_stack->push(obj);
      }
    }
  }
public:
  ObjectIterateScanRootClosure(MarkBitMap* bitmap, ShenandoahScanObjectStack* oop_stack) :
    _bitmap(bitmap), _oop_stack(oop_stack), _heap(ShenandoahHeap::heap()),
    _marking_context(_heap->marking_context()) {}
  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

/*
 * This is public API, used in preparation of object_iterate().
 * Since we don't do a linear scan of the heap in object_iterate() (see the comment below),
 * we don't need to make the heap parsable. For Shenandoah-internal linear heap scans that
 * we can control, we call SH::tlabs_retire and SH::gclabs_retire.
 */
void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  // No-op.
}

/*
 * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
 *
 * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
 * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
 * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
 * scanning therefore depends on having a valid marking bitmap to support it. However, we only
 * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
 * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
 * wiped the bitmap in preparation for next marking).
 *
 * For all those reasons, we implement object iteration as a single marking traversal, reporting
 * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
 * is allowed to report dead objects, but is not required to do so.
 */
void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
  // Reset bitmap
  if (!prepare_aux_bitmap_for_iteration()) {
    return;
  }

  ShenandoahScanObjectStack oop_stack;
  ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
  // Seed the stack with root scan
  scan_roots_for_iteration(&oop_stack, &oops);

  // Work through the oop stack to traverse heap
  while (!oop_stack.is_empty()) {
    oop obj = oop_stack.pop();
    assert(oopDesc::is_oop(obj), "must be a valid oop");
    cl->do_object(obj);
    obj->oop_iterate(&oops);
  }

  assert(oop_stack.is_empty(), "should be empty");
  // Reclaim bitmap
  reclaim_aux_bitmap_for_iteration();
}

bool ShenandoahHeap::prepare_aux_bitmap_for_iteration() {
  assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");

  if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
    log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
    return false;
  }
  // Reset bitmap
  _aux_bit_map.clear();
  return true;
}

void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_stack, ObjectIterateScanRootClosure* oops) {
  // Process GC roots according to the current GC cycle.
  // This populates the work stack with the initial objects.
  // It is important to relinquish the associated locks before diving
  // into the heap dumper.
  uint n_workers = safepoint_workers() != nullptr ? safepoint_workers()->active_workers() : 1;
  ShenandoahHeapIterationRootScanner rp(n_workers);
  rp.roots_do(oops);
}

void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
  if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
    log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
  }
}

// Closure for parallel object iteration
class ShenandoahObjectIterateParScanClosure : public BasicOopIterateClosure {
private:
  MarkBitMap* _bitmap;
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _marking_context;

  template <class T>
  void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      if (_heap->is_concurrent_weak_root_in_progress() && !_marking_context->is_marked(obj)) {
        // There may be dead oops in weak roots in concurrent root phase, do not touch them.
        return;
      }
      obj = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(obj);

      assert(oopDesc::is_oop(obj), "Must be a valid oop");
      if (_bitmap->par_mark(obj)) {
        _queue->push(ShenandoahMarkTask(obj));
      }
    }
  }
public:
  ShenandoahObjectIterateParScanClosure(MarkBitMap* bitmap, ShenandoahObjToScanQueue* q) :
    _bitmap(bitmap), _queue(q), _heap(ShenandoahHeap::heap()),
    _marking_context(_heap->marking_context()) {}
  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

// Object iterator for parallel heap iteration.
// The root scanning phase happens in the constructor, as a preparation of
// the parallel marking queues.
// Every worker processes its own marking queue. Work-stealing is used
// to balance the workload.
1361 class ShenandoahParallelObjectIterator : public ParallelObjectIteratorImpl {
1362 private:
1363   uint                         _num_workers;
1364   bool                         _init_ready;
1365   MarkBitMap*                  _aux_bit_map;
1366   ShenandoahHeap*              _heap;
1367   ShenandoahScanObjectStack    _roots_stack; // global roots stack
1368   ShenandoahObjToScanQueueSet* _task_queues;
1369 public:
1370   ShenandoahParallelObjectIterator(uint num_workers, MarkBitMap* bitmap) :
1371         _num_workers(num_workers),
1372         _init_ready(false),
1373         _aux_bit_map(bitmap),
1374         _heap(ShenandoahHeap::heap()), _task_queues(nullptr) { // null-init: destructor may run after an early bail-out
1375     // Initialize bitmap
1376     _init_ready = _heap->prepare_aux_bitmap_for_iteration();
1377     if (!_init_ready) {
1378       return;
1379     }
1380 
1381     ObjectIterateScanRootClosure oops(_aux_bit_map, &_roots_stack);
1382     _heap->scan_roots_for_iteration(&_roots_stack, &oops);
1383 
1384     _init_ready = prepare_worker_queues();
1385   }
1386 
1387   ~ShenandoahParallelObjectIterator() {
1388     // Reclaim bitmap
1389     _heap->reclaim_aux_bitmap_for_iteration();
1390     // Reclaim the worker queues
1391     if (_task_queues != nullptr) {
1392       for (uint i = 0; i < _num_workers; ++i) {
1393         ShenandoahObjToScanQueue* q = _task_queues->queue(i);
1394         if (q != nullptr) {
1395           delete q;
1396           _task_queues->register_queue(i, nullptr);
1397         }
1398       }
1399       delete _task_queues;
1400       _task_queues = nullptr;
1401     }
1402   }
1403 
1404   virtual void object_iterate(ObjectClosure* cl, uint worker_id) {
1405     if (_init_ready) {
1406       object_iterate_parallel(cl, worker_id, _task_queues);
1407     }
1408   }
1409 
1410 private:
1411   // Divide the global roots stack into per-worker queues
1412   bool prepare_worker_queues() {
1413     _task_queues = new ShenandoahObjToScanQueueSet((int) _num_workers);
1414     // Initialize a queue for every worker
1415     for (uint i = 0; i < _num_workers; ++i) {
1416       ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
1417       _task_queues->register_queue(i, task_queue);
1418     }
1419     // Divide the roots among the workers. Assume that the distribution of object
1420     // references correlates with root kind; use round-robin so that every worker
1421     // has the same chance to process every kind of root.
1422     size_t roots_num = _roots_stack.size();
1423     if (roots_num == 0) {
1424       // No work to do
1425       return false;
1426     }
1427 
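    // A worked sketch of the distribution: with 8 roots and 3 workers, the
    // loop below places roots 0, 3, 6 on queue 0; roots 1, 4, 7 on queue 1;
    // and roots 2, 5 on queue 2. Work-stealing evens out any remaining skew.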
1428     for (uint j = 0; j < roots_num; j++) {
1429       uint stack_id = j % _num_workers;
1430       oop obj = _roots_stack.pop();
1431       _task_queues->queue(stack_id)->push(ShenandoahMarkTask(obj));
1432     }
1433     return true;
1434   }
1435 
1436   void object_iterate_parallel(ObjectClosure* cl,
1437                                uint worker_id,
1438                                ShenandoahObjToScanQueueSet* queue_set) {
1439     assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1440     assert(queue_set != nullptr, "task queue must not be null");
1441 
1442     ShenandoahObjToScanQueue* q = queue_set->queue(worker_id);
1443     assert(q != nullptr, "object iterate queue must not be null");
1444 
1445     ShenandoahMarkTask t;
1446     ShenandoahObjectIterateParScanClosure oops(_aux_bit_map, q);
1447 
1448     // Work through the queue to traverse the heap.
1449     // Steal from other queues when the local queue runs dry.
1450     while (q->pop(t) || queue_set->steal(worker_id, t)) {
1451       oop obj = t.obj();
1452       assert(oopDesc::is_oop(obj), "must be a valid oop");
1453       cl->do_object(obj);
1454       obj->oop_iterate(&oops);
1455     }
1456     assert(q->is_empty(), "should be empty");
1457   }
1458 };
1459 
1460 ParallelObjectIteratorImpl* ShenandoahHeap::parallel_object_iterator(uint workers) {
1461   return new ShenandoahParallelObjectIterator(workers, &_aux_bit_map);
1462 }
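
// A sketch of how the parallel iterator is driven (hypothetical caller shown;
// the real callers live in shared heap-inspection code, which runs the
// returned iterator from inside a safepoint worker task):
//
//   ParallelObjectIteratorImpl* it = heap->parallel_object_iterator(nworkers);
//   // ... in each of the nworkers worker threads:
//   it->object_iterate(&object_closure, worker_id);
//   delete it;  // reclaims the aux bitmap and the per-worker queues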
1463 
1464 // Keep alive an object that was loaded with AS_NO_KEEPALIVE.
1465 void ShenandoahHeap::keep_alive(oop obj) {
1466   if (is_concurrent_mark_in_progress() && (obj != nullptr)) {
1467     ShenandoahBarrierSet::barrier_set()->enqueue(obj);
1468   }
1469 }
1470 
1471 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1472   for (size_t i = 0; i < num_regions(); i++) {
1473     ShenandoahHeapRegion* current = get_region(i);
1474     blk->heap_region_do(current);
1475   }
1476 }
1477 
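// Workers claim work by atomically bumping a shared region index in strides.
// A worker can pass the while-check and still lose the race to other workers,
// fetching a start index past the end; the bounds check in work() handles that.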
1478 class ShenandoahParallelHeapRegionTask : public WorkerTask {
1479 private:
1480   ShenandoahHeap* const _heap;
1481   ShenandoahHeapRegionClosure* const _blk;
1482   size_t const _stride;
1483 
1484   shenandoah_padding(0);
1485   volatile size_t _index;
1486   shenandoah_padding(1);
1487 
1488 public:
1489   ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk, size_t stride) :
1490           WorkerTask("Shenandoah Parallel Region Operation"),
1491           _heap(ShenandoahHeap::heap()), _blk(blk), _stride(stride), _index(0) {}
1492 
1493   void work(uint worker_id) {
1494     ShenandoahParallelWorkerSession worker_session(worker_id);
1495     size_t stride = _stride;
1496 
1497     size_t max = _heap->num_regions();
1498     while (Atomic::load(&_index) < max) {
1499       size_t cur = Atomic::fetch_then_add(&_index, stride, memory_order_relaxed);
1500       size_t start = cur;
1501       size_t end = MIN2(cur + stride, max);
1502       if (start >= max) break;
1503 
1504       for (size_t i = cur; i < end; i++) {
1505         ShenandoahHeapRegion* current = _heap->get_region(i);
1506         _blk->heap_region_do(current);
1507       }
1508     }
1509   }
1510 };
1511 
1512 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1513   assert(blk->is_thread_safe(), "Only thread-safe closures here");
1514   const uint active_workers = workers()->active_workers();
1515   const size_t n_regions = num_regions();
1516   size_t stride = ShenandoahParallelRegionStride;
1517   if (stride == 0 && active_workers > 1) {
1518     // Automatically derive the stride to balance the work evenly between
1519     // threads. Do not try to split the work below a reasonable threshold.
1520     constexpr size_t threshold = 4096;
1521     stride = n_regions <= threshold ?
1522             threshold :
1523             (n_regions + active_workers - 1) / active_workers;
1524   }
1525 
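  // A worked example of the derivation above: with 16384 regions and 8 workers,
  // stride = (16384 + 7) / 8 = 2048. With 1000 regions, stride stays at the
  // 4096 threshold, and the check below falls through to the serial path.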
1526   if (n_regions > stride && active_workers > 1) {
1527     ShenandoahParallelHeapRegionTask task(blk, stride);
1528     workers()->run_task(&task);
1529   } else {
1530     heap_region_iterate(blk);
1531   }
1532 }
1533 
1534 class ShenandoahInitMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1535 private:
1536   ShenandoahMarkingContext* const _ctx;
1537 public:
1538   ShenandoahInitMarkUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1539 
1540   void heap_region_do(ShenandoahHeapRegion* r) {
1541     assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
1542     if (r->is_active()) {
1543       // Check if the region needs its TAMS updated. We have already updated it during
1544       // concurrent reset, so it is very likely we don't need another write here.
1545       if (_ctx->top_at_mark_start(r) != r->top()) {
1546         _ctx->capture_top_at_mark_start(r);
1547       }
1548     } else {
1549       assert(_ctx->top_at_mark_start(r) == r->top(),
1550              "Region " SIZE_FORMAT " should already have correct TAMS", r->index());
1551     }
1552   }
1553 
1554   bool is_thread_safe() { return true; }
1555 };
1556 
1557 class ShenandoahRendezvousClosure : public HandshakeClosure {
1558 public:
1559   inline ShenandoahRendezvousClosure() : HandshakeClosure("ShenandoahRendezvous") {}
1560   inline void do_thread(Thread* thread) {}
1561 };
1562 
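// Executing the no-op handshake below guarantees that, by the time it returns,
// every Java thread has passed through a handshake/safepoint poll, and has
// therefore observed any global state published before the rendezvous.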
1563 void ShenandoahHeap::rendezvous_threads() {
1564   ShenandoahRendezvousClosure cl;
1565   Handshake::execute(&cl);
1566 }
1567 
1568 void ShenandoahHeap::recycle_trash() {
1569   free_set()->recycle_trash();
1570 }
1571 
1572 class ShenandoahResetUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1573 private:
1574   ShenandoahMarkingContext* const _ctx;
1575 public:
1576   ShenandoahResetUpdateRegionStateClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1577 
1578   void heap_region_do(ShenandoahHeapRegion* r) {
1579     if (r->is_active()) {
1580       // Reset live data and set TAMS optimistically. We recheck these under the pause
1581       // anyway, to capture any updates that happen in the meantime.
1582       r->clear_live_data();
1583       _ctx->capture_top_at_mark_start(r);
1584     }
1585   }
1586 
1587   bool is_thread_safe() { return true; }
1588 };
1589 
1590 void ShenandoahHeap::prepare_gc() {
1591   reset_mark_bitmap();
1592 
1593   ShenandoahResetUpdateRegionStateClosure cl;
1594   parallel_heap_region_iterate(&cl);
1595 }
1596 
1597 class ShenandoahFinalMarkUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
1598 private:
1599   ShenandoahMarkingContext* const _ctx;
1600   ShenandoahHeapLock* const _lock;
1601 
1602 public:
1603   ShenandoahFinalMarkUpdateRegionStateClosure() :
1604     _ctx(ShenandoahHeap::heap()->complete_marking_context()), _lock(ShenandoahHeap::heap()->lock()) {}
1605 
1606   void heap_region_do(ShenandoahHeapRegion* r) {
1607     if (r->is_active()) {
1608       // All allocations past TAMS are implicitly live, adjust the region data.
1609       // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
1610       HeapWord *tams = _ctx->top_at_mark_start(r);
1611       HeapWord *top = r->top();
1612       if (top > tams) {
1613         r->increase_live_data_alloc_words(pointer_delta(top, tams));
1614       }
1615 
1616       // We are about to select the collection set; make sure it knows about the
1617       // current pinning status. This also allows trashing more regions whose
1618       // pinning status has since been dropped.
1619       if (r->is_pinned()) {
1620         if (r->pin_count() == 0) {
1621           ShenandoahHeapLocker locker(_lock);
1622           r->make_unpinned();
1623         }
1624       } else {
1625         if (r->pin_count() > 0) {
1626           ShenandoahHeapLocker locker(_lock);
1627           r->make_pinned();
1628         }
1629       }
1630 
1631       // Remember the limit for updating refs. It is guaranteed that no
1632       // from-space refs are written from here on.
1633       r->set_update_watermark_at_safepoint(r->top());
1634     } else {
1635       assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->index());
1636       assert(_ctx->top_at_mark_start(r) == r->top(),
1637              "Region " SIZE_FORMAT " should have correct TAMS", r->index());
1638     }
1639   }
1640 
1641   bool is_thread_safe() { return true; }
1642 };
1643 
1644 void ShenandoahHeap::prepare_regions_and_collection_set(bool concurrent) {
1645   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
1646   {
1647     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_update_region_states :
1648                                          ShenandoahPhaseTimings::degen_gc_final_update_region_states);
1649     ShenandoahFinalMarkUpdateRegionStateClosure cl;
1650     parallel_heap_region_iterate(&cl);
1651 
1652     assert_pinned_region_status();
1653   }
1654 
1655   {
1656     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::choose_cset :
1657                                          ShenandoahPhaseTimings::degen_gc_choose_cset);
1658     ShenandoahHeapLocker locker(lock());
1659     _collection_set->clear();
1660     heuristics()->choose_collection_set(_collection_set);
1661   }
1662 
1663   {
1664     ShenandoahGCPhase phase(concurrent ? ShenandoahPhaseTimings::final_rebuild_freeset :
1665                                          ShenandoahPhaseTimings::degen_gc_final_rebuild_freeset);
1666     ShenandoahHeapLocker locker(lock());
1667     _free_set->rebuild();
1668   }
1669 }
1670 
1671 void ShenandoahHeap::do_class_unloading() {
1672   _unloader.unload();
1673 }
1674 
1675 void ShenandoahHeap::stw_weak_refs(bool full_gc) {
1676   // Weak refs processing
1677   ShenandoahPhaseTimings::Phase phase = full_gc ? ShenandoahPhaseTimings::full_gc_weakrefs
1678                                                 : ShenandoahPhaseTimings::degen_gc_weakrefs;
1679   ShenandoahTimingsTracker t(phase);
1680   ShenandoahGCWorkerPhase worker_phase(phase);
1681   ref_processor()->process_references(phase, workers(), false /* concurrent */);
1682 }
1683 
1684 void ShenandoahHeap::prepare_update_heap_references(bool concurrent) {
1685   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1686 
1687   // Evacuation is over, so no GCLABs are needed anymore. GCLABs are under URWM, so we need
1688   // to make them parsable for the update code to work correctly. In addition, we can compute
1689   // new sizes for future GCLABs here.
1690   if (UseTLAB) {
1691     ShenandoahGCPhase phase(concurrent ?
1692                             ShenandoahPhaseTimings::init_update_refs_manage_gclabs :
1693                             ShenandoahPhaseTimings::degen_gc_init_update_refs_manage_gclabs);
1694     gclabs_retire(ResizeTLAB);
1695   }
1696 
1697   _update_refs_iterator.reset();
1698 }
1699 
1700 void ShenandoahHeap::propagate_gc_state_to_java_threads() {
1701   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1702   if (_gc_state_changed) {
1703     _gc_state_changed = false;
1704     char state = gc_state();
1705     for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1706       ShenandoahThreadLocalData::set_gc_state(t, state);
1707     }
1708   }
1709 }
1710 
1711 void ShenandoahHeap::set_gc_state(uint mask, bool value) {
1712   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at Shenandoah safepoint");
1713   _gc_state.set_cond(mask, value);
1714   _gc_state_changed = true;
1715 }
1716 
1717 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1718   assert(!has_forwarded_objects(), "Not expected before/after mark phase");
1719   set_gc_state(MARKING, in_progress);
1720   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1721 }
1722 
1723 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1724   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1725   set_gc_state(EVACUATION, in_progress);
1726 }
1727 
1728 void ShenandoahHeap::set_concurrent_strong_root_in_progress(bool in_progress) {
1729   if (in_progress) {
1730     _concurrent_strong_root_in_progress.set();
1731   } else {
1732     _concurrent_strong_root_in_progress.unset();
1733   }
1734 }
1735 
1736 void ShenandoahHeap::set_concurrent_weak_root_in_progress(bool cond) {
1737   set_gc_state(WEAK_ROOTS, cond);
1738 }
1739 
1740 GCTracer* ShenandoahHeap::tracer() {
1741   return shenandoah_policy()->tracer();
1742 }
1743 
1744 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
1745   return _free_set->used();
1746 }
1747 
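// Only one thread can win the CANCELLABLE -> CANCELLED transition: cmpxchg
// returns the previous value, so prev == CANCELLABLE holds exactly for the
// caller that performed the transition; all others see CANCELLED.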
1748 bool ShenandoahHeap::try_cancel_gc() {
1749   jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
1750   return prev == CANCELLABLE;
1751 }
1752 
1753 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
1754   if (try_cancel_gc()) {
1755     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
1756     log_info(gc)("%s", msg.buffer());
1757     Events::log(Thread::current(), "%s", msg.buffer());
1758   }
1759 }
1760 
1761 uint ShenandoahHeap::max_workers() {
1762   return _max_workers;
1763 }
1764 
1765 void ShenandoahHeap::stop() {
1766   // The shutdown sequence should be able to terminate even when GC is running.
1767 
1768   // Step 0. Notify policy to disable event recording.
1769   _shenandoah_policy->record_shutdown();
1770 
1771   // Step 1. Notify control thread that we are in shutdown.
1772   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
1773   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
1774   control_thread()->prepare_for_graceful_shutdown();
1775 
1776   // Step 2. Notify GC workers that we are cancelling GC.
1777   cancel_gc(GCCause::_shenandoah_stop_vm);
1778 
1779   // Step 3. Wait until GC worker exits normally.
1780   control_thread()->stop();
1781 }
1782 
1783 void ShenandoahHeap::stw_unload_classes(bool full_gc) {
1784   if (!unload_classes()) return;
1785   ClassUnloadingContext ctx(_workers->active_workers(),
1786                             true /* unregister_nmethods_during_purge */,
1787                             false /* lock_codeblob_free_separately */);
1788 
1789   // Unload classes and purge SystemDictionary.
1790   {
1791     ShenandoahPhaseTimings::Phase phase = full_gc ?
1792                                           ShenandoahPhaseTimings::full_gc_purge_class_unload :
1793                                           ShenandoahPhaseTimings::degen_gc_purge_class_unload;
1794     ShenandoahIsAliveSelector is_alive;
1795     {
1796       CodeCache::UnlinkingScope scope(is_alive.is_alive_closure());
1797       ShenandoahGCPhase gc_phase(phase);
1798       ShenandoahGCWorkerPhase worker_phase(phase);
1799       bool unloading_occurred = SystemDictionary::do_unloading(gc_timer());
1800 
1801       uint num_workers = _workers->active_workers();
1802       ShenandoahClassUnloadingTask unlink_task(phase, num_workers, unloading_occurred);
1803       _workers->run_task(&unlink_task);
1804     }
1805     // Release the memory of unloaded nmethods.
1806     ClassUnloadingContext::context()->purge_and_free_nmethods();
1807   }
1808 
1809   {
1810     ShenandoahGCPhase phase(full_gc ?
1811                             ShenandoahPhaseTimings::full_gc_purge_cldg :
1812                             ShenandoahPhaseTimings::degen_gc_purge_cldg);
1813     ClassLoaderDataGraph::purge(true /* at_safepoint */);
1814   }
1815   // Resize and verify metaspace
1816   MetaspaceGC::compute_new_size();
1817   DEBUG_ONLY(MetaspaceUtils::verify();)
1818 }
1819 
1820 // Weak roots are either pre-evacuated (final mark) or updated (final updaterefs),
1821 // so they should not have forwarded oops.
1822 // However, we do still need to "null" dead oops in the roots, if that cannot be
1823 // done in concurrent cycles.
1824 void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
1825   uint num_workers = _workers->active_workers();
1826   ShenandoahPhaseTimings::Phase timing_phase = full_gc ?
1827                                                ShenandoahPhaseTimings::full_gc_purge_weak_par :
1828                                                ShenandoahPhaseTimings::degen_gc_purge_weak_par;
1829   ShenandoahGCPhase phase(timing_phase);
1830   ShenandoahGCWorkerPhase worker_phase(timing_phase);
1831   // Cleanup weak roots
1832   if (has_forwarded_objects()) {
1833     ShenandoahForwardedIsAliveClosure is_alive;
1834     ShenandoahUpdateRefsClosure keep_alive;
1835     ShenandoahParallelWeakRootsCleaningTask<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>
1836       cleaning_task(timing_phase, &is_alive, &keep_alive, num_workers);
1837     _workers->run_task(&cleaning_task);
1838   } else {
1839     ShenandoahIsAliveClosure is_alive;
1840 #ifdef ASSERT
1841     ShenandoahAssertNotForwardedClosure verify_cl;
1842     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, ShenandoahAssertNotForwardedClosure>
1843       cleaning_task(timing_phase, &is_alive, &verify_cl, num_workers);
1844 #else
1845     ShenandoahParallelWeakRootsCleaningTask<ShenandoahIsAliveClosure, DoNothingClosure>
1846       cleaning_task(timing_phase, &is_alive, &do_nothing_cl, num_workers);
1847 #endif
1848     _workers->run_task(&cleaning_task);
1849   }
1850 }
1851 
1852 void ShenandoahHeap::parallel_cleaning(bool full_gc) {
1853   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
1854   assert(is_stw_gc_in_progress(), "Only for Degenerated and Full GC");
1855   ShenandoahGCPhase phase(full_gc ?
1856                           ShenandoahPhaseTimings::full_gc_purge :
1857                           ShenandoahPhaseTimings::degen_gc_purge);
1858   stw_weak_refs(full_gc);
1859   stw_process_weak_roots(full_gc);
1860   stw_unload_classes(full_gc);
1861 }
1862 
1863 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
1864   set_gc_state(HAS_FORWARDED, cond);
1865 }
1866 
1867 void ShenandoahHeap::set_unload_classes(bool uc) {
1868   _unload_classes.set_cond(uc);
1869 }
1870 
1871 bool ShenandoahHeap::unload_classes() const {
1872   return _unload_classes.is_set();
1873 }
1874 
1875 address ShenandoahHeap::in_cset_fast_test_addr() {
1876   ShenandoahHeap* heap = ShenandoahHeap::heap();
1877   assert(heap->collection_set() != nullptr, "Sanity");
1878   return (address) heap->collection_set()->biased_map_address();
1879 }
1880 
1881 size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
1882   return Atomic::load(&_bytes_allocated_since_gc_start);
1883 }
1884 
1885 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
1886   Atomic::store(&_bytes_allocated_since_gc_start, (size_t)0);
1887 }
1888 
1889 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
1890   _degenerated_gc_in_progress.set_cond(in_progress);
1891 }
1892 
1893 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
1894   _full_gc_in_progress.set_cond(in_progress);
1895 }
1896 
1897 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
1898   assert (is_full_gc_in_progress(), "should be");
1899   _full_gc_move_in_progress.set_cond(in_progress);
1900 }
1901 
1902 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
1903   set_gc_state(UPDATEREFS, in_progress);
1904 }
1905 
1906 void ShenandoahHeap::register_nmethod(nmethod* nm) {
1907   ShenandoahCodeRoots::register_nmethod(nm);
1908 }
1909 
1910 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
1911   ShenandoahCodeRoots::unregister_nmethod(nm);
1912 }
1913 
1914 void ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
1915   heap_region_containing(o)->record_pin();
1916 }
1917 
1918 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
1919   ShenandoahHeapRegion* r = heap_region_containing(o);
1920   assert(r != nullptr, "Sanity");
1921   assert(r->pin_count() > 0, "Region " SIZE_FORMAT " should have non-zero pins", r->index());
1922   r->record_unpin();
1923 }
1924 
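// Pin counts are updated by Java threads without taking the heap lock, so a
// region's "pinned" state can lag its pin count. This reconciles the two
// under the heap lock.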
1925 void ShenandoahHeap::sync_pinned_region_status() {
1926   ShenandoahHeapLocker locker(lock());
1927 
1928   for (size_t i = 0; i < num_regions(); i++) {
1929     ShenandoahHeapRegion *r = get_region(i);
1930     if (r->is_active()) {
1931       if (r->is_pinned()) {
1932         if (r->pin_count() == 0) {
1933           r->make_unpinned();
1934         }
1935       } else {
1936         if (r->pin_count() > 0) {
1937           r->make_pinned();
1938         }
1939       }
1940     }
1941   }
1942 
1943   assert_pinned_region_status();
1944 }
1945 
1946 #ifdef ASSERT
1947 void ShenandoahHeap::assert_pinned_region_status() {
1948   for (size_t i = 0; i < num_regions(); i++) {
1949     ShenandoahHeapRegion* r = get_region(i);
1950     assert((r->is_pinned() && r->pin_count() > 0) || (!r->is_pinned() && r->pin_count() == 0),
1951            "Region " SIZE_FORMAT " pinning status is inconsistent", i);
1952   }
1953 }
1954 #endif
1955 
1956 ConcurrentGCTimer* ShenandoahHeap::gc_timer() const {
1957   return _gc_timer;
1958 }
1959 
1960 void ShenandoahHeap::prepare_concurrent_roots() {
1961   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
1962   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
1963   set_concurrent_strong_root_in_progress(!collection_set()->is_empty());
1964   set_concurrent_weak_root_in_progress(true);
1965   if (unload_classes()) {
1966     _unloader.prepare();
1967   }
1968 }
1969 
1970 void ShenandoahHeap::finish_concurrent_roots() {
1971   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
1972   assert(!is_stw_gc_in_progress(), "Only concurrent GC");
1973   if (unload_classes()) {
1974     _unloader.finish();
1975   }
1976 }
1977 
1978 #ifdef ASSERT
1979 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
1980   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
1981 
1982   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1983     if (UseDynamicNumberOfGCThreads) {
1984       assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
1985     } else {
1986       // Use ParallelGCThreads inside safepoints
1987       assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
1988     }
1989   } else {
1990     if (UseDynamicNumberOfGCThreads) {
1991       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
1992     } else {
1993       // Use ConcGCThreads outside safepoints
1994       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
1995     }
1996   }
1997 }
1998 #endif
1999 
2000 ShenandoahVerifier* ShenandoahHeap::verifier() {
2001   guarantee(ShenandoahVerify, "Should be enabled");
2002   assert (_verifier != nullptr, "sanity");
2003   return _verifier;
2004 }
2005 
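// The CONCURRENT template parameter selects the worker session type and the
// update-refs closure at compile time; the two instantiations below share one
// template body without runtime mode checks in the per-region loop.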
2006 template<bool CONCURRENT>
2007 class ShenandoahUpdateHeapRefsTask : public WorkerTask {
2008 private:
2009   ShenandoahHeap* _heap;
2010   ShenandoahRegionIterator* _regions;
2011 public:
2012   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions) :
2013     WorkerTask("Shenandoah Update References"),
2014     _heap(ShenandoahHeap::heap()),
2015     _regions(regions) {
2016   }
2017 
2018   void work(uint worker_id) {
2019     if (CONCURRENT) {
2020       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2021       ShenandoahSuspendibleThreadSetJoiner stsj;
2022       do_work<ShenandoahConcUpdateRefsClosure>();
2023     } else {
2024       ShenandoahParallelWorkerSession worker_session(worker_id);
2025       do_work<ShenandoahSTWUpdateRefsClosure>();
2026     }
2027   }
2028 
2029 private:
2030   template<class T>
2031   void do_work() {
2032     T cl;
2033     ShenandoahHeapRegion* r = _regions->next();
2034     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
2035     while (r != nullptr) {
2036       HeapWord* update_watermark = r->get_update_watermark();
2037       assert (update_watermark >= r->bottom(), "sanity");
2038       if (r->is_active() && !r->is_cset()) {
2039         _heap->marked_object_oop_iterate(r, &cl, update_watermark);
2040       }
2041       if (ShenandoahPacing) {
2042         _heap->pacer()->report_updaterefs(pointer_delta(update_watermark, r->bottom()));
2043       }
2044       if (_heap->check_cancelled_gc_and_yield(CONCURRENT)) {
2045         return;
2046       }
2047       r = _regions->next();
2048     }
2049   }
2050 };
2051 
2052 void ShenandoahHeap::update_heap_references(bool concurrent) {
2053   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2054 
2055   if (concurrent) {
2056     ShenandoahUpdateHeapRefsTask<true> task(&_update_refs_iterator);
2057     workers()->run_task(&task);
2058   } else {
2059     ShenandoahUpdateHeapRefsTask<false> task(&_update_refs_iterator);
2060     workers()->run_task(&task);
2061   }
2062 }
2063 
2064 
2065 class ShenandoahFinalUpdateRefsUpdateRegionStateClosure : public ShenandoahHeapRegionClosure {
2066 private:
2067   ShenandoahHeapLock* const _lock;
2068 
2069 public:
2070   ShenandoahFinalUpdateRefsUpdateRegionStateClosure() : _lock(ShenandoahHeap::heap()->lock()) {}
2071 
2072   void heap_region_do(ShenandoahHeapRegion* r) {
2073     // Drop the unnecessary "pinned" state from regions that do not have CP marks
2074     // anymore, as this allows trashing them.
2075 
2076     if (r->is_active()) {
2077       if (r->is_pinned()) {
2078         if (r->pin_count() == 0) {
2079           ShenandoahHeapLocker locker(_lock);
2080           r->make_unpinned();
2081         }
2082       } else {
2083         if (r->pin_count() > 0) {
2084           ShenandoahHeapLocker locker(_lock);
2085           r->make_pinned();
2086         }
2087       }
2088     }
2089   }
2090 
2091   bool is_thread_safe() { return true; }
2092 };
2093 
2094 void ShenandoahHeap::update_heap_region_states(bool concurrent) {
2095   assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
2096   assert(!is_full_gc_in_progress(), "Only for concurrent and degenerated GC");
2097 
2098   {
2099     ShenandoahGCPhase phase(concurrent ?
2100                             ShenandoahPhaseTimings::final_update_refs_update_region_states :
2101                             ShenandoahPhaseTimings::degen_gc_final_update_refs_update_region_states);
2102     ShenandoahFinalUpdateRefsUpdateRegionStateClosure cl;
2103     parallel_heap_region_iterate(&cl);
2104 
2105     assert_pinned_region_status();
2106   }
2107 
2108   {
2109     ShenandoahGCPhase phase(concurrent ?
2110                             ShenandoahPhaseTimings::final_update_refs_trash_cset :
2111                             ShenandoahPhaseTimings::degen_gc_final_update_refs_trash_cset);
2112     trash_cset_regions();
2113   }
2114 }
2115 
2116 void ShenandoahHeap::rebuild_free_set(bool concurrent) {
2117   {
2118     ShenandoahGCPhase phase(concurrent ?
2119                             ShenandoahPhaseTimings::final_update_refs_rebuild_freeset :
2120                             ShenandoahPhaseTimings::degen_gc_final_update_refs_rebuild_freeset);
2121     ShenandoahHeapLocker locker(lock());
2122     _free_set->rebuild();
2123   }
2124 }
2125 
2126 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2127   print_on(st);
2128   st->cr();
2129   print_heap_regions_on(st);
2130 }
2131 
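// Marking bitmap memory is committed in slices, each covering
// _bitmap_regions_per_slice regions. For example, with 8 regions per slice,
// regions 0..7 share slice 0, so the slice may only be uncommitted once no
// region in that group is committed; hence the group scan below.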
2132 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2133   size_t slice = r->index() / _bitmap_regions_per_slice;
2134 
2135   size_t regions_from = _bitmap_regions_per_slice * slice;
2136   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2137   for (size_t g = regions_from; g < regions_to; g++) {
2138     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2139     if (skip_self && g == r->index()) continue;
2140     if (get_region(g)->is_committed()) {
2141       return true;
2142     }
2143   }
2144   return false;
2145 }
2146 
2147 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2148   shenandoah_assert_heaplocked();
2149 
2150   // Bitmaps in special regions do not need commits
2151   if (_bitmap_region_special) {
2152     return true;
2153   }
2154 
2155   if (is_bitmap_slice_committed(r, true)) {
2156     // Some other region from the group is already committed, meaning the bitmap
2157     // slice is already committed; exit right away.
2158     return true;
2159   }
2160 
2161   // Commit the bitmap slice:
2162   size_t slice = r->index() / _bitmap_regions_per_slice;
2163   size_t off = _bitmap_bytes_per_slice * slice;
2164   size_t len = _bitmap_bytes_per_slice;
2165   char* start = (char*) _bitmap_region.start() + off;
2166 
2167   if (!os::commit_memory(start, len, false)) {
2168     return false;
2169   }
2170 
2171   if (AlwaysPreTouch) {
2172     os::pretouch_memory(start, start + len, _pretouch_bitmap_page_size);
2173   }
2174 
2175   return true;
2176 }
2177 
2178 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2179   shenandoah_assert_heaplocked();
2180 
2181   // Bitmaps in special regions do not need uncommits
2182   if (_bitmap_region_special) {
2183     return true;
2184   }
2185 
2186   if (is_bitmap_slice_committed(r, true)) {
2187     // Some other region from the group is still committed, meaning the bitmap
2188     // slice should stay committed; exit right away.
2189     return true;
2190   }
2191 
2192   // Uncommit the bitmap slice:
2193   size_t slice = r->index() / _bitmap_regions_per_slice;
2194   size_t off = _bitmap_bytes_per_slice * slice;
2195   size_t len = _bitmap_bytes_per_slice;
2196   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2197     return false;
2198   }
2199   return true;
2200 }
2201 
2202 void ShenandoahHeap::safepoint_synchronize_begin() {
2203   SuspendibleThreadSet::synchronize();
2204 }
2205 
2206 void ShenandoahHeap::safepoint_synchronize_end() {
2207   SuspendibleThreadSet::desynchronize();
2208 }
2209 
2210 void ShenandoahHeap::entry_uncommit(double shrink_before, size_t shrink_until) {
2211   static const char *msg = "Concurrent uncommit";
2212   ShenandoahConcurrentPhase gc_phase(msg, ShenandoahPhaseTimings::conc_uncommit, true /* log_heap_usage */);
2213   EventMark em("%s", msg);
2214 
2215   op_uncommit(shrink_before, shrink_until);
2216 }
2217 
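// With ShenandoahAllocFailureALot, roughly 5% of the calls inject a failure:
// (os::random() % 1000) > 950 holds for 49 of the 1000 possible values.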
2218 void ShenandoahHeap::try_inject_alloc_failure() {
2219   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2220     _inject_alloc_failure.set();
2221     os::naked_short_sleep(1);
2222     if (cancelled_gc()) {
2223       log_info(gc)("Allocation failure was successfully injected");
2224     }
2225   }
2226 }
2227 
2228 bool ShenandoahHeap::should_inject_alloc_failure() {
2229   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2230 }
2231 
2232 void ShenandoahHeap::initialize_serviceability() {
2233   _memory_pool = new ShenandoahMemoryPool(this);
2234   _cycle_memory_manager.add_pool(_memory_pool);
2235   _stw_memory_manager.add_pool(_memory_pool);
2236 }
2237 
2238 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2239   GrowableArray<GCMemoryManager*> memory_managers(2);
2240   memory_managers.append(&_cycle_memory_manager);
2241   memory_managers.append(&_stw_memory_manager);
2242   return memory_managers;
2243 }
2244 
2245 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2246   GrowableArray<MemoryPool*> memory_pools(1);
2247   memory_pools.append(_memory_pool);
2248   return memory_pools;
2249 }
2250 
2251 MemoryUsage ShenandoahHeap::memory_usage() {
2252   return _memory_pool->get_memory_usage();
2253 }
2254 
2255 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2256   _heap(ShenandoahHeap::heap()),
2257   _index(0) {}
2258 
2259 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2260   _heap(heap),
2261   _index(0) {}
2262 
2263 void ShenandoahRegionIterator::reset() {
2264   _index = 0;
2265 }
2266 
2267 bool ShenandoahRegionIterator::has_next() const {
2268   return _index < _heap->num_regions();
2269 }
2270 
2271 char ShenandoahHeap::gc_state() const {
2272   return _gc_state.raw_value();
2273 }
2274 
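// Per-worker liveness caches let marking workers accumulate live data per
// region locally, batching updates instead of contending on the shared region
// counters for every object. flush_liveness_cache() folds the cache back into
// the regions and zeroes it for reuse.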
2275 ShenandoahLiveData* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2276 #ifdef ASSERT
2277   assert(_liveness_cache != nullptr, "sanity");
2278   assert(worker_id < _max_workers, "sanity");
2279   for (uint i = 0; i < num_regions(); i++) {
2280     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2281   }
2282 #endif
2283   return _liveness_cache[worker_id];
2284 }
2285 
2286 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2287   assert(worker_id < _max_workers, "sanity");
2288   assert(_liveness_cache != nullptr, "sanity");
2289   ShenandoahLiveData* ld = _liveness_cache[worker_id];
2290   for (uint i = 0; i < num_regions(); i++) {
2291     ShenandoahLiveData live = ld[i];
2292     if (live > 0) {
2293       ShenandoahHeapRegion* r = get_region(i);
2294       r->increase_live_data_gc_words(live);
2295       ld[i] = 0;
2296     }
2297   }
2298 }
2299 
2300 bool ShenandoahHeap::requires_barriers(stackChunkOop obj) const {
2301   if (is_idle()) return false;
2302 
2303   // Objects allocated after mark start are implicitly alive and don't need any barriers
2304   // during the marking phase.
2305   if (is_concurrent_mark_in_progress() &&
2306      !marking_context()->allocated_after_mark_start(obj)) {
2307     return true;
2308   }
2309 
2310   // Cannot guarantee that obj is deeply good: it may still reference forwarded objects.
2311   if (has_forwarded_objects()) {
2312     return true;
2313   }
2314 
2315   return false;
2316 }