src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp

   1 /*
   2  * Copyright (c) 2016, 2021, Red Hat, Inc. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/shared/tlab_globals.hpp"
  27 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  28 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  29 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  30 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  31 #include "logging/logStream.hpp"
  32 #include "memory/resourceArea.hpp"
  33 #include "runtime/orderAccess.hpp"
  34 
  35 ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
  36   _heap(heap),
  37   _mutator_free_bitmap(max_regions, mtGC),
  38   _collector_free_bitmap(max_regions, mtGC),
  39   _max(max_regions)
  40 {
  41   clear_internal();
  42 }
  43 
  44 void ShenandoahFreeSet::increase_used(size_t num_bytes) {
  45   shenandoah_assert_heaplocked();
  46   _used += num_bytes;
  47 
  48   assert(_used <= _capacity, "must not use more than we have: used: " SIZE_FORMAT
  49          ", capacity: " SIZE_FORMAT ", num_bytes: " SIZE_FORMAT, _used, _capacity, num_bytes);
  50 }
  51 
  52 bool ShenandoahFreeSet::is_mutator_free(size_t idx) const {
  53   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
  54           idx, _max, _mutator_leftmost, _mutator_rightmost);
  55   return _mutator_free_bitmap.at(idx);
  56 }
  57 
  58 bool ShenandoahFreeSet::is_collector_free(size_t idx) const {
  59   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT " (left: " SIZE_FORMAT ", right: " SIZE_FORMAT ")",
  60           idx, _max, _collector_leftmost, _collector_rightmost);
  61   return _collector_free_bitmap.at(idx);
  62 }
  63 
  64 HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
  65   // Scan the bitmap looking for a first fit.
  66   //
  67   // Leftmost and rightmost bounds provide enough caching to walk the bitmap efficiently. Normally,
  68   // we would find the region to allocate in right away.
  69   //
  70   // Allocations are biased: new application allocs go to the beginning of the heap, and GC allocs
  71   // go to the end. This makes application allocation faster, because we would clear lots
  72   // of regions from the beginning most of the time.
  73   //
  74   // The free set maintains mutator and collector views, and normally they allocate only in their
  75   // own views, unless we special-case stealing and mixed allocations.
  76 
  77   switch (req.type()) {
  78     case ShenandoahAllocRequest::_alloc_tlab:
  79     case ShenandoahAllocRequest::_alloc_shared: {
  80 
  81       // Try to allocate in the mutator view
  82       for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
  83         if (is_mutator_free(idx)) {
  84           HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
  85           if (result != nullptr) {
  86             return result;
  87           }
  88         }
  89       }
  90 
  91       // There is no recovery. Mutator does not touch collector view at all.
  92       break;
  93     }
  94     case ShenandoahAllocRequest::_alloc_gclab:
  95     case ShenandoahAllocRequest::_alloc_shared_gc: {
  96       // size_t is unsigned, need to dodge underflow when _leftmost = 0
  97 
  98       // Fast-path: try to allocate in the collector view first
  99       for (size_t c = _collector_rightmost + 1; c > _collector_leftmost; c--) {
 100         size_t idx = c - 1;
 101         if (is_collector_free(idx)) {
 102           HeapWord* result = try_allocate_in(_heap->get_region(idx), req, in_new_region);
 103           if (result != nullptr) {
 104             return result;
 105           }
 106         }
 107       }
 108 
 109       // No dice. Can we borrow space from mutator view?
 110       if (!ShenandoahEvacReserveOverflow) {
 111         return nullptr;
 112       }
 113 
 114       // Try to steal the empty region from the mutator view
 115       for (size_t c = _mutator_rightmost + 1; c > _mutator_leftmost; c--) {
 116         size_t idx = c - 1;
 117         if (is_mutator_free(idx)) {
 118           ShenandoahHeapRegion* r = _heap->get_region(idx);
 119           if (can_allocate_from(r)) {
 120             flip_to_gc(r);
 121             HeapWord *result = try_allocate_in(r, req, in_new_region);
 122             if (result != nullptr) {
 123               return result;
 124             }
 125           }
 126         }
 127       }
 128 
 129       // No dice. Do not try to mix mutator and GC allocations, because
 130       // URWM moves due to GC allocations would expose unparsable mutator
 131       // allocations.
 132 
 133       break;
 134     }
 135     default:
 136       ShouldNotReachHere();
 137   }
 138 
 139   return nullptr;
 140 }
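
Both right-to-left scans above shift the loop variable by one to dodge unsigned underflow, as the line-96 comment notes. A minimal standalone sketch of the idiom, with hypothetical bounds (not code from this file):

#include <cstddef>
#include <cstdio>

// Walk [leftmost, rightmost] from right to left with unsigned indices.
// Looping on c = idx + 1 keeps c > 0, so c-- never wraps around, even
// when leftmost == 0; a naive "idx >= leftmost" condition would spin
// forever once idx underflows past zero.
int main() {
  const size_t leftmost = 0, rightmost = 5;  // hypothetical bounds
  for (size_t c = rightmost + 1; c > leftmost; c--) {
    size_t idx = c - 1;
    printf("visiting region %zu\n", idx);
  }
  return 0;
}
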
 141 
 142 HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
 143   assert (!has_no_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());
 144 
 145   if (_heap->is_concurrent_weak_root_in_progress() &&
 146       r->is_trash()) {
 147     return nullptr;
 148   }
 149 
 150   try_recycle_trashed(r);
 151 
 152   in_new_region = r->is_empty();
 153 
 154   HeapWord* result = nullptr;
 155   size_t size = req.size();
 156 
 157   if (req.is_lab_alloc()) {
 158     size_t free = align_down(r->free() >> LogHeapWordSize, MinObjAlignment);
 159     if (size > free) {
 160       size = free;
 161     }
 162     if (size >= req.min_size()) {
 163       result = r->allocate(size, req.type());
 164       assert (result != nullptr, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, size);
 165     }
 166   } else {
 167     result = r->allocate(size, req.type());
 168   }
 169 
 170   if (result != nullptr) {
 171     // Allocation successful, bump stats:
 172     if (req.is_mutator_alloc()) {
 173       increase_used(size * HeapWordSize);
 174     }
 175 
 176     // Record actual allocation size
 177     req.set_actual_size(size);
 178 
 179     if (req.is_gc_alloc()) {
 180       r->set_update_watermark(r->top());
 181     }
 182   }
 183 
 184   if (result == nullptr || has_no_alloc_capacity(r)) {
 185     // Region cannot afford this or future allocations. Retire it.
 186     //
  187     // While this seems a bit harsh, especially in the case when this large allocation does not
  188     // fit but the next small one would, we risk inflating scan times when lots of
  189     // almost-full regions precede the fully-empty region where we want to allocate the entire TLAB.
 190     // TODO: Record first fully-empty region, and use that for large allocations
 191 
 192     // Record the remainder as allocation waste
 193     if (req.is_mutator_alloc()) {
 194       size_t waste = r->free();
 195       if (waste > 0) {
 196         increase_used(waste);
 197         _heap->notify_mutator_alloc_words(waste >> LogHeapWordSize, true);
 198       }
 199     }
 200 
 201     size_t num = r->index();
 202     _collector_free_bitmap.clear_bit(num);
 203     _mutator_free_bitmap.clear_bit(num);
 204     // Touched the bounds? Need to update:
 205     if (touches_bounds(num)) {
 206       adjust_bounds();
 207     }
 208     assert_bounds();
 209   }
 210   return result;
 211 }
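
A worked sketch of the LAB-sizing rule in the is_lab_alloc() branch above, with made-up numbers: the request shrinks to the region's aligned free space, and fails only when that drops below the request's minimum size.

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  // Hypothetical values standing in for req.size(), req.min_size(), r->free().
  size_t req_size = 2048, req_min_size = 512;  // in words
  size_t region_free = 1536;                   // aligned free words in the region
  size_t size = std::min(req_size, region_free);
  if (size >= req_min_size) {
    printf("allocate a shrunken LAB of %zu words\n", size);  // 1536 here
  } else {
    printf("region too small for this LAB, move to the next one\n");
  }
  return 0;
}
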
 212 
 213 bool ShenandoahFreeSet::touches_bounds(size_t num) const {
 214   return num == _collector_leftmost || num == _collector_rightmost || num == _mutator_leftmost || num == _mutator_rightmost;
 215 }
 216 
 217 void ShenandoahFreeSet::recompute_bounds() {
 218   // Reset to the most pessimistic case:
 219   _mutator_rightmost = _max - 1;
 220   _mutator_leftmost = 0;
 221   _collector_rightmost = _max - 1;
 222   _collector_leftmost = 0;
 223 
 224   // ...and adjust from there
 225   adjust_bounds();
 226 }
 227 
 228 void ShenandoahFreeSet::adjust_bounds() {
 229   // Rewind both mutator bounds until the next bit.
 230   while (_mutator_leftmost < _max && !is_mutator_free(_mutator_leftmost)) {
 231     _mutator_leftmost++;
 232   }
 233   while (_mutator_rightmost > 0 && !is_mutator_free(_mutator_rightmost)) {
 234     _mutator_rightmost--;
 235   }
 236   // Rewind both collector bounds until the next bit.
 237   while (_collector_leftmost < _max && !is_collector_free(_collector_leftmost)) {
 238     _collector_leftmost++;
 239   }
 240   while (_collector_rightmost > 0 && !is_collector_free(_collector_rightmost)) {
 241     _collector_rightmost--;
 242   }
 243 }
 244 
 245 HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
 246   shenandoah_assert_heaplocked();
 247 
 248   size_t words_size = req.size();
 249   size_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
 250 
 251   // No regions left to satisfy allocation, bye.
 252   if (num > mutator_count()) {
 253     return nullptr;
 254   }
 255 
 256   // Find the continuous interval of $num regions, starting from $beg and ending in $end,
 257   // inclusive. Contiguous allocations are biased to the beginning.
 258 
 259   size_t beg = _mutator_leftmost;
 260   size_t end = beg;
 261 
 262   while (true) {
 263     if (end >= _max) {
 264       // Hit the end, goodbye
 265       return nullptr;
 266     }
 267 
 268     // If regions are not adjacent, then current [beg; end] is useless, and we may fast-forward.
 269     // If region is not completely free, the current [beg; end] is useless, and we may fast-forward.
 270     if (!is_mutator_free(end) || !can_allocate_from(_heap->get_region(end))) {
 271       end++;
 272       beg = end;
 273       continue;
 274     }
 275 
 276     if ((end - beg + 1) == num) {
 277       // found the match
 278       break;
 279     }
 280 
 281     end++;
 282   }
 283 
 284   size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
 285 
 286   // Initialize regions:
 287   for (size_t i = beg; i <= end; i++) {
 288     ShenandoahHeapRegion* r = _heap->get_region(i);
 289     try_recycle_trashed(r);
 290 
 291     assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
 292     assert(r->is_empty(), "Should be empty");
 293 
 294     if (i == beg) {
 295       r->make_humongous_start();
 296     } else {
 297       r->make_humongous_cont();
 298     }
 299 
 300     // Trailing region may be non-full, record the remainder there
 301     size_t used_words;
 302     if ((i == end) && (remainder != 0)) {
 303       used_words = remainder;
 304     } else {
 305       used_words = ShenandoahHeapRegion::region_size_words();
 306     }
 307 
 308     r->set_top(r->bottom() + used_words);
 309 
 310     _mutator_free_bitmap.clear_bit(r->index());
 311   }
 312 
 313   // While individual regions report their true use, all humongous regions are
 314   // marked used in the free set.
 315   increase_used(ShenandoahHeapRegion::region_size_bytes() * num);
 316 
 317   if (remainder != 0) {
 318     // Record this remainder as allocation waste
 319     _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
 320   }
 321 
 322   // Allocated at left/rightmost? Move the bounds appropriately.
 323   if (beg == _mutator_leftmost || end == _mutator_rightmost) {
 324     adjust_bounds();
 325   }
 326   assert_bounds();
 327 
 328   req.set_actual_size(words_size);
 329   return _heap->get_region(beg)->bottom();
 330 }
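
The region arithmetic above, worked through once with assumed sizes (32768-word regions, i.e. 256 KiB with 8-byte heap words; not values from this file): a 100000-word humongous request needs four regions, and only the 1696-word remainder is recorded as used in the trailing region.

#include <cstddef>
#include <cstdio>

int main() {
  const size_t region_words = 32768;  // assumed region size in words
  const size_t words_size = 100000;   // hypothetical humongous request
  size_t num = (words_size + region_words - 1) / region_words;  // required_regions
  size_t remainder = words_size % region_words;  // == words_size & mask for pow2 sizes
  printf("regions: %zu, trailing used: %zu, waste: %zu words\n",
         num, remainder, (remainder != 0) ? region_words - remainder : 0);
  return 0;
}
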
 331 
 332 bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) {
 333   return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
 334 }
 335 
 336 size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) {
 337   if (r->is_trash()) {
 338     // This would be recycled on allocation path
 339     return ShenandoahHeapRegion::region_size_bytes();
 340   } else {
 341     return r->free();
 342   }
 343 }
 344 
 345 bool ShenandoahFreeSet::has_no_alloc_capacity(ShenandoahHeapRegion *r) {
 346   return alloc_capacity(r) == 0;
 347 }
 348 
 349 void ShenandoahFreeSet::try_recycle_trashed(ShenandoahHeapRegion *r) {
 350   if (r->is_trash()) {
 351     _heap->decrease_used(r->used());
 352     r->recycle();
 353   }
 354 }
 355 
 356 void ShenandoahFreeSet::recycle_trash() {
  357   // lock is not reentrant, check we don't have it
 358   shenandoah_assert_not_heaplocked();
 359 
 360   for (size_t i = 0; i < _heap->num_regions(); i++) {
 361     ShenandoahHeapRegion* r = _heap->get_region(i);
 362     if (r->is_trash()) {
 363       ShenandoahHeapLocker locker(_heap->lock());
 364       try_recycle_trashed(r);
 365     }
 366     SpinPause(); // allow allocators to take the lock
 367   }
 368 }
 369 
 370 void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
 371   size_t idx = r->index();
 372 
 373   assert(_mutator_free_bitmap.at(idx), "Should be in mutator view");
 374   assert(can_allocate_from(r), "Should not be allocated");
 375 
 376   _mutator_free_bitmap.clear_bit(idx);
 377   _collector_free_bitmap.set_bit(idx);
 378   _collector_leftmost = MIN2(idx, _collector_leftmost);
 379   _collector_rightmost = MAX2(idx, _collector_rightmost);
 380 
 381   _capacity -= alloc_capacity(r);
 382 
 383   if (touches_bounds(idx)) {
 384     adjust_bounds();
 385   }
 386   assert_bounds();
 387 }
 388 
 389 void ShenandoahFreeSet::clear() {
 390   shenandoah_assert_heaplocked();
 391   clear_internal();
 392 }
 393 
 394 void ShenandoahFreeSet::clear_internal() {
 395   _mutator_free_bitmap.clear();
 396   _collector_free_bitmap.clear();
 397   _mutator_leftmost = _max;
 398   _mutator_rightmost = 0;
 399   _collector_leftmost = _max;
 400   _collector_rightmost = 0;
 401   _capacity = 0;
 402   _used = 0;
 403 }
 404 
 405 void ShenandoahFreeSet::rebuild() {
 406   shenandoah_assert_heaplocked();
 407   clear();
 408 
 409   for (size_t idx = 0; idx < _heap->num_regions(); idx++) {
 410     ShenandoahHeapRegion* region = _heap->get_region(idx);
 411     if (region->is_alloc_allowed() || region->is_trash()) {
 412       assert(!region->is_cset(), "Shouldn't be adding those to the free set");
 413 
 414       // Do not add regions that would surely fail allocation
 415       if (has_no_alloc_capacity(region)) continue;
 416 
 417       _capacity += alloc_capacity(region);
 418       assert(_used <= _capacity, "must not use more than we have");
 419 
 420       assert(!is_mutator_free(idx), "We are about to add it, it shouldn't be there already");
 421       _mutator_free_bitmap.set_bit(idx);
 422     }
 423   }
 424 
 425   // Evac reserve: reserve trailing space for evacuations
 426   size_t to_reserve = _heap->max_capacity() / 100 * ShenandoahEvacReserve;
 427   size_t reserved = 0;
 428 
 429   for (size_t idx = _heap->num_regions() - 1; idx > 0; idx--) {
 430     if (reserved >= to_reserve) break;
 431 
 432     ShenandoahHeapRegion* region = _heap->get_region(idx);
 433     if (_mutator_free_bitmap.at(idx) && can_allocate_from(region)) {
 434       _mutator_free_bitmap.clear_bit(idx);
 435       _collector_free_bitmap.set_bit(idx);
 436       size_t ac = alloc_capacity(region);
 437       _capacity -= ac;
 438       reserved += ac;
 439     }
 440   }
 441 
 442   recompute_bounds();
 443   assert_bounds();
 444 }
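
The evac reserve sizing in rebuild() is a plain percentage of maximum capacity. A hedged arithmetic sketch, assuming a 1 GiB heap and a ShenandoahEvacReserve setting of 5 percent (both numbers made up here):

#include <cstddef>
#include <cstdio>

int main() {
  size_t max_capacity = 1024UL * 1024 * 1024;  // assumed 1 GiB heap
  size_t evac_reserve = 5;                     // assumed ShenandoahEvacReserve (percent)
  size_t to_reserve = max_capacity / 100 * evac_reserve;
  // ~51 MiB; rebuild() then flips mutator regions, right to left, until
  // at least this much capacity sits in the collector view.
  printf("evac reserve: %zu bytes\n", to_reserve);
  return 0;
}
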
 445 
 446 void ShenandoahFreeSet::log_status() {
 447   shenandoah_assert_heaplocked();
 448 
 449   LogTarget(Info, gc, ergo) lt;
 450   if (lt.is_enabled()) {
 451     ResourceMark rm;
 452     LogStream ls(lt);
 453 
 454     {
 455       size_t last_idx = 0;
 456       size_t max = 0;
 457       size_t max_contig = 0;
 458       size_t empty_contig = 0;
 459 
 460       size_t total_used = 0;
 461       size_t total_free = 0;
 462       size_t total_free_ext = 0;
 463 
 464       for (size_t idx = _mutator_leftmost; idx <= _mutator_rightmost; idx++) {
 465         if (is_mutator_free(idx)) {
 466           ShenandoahHeapRegion *r = _heap->get_region(idx);
 467           size_t free = alloc_capacity(r);
 468 
 469           max = MAX2(max, free);
 470 
 471           if (r->is_empty()) {
 472             total_free_ext += free;
 473             if (last_idx + 1 == idx) {
 474               empty_contig++;
 475             } else {
 476               empty_contig = 1;
 477             }
 478           } else {
 479             empty_contig = 0;
 480           }
 481 
 482           total_used += r->used();
 483           total_free += free;
 484 
 485           max_contig = MAX2(max_contig, empty_contig);
 486           last_idx = idx;
 487         }
 488       }
 489 
 490       size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
 491       size_t free = capacity() - used();
 492 
 493       ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ",
 494                byte_size_in_proper_unit(total_free),    proper_unit_for_byte_size(total_free),
 495                byte_size_in_proper_unit(max),           proper_unit_for_byte_size(max),
 496                byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous)
 497       );
 498 
 499       ls.print("Frag: ");
 500       size_t frag_ext;
 501       if (total_free_ext > 0) {
 502         frag_ext = 100 - (100 * max_humongous / total_free_ext);
 503       } else {
 504         frag_ext = 0;
 505       }
 506       ls.print(SIZE_FORMAT "%% external, ", frag_ext);
 507 
 508       size_t frag_int;
 509       if (mutator_count() > 0) {
 510         frag_int = (100 * (total_used / mutator_count()) / ShenandoahHeapRegion::region_size_bytes());
 511       } else {
 512         frag_int = 0;
 513       }
 514       ls.print(SIZE_FORMAT "%% internal; ", frag_int);
 515     }
 516 
 517     {
 518       size_t max = 0;
 519       size_t total_free = 0;
 520 
 521       for (size_t idx = _collector_leftmost; idx <= _collector_rightmost; idx++) {
 522         if (is_collector_free(idx)) {
 523           ShenandoahHeapRegion *r = _heap->get_region(idx);
 524           size_t free = alloc_capacity(r);
 525           max = MAX2(max, free);
 526           total_free += free;
 527         }
 528       }
 529 
 530       ls.print_cr("Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s",
 531                   byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
 532                   byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max));
 533     }
 534   }
 535 }
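
For concreteness, a sketch of the two percentages printed above with made-up inputs: the external number compares the largest possible humongous allocation against all free space in empty regions; the internal number is the average utilization of regions in the mutator view.

#include <cstddef>
#include <cstdio>

int main() {
  // Hypothetical inputs mirroring log_status() above.
  size_t region_bytes   = 1024 * 1024;        // assumed region size
  size_t max_humongous  = 10 * region_bytes;  // largest contiguous empty run
  size_t total_free_ext = 40 * region_bytes;  // free bytes in empty regions
  size_t total_used     = 30 * region_bytes;  // used bytes across mutator regions
  size_t mutator_count  = 60;                 // regions in the mutator view
  size_t frag_ext = 100 - (100 * max_humongous / total_free_ext);       // 75
  size_t frag_int = 100 * (total_used / mutator_count) / region_bytes;  // 50
  printf("Frag: %zu%% external, %zu%% internal\n", frag_ext, frag_int);
  return 0;
}
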
 536 
 537 HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
 538   shenandoah_assert_heaplocked();
 539   assert_bounds();
 540 
 541   if (req.size() > ShenandoahHeapRegion::humongous_threshold_words()) {
 542     switch (req.type()) {
 543       case ShenandoahAllocRequest::_alloc_shared:
 544       case ShenandoahAllocRequest::_alloc_shared_gc:
 545         in_new_region = true;
 546         return allocate_contiguous(req);
 547       case ShenandoahAllocRequest::_alloc_gclab:
 548       case ShenandoahAllocRequest::_alloc_tlab:
 549         in_new_region = false;
 550         assert(false, "Trying to allocate TLAB larger than the humongous threshold: " SIZE_FORMAT " > " SIZE_FORMAT,
 551                req.size(), ShenandoahHeapRegion::humongous_threshold_words());
 552         return nullptr;
 553       default:
 554         ShouldNotReachHere();
 555         return nullptr;
 556     }
 557   } else {
 558     return allocate_single(req, in_new_region);
 559   }
 560 }
 561 
 562 size_t ShenandoahFreeSet::unsafe_peek_free() const {
 563   // Deliberately not locked, this method is unsafe when free set is modified.
 564 
 565   for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
 566     if (index < _max && is_mutator_free(index)) {
 567       ShenandoahHeapRegion* r = _heap->get_region(index);
 568       if (r->free() >= MinTLABSize) {
 569         return r->free();
 570       }
 571     }
 572   }
 573 
 574   // It appears that no regions left
 575   return 0;
 576 }
 577 
 578 void ShenandoahFreeSet::print_on(outputStream* out) const {
 579   out->print_cr("Mutator Free Set: " SIZE_FORMAT "", mutator_count());
 580   for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
 581     if (is_mutator_free(index)) {
 582       _heap->get_region(index)->print_on(out);
 583     }
 584   }
 585   out->print_cr("Collector Free Set: " SIZE_FORMAT "", collector_count());
 586   for (size_t index = _collector_leftmost; index <= _collector_rightmost; index++) {
 587     if (is_collector_free(index)) {
 588       _heap->get_region(index)->print_on(out);
 589     }
 590   }
 591 }
 592 
 593 /*
 594  * Internal fragmentation metric: describes how fragmented the heap regions are.
 595  *
 596  * It is derived as:
 597  *
 598  *               sum(used[i]^2, i=0..k)
 599  *   IF = 1 - ------------------------------
 600  *              C * sum(used[i], i=0..k)
 601  *
 602  * ...where k is the number of regions in computation, C is the region capacity, and
 603  * used[i] is the used space in the region.
 604  *
 605  * The non-linearity causes IF to be lower for the cases where the same total heap
 606  * used is densely packed. For example:
 607  *   a) Heap is completely full  => IF = 0
 608  *   b) Heap is half full, first 50% regions are completely full => IF = 0
 609  *   c) Heap is half full, each region is 50% full => IF = 1/2
 610  *   d) Heap is quarter full, first 50% regions are completely full => IF = 0
 611  *   e) Heap is quarter full, each region is 25% full => IF = 3/4
 612  *   f) Heap has one small object per each region => IF =~ 1
 613  */
 614 double ShenandoahFreeSet::internal_fragmentation() {
 615   double squared = 0;
 616   double linear = 0;
 617 
 618   for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
 619     if (is_mutator_free(index)) {
 620       ShenandoahHeapRegion* r = _heap->get_region(index);
 621       size_t used = r->used();
 622       squared += used * used;
 623       linear += used;
 624     }
 625   }
 626 
 627   if (linear > 0) {
 628     double s = squared / (ShenandoahHeapRegion::region_size_bytes() * linear);
 629     return 1 - s;
 630   } else {
 631     return 0;
 632   }
 633 }
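
A worked instance of case (c) from the comment above, as a minimal sketch with hypothetical numbers: four regions of capacity C, each half full, give sum(used^2) = 4*(C/2)^2 = C^2 and C * sum(used) = 2*C^2, hence IF = 1 - 1/2 = 1/2.

#include <cstdio>

int main() {
  const double C = 1024.0;  // hypothetical region capacity, in bytes
  const double used[] = { C/2, C/2, C/2, C/2 };  // case (c): each region 50% full
  double squared = 0, linear = 0;
  for (double u : used) { squared += u * u; linear += u; }
  double internal_frag = 1.0 - squared / (C * linear);
  printf("IF = %.2f\n", internal_frag);  // prints 0.50
  return 0;
}
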
 634 
 635 /*
 636  * External fragmentation metric: describes how fragmented the heap is.
 637  *
 638  * It is derived as:
 639  *
 640  *   EF = 1 - largest_contiguous_free / total_free
 641  *
 642  * For example:
 643  *   a) Heap is completely empty => EF = 0
 644  *   b) Heap is completely full => EF = 0
 645  *   c) Heap is first-half full => EF = 1/2
 646  *   d) Heap is half full, full and empty regions interleave => EF =~ 1
 647  */
 648 double ShenandoahFreeSet::external_fragmentation() {
 649   size_t last_idx = 0;
 650   size_t max_contig = 0;
 651   size_t empty_contig = 0;
 652 
 653   size_t free = 0;
 654 
 655   for (size_t index = _mutator_leftmost; index <= _mutator_rightmost; index++) {
 656     if (is_mutator_free(index)) {
 657       ShenandoahHeapRegion* r = _heap->get_region(index);
 658       if (r->is_empty()) {
 659         free += ShenandoahHeapRegion::region_size_bytes();
 660         if (last_idx + 1 == index) {
 661           empty_contig++;
 662         } else {
 663           empty_contig = 1;
 664         }
 665       } else {
 666         empty_contig = 0;
 667       }
 668 
 669       max_contig = MAX2(max_contig, empty_contig);
 670       last_idx = index;
 671     }
 672   }
 673 
 674   if (free > 0) {
 675     return 1 - (1.0 * max_contig * ShenandoahHeapRegion::region_size_bytes() / free);
 676   } else {
 677     return 0;
 678   }
 679 }
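
Likewise for case (d) of the external metric, sketched with hypothetical numbers: with four empty regions that never neighbor each other, the largest contiguous run is a single region, so EF = 1 - 1/4 = 3/4 here, trending toward 1 as the region count grows.

#include <cstddef>
#include <cstdio>

int main() {
  const double C = 1024.0;  // hypothetical region capacity, in bytes
  // Case (d): full and empty regions interleave, so no two empty neighbors.
  size_t empty_regions = 4, max_contig = 1;
  double free_bytes = empty_regions * C;
  double external_frag = 1.0 - (max_contig * C) / free_bytes;
  printf("EF = %.2f\n", external_frag);  // prints 0.75
  return 0;
}
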
 680 
 681 #ifdef ASSERT
 682 void ShenandoahFreeSet::assert_bounds() const {
 683   // Performance invariants. Failing these would not break the free set, but performance
 684   // would suffer.
 685   assert (_mutator_leftmost <= _max, "leftmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, _mutator_leftmost,  _max);
 686   assert (_mutator_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _mutator_rightmost, _max);
 687 
 688   assert (_mutator_leftmost == _max || is_mutator_free(_mutator_leftmost),  "leftmost region should be free: " SIZE_FORMAT,  _mutator_leftmost);
 689   assert (_mutator_rightmost == 0   || is_mutator_free(_mutator_rightmost), "rightmost region should be free: " SIZE_FORMAT, _mutator_rightmost);
 690 
 691   size_t beg_off = _mutator_free_bitmap.find_first_set_bit(0);
 692   size_t end_off = _mutator_free_bitmap.find_first_set_bit(_mutator_rightmost + 1);
 693   assert (beg_off >= _mutator_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _mutator_leftmost);
 694   assert (end_off == _max,      "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, _mutator_rightmost);
 695 
 696   assert (_collector_leftmost <= _max, "leftmost in bounds: "  SIZE_FORMAT " < " SIZE_FORMAT, _collector_leftmost,  _max);
 697   assert (_collector_rightmost < _max, "rightmost in bounds: " SIZE_FORMAT " < " SIZE_FORMAT, _collector_rightmost, _max);
 698 
 699   assert (_collector_leftmost == _max || is_collector_free(_collector_leftmost),  "leftmost region should be free: " SIZE_FORMAT,  _collector_leftmost);
 700   assert (_collector_rightmost == 0   || is_collector_free(_collector_rightmost), "rightmost region should be free: " SIZE_FORMAT, _collector_rightmost);
 701 
 702   beg_off = _collector_free_bitmap.find_first_set_bit(0);
 703   end_off = _collector_free_bitmap.find_first_set_bit(_collector_rightmost + 1);
 704   assert (beg_off >= _collector_leftmost, "free regions before the leftmost: " SIZE_FORMAT ", bound " SIZE_FORMAT, beg_off, _collector_leftmost);
 705   assert (end_off == _max,      "free regions past the rightmost: " SIZE_FORMAT ", bound " SIZE_FORMAT,  end_off, _collector_rightmost);
 706 }
 707 #endif

   1 /*
   2  * Copyright (c) 2016, 2021, Red Hat, Inc. All rights reserved.
   3  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "gc/shared/tlab_globals.hpp"
  28 #include "gc/shenandoah/shenandoahAffiliation.hpp"
  29 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  30 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  31 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  32 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  33 #include "gc/shenandoah/shenandoahOldGeneration.hpp"
  34 #include "gc/shenandoah/shenandoahYoungGeneration.hpp"
  35 #include "gc/shenandoah/shenandoahSimpleBitMap.hpp"
  36 #include "gc/shenandoah/shenandoahSimpleBitMap.inline.hpp"
  37 #include "logging/logStream.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "runtime/orderAccess.hpp"
  40 
  41 static const char* partition_name(ShenandoahFreeSetPartitionId t) {
  42   switch (t) {
  43     case ShenandoahFreeSetPartitionId::NotFree: return "NotFree";
  44     case ShenandoahFreeSetPartitionId::Mutator: return "Mutator";
  45     case ShenandoahFreeSetPartitionId::Collector: return "Collector";
  46     case ShenandoahFreeSetPartitionId::OldCollector: return "OldCollector";
  47     default:
  48       ShouldNotReachHere();
  49       return "Unrecognized";
  50   }
  51 }
  52 
  53 class ShenandoahLeftRightIterator {
  54 private:
  55   idx_t _idx;
  56   idx_t _end;
  57   ShenandoahRegionPartitions* _partitions;
  58   ShenandoahFreeSetPartitionId _partition;
  59 public:
  60   explicit ShenandoahLeftRightIterator(ShenandoahRegionPartitions* partitions, ShenandoahFreeSetPartitionId partition, bool use_empty = false)
  61     : _idx(0), _end(0), _partitions(partitions), _partition(partition) {
  62     _idx = use_empty ? _partitions->leftmost_empty(_partition) : _partitions->leftmost(_partition);
  63     _end = use_empty ? _partitions->rightmost_empty(_partition) : _partitions->rightmost(_partition);
  64   }
  65 
  66   bool has_next() const {
  67     if (_idx <= _end) {
  68       assert(_partitions->in_free_set(_partition, _idx), "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, _idx);
  69       return true;
  70     }
  71     return false;
  72   }
  73 
  74   idx_t current() const {
  75     return _idx;
  76   }
  77 
  78   idx_t next() {
  79     _idx = _partitions->find_index_of_next_available_region(_partition, _idx + 1);
  80     return current();
  81   }
  82 };
  83 
  84 class ShenandoahRightLeftIterator {
  85 private:
  86   idx_t _idx;
  87   idx_t _end;
  88   ShenandoahRegionPartitions* _partitions;
  89   ShenandoahFreeSetPartitionId _partition;
  90 public:
  91   explicit ShenandoahRightLeftIterator(ShenandoahRegionPartitions* partitions, ShenandoahFreeSetPartitionId partition, bool use_empty = false)
  92     : _idx(0), _end(0), _partitions(partitions), _partition(partition) {
  93     _idx = use_empty ? _partitions->rightmost_empty(_partition) : _partitions->rightmost(_partition);
  94     _end = use_empty ? _partitions->leftmost_empty(_partition) : _partitions->leftmost(_partition);
  95   }
  96 
  97   bool has_next() const {
  98     if (_idx >= _end) {
  99       assert(_partitions->in_free_set(_partition, _idx), "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, _idx);
 100       return true;
 101     }
 102     return false;
 103   }
 104 
 105   idx_t current() const {
 106     return _idx;
 107   }
 108 
 109   idx_t next() {
 110     _idx = _partitions->find_index_of_previous_available_region(_partition, _idx - 1);
 111     return current();
 112   }
 113 };
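
A hedged usage sketch for the two iterator classes above, assuming a ShenandoahRegionPartitions* named partitions (the harness is hypothetical; the protocol of seeding with current(), testing has_next(), and advancing with next() is what this file's code relies on):

// Visit every region in the Mutator partition, left to right.
void visit_mutator_regions(ShenandoahRegionPartitions* partitions) {
  ShenandoahLeftRightIterator it(partitions, ShenandoahFreeSetPartitionId::Mutator);
  for (idx_t idx = it.current(); it.has_next(); idx = it.next()) {
    // ... inspect or allocate from region idx ...
  }
}
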
 114 
 115 #ifndef PRODUCT
 116 void ShenandoahRegionPartitions::dump_bitmap() const {
 117   log_debug(gc)("Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT "], Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT
 118                 "], Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
 119                 _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)],
 120                 _rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)],
 121                 _leftmosts[int(ShenandoahFreeSetPartitionId::Collector)],
 122                 _rightmosts[int(ShenandoahFreeSetPartitionId::Collector)],
 123                 _leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)],
 124                 _rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)]);
 125   log_debug(gc)("Empty Mutator range [" SSIZE_FORMAT ", " SSIZE_FORMAT
 126                 "], Empty Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT
  127                 "], Empty Old Collector range [" SSIZE_FORMAT ", " SSIZE_FORMAT "]",
 128                 _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
 129                 _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)],
 130                 _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
 131                 _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
 132                 _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
 133                 _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)]);
 134 
 135   log_debug(gc)("%6s: %18s %18s %18s %18s", "index", "Mutator Bits", "Collector Bits", "Old Collector Bits", "NotFree Bits");
 136   dump_bitmap_range(0, _max-1);
 137 }
 138 
 139 void ShenandoahRegionPartitions::dump_bitmap_range(idx_t start_region_idx, idx_t end_region_idx) const {
 140   assert((start_region_idx >= 0) && (start_region_idx < (idx_t) _max), "precondition");
 141   assert((end_region_idx >= 0) && (end_region_idx < (idx_t) _max), "precondition");
 142   idx_t aligned_start = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(start_region_idx);
 143   idx_t aligned_end = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(end_region_idx);
 144   idx_t alignment = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].alignment();
 145   while (aligned_start <= aligned_end) {
 146     dump_bitmap_row(aligned_start);
 147     aligned_start += alignment;
 148   }
 149 }
 150 
 151 void ShenandoahRegionPartitions::dump_bitmap_row(idx_t region_idx) const {
 152   assert((region_idx >= 0) && (region_idx < (idx_t) _max), "precondition");
 153   idx_t aligned_idx = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].aligned_index(region_idx);
 154   uintx mutator_bits = _membership[int(ShenandoahFreeSetPartitionId::Mutator)].bits_at(aligned_idx);
 155   uintx collector_bits = _membership[int(ShenandoahFreeSetPartitionId::Collector)].bits_at(aligned_idx);
 156   uintx old_collector_bits = _membership[int(ShenandoahFreeSetPartitionId::OldCollector)].bits_at(aligned_idx);
 157   uintx free_bits = mutator_bits | collector_bits | old_collector_bits;
 158   uintx notfree_bits =  ~free_bits;
 159   log_debug(gc)(SSIZE_FORMAT_W(6) ": " SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0 " 0x" SIZE_FORMAT_X_0,
 160                 aligned_idx, mutator_bits, collector_bits, old_collector_bits, notfree_bits);
 161 }
 162 #endif
 163 
 164 ShenandoahRegionPartitions::ShenandoahRegionPartitions(size_t max_regions, ShenandoahFreeSet* free_set) :
 165     _max(max_regions),
 166     _region_size_bytes(ShenandoahHeapRegion::region_size_bytes()),
 167     _free_set(free_set),
  168     _membership{ ShenandoahSimpleBitMap(max_regions), ShenandoahSimpleBitMap(max_regions), ShenandoahSimpleBitMap(max_regions) }
 169 {
 170   make_all_regions_unavailable();
 171 }
 172 
 173 inline bool ShenandoahFreeSet::can_allocate_from(ShenandoahHeapRegion *r) const {
 174   return r->is_empty() || (r->is_trash() && !_heap->is_concurrent_weak_root_in_progress());
 175 }
 176 
 177 inline bool ShenandoahFreeSet::can_allocate_from(size_t idx) const {
 178   ShenandoahHeapRegion* r = _heap->get_region(idx);
 179   return can_allocate_from(r);
 180 }
 181 
 182 inline size_t ShenandoahFreeSet::alloc_capacity(ShenandoahHeapRegion *r) const {
 183   if (r->is_trash()) {
 184     // This would be recycled on allocation path
 185     return ShenandoahHeapRegion::region_size_bytes();
 186   } else {
 187     return r->free();
 188   }
 189 }
 190 
 191 inline size_t ShenandoahFreeSet::alloc_capacity(size_t idx) const {
 192   ShenandoahHeapRegion* r = _heap->get_region(idx);
 193   return alloc_capacity(r);
 194 }
 195 
 196 inline bool ShenandoahFreeSet::has_alloc_capacity(ShenandoahHeapRegion *r) const {
 197   return alloc_capacity(r) > 0;
 198 }
 199 
 200 inline idx_t ShenandoahRegionPartitions::leftmost(ShenandoahFreeSetPartitionId which_partition) const {
 201   assert (which_partition < NumPartitions, "selected free partition must be valid");
 202   idx_t idx = _leftmosts[int(which_partition)];
 203   if (idx >= _max) {
 204     return _max;
 205   } else {
  206     // Cannot assert that _membership[which_partition].is_set(idx) because this helper method may be used
  207     // to query the original value of leftmost when leftmost must be adjusted because the interval representing
  208     // which_partition is shrinking after the region that used to be leftmost is retired.
 209     return idx;
 210   }
 211 }
 212 
 213 inline idx_t ShenandoahRegionPartitions::rightmost(ShenandoahFreeSetPartitionId which_partition) const {
 214   assert (which_partition < NumPartitions, "selected free partition must be valid");
 215   idx_t idx = _rightmosts[int(which_partition)];
  216   // Cannot assert that _membership[which_partition].is_set(idx) because this helper method may be used
  217   // to query the original value of rightmost when rightmost must be adjusted because the interval representing
  218   // which_partition is shrinking after the region that used to be rightmost is retired.
 219   return idx;
 220 }
 221 
 222 void ShenandoahRegionPartitions::make_all_regions_unavailable() {
 223   for (size_t partition_id = 0; partition_id < IntNumPartitions; partition_id++) {
 224     _membership[partition_id].clear_all();
 225     _leftmosts[partition_id] = _max;
 226     _rightmosts[partition_id] = -1;
 227     _leftmosts_empty[partition_id] = _max;
  228     _rightmosts_empty[partition_id] = -1;
 229     _capacity[partition_id] = 0;
 230     _used[partition_id] = 0;
 231   }
 232   _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = _region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 233 }
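
Note the sentinel pair used here: an empty partition is encoded as leftmost == _max and rightmost == -1, so a leftmost > rightmost comparison (as in is_empty() further down) reads the partition as having no members. A minimal sketch of the convention, with hypothetical values:

#include <cstdio>

int main() {
  const long max = 128;                 // hypothetical region count
  long leftmost = max, rightmost = -1;  // the "no members" sentinels
  printf("empty: %s\n", (leftmost > rightmost) ? "yes" : "no");  // prints yes
  return 0;
}
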
 234 
 235 void ShenandoahRegionPartitions::establish_mutator_intervals(idx_t mutator_leftmost, idx_t mutator_rightmost,
 236                                                              idx_t mutator_leftmost_empty, idx_t mutator_rightmost_empty,
 237                                                              size_t mutator_region_count, size_t mutator_used) {
 238   _leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost;
 239   _rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_rightmost;
 240   _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_leftmost_empty;
 241   _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_rightmost_empty;
 242 
 243   _region_counts[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count;
 244   _used[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_used;
 245   _capacity[int(ShenandoahFreeSetPartitionId::Mutator)] = mutator_region_count * _region_size_bytes;
 246 
 247   _leftmosts[int(ShenandoahFreeSetPartitionId::Collector)] = _max;
 248   _rightmosts[int(ShenandoahFreeSetPartitionId::Collector)] = -1;
 249   _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)] = _max;
 250   _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)] = -1;
 251 
 252   _region_counts[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 253   _used[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 254   _capacity[int(ShenandoahFreeSetPartitionId::Collector)] = 0;
 255 }
 256 
 257 void ShenandoahRegionPartitions::establish_old_collector_intervals(idx_t old_collector_leftmost, idx_t old_collector_rightmost,
 258                                                                    idx_t old_collector_leftmost_empty,
 259                                                                    idx_t old_collector_rightmost_empty,
 260                                                                    size_t old_collector_region_count, size_t old_collector_used) {
 261   _leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_leftmost;
 262   _rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_rightmost;
 263   _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_leftmost_empty;
 264   _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_rightmost_empty;
 265 
 266   _region_counts[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_region_count;
 267   _used[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_used;
 268   _capacity[int(ShenandoahFreeSetPartitionId::OldCollector)] = old_collector_region_count * _region_size_bytes;
 269 }
 270 
 271 void ShenandoahRegionPartitions::increase_used(ShenandoahFreeSetPartitionId which_partition, size_t bytes) {
 272   assert (which_partition < NumPartitions, "Partition must be valid");
 273   _used[int(which_partition)] += bytes;
 274   assert (_used[int(which_partition)] <= _capacity[int(which_partition)],
 275           "Must not use (" SIZE_FORMAT ") more than capacity (" SIZE_FORMAT ") after increase by " SIZE_FORMAT,
 276           _used[int(which_partition)], _capacity[int(which_partition)], bytes);
 277 }
 278 
 279 inline void ShenandoahRegionPartitions::shrink_interval_if_range_modifies_either_boundary(
 280   ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx) {
 281   assert((low_idx <= high_idx) && (low_idx >= 0) && (high_idx < _max), "Range must span legal index values");
 282   if (low_idx == leftmost(partition)) {
 283     assert (!_membership[int(partition)].is_set(low_idx), "Do not shrink interval if region not removed");
 284     if (high_idx + 1 == _max) {
 285       _leftmosts[int(partition)] = _max;
 286     } else {
 287       _leftmosts[int(partition)] = find_index_of_next_available_region(partition, high_idx + 1);
 288     }
 289     if (_leftmosts_empty[int(partition)] < _leftmosts[int(partition)]) {
 290       // This gets us closer to where we need to be; we'll scan further when leftmosts_empty is requested.
 291       _leftmosts_empty[int(partition)] = _leftmosts[int(partition)];
 292     }
 293   }
 294   if (high_idx == _rightmosts[int(partition)]) {
 295     assert (!_membership[int(partition)].is_set(high_idx), "Do not shrink interval if region not removed");
 296     if (low_idx == 0) {
 297       _rightmosts[int(partition)] = -1;
 298     } else {
 299       _rightmosts[int(partition)] = find_index_of_previous_available_region(partition, low_idx - 1);
 300     }
 301     if (_rightmosts_empty[int(partition)] > _rightmosts[int(partition)]) {
 302       // This gets us closer to where we need to be; we'll scan further when rightmosts_empty is requested.
 303       _rightmosts_empty[int(partition)] = _rightmosts[int(partition)];
 304     }
 305   }
 306   if (_leftmosts[int(partition)] > _rightmosts[int(partition)]) {
 307     _leftmosts[int(partition)] = _max;
 308     _rightmosts[int(partition)] = -1;
 309     _leftmosts_empty[int(partition)] = _max;
 310     _rightmosts_empty[int(partition)] = -1;
 311   }
 312 }
 313 
 314 inline void ShenandoahRegionPartitions::shrink_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition, idx_t idx) {
 315   shrink_interval_if_range_modifies_either_boundary(partition, idx, idx);
 316 }
 317 
 318 inline void ShenandoahRegionPartitions::expand_interval_if_boundary_modified(ShenandoahFreeSetPartitionId partition,
 319                                                                              idx_t idx, size_t region_available) {
 320   if (_leftmosts[int(partition)] > idx) {
 321     _leftmosts[int(partition)] = idx;
 322   }
 323   if (_rightmosts[int(partition)] < idx) {
 324     _rightmosts[int(partition)] = idx;
 325   }
 326   if (region_available == _region_size_bytes) {
 327     if (_leftmosts_empty[int(partition)] > idx) {
 328       _leftmosts_empty[int(partition)] = idx;
 329     }
 330     if (_rightmosts_empty[int(partition)] < idx) {
 331       _rightmosts_empty[int(partition)] = idx;
 332     }
 333   }
 334 }
 335 
 336 void ShenandoahRegionPartitions::retire_range_from_partition(
 337   ShenandoahFreeSetPartitionId partition, idx_t low_idx, idx_t high_idx) {
 338 
 339   // Note: we may remove from free partition even if region is not entirely full, such as when available < PLAB::min_size()
 340   assert ((low_idx < _max) && (high_idx < _max), "Both indices are sane: " SIZE_FORMAT " and " SIZE_FORMAT " < " SIZE_FORMAT,
 341           low_idx, high_idx, _max);
 342   assert (partition < NumPartitions, "Cannot remove from free partitions if not already free");
 343 
 344   for (idx_t idx = low_idx; idx <= high_idx; idx++) {
 345     assert (in_free_set(partition, idx), "Must be in partition to remove from partition");
 346     _membership[int(partition)].clear_bit(idx);
 347   }
 348   _region_counts[int(partition)] -= high_idx + 1 - low_idx;
 349   shrink_interval_if_range_modifies_either_boundary(partition, low_idx, high_idx);
 350 }
 351 
 352 void ShenandoahRegionPartitions::retire_from_partition(ShenandoahFreeSetPartitionId partition, idx_t idx, size_t used_bytes) {
 353 
 354   // Note: we may remove from free partition even if region is not entirely full, such as when available < PLAB::min_size()
 355   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 356   assert (partition < NumPartitions, "Cannot remove from free partitions if not already free");
 357   assert (in_free_set(partition, idx), "Must be in partition to remove from partition");
 358 
 359   if (used_bytes < _region_size_bytes) {
 360     // Count the alignment pad remnant of memory as used when we retire this region
 361     increase_used(partition, _region_size_bytes - used_bytes);
 362   }
 363   _membership[int(partition)].clear_bit(idx);
 364   shrink_interval_if_boundary_modified(partition, idx);
 365   _region_counts[int(partition)]--;
 366 }
 367 
 368 void ShenandoahRegionPartitions::make_free(idx_t idx, ShenandoahFreeSetPartitionId which_partition, size_t available) {
 369   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 370   assert (membership(idx) == ShenandoahFreeSetPartitionId::NotFree, "Cannot make free if already free");
 371   assert (which_partition < NumPartitions, "selected free partition must be valid");
 372   assert (available <= _region_size_bytes, "Available cannot exceed region size");
 373 
 374   _membership[int(which_partition)].set_bit(idx);
 375   _capacity[int(which_partition)] += _region_size_bytes;
 376   _used[int(which_partition)] += _region_size_bytes - available;
 377   expand_interval_if_boundary_modified(which_partition, idx, available);
 378   _region_counts[int(which_partition)]++;
 379 }
 380 
 381 bool ShenandoahRegionPartitions::is_mutator_partition(ShenandoahFreeSetPartitionId p) {
 382   return (p == ShenandoahFreeSetPartitionId::Mutator);
 383 }
 384 
 385 bool ShenandoahRegionPartitions::is_young_collector_partition(ShenandoahFreeSetPartitionId p) {
 386   return (p == ShenandoahFreeSetPartitionId::Collector);
 387 }
 388 
 389 bool ShenandoahRegionPartitions::is_old_collector_partition(ShenandoahFreeSetPartitionId p) {
 390   return (p == ShenandoahFreeSetPartitionId::OldCollector);
 391 }
 392 
 393 bool ShenandoahRegionPartitions::available_implies_empty(size_t available_in_region) {
 394   return (available_in_region == _region_size_bytes);
 395 }
 396 
 397 
 398 void ShenandoahRegionPartitions::move_from_partition_to_partition(idx_t idx, ShenandoahFreeSetPartitionId orig_partition,
 399                                                                   ShenandoahFreeSetPartitionId new_partition, size_t available) {
 400   ShenandoahHeapRegion* r = ShenandoahHeap::heap()->get_region(idx);
 401   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 402   assert (orig_partition < NumPartitions, "Original partition must be valid");
 403   assert (new_partition < NumPartitions, "New partition must be valid");
 404   assert (available <= _region_size_bytes, "Available cannot exceed region size");
 405   assert (_membership[int(orig_partition)].is_set(idx), "Cannot move from partition unless in partition");
 406   assert ((r != nullptr) && ((r->is_trash() && (available == _region_size_bytes)) ||
 407                              (r->used() + available == _region_size_bytes)),
 408           "Used: " SIZE_FORMAT " + available: " SIZE_FORMAT " should equal region size: " SIZE_FORMAT,
 409           ShenandoahHeap::heap()->get_region(idx)->used(), available, _region_size_bytes);
 410 
 411   // Expected transitions:
 412   //  During rebuild:         Mutator => Collector
 413   //                          Mutator empty => Collector
 414   //                          Mutator empty => OldCollector
 415   //  During flip_to_gc:      Mutator empty => Collector
 416   //                          Mutator empty => OldCollector
 417   // At start of update refs: Collector => Mutator
 418   //                          OldCollector Empty => Mutator
 419   assert ((is_mutator_partition(orig_partition) && is_young_collector_partition(new_partition)) ||
 420           (is_mutator_partition(orig_partition) &&
 421            available_implies_empty(available) && is_old_collector_partition(new_partition)) ||
 422           (is_young_collector_partition(orig_partition) && is_mutator_partition(new_partition)) ||
 423           (is_old_collector_partition(orig_partition)
 424            && available_implies_empty(available) && is_mutator_partition(new_partition)),
 425           "Unexpected movement between partitions, available: " SIZE_FORMAT ", _region_size_bytes: " SIZE_FORMAT
 426           ", orig_partition: %s, new_partition: %s",
 427           available, _region_size_bytes, partition_name(orig_partition), partition_name(new_partition));
 428 
 429   size_t used = _region_size_bytes - available;
 430   assert (_used[int(orig_partition)] >= used,
 431           "Orig partition used: " SIZE_FORMAT " must exceed moved used: " SIZE_FORMAT " within region " SSIZE_FORMAT,
 432           _used[int(orig_partition)], used, idx);
 433 
 434   _membership[int(orig_partition)].clear_bit(idx);
 435   _membership[int(new_partition)].set_bit(idx);
 436 
 437   _capacity[int(orig_partition)] -= _region_size_bytes;
 438   _used[int(orig_partition)] -= used;
 439   shrink_interval_if_boundary_modified(orig_partition, idx);
 440 
  441   _capacity[int(new_partition)] += _region_size_bytes;
 442   _used[int(new_partition)] += used;
 443   expand_interval_if_boundary_modified(new_partition, idx, available);
 444 
 445   _region_counts[int(orig_partition)]--;
 446   _region_counts[int(new_partition)]++;
 447 }
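
One expected transition from the table above, sketched under stated assumptions: during flip_to_gc-style stealing, an empty region moves from Mutator to Collector with its full capacity available. Here 'partitions' and 'idx' are assumed to exist in the caller; this is not a call site from this file.

// Assumed context: 'partitions' is a ShenandoahRegionPartitions and 'idx'
// names an empty Mutator region. The move transfers one region of capacity
// and zero used bytes, and widens the Collector interval to cover idx.
size_t region_bytes = ShenandoahHeapRegion::region_size_bytes();
partitions.move_from_partition_to_partition(idx,
                                            ShenandoahFreeSetPartitionId::Mutator,
                                            ShenandoahFreeSetPartitionId::Collector,
                                            /* available = */ region_bytes);
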
 448 
 449 const char* ShenandoahRegionPartitions::partition_membership_name(idx_t idx) const {
 450   return partition_name(membership(idx));
 451 }
 452 
 453 inline ShenandoahFreeSetPartitionId ShenandoahRegionPartitions::membership(idx_t idx) const {
 454   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 455   ShenandoahFreeSetPartitionId result = ShenandoahFreeSetPartitionId::NotFree;
 456   for (uint partition_id = 0; partition_id < UIntNumPartitions; partition_id++) {
 457     if (_membership[partition_id].is_set(idx)) {
 458       assert(result == ShenandoahFreeSetPartitionId::NotFree, "Region should reside in only one partition");
 459       result = (ShenandoahFreeSetPartitionId) partition_id;
 460     }
 461   }
 462   return result;
 463 }
 464 
 465 #ifdef ASSERT
 466 inline bool ShenandoahRegionPartitions::partition_id_matches(idx_t idx, ShenandoahFreeSetPartitionId test_partition) const {
 467   assert (idx < _max, "index is sane: " SIZE_FORMAT " < " SIZE_FORMAT, idx, _max);
 468   assert (test_partition < ShenandoahFreeSetPartitionId::NotFree, "must be a valid partition");
 469 
 470   return membership(idx) == test_partition;
 471 }
 472 #endif
 473 
 474 inline bool ShenandoahRegionPartitions::is_empty(ShenandoahFreeSetPartitionId which_partition) const {
 475   assert (which_partition < NumPartitions, "selected free partition must be valid");
 476   return (leftmost(which_partition) > rightmost(which_partition));
 477 }
 478 
 479 inline idx_t ShenandoahRegionPartitions::find_index_of_next_available_region(
 480   ShenandoahFreeSetPartitionId which_partition, idx_t start_index) const {
 481   idx_t rightmost_idx = rightmost(which_partition);
 482   idx_t leftmost_idx = leftmost(which_partition);
 483   if ((rightmost_idx < leftmost_idx) || (start_index > rightmost_idx)) return _max;
 484   if (start_index < leftmost_idx) {
 485     start_index = leftmost_idx;
 486   }
 487   idx_t result = _membership[int(which_partition)].find_first_set_bit(start_index, rightmost_idx + 1);
 488   if (result > rightmost_idx) {
 489     result = _max;
 490   }
 491   assert (result >= start_index, "Requires progress");
 492   return result;
 493 }
 494 
 495 inline idx_t ShenandoahRegionPartitions::find_index_of_previous_available_region(
 496   ShenandoahFreeSetPartitionId which_partition, idx_t last_index) const {
 497   idx_t rightmost_idx = rightmost(which_partition);
 498   idx_t leftmost_idx = leftmost(which_partition);
 499   // if (leftmost_idx == max) then (last_index < leftmost_idx)
 500   if (last_index < leftmost_idx) return -1;
 501   if (last_index > rightmost_idx) {
 502     last_index = rightmost_idx;
 503   }
 504   idx_t result = _membership[int(which_partition)].find_last_set_bit(-1, last_index);
 505   if (result < leftmost_idx) {
 506     result = -1;
 507   }
 508   assert (result <= last_index, "Requires progress");
 509   return result;
 510 }
 511 
 512 inline idx_t ShenandoahRegionPartitions::find_index_of_next_available_cluster_of_regions(
 513   ShenandoahFreeSetPartitionId which_partition, idx_t start_index, size_t cluster_size) const {
 514   idx_t rightmost_idx = rightmost(which_partition);
 515   idx_t leftmost_idx = leftmost(which_partition);
 516   if ((rightmost_idx < leftmost_idx) || (start_index > rightmost_idx)) return _max;
 517   idx_t result = _membership[int(which_partition)].find_first_consecutive_set_bits(start_index, rightmost_idx + 1, cluster_size);
 518   if (result > rightmost_idx) {
 519     result = _max;
 520   }
 521   assert (result >= start_index, "Requires progress");
 522   return result;
 523 }
 524 
 525 inline idx_t ShenandoahRegionPartitions::find_index_of_previous_available_cluster_of_regions(
 526   ShenandoahFreeSetPartitionId which_partition, idx_t last_index, size_t cluster_size) const {
 527   idx_t leftmost_idx = leftmost(which_partition);
 528   // if (leftmost_idx == max) then (last_index < leftmost_idx)
 529   if (last_index < leftmost_idx) return -1;
 530   idx_t result = _membership[int(which_partition)].find_last_consecutive_set_bits(leftmost_idx - 1, last_index, cluster_size);
 531   if (result <= leftmost_idx) {
 532     result = -1;
 533   }
 534   assert (result <= last_index, "Requires progress");
 535   return result;
 536 }
 537 
 538 idx_t ShenandoahRegionPartitions::leftmost_empty(ShenandoahFreeSetPartitionId which_partition) {
 539   assert (which_partition < NumPartitions, "selected free partition must be valid");
 540   idx_t max_regions = _max;
 541   if (_leftmosts_empty[int(which_partition)] == _max) {
 542     return _max;
 543   }
 544   for (idx_t idx = find_index_of_next_available_region(which_partition, _leftmosts_empty[int(which_partition)]);
 545        idx < max_regions; ) {
 546     assert(in_free_set(which_partition, idx), "Boundaries or find_first_set_bit failed: " SSIZE_FORMAT, idx);
 547     if (_free_set->alloc_capacity(idx) == _region_size_bytes) {
 548       _leftmosts_empty[int(which_partition)] = idx;
 549       return idx;
 550     }
 551     idx = find_index_of_next_available_region(which_partition, idx + 1);
 552   }
 553   _leftmosts_empty[int(which_partition)] = _max;
 554   _rightmosts_empty[int(which_partition)] = -1;
 555   return _max;
 556 }
 557 
 558 idx_t ShenandoahRegionPartitions::rightmost_empty(ShenandoahFreeSetPartitionId which_partition) {
 559   assert (which_partition < NumPartitions, "selected free partition must be valid");
 560   if (_rightmosts_empty[int(which_partition)] < 0) {
 561     return -1;
 562   }
 563   for (idx_t idx = find_index_of_previous_available_region(which_partition, _rightmosts_empty[int(which_partition)]);
 564        idx >= 0; ) {
 565     assert(in_free_set(which_partition, idx), "Boundaries or find_last_set_bit failed: " SSIZE_FORMAT, idx);
 566     if (_free_set->alloc_capacity(idx) == _region_size_bytes) {
 567       _rightmosts_empty[int(which_partition)] = idx;
 568       return idx;
 569     }
 570     idx = find_index_of_previous_available_region(which_partition, idx - 1);
 571   }
 572   _leftmosts_empty[int(which_partition)] = _max;
 573   _rightmosts_empty[int(which_partition)] = -1;
 574   return -1;
 575 }
 576 
 577 
 578 #ifdef ASSERT
 579 void ShenandoahRegionPartitions::assert_bounds() {
 580 
 581   idx_t leftmosts[UIntNumPartitions];
 582   idx_t rightmosts[UIntNumPartitions];
 583   idx_t empty_leftmosts[UIntNumPartitions];
 584   idx_t empty_rightmosts[UIntNumPartitions];
 585 
 586   for (uint i = 0; i < UIntNumPartitions; i++) {
 587     leftmosts[i] = _max;
 588     empty_leftmosts[i] = _max;
 589     rightmosts[i] = -1;
 590     empty_rightmosts[i] = -1;
 591   }
 592 
 593   for (idx_t i = 0; i < _max; i++) {
 594     ShenandoahFreeSetPartitionId partition = membership(i);
 595     switch (partition) {
 596       case ShenandoahFreeSetPartitionId::NotFree:
 597         break;
 598 
 599       case ShenandoahFreeSetPartitionId::Mutator:
 600       case ShenandoahFreeSetPartitionId::Collector:
 601       case ShenandoahFreeSetPartitionId::OldCollector:
 602       {
 603         size_t capacity = _free_set->alloc_capacity(i);
 604         bool is_empty = (capacity == _region_size_bytes);
 605         assert(capacity > 0, "free regions must have allocation capacity");
 606         if (i < leftmosts[int(partition)]) {
 607           leftmosts[int(partition)] = i;
 608         }
 609         if (is_empty && (i < empty_leftmosts[int(partition)])) {
 610           empty_leftmosts[int(partition)] = i;
 611         }
 612         if (i > rightmosts[int(partition)]) {
 613           rightmosts[int(partition)] = i;
 614         }
 615         if (is_empty && (i > empty_rightmosts[int(partition)])) {
 616           empty_rightmosts[int(partition)] = i;
 617         }
 618         break;
 619       }
 620 
 621       default:
 622         ShouldNotReachHere();
 623     }
 624   }
 625 
 626   // Performance invariants. Failing these would not break the free partition, but performance would suffer.
 627   assert (leftmost(ShenandoahFreeSetPartitionId::Mutator) <= _max,
 628           "leftmost in bounds: "  SSIZE_FORMAT " < " SSIZE_FORMAT, leftmost(ShenandoahFreeSetPartitionId::Mutator),  _max);
 629   assert (rightmost(ShenandoahFreeSetPartitionId::Mutator) < _max,
 630           "rightmost in bounds: "  SSIZE_FORMAT " < " SSIZE_FORMAT, rightmost(ShenandoahFreeSetPartitionId::Mutator),  _max);
 631 
 632   assert (leftmost(ShenandoahFreeSetPartitionId::Mutator) == _max
 633           || partition_id_matches(leftmost(ShenandoahFreeSetPartitionId::Mutator), ShenandoahFreeSetPartitionId::Mutator),
 634           "leftmost region should be free: " SSIZE_FORMAT,  leftmost(ShenandoahFreeSetPartitionId::Mutator));
 635   assert (leftmost(ShenandoahFreeSetPartitionId::Mutator) == _max
 636           || partition_id_matches(rightmost(ShenandoahFreeSetPartitionId::Mutator), ShenandoahFreeSetPartitionId::Mutator),
 637           "rightmost region should be free: " SSIZE_FORMAT, rightmost(ShenandoahFreeSetPartitionId::Mutator));
 638 
 639   // If the Mutator partition is empty, leftmosts will both equal _max and rightmosts will both equal -1.
 640   // Likewise for the empty-region bounds.
 641   idx_t beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
 642   idx_t end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
 643   assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Mutator),
 644           "free regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 645           beg_off, leftmost(ShenandoahFreeSetPartitionId::Mutator));
 646   assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Mutator),
 647           "free regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 648           end_off, rightmost(ShenandoahFreeSetPartitionId::Mutator));
 649 
 650   beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
 651   end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Mutator)];
 652   assert (beg_off >= leftmost_empty(ShenandoahFreeSetPartitionId::Mutator),
 653           "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 654           beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Mutator));
 655   assert (end_off <= rightmost_empty(ShenandoahFreeSetPartitionId::Mutator),
 656           "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 657           end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Mutator));
 658 
 659   // Performance invariants. Failing these would not break the free partition, but performance would suffer.
 660   assert (leftmost(ShenandoahFreeSetPartitionId::Collector) <= _max, "leftmost in bounds: "  SSIZE_FORMAT " <= " SSIZE_FORMAT,
 661           leftmost(ShenandoahFreeSetPartitionId::Collector),  _max);
 662   assert (rightmost(ShenandoahFreeSetPartitionId::Collector) < _max, "rightmost in bounds: "  SSIZE_FORMAT " < " SSIZE_FORMAT,
 663           rightmost(ShenandoahFreeSetPartitionId::Collector),  _max);
 664 
 665   assert (leftmost(ShenandoahFreeSetPartitionId::Collector) == _max
 666           || partition_id_matches(leftmost(ShenandoahFreeSetPartitionId::Collector), ShenandoahFreeSetPartitionId::Collector),
 667           "leftmost region should be free: " SSIZE_FORMAT,  leftmost(ShenandoahFreeSetPartitionId::Collector));
 668   assert (leftmost(ShenandoahFreeSetPartitionId::Collector) == _max
 669           || partition_id_matches(rightmost(ShenandoahFreeSetPartitionId::Collector), ShenandoahFreeSetPartitionId::Collector),
 670           "rightmost region should be free: " SSIZE_FORMAT, rightmost(ShenandoahFreeSetPartitionId::Collector));
 671 
 672   // If the Collector partition is empty, leftmosts will both equal _max and rightmosts will both equal -1.
 673   // Likewise for the empty-region bounds.
 674   beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
 675   end_off = rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
 676   assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::Collector),
 677           "free regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 678           beg_off, leftmost(ShenandoahFreeSetPartitionId::Collector));
 679   assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::Collector),
 680           "free regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 681           end_off, rightmost(ShenandoahFreeSetPartitionId::Collector));
 682 
 683   beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::Collector)];
 684   end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::Collector)];
 685   assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
 686           "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 687           beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::Collector));
 688   assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::Collector)],
 689           "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 690           end_off, rightmost_empty(ShenandoahFreeSetPartitionId::Collector));
 691 
 692   // Performance invariants. Failing these would not break the free partition, but performance would suffer.
 693   assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) <= _max, "leftmost in bounds: "  SSIZE_FORMAT " <= " SSIZE_FORMAT,
 694           leftmost(ShenandoahFreeSetPartitionId::OldCollector),  _max);
 695   assert (rightmost(ShenandoahFreeSetPartitionId::OldCollector) < _max, "rightmost in bounds: "  SSIZE_FORMAT " < " SSIZE_FORMAT,
 696           rightmost(ShenandoahFreeSetPartitionId::OldCollector),  _max);
 697 
 698   assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) == _max
 699           || partition_id_matches(leftmost(ShenandoahFreeSetPartitionId::OldCollector),
 700                                   ShenandoahFreeSetPartitionId::OldCollector),
 701           "leftmost region should be free: " SSIZE_FORMAT,  leftmost(ShenandoahFreeSetPartitionId::OldCollector));
 702   assert (leftmost(ShenandoahFreeSetPartitionId::OldCollector) == _max
 703           || partition_id_matches(rightmost(ShenandoahFreeSetPartitionId::OldCollector),
 704                                   ShenandoahFreeSetPartitionId::OldCollector),
 705           "rightmost region should be free: " SSIZE_FORMAT, rightmost(ShenandoahFreeSetPartitionId::OldCollector));
 706 
 707   // If the OldCollector partition is empty, leftmosts will both equal _max and rightmosts will both equal -1.
 708   // Likewise for the empty-region bounds.
 709   beg_off = leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
 710   end_off = rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
 711   assert (beg_off >= leftmost(ShenandoahFreeSetPartitionId::OldCollector),
 712           "free regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 713           beg_off, leftmost(ShenandoahFreeSetPartitionId::OldCollector));
 714   assert (end_off <= rightmost(ShenandoahFreeSetPartitionId::OldCollector),
 715           "free regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 716           end_off, rightmost(ShenandoahFreeSetPartitionId::OldCollector));
 717 
 718   beg_off = empty_leftmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
 719   end_off = empty_rightmosts[int(ShenandoahFreeSetPartitionId::OldCollector)];
 720   assert (beg_off >= _leftmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
 721           "free empty regions before the leftmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 722           beg_off, leftmost_empty(ShenandoahFreeSetPartitionId::OldCollector));
 723   assert (end_off <= _rightmosts_empty[int(ShenandoahFreeSetPartitionId::OldCollector)],
 724           "free empty regions past the rightmost: " SSIZE_FORMAT ", bound " SSIZE_FORMAT,
 725           end_off, rightmost_empty(ShenandoahFreeSetPartitionId::OldCollector));
 726 }
 727 #endif
 728 
 729 ShenandoahFreeSet::ShenandoahFreeSet(ShenandoahHeap* heap, size_t max_regions) :
 730   _heap(heap),
 731   _partitions(max_regions, this),
 732   _alloc_bias_weight(0)
 733 {
 734   clear_internal();
 735 }
 736 
 737 void ShenandoahFreeSet::add_promoted_in_place_region_to_old_collector(ShenandoahHeapRegion* region) {
 738   shenandoah_assert_heaplocked();
 739   size_t plab_min_size_in_bytes = ShenandoahGenerationalHeap::heap()->plab_min_size() * HeapWordSize;
 740   size_t idx = region->index();
 741   size_t capacity = alloc_capacity(region);
 742   assert(_partitions.membership(idx) == ShenandoahFreeSetPartitionId::NotFree,
 743          "Regions promoted in place should have been excluded from Mutator partition");
 744   if (capacity >= plab_min_size_in_bytes) {
 745     _partitions.make_free(idx, ShenandoahFreeSetPartitionId::OldCollector, capacity);
 746     _heap->old_generation()->augment_promoted_reserve(capacity);
 747   }
 748 }
 749 
 750 HeapWord* ShenandoahFreeSet::allocate_from_partition_with_affiliation(ShenandoahAffiliation affiliation,
 751                                                                       ShenandoahAllocRequest& req, bool& in_new_region) {
 752 
 753   shenandoah_assert_heaplocked();
 754   ShenandoahFreeSetPartitionId which_partition = req.is_old()? ShenandoahFreeSetPartitionId::OldCollector: ShenandoahFreeSetPartitionId::Collector;
 755   if (_partitions.alloc_from_left_bias(which_partition)) {
 756     ShenandoahLeftRightIterator iterator(&_partitions, which_partition, affiliation == ShenandoahAffiliation::FREE);
 757     return allocate_with_affiliation(iterator, affiliation, req, in_new_region);
 758   } else {
 759     ShenandoahRightLeftIterator iterator(&_partitions, which_partition, affiliation == ShenandoahAffiliation::FREE);
 760     return allocate_with_affiliation(iterator, affiliation, req, in_new_region);
 761   }
 762 }
 763 
 764 template<typename Iter>
 765 HeapWord* ShenandoahFreeSet::allocate_with_affiliation(Iter& iterator, ShenandoahAffiliation affiliation, ShenandoahAllocRequest& req, bool& in_new_region) {
 766   for (idx_t idx = iterator.current(); iterator.has_next(); idx = iterator.next()) {
 767     ShenandoahHeapRegion* r = _heap->get_region(idx);
 768     if (r->affiliation() == affiliation) {
 769       HeapWord* result = try_allocate_in(r, req, in_new_region);
 770       if (result != nullptr) {
 771         return result;
 772       }
 773     }
 774   }
 775   log_debug(gc, free)("Could not allocate collector region with affiliation: %s for request " PTR_FORMAT,
 776                       shenandoah_affiliation_name(affiliation), p2i(&req));
 777   return nullptr;
 778 }
 779 
 780 HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool& in_new_region) {
 781   shenandoah_assert_heaplocked();
 782 
 783   // Scan the bitmap looking for a first fit.
 784   //
 785   // Leftmost and rightmost bounds provide enough caching to walk the bitmap efficiently. Normally,
 786   // we would find a region to allocate in right away.
 787   //
 788   // Allocations are biased: GC allocations are taken from the high end of the heap.  Regular (and TLAB)
 789   // mutator allocations are taken from the middle of the heap, below the memory reserved for Collector.
 790   // Humongous mutator allocations are taken from the bottom of the heap.
 791   //
 792   // Free set maintains mutator and collector partitions.  Normally, each allocates only from its partition,
 793   // except in special cases when the collector steals regions from the mutator partition.
 794 
 795   // Overwrite with non-zero (non-NULL) values only if necessary for allocation bookkeeping.
 796 
 797   switch (req.type()) {
 798     case ShenandoahAllocRequest::_alloc_tlab:
 799     case ShenandoahAllocRequest::_alloc_shared:
 800       return allocate_for_mutator(req, in_new_region);
 801     case ShenandoahAllocRequest::_alloc_gclab:
 802     case ShenandoahAllocRequest::_alloc_plab:
 803     case ShenandoahAllocRequest::_alloc_shared_gc:
 804       return allocate_for_collector(req, in_new_region);
 805     default:
 806       ShouldNotReachHere();
 807   }
 808   return nullptr;
 809 }
 810 
 811 HeapWord* ShenandoahFreeSet::allocate_for_mutator(ShenandoahAllocRequest &req, bool &in_new_region) {
 812   update_allocation_bias();
 813 
 814   if (_partitions.is_empty(ShenandoahFreeSetPartitionId::Mutator)) {
 815     // There is no recovery. Mutator does not touch collector view at all.
 816     return nullptr;
 817   }
 818 
 819   // Try to allocate in the mutator view
 820   if (_partitions.alloc_from_left_bias(ShenandoahFreeSetPartitionId::Mutator)) {
 821     // Allocate from low to high memory.  This keeps the range of fully empty regions more tightly packed.
 822     // Note that the most recently allocated regions tend not to be evacuated in a given GC cycle.  So this
 823     // tends to accumulate "fragmented" uncollected regions in high memory.
 824     ShenandoahLeftRightIterator iterator(&_partitions, ShenandoahFreeSetPartitionId::Mutator);
 825     return allocate_from_regions(iterator, req, in_new_region);
 826   }
 827 
 828   // Allocate from high to low memory. This preserves low memory for humongous allocations.
 829   ShenandoahRightLeftIterator iterator(&_partitions, ShenandoahFreeSetPartitionId::Mutator);
 830   return allocate_from_regions(iterator, req, in_new_region);
 831 }
 832 
 833 void ShenandoahFreeSet::update_allocation_bias() {
 834   if (_alloc_bias_weight-- <= 0) {
 835     // We have observed that regions not collected in previous GC cycle tend to congregate at one end or the other
 836     // of the heap.  Typically, these are the more recently engaged regions and the objects in these regions have not
 837     // yet had a chance to die (and/or are treated as floating garbage).  If we use the same allocation bias on each
 838     // GC pass, these "most recently" engaged regions for GC pass N will also be the "most recently" engaged regions
 839     // for GC pass N+1, and the relatively large amount of live data and/or floating garbage introduced
 840     // during the most recent GC pass may once again prevent the region from being collected.  We have found that
 841     // alternating the allocation behavior between GC passes improves evacuation performance by 3-7% on certain
 842     // benchmarks.  In the best case, this has the effect of consuming these partially consumed regions before
 843     // the start of the next mark cycle so all of their garbage can be efficiently reclaimed.
 844     //
 845     // First, finish consuming regions that are already partially consumed so as to more tightly limit ranges of
 846     // available regions.  Other potential benefits:
 847     //  1. Eventual collection set has fewer regions because we have packed newly allocated objects into fewer regions
 848     //  2. We preserve the "empty" regions longer into the GC cycle, reducing likelihood of allocation failures
 849     //     late in the GC cycle.
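         //
         // Worked example of this decision (illustrative numbers only): if leftmost == 10, leftmost_empty == 50,
         // rightmost_empty == 180, and rightmost == 200, then 40 non-empty regions sit on the left flank and 20
         // on the right.  Since 20 < 40, the bias is set from left to right, so allocation first finishes
         // consuming the larger population of partially used regions in low memory.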
 850     idx_t non_empty_on_left = (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator)
 851                                - _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator));
 852     idx_t non_empty_on_right = (_partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator)
 853                                 - _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator));
 854     _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::Mutator, (non_empty_on_right < non_empty_on_left));
 855     _alloc_bias_weight = INITIAL_ALLOC_BIAS_WEIGHT;
 856   }
 857 }
 858 
 859 template<typename Iter>
 860 HeapWord* ShenandoahFreeSet::allocate_from_regions(Iter& iterator, ShenandoahAllocRequest &req, bool &in_new_region) {
 861   for (idx_t idx = iterator.current(); iterator.has_next(); idx = iterator.next()) {
 862     ShenandoahHeapRegion* r = _heap->get_region(idx);
 863     size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab) ? req.min_size() : req.size();
 864     if (alloc_capacity(r) >= min_size) {
 865       HeapWord* result = try_allocate_in(r, req, in_new_region);
 866       if (result != nullptr) {
 867         return result;
 868       }
 869     }
 870   }
 871   return nullptr;
 872 }
 873 
 874 HeapWord* ShenandoahFreeSet::allocate_for_collector(ShenandoahAllocRequest &req, bool &in_new_region) {
 875   // Fast-path: try to allocate in the collector view first
 876   HeapWord* result;
 877   result = allocate_from_partition_with_affiliation(req.affiliation(), req, in_new_region);
 878   if (result != nullptr) {
 879     return result;
 880   }
 881 
 882   bool allow_new_region = can_allocate_in_new_region(req);
 883   if (allow_new_region) {
 884     // Try a free region that is dedicated to GC allocations.
 885     result = allocate_from_partition_with_affiliation(ShenandoahAffiliation::FREE, req, in_new_region);
 886     if (result != nullptr) {
 887       return result;
 888     }
 889   }
 890 
 891   // No dice. Can we borrow space from mutator view?
 892   if (!ShenandoahEvacReserveOverflow) {
 893     return nullptr;
 894   }
 895 
 896   if (!allow_new_region && req.is_old() && (_heap->young_generation()->free_unaffiliated_regions() > 0)) {
 897     // This allows us to flip a mutator region to old_collector
 898     allow_new_region = true;
 899   }
 900 
 901   // We should expand old-gen if this can prevent an old-gen evacuation failure.  We don't care so much about
 902   // promotion failures since they can be mitigated in a subsequent GC pass.  Would be nice to know if this
 903   // allocation request is for evacuation or promotion.  Individual threads limit their use of PLAB memory for
 904   // promotions, so we already have an assurance that any additional memory set aside for old-gen will be used
 905   // only for old-gen evacuations.
 906   if (allow_new_region) {
 907     // Try to steal an empty region from the mutator view.
 908     result = try_allocate_from_mutator(req, in_new_region);
 909   }
 910 
 911   // This is it. Do not try to mix mutator and GC allocations, because adjusting the region's update watermark (UWM)
 912   // due to GC allocations would expose unparsable mutator allocations.
 913   return result;
 914 }
 915 
 916 bool ShenandoahFreeSet::can_allocate_in_new_region(const ShenandoahAllocRequest& req) {
 917   if (!_heap->mode()->is_generational()) {
 918     return true;
 919   }
 920 
 921   assert(req.is_old() || req.is_young(), "Should request affiliation");
 922   return (req.is_old() && _heap->old_generation()->free_unaffiliated_regions() > 0)
 923          || (req.is_young() && _heap->young_generation()->free_unaffiliated_regions() > 0);
 924 }
 925 
 926 HeapWord* ShenandoahFreeSet::try_allocate_from_mutator(ShenandoahAllocRequest& req, bool& in_new_region) {
 927   // The collector prefers to keep longer lived regions toward the right side of the heap, so it always
 928   // searches for regions from right to left here.
 929   ShenandoahRightLeftIterator iterator(&_partitions, ShenandoahFreeSetPartitionId::Mutator, true);
 930   for (idx_t idx = iterator.current(); iterator.has_next(); idx = iterator.next()) {
 931     ShenandoahHeapRegion* r = _heap->get_region(idx);
 932     if (can_allocate_from(r)) {
 933       if (req.is_old()) {
 934         if (!flip_to_old_gc(r)) {
 935           continue;
 936         }
 937       } else {
 938         flip_to_gc(r);
 939       }
 940       // Region r is entirely empty.  If try_allocate_in fails on region r, something else is really wrong.
 941       // Don't bother to retry with other regions.
 942       log_debug(gc, free)("Flipped region " SIZE_FORMAT " to gc for request: " PTR_FORMAT, idx, p2i(&req));
 943       return try_allocate_in(r, req, in_new_region);
 944     }
 945   }
 946 
 947   return nullptr;
 948 }
 949 
 950 // This work method takes an argument corresponding to the number of bytes
 951 // free in a region, and returns the largest amount in heapwords that can be allocated
 952 // such that both of the following conditions are satisfied:
 953 //
 954 // 1. it is a multiple of card size
 955 // 2. any remaining shard may be filled with a filler object
 956 //
 957 // The idea is that the allocation starts and ends at card boundaries. Because
 958 // a region's end is card-aligned, the remainder shard that must be filled is
 959 // at the start of the free space.
 960 //
 961 // This is merely a helper method to use for the purpose of such a calculation.
 962 size_t ShenandoahFreeSet::get_usable_free_words(size_t free_bytes) const {
 963   // e.g. card_size is 512, card_shift is 9, min_fill_size() is 8
 964   //      free is 514
 965   //      usable_free is 512, which is decreased to 0
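       //      Two further cases, assuming the same 512-byte cards and 8-byte heap words:
       //      free is 1600: usable_free is 1536; the 64-byte remainder can hold a filler object,
       //                    so 1536 / 8 == 192 words are returned
       //      free is 1540: usable_free is 1536, but the 4-byte remainder cannot, so one more card
       //                    is surrendered: (1536 - 512) / 8 == 128 words are returned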
 966   size_t usable_free = (free_bytes / CardTable::card_size()) << CardTable::card_shift();
 967   assert(usable_free <= free_bytes, "Sanity check");
 968   if ((free_bytes != usable_free) && (free_bytes - usable_free < ShenandoahHeap::min_fill_size() * HeapWordSize)) {
 969     // After aligning to card multiples, the remainder would be smaller than
 970     // the minimum filler object, so we'll need to take away another card's
 971     // worth to construct a filler object.
 972     if (usable_free >= CardTable::card_size()) {
 973       usable_free -= CardTable::card_size();
 974     } else {
 975       assert(usable_free == 0, "usable_free is a multiple of card_size and card_size > min_fill_size");
 976     }
 977   }
 978 
 979   return usable_free / HeapWordSize;
 980 }
 981 
 982 // Given a size argument, which is a multiple of card size, a request struct
 983 // for a PLAB, and an old region, return a pointer to the allocated space for
 984 // a PLAB which is card-aligned and where any remaining shard in the region
 985 // has been suitably filled by a filler object.
 986 // It is assumed (and assertion-checked) that such an allocation is always possible.
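     // For example, with 512-byte cards and 8-byte heap words, card_size_in_words() is 64, so the size
     // passed in has already been rounded down to a multiple of 64 words and the returned address is
     // likewise card-aligned.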
 987 HeapWord* ShenandoahFreeSet::allocate_aligned_plab(size_t size, ShenandoahAllocRequest& req, ShenandoahHeapRegion* r) {
 988   assert(_heap->mode()->is_generational(), "PLABs are only for generational mode");
 989   assert(r->is_old(), "All PLABs reside in old-gen");
 990   assert(!req.is_mutator_alloc(), "PLABs should not be allocated by mutators.");
 991   assert(is_aligned(size, CardTable::card_size_in_words()), "Align by design");
 992 
 993   HeapWord* result = r->allocate_aligned(size, req, CardTable::card_size());
 994   assert(result != nullptr, "Allocation cannot fail");
 995   assert(r->top() <= r->end(), "Allocation cannot span end of region");
 996   assert(is_aligned(result, CardTable::card_size_in_words()), "Align by design");
 997   return result;
 998 }
 999 
1000 HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
1001   assert (has_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->index());
1002   if (_heap->is_concurrent_weak_root_in_progress() && r->is_trash()) {
1003     // We cannot use this region for allocation when weak roots are in progress because the collector may need
1004     // to reference unmarked oops during concurrent class unloading. The collector also needs accurate marking
1005     // information to determine which weak handles need to be null'd out. If the region is recycled before weak
1006     // roots processing has finished, weak root processing may fail to null out a handle into a trashed region.
1007     // This turns the handle into a dangling pointer and will crash or corrupt the heap.
1008     return nullptr;
1009   }
1010   HeapWord* result = nullptr;
1011   r->try_recycle_under_lock();
1012   in_new_region = r->is_empty();
1013 
1014   if (in_new_region) {
1015     log_debug(gc, free)("Using new region (" SIZE_FORMAT ") for %s (" PTR_FORMAT ").",
1016                         r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req));
1017     assert(!r->is_affiliated(), "New region " SIZE_FORMAT " should be unaffiliated", r->index());
1018     r->set_affiliation(req.affiliation());
1019     if (r->is_old()) {
1020       // Any OLD region allocated during concurrent coalesce-and-fill does not need to be coalesced and filled because
1021       // all objects allocated within this region are above TAMS (and thus are implicitly marked).  In case this is an
1022       // OLD region and concurrent preparation for mixed evacuations visits this region before the start of the next
1023       // old-gen concurrent mark (i.e. this region is allocated following the start of old-gen concurrent mark but before
1024       // concurrent preparations for mixed evacuations are completed), we mark this region as not requiring any
1025       // coalesce-and-fill processing.
1026       r->end_preemptible_coalesce_and_fill();
1027       _heap->old_generation()->clear_cards_for(r);
1028     }
1029     _heap->generation_for(r->affiliation())->increment_affiliated_region_count();
1030 
1031 #ifdef ASSERT
1032     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
1033     assert(ctx->top_at_mark_start(r) == r->bottom(), "Newly established allocation region starts with TAMS equal to bottom");
1034     assert(ctx->is_bitmap_range_within_region_clear(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");
1035 #endif
1038   } else {
1039     assert(r->is_affiliated(), "Region " SIZE_FORMAT " that is not new should be affiliated", r->index());
1040     if (r->affiliation() != req.affiliation()) {
1041       assert(_heap->mode()->is_generational(), "Request for %s from %s region should only happen in generational mode.",
1042              req.affiliation_name(), r->affiliation_name());
1043       return nullptr;
1044     }
1045   }
1046 
1047   // req.size() is in words, r->free() is in bytes.
1048   if (req.is_lab_alloc()) {
1049     size_t adjusted_size = req.size();
1050     size_t free = r->free();    // free represents bytes available within region r
1051     if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
1052       // This is a PLAB allocation
1053       assert(_heap->mode()->is_generational(), "PLABs are only for generational mode");
1054       assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, r->index()),
1055              "PLABS must be allocated in old_collector_free regions");
1056 
1057       // Need to assure that PLABs are aligned on a multiple of the card size
1058       // Convert free from unaligned bytes to an aligned number of words
1059       size_t usable_free = get_usable_free_words(free);
1060       if (adjusted_size > usable_free) {
1061         adjusted_size = usable_free;
1062       }
1063       adjusted_size = align_down(adjusted_size, CardTable::card_size_in_words());
1064       if (adjusted_size >= req.min_size()) {
1065         result = allocate_aligned_plab(adjusted_size, req, r);
1066         assert(result != nullptr, "allocate must succeed");
1067         req.set_actual_size(adjusted_size);
1068       } else {
1069         // Otherwise, leave result == nullptr because the adjusted size is smaller than min size.
1070         log_trace(gc, free)("Failed to shrink PLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT
1071                             " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size());
1072       }
1073     } else {
1074       // This is a GCLAB or a TLAB allocation
1075       // Convert free from unaligned bytes to an aligned number of words
1076       free = align_down(free >> LogHeapWordSize, MinObjAlignment);
1077       if (adjusted_size > free) {
1078         adjusted_size = free;
1079       }
1080       if (adjusted_size >= req.min_size()) {
1081         result = r->allocate(adjusted_size, req);
1082         assert (result != nullptr, "Allocation must succeed: free " SIZE_FORMAT ", actual " SIZE_FORMAT, free, adjusted_size);
1083         req.set_actual_size(adjusted_size);
1084       } else {
1085         log_trace(gc, free)("Failed to shrink TLAB or GCLAB request (" SIZE_FORMAT ") in region " SIZE_FORMAT " to " SIZE_FORMAT
1086                             " because min_size() is " SIZE_FORMAT, req.size(), r->index(), adjusted_size, req.min_size());
1087       }
1088     }
1089   } else {
1090     size_t size = req.size();
1091     result = r->allocate(size, req);
1092     if (result != nullptr) {
1093       // Record actual allocation size
1094       req.set_actual_size(size);
1095     }
1096   }
1097 
1098   if (result != nullptr) {
1099     // Allocation successful, bump stats:
1100     if (req.is_mutator_alloc()) {
1101       assert(req.is_young(), "Mutator allocations always come from young generation.");
1102       _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, req.actual_size() * HeapWordSize);
1103     } else {
1104       assert(req.is_gc_alloc(), "Should be gc_alloc since req wasn't mutator alloc");
1105 
1106       // For GC allocations, we advance update_watermark because the objects relocated into this memory during
1107       // evacuation are not updated during evacuation.  For both young and old regions r, it is essential that all
1108       // PLABs be made parsable at the end of evacuation.  This is enabled by retiring all plabs at end of evacuation.
1109       r->set_update_watermark(r->top());
1110       if (r->is_old()) {
1111         _partitions.increase_used(ShenandoahFreeSetPartitionId::OldCollector, req.actual_size() * HeapWordSize);
1112         assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "old-gen allocations use PLAB or shared allocation");
1113         // For PLABs, we'll sort out the difference between evacuation and promotion usage when we retire the PLAB
1114       } else {
1115         _partitions.increase_used(ShenandoahFreeSetPartitionId::Collector, req.actual_size() * HeapWordSize);
1116       }
1117     }
1118   }
1119 
1120   static const size_t min_capacity = (size_t) (ShenandoahHeapRegion::region_size_bytes() * (1.0 - 1.0 / ShenandoahEvacWaste));
1121   size_t ac = alloc_capacity(r);
1122 
1123   if (((result == nullptr) && (ac < min_capacity)) || (ac < PLAB::min_size() * HeapWordSize)) {
1124     // Regardless of whether this allocation succeeded, if the remaining memory is less than PLAB::min_size(), retire this region.
1125     // Note that retire_from_partition() increases used to account for waste.
1126 
1127     // Also, if this allocation request failed and the memory consumed within this region times ShenandoahEvacWaste
1128     // exceeds the region size, then retire the region so that subsequent searches can find available memory more quickly.
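         // For example (illustrative values): with 4 MB regions and ShenandoahEvacWaste == 1.2, min_capacity
         // is about 4 MB * (1 - 1/1.2) ~= 683 KB, so a region whose remaining free memory has fallen below
         // that threshold is retired after a failed allocation rather than being rescanned by later searches.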
1129 
1130     size_t idx = r->index();
1131     ShenandoahFreeSetPartitionId orig_partition;
1132     if (req.is_mutator_alloc()) {
1133       orig_partition = ShenandoahFreeSetPartitionId::Mutator;
1134     } else if (req.type() == ShenandoahAllocRequest::_alloc_gclab) {
1135       orig_partition = ShenandoahFreeSetPartitionId::Collector;
1136     } else if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
1137       orig_partition = ShenandoahFreeSetPartitionId::OldCollector;
1138     } else {
1139       assert(req.type() == ShenandoahAllocRequest::_alloc_shared_gc, "Unexpected allocation type");
1140       if (req.is_old()) {
1141         orig_partition = ShenandoahFreeSetPartitionId::OldCollector;
1142       } else {
1143         orig_partition = ShenandoahFreeSetPartitionId::Collector;
1144       }
1145     }
1146     _partitions.retire_from_partition(orig_partition, idx, r->used());
1147     _partitions.assert_bounds();
1148   }
1149   return result;
1150 }
1151 
1152 HeapWord* ShenandoahFreeSet::allocate_contiguous(ShenandoahAllocRequest& req) {
1153   assert(req.is_mutator_alloc(), "All humongous allocations are performed by mutator");
1154   shenandoah_assert_heaplocked();
1155 
1156   size_t words_size = req.size();
1157   idx_t num = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);
1158 
1159   assert(req.is_young(), "Humongous regions always allocated in YOUNG");
1160   ShenandoahGeneration* generation = _heap->generation_for(req.affiliation());
1161 
1162   // Check if there are enough regions left to satisfy allocation.
1163   if (num > (idx_t) _partitions.count(ShenandoahFreeSetPartitionId::Mutator)) {
1164     return nullptr;
1165   }
1166 
1167   idx_t start_range = _partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Mutator);
1168   idx_t end_range = _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Mutator) + 1;
1169   idx_t last_possible_start = end_range - num;
1170 
1171   // Find the continuous interval of $num regions, starting from $beg and ending in $end,
1172   // inclusive. Contiguous allocations are biased to the beginning.
1173   idx_t beg = _partitions.find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId::Mutator,
1174                                                                           start_range, num);
1175   if (beg > last_possible_start) {
1176     // Hit the end, goodbye
1177     return nullptr;
1178   }
1179   idx_t end = beg;
1180 
1181   while (true) {
1182     // We've confirmed num contiguous regions belonging to Mutator partition, so no need to confirm membership.
1183     // If region is not completely free, the current [beg; end] is useless, and we may fast-forward.  If we can extend
1184     // the existing range, we can exploit that certain regions are already known to be in the Mutator free set.
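         // For example (illustrative membership): with num == 3 and Mutator members {10, 11, 12, 13, 14},
         // if region 11 is only partially free, the window anchored at beg == 10 fails when end reaches 11;
         // the window then slides past 11, checking that each newly covered region is still in the free set,
         // and the search resumes at beg == 12, succeeding if regions 12..14 are entirely empty.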
1185     while (!can_allocate_from(_heap->get_region(end))) {
1186       // region[end] is not empty, so we restart our search after region[end]
1187       idx_t slide_delta = end + 1 - beg;
1188       if (beg + slide_delta > last_possible_start) {
1189         // no room to slide
1190         return nullptr;
1191       }
1192       for (idx_t span_end = beg + num; slide_delta > 0; slide_delta--) {
1193         if (!_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, span_end)) {
1194           beg = _partitions.find_index_of_next_available_cluster_of_regions(ShenandoahFreeSetPartitionId::Mutator,
1195                                                                             span_end + 1, num);
1196           break;
1197         } else {
1198           beg++;
1199           span_end++;
1200         }
1201       }
1202       // Here, either beg identifies a range of num regions all of which are in the Mutator free set, or beg > last_possible_start
1203       if (beg > last_possible_start) {
1204         // Hit the end, goodbye
1205         return nullptr;
1206       }
1207       end = beg;
1208     }
1209 
1210     if ((end - beg + 1) == num) {
1211       // found the match
1212       break;
1213     }
1214 
1215     end++;
1216   }
1217 
1218   size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
1219   // Initialize regions:
1220   for (idx_t i = beg; i <= end; i++) {
1221     ShenandoahHeapRegion* r = _heap->get_region(i);
1222     r->try_recycle_under_lock();
1223 
1224     assert(i == beg || _heap->get_region(i - 1)->index() + 1 == r->index(), "Should be contiguous");
1225     assert(r->is_empty(), "Should be empty");
1226 
1227     if (i == beg) {
1228       r->make_humongous_start();
1229     } else {
1230       r->make_humongous_cont();
1231     }
1232 
1233     // Trailing region may be non-full, record the remainder there
1234     size_t used_words;
1235     if ((i == end) && (remainder != 0)) {
1236       used_words = remainder;
1237     } else {
1238       used_words = ShenandoahHeapRegion::region_size_words();
1239     }
1240 
1241     r->set_affiliation(req.affiliation());
1242     r->set_update_watermark(r->bottom());
1243     r->set_top(r->bottom() + used_words);
1244   }
1245   generation->increase_affiliated_region_count(num);
1246   if (remainder != 0) {
1247     // Record this remainder as allocation waste
1248     _heap->notify_mutator_alloc_words(ShenandoahHeapRegion::region_size_words() - remainder, true);
1249   }
1250 
1251   // retire_range_from_partition() will adjust bounds on Mutator free set if appropriate
1252   _partitions.retire_range_from_partition(ShenandoahFreeSetPartitionId::Mutator, beg, end);
1253 
1254   size_t total_humongous_size = ShenandoahHeapRegion::region_size_bytes() * num;
1255   _partitions.increase_used(ShenandoahFreeSetPartitionId::Mutator, total_humongous_size);
1256   _partitions.assert_bounds();
1257   req.set_actual_size(words_size);
1258   if (remainder != 0) {
1259     req.set_waste(ShenandoahHeapRegion::region_size_words() - remainder);
1260   }
1261   return _heap->get_region(beg)->bottom();
1262 }
1263 
1264 class ShenandoahRecycleTrashedRegionClosure final : public ShenandoahHeapRegionClosure {
1265 public:
1266   ShenandoahRecycleTrashedRegionClosure(): ShenandoahHeapRegionClosure() {}
1267 
1268   void heap_region_do(ShenandoahHeapRegion* r) {
1269     r->try_recycle();
1270   }
1271 
1272   bool is_thread_safe() {
1273     return true;
1274   }
1275 };
1276 
1277 void ShenandoahFreeSet::recycle_trash() {
1278   // The heap lock is not reentrant; check that we do not already hold it
1279   shenandoah_assert_not_heaplocked();
1280 
1281   ShenandoahHeap* heap = ShenandoahHeap::heap();
1282   heap->assert_gc_workers(heap->workers()->active_workers());
1283 
1284   ShenandoahRecycleTrashedRegionClosure closure;
1285   heap->parallel_heap_region_iterate(&closure);
1286 }
1287 
1288 bool ShenandoahFreeSet::flip_to_old_gc(ShenandoahHeapRegion* r) {
1289   const size_t idx = r->index();
1290 
1291   assert(_partitions.partition_id_matches(idx, ShenandoahFreeSetPartitionId::Mutator), "Should be in mutator view");
1292   assert(can_allocate_from(r), "Should not be allocated");
1293 
1294   ShenandoahGenerationalHeap* gen_heap = ShenandoahGenerationalHeap::heap();
1295   const size_t region_capacity = alloc_capacity(r);
1296 
1297   bool transferred = gen_heap->generation_sizer()->transfer_to_old(1);
1298   if (transferred) {
1299     _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
1300                                                  ShenandoahFreeSetPartitionId::OldCollector, region_capacity);
1301     _partitions.assert_bounds();
1302     _heap->old_generation()->augment_evacuation_reserve(region_capacity);
1303     return true;
1304   }
1305 
1306   if (_heap->young_generation()->free_unaffiliated_regions() == 0 && _heap->old_generation()->free_unaffiliated_regions() > 0) {
1307     // Old has free unaffiliated regions, but it couldn't use them for allocation (likely because they
1308     // are trash and weak root processing is in progress). In this scenario, we aren't really stealing from
1309     // the mutator (it has no unaffiliated regions to give up), but it does have a usable region in its
1310     // partition. What we want to do here is swap that usable region out of the mutator partition for one
1311     // from the old collector partition.
1312     // 1. Find a temporarily unusable trash region in the old collector partition
1313     ShenandoahRightLeftIterator iterator(&_partitions, ShenandoahFreeSetPartitionId::OldCollector, true);
1314     idx_t unusable_trash = -1;
1315     for (unusable_trash = iterator.current(); iterator.has_next(); unusable_trash = iterator.next()) {
1316       const ShenandoahHeapRegion* region = _heap->get_region(unusable_trash);
1317       if (region->is_trash() && _heap->is_concurrent_weak_root_in_progress()) {
1318         break;
1319       }
1320     }
1321 
1322     if (unusable_trash != -1) {
1323       const size_t unusable_capacity = alloc_capacity(unusable_trash);
1324       // 2. Move the (temporarily) unusable trash region we found to the mutator partition
1325       _partitions.move_from_partition_to_partition(unusable_trash,
1326                                                    ShenandoahFreeSetPartitionId::OldCollector,
1327                                                    ShenandoahFreeSetPartitionId::Mutator, unusable_capacity);
1328 
1329       // 3. Move this usable region from the mutator partition to the old collector partition
1330       _partitions.move_from_partition_to_partition(idx,
1331                                                    ShenandoahFreeSetPartitionId::Mutator,
1332                                                    ShenandoahFreeSetPartitionId::OldCollector, region_capacity);
1333 
1334       _partitions.assert_bounds();
1335 
1336       // 4. Do not adjust capacities for generations, we just swapped the regions that have already
1337       // been accounted for. However, we should adjust the evacuation reserves as those may have changed.
1338       shenandoah_assert_heaplocked();
1339       const size_t reserve = _heap->old_generation()->get_evacuation_reserve();
1340       _heap->old_generation()->set_evacuation_reserve(reserve - unusable_capacity + region_capacity);
1341       return true;
1342     }
1343   }
1344 
1345   // We can't flip this region to old: young has no free unaffiliated regions to transfer (the transfer failed) and no region swap was possible.
1346   return false;
1347 }
1348 
1349 void ShenandoahFreeSet::flip_to_gc(ShenandoahHeapRegion* r) {
1350   size_t idx = r->index();
1351 
1352   assert(_partitions.partition_id_matches(idx, ShenandoahFreeSetPartitionId::Mutator), "Should be in mutator view");
1353   assert(can_allocate_from(r), "Should not be allocated");
1354 
1355   size_t ac = alloc_capacity(r);
1356   _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
1357                                                ShenandoahFreeSetPartitionId::Collector, ac);
1358   _partitions.assert_bounds();
1359 
1360   // We do not ensure that the region is no longer trash, relying on try_allocate_in(), which always comes next,
1361   // to recycle trash before attempting to allocate anything in the region.
1362 }
1363 
1364 void ShenandoahFreeSet::clear() {
1365   shenandoah_assert_heaplocked();
1366   clear_internal();
1367 }
1368 
1369 void ShenandoahFreeSet::clear_internal() {
1370   _partitions.make_all_regions_unavailable();
1371 
1372   _alloc_bias_weight = 0;
1373   _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::Mutator, true);
1374   _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::Collector, false);
1375   _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::OldCollector, false);
1376 }
1377 
1378 void ShenandoahFreeSet::find_regions_with_alloc_capacity(size_t &young_cset_regions, size_t &old_cset_regions,
1379                                                          size_t &first_old_region, size_t &last_old_region,
1380                                                          size_t &old_region_count) {
1381   clear_internal();
1382 
1383   first_old_region = _heap->num_regions();
1384   last_old_region = 0;
1385   old_region_count = 0;
1386   old_cset_regions = 0;
1387   young_cset_regions = 0;
1388 
1389   size_t region_size_bytes = _partitions.region_size_bytes();
1390   size_t max_regions = _partitions.max_regions();
1391 
1392   size_t mutator_leftmost = max_regions;
1393   size_t mutator_rightmost = 0;
1394   size_t mutator_leftmost_empty = max_regions;
1395   size_t mutator_rightmost_empty = 0;
1396   size_t mutator_regions = 0;
1397   size_t mutator_used = 0;
1398 
1399   size_t old_collector_leftmost = max_regions;
1400   size_t old_collector_rightmost = 0;
1401   size_t old_collector_leftmost_empty = max_regions;
1402   size_t old_collector_rightmost_empty = 0;
1403   size_t old_collector_regions = 0;
1404   size_t old_collector_used = 0;
1405 
1406   size_t num_regions = _heap->num_regions();
1407   for (size_t idx = 0; idx < num_regions; idx++) {
1408     ShenandoahHeapRegion* region = _heap->get_region(idx);
1409     if (region->is_trash()) {
1410       // Trashed regions represent regions that had been in the collection set but have not yet been "cleaned up".
1411       // The cset regions are not "trashed" until we have finished update refs.
1412       if (region->is_old()) {
1413         old_cset_regions++;
1414       } else {
1415         assert(region->is_young(), "Trashed region should be old or young");
1416         young_cset_regions++;
1417       }
1418     } else if (region->is_old()) {
1419       // count both humongous and regular regions, but don't count trash (cset) regions.
1420       old_region_count++;
1421       if (first_old_region > idx) {
1422         first_old_region = idx;
1423       }
1424       last_old_region = idx;
1425     }
1426     if (region->is_alloc_allowed() || region->is_trash()) {
1427       assert(!region->is_cset(), "Shouldn't be adding cset regions to the free set");
1428 
1429       // Do not add regions that would almost surely fail allocation
1430       size_t ac = alloc_capacity(region);
1431       if (ac > PLAB::min_size() * HeapWordSize) {
1432         if (region->is_trash() || !region->is_old()) {
1433           // Both young and old collected regions (trashed) are placed into the Mutator set
1434           _partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::Mutator);
1435           if (idx < mutator_leftmost) {
1436             mutator_leftmost = idx;
1437           }
1438           if (idx > mutator_rightmost) {
1439             mutator_rightmost = idx;
1440           }
1441           if (ac == region_size_bytes) {
1442             if (idx < mutator_leftmost_empty) {
1443               mutator_leftmost_empty = idx;
1444             }
1445             if (idx > mutator_rightmost_empty) {
1446               mutator_rightmost_empty = idx;
1447             }
1448           }
1449           mutator_regions++;
1450           mutator_used += (region_size_bytes - ac);
1451         } else {
1452           // !region->is_trash() && region is_old()
1453           _partitions.raw_assign_membership(idx, ShenandoahFreeSetPartitionId::OldCollector);
1454           if (idx < old_collector_leftmost) {
1455             old_collector_leftmost = idx;
1456           }
1457           if (idx > old_collector_rightmost) {
1458             old_collector_rightmost = idx;
1459           }
1460           if (ac == region_size_bytes) {
1461             if (idx < old_collector_leftmost_empty) {
1462               old_collector_leftmost_empty = idx;
1463             }
1464             if (idx > old_collector_rightmost_empty) {
1465               old_collector_rightmost_empty = idx;
1466             }
1467           }
1468           old_collector_regions++;
1469           old_collector_used += (region_size_bytes - ac);
1470         }
1471       }
1472     }
1473   }
1474   log_debug(gc, free)("  At end of prep_to_rebuild, mutator_leftmost: " SIZE_FORMAT
1475                       ", mutator_rightmost: " SIZE_FORMAT
1476                       ", mutator_leftmost_empty: " SIZE_FORMAT
1477                       ", mutator_rightmost_empty: " SIZE_FORMAT
1478                       ", mutator_regions: " SIZE_FORMAT
1479                       ", mutator_used: " SIZE_FORMAT,
1480                       mutator_leftmost, mutator_rightmost, mutator_leftmost_empty, mutator_rightmost_empty,
1481                       mutator_regions, mutator_used);
1482 
1483   log_debug(gc, free)("  old_collector_leftmost: " SIZE_FORMAT
1484                       ", old_collector_rightmost: " SIZE_FORMAT
1485                       ", old_collector_leftmost_empty: " SIZE_FORMAT
1486                       ", old_collector_rightmost_empty: " SIZE_FORMAT
1487                       ", old_collector_regions: " SIZE_FORMAT
1488                       ", old_collector_used: " SIZE_FORMAT,
1489                       old_collector_leftmost, old_collector_rightmost, old_collector_leftmost_empty, old_collector_rightmost_empty,
1490                       old_collector_regions, old_collector_used);
1491 
1492   idx_t rightmost_idx = (mutator_leftmost == max_regions)? -1: (idx_t) mutator_rightmost;
1493   idx_t rightmost_empty_idx = (mutator_leftmost_empty == max_regions)? -1: (idx_t) mutator_rightmost_empty;
1494   _partitions.establish_mutator_intervals(mutator_leftmost, rightmost_idx, mutator_leftmost_empty, rightmost_empty_idx,
1495                                           mutator_regions, mutator_used);
1496   rightmost_idx = (old_collector_leftmost == max_regions)? -1: (idx_t) old_collector_rightmost;
1497   rightmost_empty_idx = (old_collector_leftmost_empty == max_regions)? -1: (idx_t) old_collector_rightmost_empty;
1498   _partitions.establish_old_collector_intervals(old_collector_leftmost, rightmost_idx, old_collector_leftmost_empty,
1499                                                 rightmost_empty_idx, old_collector_regions, old_collector_used);
1500   log_debug(gc, free)("  After find_regions_with_alloc_capacity(), Mutator range [%zd, %zd],"
1501                       "  Old Collector range [%zd, %zd]",
1502                       _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
1503                       _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
1504                       _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector),
1505                       _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector));
1506 }
1507 
1508 // Returns the number of regions transferred; adds the transferred bytes to the in-out argument bytes_transferred
1509 size_t ShenandoahFreeSet::transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId which_collector,
1510                                                                                    size_t max_xfer_regions,
1511                                                                                    size_t& bytes_transferred) {
1512   shenandoah_assert_heaplocked();
1513   const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1514   size_t transferred_regions = 0;
1515   ShenandoahLeftRightIterator iterator(&_partitions, which_collector, true);
1516   for (idx_t idx = iterator.current(); transferred_regions < max_xfer_regions && iterator.has_next(); idx = iterator.next()) {
1517     // Note: can_allocate_from() denotes that region is entirely empty
1518     if (can_allocate_from(idx)) {
1519       _partitions.move_from_partition_to_partition(idx, which_collector, ShenandoahFreeSetPartitionId::Mutator, region_size_bytes);
1520       transferred_regions++;
1521       bytes_transferred += region_size_bytes;
1522     }
1523   }
1524   return transferred_regions;
1525 }
1526 
1527 // Returns the number of regions transferred; adds the transferred bytes to the in-out argument bytes_transferred
1528 size_t ShenandoahFreeSet::transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId which_collector,
1529                                                                                        size_t max_xfer_regions,
1530                                                                                        size_t& bytes_transferred) {
1531   shenandoah_assert_heaplocked();
1532   size_t transferred_regions = 0;
1533   ShenandoahLeftRightIterator iterator(&_partitions, which_collector, false);
1534   for (idx_t idx = iterator.current(); transferred_regions < max_xfer_regions && iterator.has_next(); idx = iterator.next()) {
1535     size_t ac = alloc_capacity(idx);
1536     if (ac > 0) {
1537       _partitions.move_from_partition_to_partition(idx, which_collector, ShenandoahFreeSetPartitionId::Mutator, ac);
1538       transferred_regions++;
1539       bytes_transferred += ac;
1540     }
1541   }
1542   return transferred_regions;
1543 }
1544 
1545 void ShenandoahFreeSet::move_regions_from_collector_to_mutator(size_t max_xfer_regions) {
1546   size_t collector_xfer = 0;
1547   size_t old_collector_xfer = 0;
1548 
1549   // Process empty regions within the Collector free partition
1550   if ((max_xfer_regions > 0) &&
1551       (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::Collector)
1552        <= _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::Collector))) {
1553     ShenandoahHeapLocker locker(_heap->lock());
1554     max_xfer_regions -=
1555       transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId::Collector, max_xfer_regions,
1556                                                                collector_xfer);
1557   }
1558 
1559   // Process empty regions within the OldCollector free partition
1560   if ((max_xfer_regions > 0) &&
1561       (_partitions.leftmost_empty(ShenandoahFreeSetPartitionId::OldCollector)
1562        <= _partitions.rightmost_empty(ShenandoahFreeSetPartitionId::OldCollector))) {
1563     ShenandoahHeapLocker locker(_heap->lock());
1564     size_t old_collector_regions =
1565       transfer_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId::OldCollector, max_xfer_regions,
1566                                                                old_collector_xfer);
1567     max_xfer_regions -= old_collector_regions;
1568     if (old_collector_regions > 0) {
1569       ShenandoahGenerationalHeap::cast(_heap)->generation_sizer()->transfer_to_young(old_collector_regions);
1570     }
1571   }
1572 
1573   // If there are any non-empty regions within the Collector partition, we can also move them to the Mutator free partition
1574   if ((max_xfer_regions > 0) && (_partitions.leftmost(ShenandoahFreeSetPartitionId::Collector)
1575                                  <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector))) {
1576     ShenandoahHeapLocker locker(_heap->lock());
1577     max_xfer_regions -=
1578       transfer_non_empty_regions_from_collector_set_to_mutator_set(ShenandoahFreeSetPartitionId::Collector, max_xfer_regions,
1579                                                                    collector_xfer);
1580   }
1581 
1582   size_t total_xfer = collector_xfer + old_collector_xfer;
1583   log_info(gc, ergo)("At start of update refs, moving " SIZE_FORMAT "%s to Mutator free set from Collector Reserve ("
1584                      SIZE_FORMAT "%s) and from Old Collector Reserve (" SIZE_FORMAT "%s)",
1585                      byte_size_in_proper_unit(total_xfer), proper_unit_for_byte_size(total_xfer),
1586                      byte_size_in_proper_unit(collector_xfer), proper_unit_for_byte_size(collector_xfer),
1587                      byte_size_in_proper_unit(old_collector_xfer), proper_unit_for_byte_size(old_collector_xfer));
1588 }
1589 
1590 
1591 // Overwrite arguments to represent the amount of memory in each generation that is about to be recycled
1592 void ShenandoahFreeSet::prepare_to_rebuild(size_t &young_cset_regions, size_t &old_cset_regions,
1593                                            size_t &first_old_region, size_t &last_old_region, size_t &old_region_count) {
1594   shenandoah_assert_heaplocked();
1595   // This resets all state information, removing all regions from all sets.
1596   clear();
1597   log_debug(gc, free)("Rebuilding FreeSet");
1598 
1599   // This places regions that have alloc_capacity into the old_collector set if they identify as is_old(), or into
1600   // the mutator set otherwise.  All trashed (cset) regions are affiliated young and placed in the mutator set.
1601   find_regions_with_alloc_capacity(young_cset_regions, old_cset_regions, first_old_region, last_old_region, old_region_count);
1602 }
1603 
1604 void ShenandoahFreeSet::establish_generation_sizes(size_t young_region_count, size_t old_region_count) {
1605   assert(young_region_count + old_region_count == ShenandoahHeap::heap()->num_regions(), "Sanity");
1606   if (ShenandoahHeap::heap()->mode()->is_generational()) {
1607     ShenandoahGenerationalHeap* heap = ShenandoahGenerationalHeap::heap();
1608     ShenandoahOldGeneration* old_gen = heap->old_generation();
1609     ShenandoahYoungGeneration* young_gen = heap->young_generation();
1610     size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1611 
1612     size_t original_old_capacity = old_gen->max_capacity();
1613     size_t new_old_capacity = old_region_count * region_size_bytes;
1614     size_t new_young_capacity = young_region_count * region_size_bytes;
1615     old_gen->set_capacity(new_old_capacity);
1616     young_gen->set_capacity(new_young_capacity);
1617 
1618     if (new_old_capacity > original_old_capacity) {
1619       size_t region_count = (new_old_capacity - original_old_capacity) / region_size_bytes;
1620       log_info(gc, ergo)("Transfer " SIZE_FORMAT " region(s) from %s to %s, yielding increased size: " PROPERFMT,
1621                          region_count, young_gen->name(), old_gen->name(), PROPERFMTARGS(new_old_capacity));
1622     } else if (new_old_capacity < original_old_capacity) {
1623       size_t region_count = (original_old_capacity - new_old_capacity) / region_size_bytes;
1624       log_info(gc, ergo)("Transfer " SIZE_FORMAT " region(s) from %s to %s, yielding increased size: " PROPERFMT,
1625                          region_count, old_gen->name(), young_gen->name(), PROPERFMTARGS(new_young_capacity));
1626     }
1627     // This balances generations, so clear any pending request to balance.
1628     old_gen->set_region_balance(0);
1629   }
1630 }
1631 
1632 void ShenandoahFreeSet::finish_rebuild(size_t young_cset_regions, size_t old_cset_regions, size_t old_region_count,
1633                                        bool have_evacuation_reserves) {
1634   shenandoah_assert_heaplocked();
1635   size_t young_reserve(0), old_reserve(0);
1636 
1637   if (_heap->mode()->is_generational()) {
1638     compute_young_and_old_reserves(young_cset_regions, old_cset_regions, have_evacuation_reserves,
1639                                    young_reserve, old_reserve);
1640   } else {
1641     young_reserve = (_heap->max_capacity() / 100) * ShenandoahEvacReserve;
1642     old_reserve = 0;
1643   }
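       // For example (illustrative numbers): with a 2 GiB max capacity and ShenandoahEvacReserve at its
       // usual default of 5, young_reserve = (2 GiB / 100) * 5, roughly 102 MiB.  The division happens
       // first, so the result rounds down slightly.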
1644 
1645   // Move some of the Mutator regions into the Collector and OldCollector partitions in order to satisfy
1646   // young_reserve and old_reserve.
1647   reserve_regions(young_reserve, old_reserve, old_region_count);
1648   size_t young_region_count = _heap->num_regions() - old_region_count;
1649   establish_generation_sizes(young_region_count, old_region_count);
1650   establish_old_collector_alloc_bias();
1651   _partitions.assert_bounds();
1652   log_status();
1653 }
1654 
1655 void ShenandoahFreeSet::compute_young_and_old_reserves(size_t young_cset_regions, size_t old_cset_regions,
1656                                                        bool have_evacuation_reserves,
1657                                                        size_t& young_reserve_result, size_t& old_reserve_result) const {
1658   shenandoah_assert_generational();
1659   const size_t region_size_bytes = ShenandoahHeapRegion::region_size_bytes();
1660 
1661   ShenandoahOldGeneration* const old_generation = _heap->old_generation();
1662   size_t old_available = old_generation->available();
1663   size_t old_unaffiliated_regions = old_generation->free_unaffiliated_regions();
1664   ShenandoahYoungGeneration* const young_generation = _heap->young_generation();
1665   size_t young_capacity = young_generation->max_capacity();
1666   size_t young_unaffiliated_regions = young_generation->free_unaffiliated_regions();
1667 
1668   // Add in the regions we anticipate to be freed by evacuation of the collection set
1669   old_unaffiliated_regions += old_cset_regions;
1670   young_unaffiliated_regions += young_cset_regions;
1671 
1672   // Consult old-region balance to make adjustments to current generation capacities and availability.
1673   // The generation region transfers take place after we rebuild.
1674   const ssize_t old_region_balance = old_generation->get_region_balance();
1675   if (old_region_balance != 0) {
1676 #ifdef ASSERT
1677     if (old_region_balance > 0) {
1678       assert(old_region_balance <= checked_cast<ssize_t>(old_unaffiliated_regions), "Cannot transfer regions that are affiliated");
1679     } else {
1680       assert(0 - old_region_balance <= checked_cast<ssize_t>(young_unaffiliated_regions), "Cannot transfer regions that are affiliated");
1681     }
1682 #endif
1683 
1684     ssize_t xfer_bytes = old_region_balance * checked_cast<ssize_t>(region_size_bytes);
1685     old_available -= xfer_bytes;
1686     old_unaffiliated_regions -= old_region_balance;
1687     young_capacity += xfer_bytes;
1688     young_unaffiliated_regions += old_region_balance;
1689   }
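       // For example (illustrative numbers): old_region_balance == 2 with 4 MiB regions shifts 8 MiB of
       // capacity from old to young: old_available and old_unaffiliated_regions shrink, while
       // young_capacity and young_unaffiliated_regions grow by the corresponding amounts.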
1690 
1691   // All allocations taken from the old collector set are performed by GC, generally using PLABs for both
1692   // promotions and evacuations.  The split of old memory between what is reserved for evacuation and what is
1693   // reserved for promotion is enforced using thread-local variables that prescribe intentions for
1694   // each PLAB's available memory.
1695   if (have_evacuation_reserves) {
1696     // We are rebuilding at the end of final mark, having already established evacuation budgets for this GC pass.
1697     const size_t promoted_reserve = old_generation->get_promoted_reserve();
1698     const size_t old_evac_reserve = old_generation->get_evacuation_reserve();
1699     young_reserve_result = young_generation->get_evacuation_reserve();
1700     old_reserve_result = promoted_reserve + old_evac_reserve;
1701     assert(old_reserve_result <= old_available,
1702            "Cannot reserve (" SIZE_FORMAT " + " SIZE_FORMAT") more OLD than is available: " SIZE_FORMAT,
1703            promoted_reserve, old_evac_reserve, old_available);
1704   } else {
1705     // We are rebuilding at the end of GC, so we set aside the budgets specified on the command line (or their defaults)
1706     young_reserve_result = (young_capacity * ShenandoahEvacReserve) / 100;
1707     // The auto-sizer has already made old-gen large enough to hold all anticipated evacuations and promotions.
1708     // Affiliated old-gen regions are already in the OldCollector free set.  Add in the relevant number of
1709     // unaffiliated regions.
1710     old_reserve_result = old_available;
1711   }
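       // For example (illustrative numbers): rebuilding at the end of GC with young_capacity of 1 GiB and
       // ShenandoahEvacReserve of 5 yields young_reserve_result = (1 GiB * 5) / 100, roughly 51 MiB,
       // subject to the caps applied below.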
1712 
1713   // Old regions with less than PLAB::min_size() of available memory are not placed into the OldCollector
1714   // free set.  Because of this, old_available may not have enough memory to represent the intended reserve.  Adjust
1715   // the reserve downward to account for this possibility. This loss is part of the reason why the original budget
1716   // was adjusted with ShenandoahOldEvacWaste and ShenandoahOldPromoWaste multipliers.
1717   if (old_reserve_result >
1718       _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes) {
1719     old_reserve_result =
1720       _partitions.capacity_of(ShenandoahFreeSetPartitionId::OldCollector) + old_unaffiliated_regions * region_size_bytes;
1721   }
1722 
1723   if (young_reserve_result > young_unaffiliated_regions * region_size_bytes) {
1724     young_reserve_result = young_unaffiliated_regions * region_size_bytes;
1725   }
1726 }
1727 
1728 // Having placed all regions that have allocation capacity into the mutator set if they identify as is_young()
1729 // or into the old collector set if they identify as is_old(), move some of these regions from the mutator set
1730 // into the collector set or old collector set to ensure that the memory available for allocations within
1731 // the collector set is at least to_reserve and the memory available for allocations within the old collector set
1732 // is at least to_reserve_old.
1733 void ShenandoahFreeSet::reserve_regions(size_t to_reserve, size_t to_reserve_old, size_t &old_region_count) {
1734   for (size_t i = _heap->num_regions(); i > 0; i--) {
1735     size_t idx = i - 1;
1736     ShenandoahHeapRegion* r = _heap->get_region(idx);
1737     if (!_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx)) {
1738       continue;
1739     }
1740 
1741     size_t ac = alloc_capacity(r);
1742     assert (ac > 0, "Membership in free set implies has capacity");
1743     assert (!r->is_old() || r->is_trash(), "Except for trash, mutator_is_free regions should not be affiliated OLD");
1744 
1745     bool move_to_old_collector = _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector) < to_reserve_old;
1746     bool move_to_collector = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector) < to_reserve;
1747 
1748     if (!move_to_collector && !move_to_old_collector) {
1749       // We've satisfied both to_reserve and to_reserve_old
1750       break;
1751     }
1752 
1753     if (move_to_old_collector) {
1754       // We give priority to OldCollector partition because we desire to pack OldCollector regions into higher
1755       // addresses than Collector regions.  Presumably, OldCollector regions are more "stable" and less likely to
1756       // be collected in the near future.
1757       if (r->is_trash() || !r->is_affiliated()) {
1758         // OLD regions that have available memory are already in the old_collector free set.
1759         _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
1760                                                      ShenandoahFreeSetPartitionId::OldCollector, ac);
1761         log_trace(gc, free)("  Shifting region " SIZE_FORMAT " from mutator_free to old_collector_free", idx);
1762         log_trace(gc, free)("  Shifted Mutator range [%zd, %zd],"
1763                             "  Old Collector range [%zd, %zd]",
1764                             _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
1765                             _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
1766                             _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector),
1767                             _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector));
1768         old_region_count++;
1769         continue;
1770       }
1771     }
1772 
1773     if (move_to_collector) {
1774       // Note: In a previous implementation, regions were only placed into the survivor space (collector_is_free) if
1775       // they were entirely empty.  This has the effect of causing new Mutator allocation to reside next to objects
1776       // that have already survived at least one GC, mixing ephemeral with longer-lived objects in the same region.
1777       // Any objects that have survived a GC are less likely to immediately become garbage, so a region that contains
1778       // survivor objects is less likely to be selected for the collection set.  This alternative implementation allows
1779       // survivor regions to continue accumulating other survivor objects, and makes it more likely that ephemeral objects
1780       // occupy regions composed entirely of ephemeral objects.  These regions are highly likely to be included in the next
1781       // collection set, and they are easily evacuated because they have low density of live objects.
1782       _partitions.move_from_partition_to_partition(idx, ShenandoahFreeSetPartitionId::Mutator,
1783                                                    ShenandoahFreeSetPartitionId::Collector, ac);
1784       log_trace(gc, free)("  Shifting region " SIZE_FORMAT " from mutator_free to collector_free", idx);
1785       log_trace(gc, free)("  Shifted Mutator range [%zd, %zd],"
1786                           "  Collector range [%zd, %zd]",
1787                           _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
1788                           _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
1789                           _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector),
1790                           _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector));
1791     }
1792   }
1793 
1794   if (LogTarget(Info, gc, free)::is_enabled()) {
1795     size_t old_reserve = _partitions.available_in(ShenandoahFreeSetPartitionId::OldCollector);
1796     if (old_reserve < to_reserve_old) {
1797       log_info(gc, free)("Wanted " PROPERFMT " for old reserve, but only reserved: " PROPERFMT,
1798                          PROPERFMTARGS(to_reserve_old), PROPERFMTARGS(old_reserve));
1799     }
1800     size_t reserve = _partitions.available_in(ShenandoahFreeSetPartitionId::Collector);
1801     if (reserve < to_reserve) {
1802       log_info(gc, free)("Wanted " PROPERFMT " for young reserve, but only reserved: " PROPERFMT,
1803                           PROPERFMTARGS(to_reserve), PROPERFMTARGS(reserve));
1804     }
1805   }
1806 }
1807 
1808 void ShenandoahFreeSet::establish_old_collector_alloc_bias() {
1809   ShenandoahHeap* heap = ShenandoahHeap::heap();
1810   shenandoah_assert_heaplocked();
1811 
1812   idx_t left_idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector);
1813   idx_t right_idx = _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector);
1814   idx_t middle = (left_idx + right_idx) / 2;
1815   size_t available_in_first_half = 0;
1816   size_t available_in_second_half = 0;
1817 
1818   for (idx_t index = left_idx; index < middle; index++) {
1819     if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, index)) {
1820       ShenandoahHeapRegion* r = heap->get_region((size_t) index);
1821       available_in_first_half += r->free();
1822     }
1823   }
1824   for (idx_t index = middle; index <= right_idx; index++) {
1825     if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, index)) {
1826       ShenandoahHeapRegion* r = heap->get_region(index);
1827       available_in_second_half += r->free();
1828     }
1829   }
1830 
1831   // We prefer to consume the sparsely distributed regions first so that the remaining regions stay densely packed.
1832   // Densely packing regions reduces the effort to search for a region that has sufficient memory to satisfy a new allocation
1833   // request.  Regions become sparsely distributed following a Full GC, which tends to slide all regions to the front of the
1834   // heap rather than allowing survivor regions to remain at the high end of the heap where we intend for them to congregate.
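       // For example (illustrative numbers): if the first half of the range holds 3 MiB of free memory and
       // the second half holds 9 MiB, we bias allocation from left to right so that the sparse first half
       // is consumed first, leaving the denser second half packed.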
1835   _partitions.set_bias_from_left_to_right(ShenandoahFreeSetPartitionId::OldCollector,
1836                                           (available_in_second_half > available_in_first_half));
1837 }
1838 
1839 void ShenandoahFreeSet::log_status_under_lock() {
1840   // Must not be heap locked; this acquires the heap lock only when logging is enabled
1841   shenandoah_assert_not_heaplocked();
1842   if (LogTarget(Info, gc, free)::is_enabled()
1843       DEBUG_ONLY(|| LogTarget(Debug, gc, free)::is_enabled())) {
1844     ShenandoahHeapLocker locker(_heap->lock());
1845     log_status();
1846   }
1847 }
1848 
1849 void ShenandoahFreeSet::log_status() {
1850   shenandoah_assert_heaplocked();
1851 
1852 #ifdef ASSERT
1853   // Dump of the FreeSet details is only enabled if assertions are enabled
1854   LogTarget(Debug, gc, free) debug_free;
1855   if (debug_free.is_enabled()) {
1856 #define BUFFER_SIZE 80
1857     LogStream ls(debug_free);
1858 
1859     char buffer[BUFFER_SIZE];
1860     for (uint i = 0; i < BUFFER_SIZE; i++) {
1861       buffer[i] = '\0';
1862     }
1863 
1864     ls.cr();
1865     ls.print_cr("Mutator free range [%zd..%zd] allocating from %s",
1866                 _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator),
1867                 _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator),
1868                 _partitions.alloc_from_left_bias(ShenandoahFreeSetPartitionId::Mutator)? "left to right": "right to left");
1869 
1870     ls.print_cr("Collector free range [%zd..%zd] allocating from %s",
1871                 _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector),
1872                 _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector),
1873                 _partitions.alloc_from_left_bias(ShenandoahFreeSetPartitionId::Collector)? "left to right": "right to left");
1874 
1875     ls.print_cr("Old collector free range [%zd..%zd] allocating from %s",
1876                 _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector),
1877                 _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector),
1878                 _partitions.alloc_from_left_bias(ShenandoahFreeSetPartitionId::OldCollector)? "left to right": "right to left");
1879     ls.cr();
1880     ls.print_cr("FreeSet map legend:");
1881     ls.print_cr(" M/m:mutator, C/c:collector, O/o:old_collector (Empty/Occupied)");
1882     ls.print_cr(" H/h:humongous, X/x:no alloc capacity, ~/_:retired (Old/Young)");
1883 
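         // Each row of the map below covers 64 consecutive regions; the row label printed on the left is
         // the index of the first region in that row.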
1884     for (uint i = 0; i < _heap->num_regions(); i++) {
1885       ShenandoahHeapRegion *r = _heap->get_region(i);
1886       uint idx = i % 64;
1887       if ((i != 0) && (idx == 0)) {
1888         ls.print_cr(" %6u: %s", i-64, buffer);
1889       }
1890       if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, i)) {
1891         size_t capacity = alloc_capacity(r);
1892         assert(!r->is_old() || r->is_trash(), "Old regions except trash regions should not be in mutator_free set");
1893         buffer[idx] = (capacity == ShenandoahHeapRegion::region_size_bytes()) ? 'M' : 'm';
1894       } else if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, i)) {
1895         size_t capacity = alloc_capacity(r);
1896         assert(!r->is_old() || r->is_trash(), "Old regions except trash regions should not be in collector_free set");
1897         buffer[idx] = (capacity == ShenandoahHeapRegion::region_size_bytes()) ? 'C' : 'c';
1898       } else if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, i)) {
1899         size_t capacity = alloc_capacity(r);
1900         buffer[idx] = (capacity == ShenandoahHeapRegion::region_size_bytes()) ? 'O' : 'o';
1901       } else if (r->is_humongous()) {
1902         buffer[idx] = (r->is_old() ? 'H' : 'h');
1903       } else if (alloc_capacity(r) == 0) {
1904         buffer[idx] = (r->is_old() ? 'X' : 'x');
1905       } else {
1906         buffer[idx] = (r->is_old() ? '~' : '_');
1907       }
1908     }
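         // Flush the final row of the map, which may cover fewer than 64 regions.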
1909     uint remnant = _heap->num_regions() % 64;
1910     if (remnant > 0) {
1911       buffer[remnant] = '\0';
1912     } else {
1913       remnant = 64;
1914     }
1915     ls.print_cr(" %6u: %s", (uint) (_heap->num_regions() - remnant), buffer);
1916   }
1917 #endif
1918 
1919   LogTarget(Info, gc, free) lt;
1920   if (lt.is_enabled()) {
1921     ResourceMark rm;
1922     LogStream ls(lt);
1923 
1924     {
1925       idx_t last_idx = 0;
1926       size_t max = 0;
1927       size_t max_contig = 0;
1928       size_t empty_contig = 0;
1929 
1930       size_t total_used = 0;
1931       size_t total_free = 0;
1932       size_t total_free_ext = 0;
1933 
1934       for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Mutator);
1935            idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Mutator); idx++) {
1936         if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Mutator, idx)) {
1937           ShenandoahHeapRegion *r = _heap->get_region(idx);
1938           size_t free = alloc_capacity(r);
1939           max = MAX2(max, free);
1940           if (r->is_empty()) {
1941             total_free_ext += free;
1942             if (last_idx + 1 == idx) {
1943               empty_contig++;
1944             } else {
1945               empty_contig = 1;
1946             }
1947           } else {
1948             empty_contig = 0;
1949           }
1950           total_used += r->used();
1951           total_free += free;
1952           max_contig = MAX2(max_contig, empty_contig);
1953           last_idx = idx;
1954         }
1955       }
1956 
1957       size_t max_humongous = max_contig * ShenandoahHeapRegion::region_size_bytes();
1958       size_t free = capacity() - used();
1959 
1960       // Since certain regions that belonged to the Mutator free partition at the time of the most recent rebuild may
1961       // have been retired, the sum of used and capacities within regions that are still in the Mutator free partition
1962       // may not match the internally tracked values of used() and free().
1963       assert(free == total_free, "Free memory should match");
1964       ls.print("Free: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s regular, " SIZE_FORMAT "%s humongous, ",
1965                byte_size_in_proper_unit(total_free),    proper_unit_for_byte_size(total_free),
1966                byte_size_in_proper_unit(max),           proper_unit_for_byte_size(max),
1967                byte_size_in_proper_unit(max_humongous), proper_unit_for_byte_size(max_humongous)
1968       );
1969 
1970       ls.print("Frag: ");
1971       size_t frag_ext;
1972       if (total_free_ext > 0) {
1973         frag_ext = 100 - (100 * max_humongous / total_free_ext);
1974       } else {
1975         frag_ext = 0;
1976       }
1977       ls.print(SIZE_FORMAT "%% external, ", frag_ext);
1978 
1979       size_t frag_int;
1980       if (_partitions.count(ShenandoahFreeSetPartitionId::Mutator) > 0) {
1981         frag_int = (100 * (total_used / _partitions.count(ShenandoahFreeSetPartitionId::Mutator))
1982                     / ShenandoahHeapRegion::region_size_bytes());
1983       } else {
1984         frag_int = 0;
1985       }
1986       ls.print(SIZE_FORMAT "%% internal; ", frag_int);
1987       ls.print("Used: " SIZE_FORMAT "%s, Mutator Free: " SIZE_FORMAT,
1988                byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used),
1989                _partitions.count(ShenandoahFreeSetPartitionId::Mutator));
1990     }
1991 
1992     {
1993       size_t max = 0;
1994       size_t total_free = 0;
1995       size_t total_used = 0;
1996 
1997       for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::Collector);
1998            idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::Collector); idx++) {
1999         if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::Collector, idx)) {
2000           ShenandoahHeapRegion *r = _heap->get_region(idx);
2001           size_t free = alloc_capacity(r);
2002           max = MAX2(max, free);
2003           total_free += free;
2004           total_used += r->used();
2005         }
2006       }
2007       ls.print(" Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s",
2008                byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
2009                byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max),
2010                byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used));
2011     }
2012 
2013     if (_heap->mode()->is_generational()) {
2014       size_t max = 0;
2015       size_t total_free = 0;
2016       size_t total_used = 0;
2017 
2018       for (idx_t idx = _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector);
2019            idx <= _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector); idx++) {
2020         if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, idx)) {
2021           ShenandoahHeapRegion *r = _heap->get_region(idx);
2022           size_t free = alloc_capacity(r);
2023           max = MAX2(max, free);
2024           total_free += free;
2025           total_used += r->used();
2026         }
2027       }
2028       ls.print_cr(" Old Collector Reserve: " SIZE_FORMAT "%s, Max: " SIZE_FORMAT "%s; Used: " SIZE_FORMAT "%s",
2029                   byte_size_in_proper_unit(total_free), proper_unit_for_byte_size(total_free),
2030                   byte_size_in_proper_unit(max),        proper_unit_for_byte_size(max),
2031                   byte_size_in_proper_unit(total_used), proper_unit_for_byte_size(total_used));
2032     }
2033   }
2034 }
2035 
2036 HeapWord* ShenandoahFreeSet::allocate(ShenandoahAllocRequest& req, bool& in_new_region) {
2037   shenandoah_assert_heaplocked();
2038   if (ShenandoahHeapRegion::requires_humongous(req.size())) {
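         // Only shared allocations (mutator or GC) may be satisfied by humongous regions; a
         // TLAB/GCLAB/PLAB request of humongous size indicates a caller error and is refused below
         // (with an assert in debug builds).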
2039     switch (req.type()) {
2040       case ShenandoahAllocRequest::_alloc_shared:
2041       case ShenandoahAllocRequest::_alloc_shared_gc:
2042         in_new_region = true;
2043         return allocate_contiguous(req);
2044       case ShenandoahAllocRequest::_alloc_plab:
2045       case ShenandoahAllocRequest::_alloc_gclab:
2046       case ShenandoahAllocRequest::_alloc_tlab:
2047         in_new_region = false;
2048         assert(false, "Trying to allocate TLAB in humongous region: " SIZE_FORMAT, req.size());
2049         return nullptr;
2050       default:
2051         ShouldNotReachHere();
2052         return nullptr;
2053     }
2054   } else {
2055     return allocate_single(req, in_new_region);
2056   }
2057 }
2058 
2059 void ShenandoahFreeSet::print_on(outputStream* out) const {
2060   out->print_cr("Mutator Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::Mutator));
2061   ShenandoahLeftRightIterator mutator(const_cast<ShenandoahRegionPartitions*>(&_partitions), ShenandoahFreeSetPartitionId::Mutator);
2062   for (idx_t index = mutator.current(); mutator.has_next(); index = mutator.next()) {
2063     _heap->get_region(index)->print_on(out);
2064   }
2065 
2066   out->print_cr("Collector Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::Collector));
2067   ShenandoahLeftRightIterator collector(const_cast<ShenandoahRegionPartitions*>(&_partitions), ShenandoahFreeSetPartitionId::Collector);
2068   for (idx_t index = collector.current(); collector.has_next(); index = collector.next()) {
2069     _heap->get_region(index)->print_on(out);
2070   }
2071 
2072   if (_heap->mode()->is_generational()) {
2073     out->print_cr("Old Collector Free Set: " SIZE_FORMAT "", _partitions.count(ShenandoahFreeSetPartitionId::OldCollector));
2074     for (idx_t index = _partitions.leftmost(ShenandoahFreeSetPartitionId::OldCollector);
2075          index <= _partitions.rightmost(ShenandoahFreeSetPartitionId::OldCollector); index++) {
2076       if (_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, index)) {
2077         _heap->get_region(index)->print_on(out);
2078       }
2079     }
2080   }
2081 }
2082 
2083 double ShenandoahFreeSet::internal_fragmentation() {
2084   double squared = 0;
2085   double linear = 0;
2086 
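       // This computes 1 - (sum of used_i^2) / (region_size * sum of used_i), i.e. one minus the
       // used-weighted average fill fraction of the Mutator regions.  For example (illustrative numbers):
       // two regions that are each half full give squared = R^2/2 and linear = R, so the result is
       // 1 - (R^2/2) / (R * R) = 50%, while regions that are nearly full drive the result toward 0%.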
2087   ShenandoahLeftRightIterator iterator(&_partitions, ShenandoahFreeSetPartitionId::Mutator);
2088   for (idx_t index = iterator.current(); iterator.has_next(); index = iterator.next()) {
2089     ShenandoahHeapRegion* r = _heap->get_region(index);
2090     size_t used = r->used();
2091     squared += used * used;
2092     linear += used;
2093   }
2094 
2095   if (linear > 0) {
2096     double s = squared / (ShenandoahHeapRegion::region_size_bytes() * linear);
2097     return 1 - s;
2098   } else {
2099     return 0;
2100   }
2101 }
2102 
2103 double ShenandoahFreeSet::external_fragmentation() {
2104   idx_t last_idx = 0;
2105   size_t max_contig = 0;
2106   size_t empty_contig = 0;
2107   size_t free = 0;
2108 
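       // The result is 1 - (largest contiguous run of empty regions) / (total number of empty regions).
       // For example (illustrative numbers): 10 empty Mutator regions whose longest contiguous run is
       // 4 regions give 1 - 4/10 = 60% external fragmentation; a single unbroken run gives 0%.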
2109   ShenandoahLeftRightIterator iterator(&_partitions, ShenandoahFreeSetPartitionId::Mutator);
2110   for (idx_t index = iterator.current(); iterator.has_next(); index = iterator.next()) {
2111     ShenandoahHeapRegion* r = _heap->get_region(index);
2112     if (r->is_empty()) {
2113       free += ShenandoahHeapRegion::region_size_bytes();
2114       if (last_idx + 1 == index) {
2115         empty_contig++;
2116       } else {
2117         empty_contig = 1;
2118       }
2119     } else {
2120       empty_contig = 0;
2121     }
2122     max_contig = MAX2(max_contig, empty_contig);
2123     last_idx = index;
2124   }
2125 
2126   if (free > 0) {
2127     return 1 - (1.0 * max_contig * ShenandoahHeapRegion::region_size_bytes() / free);
2128   } else {
2129     return 0;
2130   }
2131 }
2132 