/*
 * Copyright (c) 2002, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
#include "compiler/oopMap.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psClosure.inline.hpp"
#include "gc/parallel/psCompactionManager.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.inline.hpp"
#include "gc/parallel/psRootType.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
#include "gc/shared/gcCause.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shared/oopStorageSetParState.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/scavengableNMethods.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/taskTerminator.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "gc/shared/workerThread.hpp"
#include "gc/shared/workerUtils.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/stack.inline.hpp"

SpanSubjectToDiscoveryClosure PSScavenge::_span_based_discoverer;
ReferenceProcessor*           PSScavenge::_ref_processor = nullptr;
PSCardTable*                  PSScavenge::_card_table = nullptr;
bool                          PSScavenge::_survivor_overflow = false;
uint                          PSScavenge::_tenuring_threshold = 0;
HeapWord*                     PSScavenge::_young_generation_boundary = nullptr;
uintptr_t                     PSScavenge::_young_generation_boundary_compressed = 0;
elapsedTimer                  PSScavenge::_accumulated_time;
STWGCTimer                    PSScavenge::_gc_timer;
ParallelScavengeTracer        PSScavenge::_gc_tracer;
CollectorCounters*            PSScavenge::_counters = nullptr;
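
// Scan one category of strong roots (class-loader data or scavengable
// nmethods) on a GC worker thread, then drain that worker's promotion stacks.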
static void scavenge_roots_work(ParallelRootType::Value root_type, uint worker_id) {
  assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

  PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);
  PSPromoteRootsClosure roots_to_old_closure(pm);

  switch (root_type) {
    case ParallelRootType::class_loader_data:
    {
      PSScavengeCLDClosure cld_closure(pm);
      ClassLoaderDataGraph::cld_do(&cld_closure);
    }
    break;

    case ParallelRootType::code_cache:
    {
      MarkingNMethodClosure code_closure(&roots_to_old_closure, NMethodToOopClosure::FixRelocations, false /* keepalive nmethods */);
      ScavengableNMethods::nmethods_do(&code_closure);
    }
    break;

    case ParallelRootType::sentinel:
    DEBUG_ONLY(default:) // DEBUG_ONLY hack will create compile error on release builds (-Wswitch) and runtime check on debug builds
      fatal("Bad enumeration value: %u", root_type);
      break;
  }

  // Do the real work
  pm->drain_stacks(false);
}

static void steal_work(TaskTerminator& terminator, uint worker_id) {
  assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

  PSPromotionManager* pm =
    PSPromotionManager::gc_thread_promotion_manager(worker_id);
  pm->drain_stacks(true);
  guarantee(pm->stacks_empty(),
            "stacks should be empty at this point");

  while (true) {
    ScannerTask task;
    if (PSPromotionManager::steal_depth(worker_id, task)) {
      pm->process_popped_location_depth(task, true);
      pm->drain_stacks_depth(true);
    } else {
      if (terminator.offer_termination()) {
        break;
      }
    }
  }
  guarantee(pm->stacks_empty(), "stacks should be empty at this point");
}

// Define before use
class PSIsAliveClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) {
    return (!PSScavenge::is_obj_in_young(p)) || p->is_forwarded();
  }
};

PSIsAliveClosure PSScavenge::_is_alive_closure;

class PSKeepAliveClosure: public OopClosure {
protected:
  MutableSpace* _to_space;
  PSPromotionManager* _promotion_manager;

public:
  PSKeepAliveClosure(PSPromotionManager* pm) : _promotion_manager(pm) {
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    _to_space = heap->young_gen()->to_space();

    assert(_promotion_manager != nullptr, "Sanity");
  }

  template <class T> void do_oop_work(T* p) {
#ifdef ASSERT
    // Referent must be non-null and in from-space
    oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);
    assert(oopDesc::is_oop(obj), "referent must be an oop");
    assert(PSScavenge::is_obj_in_young(obj), "must be in young-gen");
    assert(!PSScavenge::is_obj_in_to_space(obj), "must be in from-space");
#endif

    _promotion_manager->copy_and_push_safe_barrier</*promote_immediately=*/false>(p);
  }
  virtual void do_oop(oop* p)       { PSKeepAliveClosure::do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { PSKeepAliveClosure::do_oop_work(p); }
};
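
// Used as the complete_gc closure during reference processing: drains this
// worker's promotion stacks and, when a terminator is supplied, steals work
// from other workers until all of them have terminated.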
class PSEvacuateFollowersClosure: public VoidClosure {
private:
  PSPromotionManager* _promotion_manager;
  TaskTerminator* _terminator;
  uint _worker_id;

public:
  PSEvacuateFollowersClosure(PSPromotionManager* pm, TaskTerminator* terminator, uint worker_id)
    : _promotion_manager(pm), _terminator(terminator), _worker_id(worker_id) {}

  virtual void do_void() {
    assert(_promotion_manager != nullptr, "Sanity");
    _promotion_manager->drain_stacks(true);
    guarantee(_promotion_manager->stacks_empty(),
              "stacks should be empty at this point");

    if (_terminator != nullptr) {
      steal_work(*_terminator, _worker_id);
    }
  }
};

class ParallelScavengeRefProcProxyTask : public RefProcProxyTask {
  TaskTerminator _terminator;

public:
  ParallelScavengeRefProcProxyTask(uint max_workers)
    : RefProcProxyTask("ParallelScavengeRefProcProxyTask", max_workers),
      _terminator(max_workers, ParCompactionManager::marking_stacks()) {}

  void work(uint worker_id) override {
    assert(worker_id < _max_workers, "sanity");
    PSPromotionManager* promotion_manager = (_tm == RefProcThreadModel::Single) ? PSPromotionManager::vm_thread_promotion_manager() : PSPromotionManager::gc_thread_promotion_manager(worker_id);
    PSIsAliveClosure is_alive;
    PSKeepAliveClosure keep_alive(promotion_manager);
    BarrierEnqueueDiscoveredFieldClosure enqueue;
    PSEvacuateFollowersClosure complete_gc(promotion_manager, (_marks_oops_alive && _tm == RefProcThreadModel::Multi) ? &_terminator : nullptr, worker_id);
    _rp_task->rp_work(worker_id, &is_alive, &keep_alive, &enqueue, &complete_gc);
  }

  void prepare_run_task_hook() override {
    _terminator.reset_for_reuse(_queue_count);
  }
};

class PSThreadRootsTaskClosure : public ThreadClosure {
  uint _worker_id;
public:
  PSThreadRootsTaskClosure(uint worker_id) : _worker_id(worker_id) { }
  virtual void do_thread(Thread* thread) {
    assert(ParallelScavengeHeap::heap()->is_stw_gc_active(), "called outside gc");

    PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(_worker_id);
    PSScavengeRootsClosure roots_closure(pm);
    MarkingNMethodClosure roots_in_nmethods(&roots_closure, NMethodToOopClosure::FixRelocations, false /* keepalive nmethods */);

    thread->oops_do(&roots_closure, &roots_in_nmethods);

    // Do the real work
    pm->drain_stacks(false);
  }
};
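
// Worker task for the main scavenge phase. Each worker scans dirty cards in
// the old gen (old-to-young pointers), the strong-root categories it claims,
// thread stacks and the strong OopStorages, and finally steals work from
// other workers when more than one worker is active.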
class ScavengeRootsTask : public WorkerTask {
  StrongRootsScope _strong_roots_scope; // needed for Threads::possibly_parallel_threads_do
  OopStorageSetStrongParState<false /* concurrent */, false /* is_const */> _oop_storage_strong_par_state;
  SequentialSubTasksDone _subtasks;
  PSOldGen* _old_gen;
  HeapWord* _gen_top;
  uint _active_workers;
  bool _is_old_gen_empty;
  TaskTerminator _terminator;

public:
  ScavengeRootsTask(PSOldGen* old_gen,
                    uint active_workers) :
    WorkerTask("ScavengeRootsTask"),
    _strong_roots_scope(active_workers),
    _subtasks(ParallelRootType::sentinel),
    _old_gen(old_gen),
    _gen_top(old_gen->object_space()->top()),
    _active_workers(active_workers),
    _is_old_gen_empty(old_gen->object_space()->is_empty()),
    _terminator(active_workers, PSPromotionManager::vm_thread_promotion_manager()->stack_array_depth()) {
    if (!_is_old_gen_empty) {
      PSCardTable* card_table = ParallelScavengeHeap::heap()->card_table();
      card_table->pre_scavenge(active_workers);
    }
  }

  virtual void work(uint worker_id) {
    assert(worker_id < _active_workers, "Sanity");
    ResourceMark rm;

    if (!_is_old_gen_empty) {
      // There are only old-to-young pointers if there are objects
      // in the old gen.
      {
        PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);
        PSCardTable* card_table = ParallelScavengeHeap::heap()->card_table();

        // The top of the old gen changes during scavenge when objects are promoted.
        card_table->scavenge_contents_parallel(_old_gen->start_array(),
                                               _old_gen->object_space()->bottom(),
                                               _gen_top,
                                               pm,
                                               worker_id,
                                               _active_workers);

        // Do the real work
        pm->drain_stacks(false);
      }
    }

    for (uint root_type = 0; _subtasks.try_claim_task(root_type); /* empty */ ) {
      scavenge_roots_work(static_cast<ParallelRootType::Value>(root_type), worker_id);
    }

    PSThreadRootsTaskClosure closure(worker_id);
    Threads::possibly_parallel_threads_do(true /* is_par */, &closure);

    // Scavenge OopStorages
    {
      PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(worker_id);
      PSScavengeRootsClosure closure(pm);
      _oop_storage_strong_par_state.oops_do(&closure);
      // Do the real work
      pm->drain_stacks(false);
    }

    // If active_workers can exceed 1, add a steal_work().
    // PSPromotionManager::drain_stacks_depth() does not fully drain its
    // stacks and expects a steal_work() to complete the draining if
    // ParallelGCThreads is > 1.

    if (_active_workers > 1) {
      steal_work(_terminator, worker_id);
    }
  }
};
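
// Perform a stop-the-world young collection ("Pause Young"). Returns false
// if the scavenge was not attempted or if a promotion failure occurred.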
bool PSScavenge::invoke(bool clear_soft_refs) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  IsSTWGCActiveMark mark;

  _gc_timer.register_gc_start();

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  SvcGCMarker sgcm(SvcGCMarker::MINOR);
  GCIdMark gc_id_mark;
  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  assert(young_gen->to_space()->is_empty(),
         "Attempt to scavenge with live objects in to_space");

  heap->increment_total_collections();

  if (AdaptiveSizePolicy::should_update_eden_stats(gc_cause)) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  assert(!NeverTenure || _tenuring_threshold == markWord::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  // Fill in TLABs
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("Before GC");
  }

  {
    ResourceMark rm;

    GCTraceCPUTime tcpu(&_gc_tracer);
    GCTraceTime(Info, gc) tm("Pause Young", nullptr, gc_cause, true);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(heap->young_gc_manager(), gc_cause, "end of minor GC");

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->start();
    }

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif

    reference_processor()->start_discovery(clear_soft_refs);

    const PreGenGCValues pre_gc_values = heap->get_pre_gc_values();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    const uint active_workers =
      WorkerPolicy::calc_active_workers(ParallelScavengeHeap::heap()->workers().max_workers(),
                                        ParallelScavengeHeap::heap()->workers().active_workers(),
                                        Threads::number_of_non_daemon_threads());
    ParallelScavengeHeap::heap()->workers().set_active_workers(active_workers);

    PSPromotionManager::pre_scavenge();

    {
      GCTraceTime(Debug, gc, phases) tm("Scavenge", &_gc_timer);

      ScavengeRootsTask task(old_gen, active_workers);
      ParallelScavengeHeap::heap()->workers().run_task(&task);
    }

    // Process reference objects discovered during scavenge
    {
      GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);

      reference_processor()->set_active_mt_degree(active_workers);
      ReferenceProcessorStats stats;
      ReferenceProcessorPhaseTimes pt(&_gc_timer, reference_processor()->max_num_queues());

      ParallelScavengeRefProcProxyTask task(reference_processor()->max_num_queues());
      stats = reference_processor()->process_discovered_references(task, pt);

      _gc_tracer.report_gc_reference_stats(stats);
      pt.print_all_references();
    }

    {
      GCTraceTime(Debug, gc, phases) tm("Weak Processing", &_gc_timer);
      PSAdjustWeakRootsClosure root_closure;
      WeakProcessor::weak_oops_do(&ParallelScavengeHeap::heap()->workers(), &_is_alive_closure, &root_closure, 1);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      log_info(gc, promotion)("Promotion failed");
    }

    _gc_tracer.report_tenuring_threshold(tenuring_threshold());

    // Let the size policy know we're done. Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
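      // Eden and from-space now hold only dead objects, so clear both and make
      // the populated to-space the new from-space.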
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - pre_gc_values.old_gen_used();
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should restart the GC time limit count which is
      // for full GC's.
      size_policy->reset_gc_overhead_limit_count();
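
      // With adaptive sizing, recompute the survivor size and tenuring
      // threshold from this collection's survival and promotion volumes,
      // then resize the young generation accordingly.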
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        log_debug(gc, ergo)("AdaptiveSizeStart: collection: %d ", heap->total_collections());
        log_trace(gc, ergo)("old_gen_capacity: %zu young_gen_capacity: %zu",
                            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t max_young_size = young_gen->max_gen_size();

        // Deciding a free ratio in the young generation is tricky, so if
        // MinHeapFreeRatio or MaxHeapFreeRatio are in use (implicating
        // that the old generation size may have been limited because of them) we
        // should then limit our young generation size using NewRatio to have it
        // follow the old generation size.
        if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) {
          max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio,
                                young_gen->max_gen_size());
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(max_young_size);
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(_survivor_overflow,
                                                                 _tenuring_threshold,
                                                                 survivor_limit);

        log_debug(gc, age)("Desired survivor size %zu bytes, new threshold %u (max threshold %u)",
                           size_policy->calculated_survivor_size_in_bytes(),
                           _tenuring_threshold, MaxTenuringThreshold);

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level. Let the size_policy check that internally.
        if (UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            AdaptiveSizePolicy::should_update_eden_stats(gc_cause)) {
          // Calculate optimal free space amounts
          assert(young_gen->max_gen_size() >
                 young_gen->from_space()->capacity_in_bytes() +
                 young_gen->to_space()->capacity_in_bytes(),
                 "Sizes of space in young gen are out-of-bounds");

          size_t young_live = young_gen->used_in_bytes();
          size_t eden_live = young_gen->eden_space()->used_in_bytes();
          size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
          size_t max_old_gen_size = old_gen->max_gen_size();
          size_t max_eden_size = max_young_size -
                                 young_gen->from_space()->capacity_in_bytes() -
                                 young_gen->to_space()->capacity_in_bytes();

          // Used for diagnostics
          size_policy->clear_generation_free_space_flags();

          size_policy->compute_eden_space_size(young_live,
                                               eden_live,
                                               cur_eden,
                                               max_eden_size,
                                               false /* not full gc*/);

          size_policy->check_gc_overhead_limit(eden_live,
                                               max_old_gen_size,
                                               max_eden_size,
                                               false /* not full gc*/,
                                               gc_cause,
                                               heap->soft_ref_policy());

          size_policy->decay_supplemental_growth(false /* not full gc*/);
        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated. This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at young collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a full collection. Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        log_debug(gc, ergo)("AdaptiveSizeStop: collection: %d ", heap->total_collections());
      }

      // Update the structure of the eden. With NUMA-eden, CPU hotplugging or
      // offlining can change the heap layout; make sure eden is reshaped if
      // that's the case. Also, update() will cause adaptive NUMA chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif

    if (log_is_enabled(Debug, gc, heap, exit)) {
      accumulated_time()->stop();
    }

    heap->print_heap_change(pre_gc_values);

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    Universe::verify("After GC");
  }

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);

  AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());

  _gc_timer.register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return !promotion_failure_occurred;
}
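
// Called when at least one promotion failed during the scavenge: restore the
// object marks that were preserved while the failure was being handled.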
void PSScavenge::clean_up_failed_promotion() {
  PSPromotionManager::restore_preserved_marks();

  // Reset the PromotionFailureALot counters.
  NOT_PRODUCT(ParallelScavengeHeap::heap()->reset_promotion_should_fail();)
}

bool PSScavenge::should_attempt_scavenge() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  if (!young_gen->to_space()->is_empty()) {
    // To-space is not empty; should run full-gc instead.
    return false;
  }

  // Test to see if the scavenge will likely fail.
  PSAdaptiveSizePolicy* policy = heap->size_policy();

  size_t avg_promoted = (size_t) policy->padded_average_promoted_in_bytes();
  size_t promotion_estimate = MIN2(avg_promoted, young_gen->used_in_bytes());
  // Total free size after possible old gen expansion
  size_t free_in_old_gen = old_gen->max_gen_size() - old_gen->used_in_bytes();
  bool result = promotion_estimate < free_in_old_gen;

  log_trace(ergo)("%s scavenge: average_promoted %zu padded_average_promoted %zu free in old gen %zu",
                  result ? "Do" : "Skip", (size_t) policy->average_promoted_in_bytes(),
                  (size_t) policy->padded_average_promoted_in_bytes(),
                  free_in_old_gen);

  return result;
}

// Adaptive size policy support.
void PSScavenge::set_young_generation_boundary(HeapWord* v) {
  _young_generation_boundary = v;
  if (UseCompressedOops) {
    _young_generation_boundary_compressed = (uintptr_t)CompressedOops::encode(cast_to_oop(v));
  }
}

void PSScavenge::initialize() {
  // Arguments must have been parsed

  if (AlwaysTenure || NeverTenure) {
    assert(MaxTenuringThreshold == 0 || MaxTenuringThreshold == markWord::max_age + 1,
           "MaxTenuringThreshold should be 0 or markWord::max_age + 1, but is %d", (int) MaxTenuringThreshold);
    _tenuring_threshold = MaxTenuringThreshold;
  } else {
    // We want to smooth out our startup times for the AdaptiveSizePolicy
    _tenuring_threshold = (UseAdaptiveSizePolicy) ? InitialTenuringThreshold :
                                                    MaxTenuringThreshold;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Set boundary between young_gen and old_gen
  assert(old_gen->reserved().end() <= young_gen->eden_space()->bottom(),
         "old above young");
  set_young_generation_boundary(young_gen->eden_space()->bottom());

  // Initialize ref handling object for scavenging.
  _span_based_discoverer.set_span(young_gen->reserved());
  _ref_processor =
    new ReferenceProcessor(&_span_based_discoverer,
                           ParallelGCThreads,   // mt processing degree
                           ParallelGCThreads,   // mt discovery degree
                           false,               // concurrent_discovery
                           &_is_alive_closure); // header provides liveness info

  // Cache the cardtable
  _card_table = heap->card_table();

  _counters = new CollectorCounters("Parallel young collection pauses", 0);
}