src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp

*** 27,10 ***
--- 27,11 ---
  #include "gc/parallel/parallelInitLogger.hpp"
  #include "gc/parallel/parallelScavengeHeap.inline.hpp"
  #include "gc/parallel/psAdaptiveSizePolicy.hpp"
  #include "gc/parallel/psMemoryPool.hpp"
  #include "gc/parallel/psParallelCompact.inline.hpp"
+ #include "gc/parallel/psParallelCompactNew.inline.hpp"
  #include "gc/parallel/psPromotionManager.hpp"
  #include "gc/parallel/psScavenge.hpp"
  #include "gc/parallel/psVMOperations.hpp"
  #include "gc/shared/fullGCForwarding.inline.hpp"
  #include "gc/shared/gcHeapSummary.hpp"

*** 118,12 ***
           "Boundaries must meet");
    // initialize the policy counters - 2 collectors, 2 generations
    _gc_policy_counters =
      new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);
  
!   if (!PSParallelCompact::initialize_aux_data()) {
!     return JNI_ENOMEM;
    }
  
    // Create CPU time counter
    CPUTimeCounters::create_counter(CPUTimeGroups::CPUTimeType::gc_parallel_workers);
  
--- 119,18 ---
           "Boundaries must meet");
    // initialize the policy counters - 2 collectors, 2 generations
    _gc_policy_counters =
      new PSGCAdaptivePolicyCounters("ParScav:MSC", 2, 2, _size_policy);
  
!   if (UseCompactObjectHeaders) {
!     if (!PSParallelCompactNew::initialize_aux_data()) {
+       return JNI_ENOMEM;
+     }
+   } else {
+     if (!PSParallelCompact::initialize_aux_data()) {
+       return JNI_ENOMEM;
+     }
    }
  
    // Create CPU time counter
    CPUTimeCounters::create_counter(CPUTimeGroups::CPUTimeType::gc_parallel_workers);
  

*** 182,11 ***
  
  void ParallelScavengeHeap::post_initialize() {
    CollectedHeap::post_initialize();
    // Need to init the tenuring threshold
    PSScavenge::initialize();
!   PSParallelCompact::post_initialize();
    PSPromotionManager::initialize();
  
    ScavengableNMethods::initialize(&_is_scavengable);
    GCLocker::initialize();
  }
--- 189,15 ---
  
  void ParallelScavengeHeap::post_initialize() {
    CollectedHeap::post_initialize();
    // Need to init the tenuring threshold
    PSScavenge::initialize();
!   if (UseCompactObjectHeaders) {
+     PSParallelCompactNew::post_initialize();
+   } else {
+     PSParallelCompact::post_initialize();
+   }
    PSPromotionManager::initialize();
  
    ScavengableNMethods::initialize(&_is_scavengable);
    GCLocker::initialize();
  }

*** 389,11 ***
  
    return nullptr;
  }
  
  void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
!   PSParallelCompact::invoke(clear_all_soft_refs);
  }
  
  HeapWord* ParallelScavengeHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
    HeapWord* result = nullptr;
  
--- 400,15 ---
  
    return nullptr;
  }
  
  void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
!   if (UseCompactObjectHeaders) {
+     PSParallelCompactNew::invoke(clear_all_soft_refs, false /* serial */);
+   } else {
+     PSParallelCompact::invoke(clear_all_soft_refs);
+   }
  }
  
  HeapWord* ParallelScavengeHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
    HeapWord* result = nullptr;
  

*** 427,21 ***
      // Make sure the heap is fully compacted
      uintx old_interval = HeapMaximumCompactionInterval;
      HeapMaximumCompactionInterval = 0;
  
      const bool clear_all_soft_refs = true;
!     PSParallelCompact::invoke(clear_all_soft_refs);
  
      // Restore
      HeapMaximumCompactionInterval = old_interval;
    }
  
    result = expand_heap_and_allocate(size, is_tlab);
    if (result != nullptr) {
      return result;
    }
  
    // What else?  We might try synchronous finalization later.  If the total
    // space available is large enough for the allocation, then a more
    // complete compaction phase than we've tried so far might be
    // appropriate.
    return nullptr;
--- 442,34 ---
      // Make sure the heap is fully compacted
      uintx old_interval = HeapMaximumCompactionInterval;
      HeapMaximumCompactionInterval = 0;
  
      const bool clear_all_soft_refs = true;
!     if (UseCompactObjectHeaders) {
+       PSParallelCompactNew::invoke(clear_all_soft_refs, false /* serial */);
+     } else {
+       PSParallelCompact::invoke(clear_all_soft_refs);
+     }
  
      // Restore
      HeapMaximumCompactionInterval = old_interval;
    }
  
    result = expand_heap_and_allocate(size, is_tlab);
    if (result != nullptr) {
      return result;
    }
  
+   if (UseCompactObjectHeaders) {
+     PSParallelCompactNew::invoke(true /* clear_soft_refs */, true /* serial */);
+   }
+ 
+   result = expand_heap_and_allocate(size, is_tlab);
+   if (result != nullptr) {
+     return result;
+   }
+ 
    // What else?  We might try synchronous finalization later.  If the total
    // space available is large enough for the allocation, then a more
    // complete compaction phase than we've tried so far might be
    // appropriate.
    return nullptr;

*** 533,11 ***
      if (success) {
        return;
      }
      // Upgrade to Full-GC if young-gc fails
    }
!   PSParallelCompact::invoke(clear_soft_refs);
  }
  
  void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
    young_gen()->object_iterate(cl);
    old_gen()->object_iterate(cl);
--- 561,15 ---
      if (success) {
        return;
      }
      // Upgrade to Full-GC if young-gc fails
    }
!   if (UseCompactObjectHeaders) {
+     PSParallelCompactNew::invoke(clear_soft_refs, false /* serial */);
+   } else {
+     PSParallelCompact::invoke(clear_soft_refs);
+   }
  }
  
  void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
    young_gen()->object_iterate(cl);
    old_gen()->object_iterate(cl);

*** 674,21 ***
  
  void ParallelScavengeHeap::print_on_error(outputStream* st) const {
    this->CollectedHeap::print_on_error(st);
  
    st->cr();
!   PSParallelCompact::print_on_error(st);
  }
  
  void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
    ParallelScavengeHeap::heap()->workers().threads_do(tc);
  }
  
  void ParallelScavengeHeap::print_tracing_info() const {
    AdaptiveSizePolicyOutput::print();
    log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
!   log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds());
  }
  
  PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
    const PSYoungGen* const young = young_gen();
    const MutableSpace* const eden = young->eden_space();
--- 706,29 ---
  
  void ParallelScavengeHeap::print_on_error(outputStream* st) const {
    this->CollectedHeap::print_on_error(st);
  
    st->cr();
!   if (UseCompactObjectHeaders) {
+     PSParallelCompactNew::print_on_error(st);
+   } else {
+     PSParallelCompact::print_on_error(st);
+   }
  }
  
  void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
    ParallelScavengeHeap::heap()->workers().threads_do(tc);
  }
  
  void ParallelScavengeHeap::print_tracing_info() const {
    AdaptiveSizePolicyOutput::print();
    log_debug(gc, heap, exit)("Accumulated young generation GC time %3.7f secs", PSScavenge::accumulated_time()->seconds());
!   if (UseCompactObjectHeaders) {
+     log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompactNew::accumulated_time()->seconds());
+   } else {
+     log_debug(gc, heap, exit)("Accumulated old generation GC time %3.7f secs", PSParallelCompact::accumulated_time()->seconds());
+   }
  }
  
  PreGenGCValues ParallelScavengeHeap::get_pre_gc_values() const {
    const PSYoungGen* const young = young_gen();
    const MutableSpace* const eden = young->eden_space();