< prev index next >

src/hotspot/share/gc/serial/defNewGeneration.cpp

Print this page

671   _gc_timer->register_gc_end();
672 
673   _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
674 
675   return !_promotion_failed;
676 }
677 
678 void DefNewGeneration::init_assuming_no_promotion_failure() {
679   _promotion_failed = false;
680   _promotion_failed_info.reset();
681 }
682 
// Undo the forwarding state left in young-gen objects by a scavenge that hit
// a promotion failure, so the imminent Full GC starts with clean mark words.
void DefNewGeneration::remove_forwarding_pointers() {
  assert(_promotion_failed, "precondition");

  // Will enter Full GC soon due to failed promotion. Must reset the mark word
  // of objs in young-gen so that no objs are marked (forwarded) when Full GC
  // starts. (The mark word is overloaded: `is_marked()` == `is_forwarded()`.)
  struct ResetForwardedMarkWord : ObjectClosure {
    void do_object(oop obj) override {
      if (obj->is_self_forwarded()) {
        // Promotion of this object failed; it was forwarded to itself.
        // Clear just the self-forwarded state.
        obj->unset_self_forwarded();
      } else if (obj->is_forwarded()) {
        // Object was successfully copied; its header currently holds the
        // forwarding pointer. Rebuild a fresh header from the copy.
        // To restore the klass-bits in the header.
        // Needed for object iteration to work properly.
        obj->set_mark(obj->forwardee()->prototype_mark());
      }
      // else: object was never touched by the scavenge — leave it alone.
    }
  } cl;
  // Only eden and from-space are walked here — NOTE(review): presumably all
  // objects needing repair live in these two spaces at this point; confirm.
  eden()->object_iterate(&cl);
  from()->object_iterate(&cl);
}
703 
704 void DefNewGeneration::handle_promotion_failure(oop old) {
705   log_debug(gc, promotion)("Promotion failure size = %zu) ", old->size());
706 
707   _promotion_failed = true;
708   _promotion_failed_info.register_copy_failure(old->size());
709 
710   ContinuationGCSupport::transform_stack_chunk(old);
711 
712   // forward to self
713   old->forward_to_self();
714 
715   _promo_failure_scan_stack.push(old);
716 
717   if (!_promo_failure_drain_in_progress) {
718     // prevent recursion in copy_to_survivor_space()
719     _promo_failure_drain_in_progress = true;
720     drain_promo_failure_scan_stack();
721     _promo_failure_drain_in_progress = false;
722   }
723 }
724 
// Evacuate `old` out of from-space: into to-space if it is young enough,
// otherwise into the old generation. On success, returns the new copy and
// leaves a forwarding pointer in `old`'s header; on allocation failure,
// returns `old` itself (self-forwarded via handle_promotion_failure()).
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();


  oop obj = nullptr;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = cast_to_oop(to()->allocate(s));
  }

  bool new_obj_is_tenured = false;
  // Otherwise try allocating obj tenured
  if (obj == nullptr) {
    obj = _old_gen->allocate_for_promotion(old, s);
    if (obj == nullptr) {
      // Neither space can hold the object: record the failure and return the
      // (self-forwarded) original so callers still hold a valid reference.
      handle_promotion_failure(old);
      return old;
    }

    new_obj_is_tenured = true;
  }

  // Prefetch beyond obj
  const intx interval = PrefetchCopyIntervalInBytes;
  Prefetch::write(obj, interval);

  // Copy obj
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), cast_from_oop<HeapWord*>(obj), s);

  ContinuationGCSupport::transform_stack_chunk(obj);

  if (!new_obj_is_tenured) {
    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }



  // Done, insert forward pointer to obj in this header
  // (must happen after the copy above so the copy sees the original mark).
  old->forward_to(obj);

  if (SerialStringDedup::is_candidate_from_evacuation(obj, new_obj_is_tenured)) {
    // Record old; request adds a new weak reference, which reference
    // processing expects to refer to a from-space object.
    _string_dedup_requests.add(old);
  }
  return obj;
}
773 
774 void DefNewGeneration::drain_promo_failure_scan_stack() {
775   PromoteFailureClosure cl{this};
776   while (!_promo_failure_scan_stack.is_empty()) {
777      oop obj = _promo_failure_scan_stack.pop();
778      obj->oop_iterate(&cl);
779   }
780 }
781 
782 void DefNewGeneration::contribute_scratch(void*& scratch, size_t& num_words) {

671   _gc_timer->register_gc_end();
672 
673   _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
674 
675   return !_promotion_failed;
676 }
677 
678 void DefNewGeneration::init_assuming_no_promotion_failure() {
679   _promotion_failed = false;
680   _promotion_failed_info.reset();
681 }
682 
683 void DefNewGeneration::remove_forwarding_pointers() {
684   assert(_promotion_failed, "precondition");
685 
686   // Will enter Full GC soon due to failed promotion. Must reset the mark word
687   // of objs in young-gen so that no objs are marked (forwarded) when Full GC
688   // starts. (The mark word is overloaded: `is_marked()` == `is_forwarded()`.)
689   struct ResetForwardedMarkWord : ObjectClosure {
690     void do_object(oop obj) override {
691       obj->reset_forwarded();






692     }
693   } cl;
694   eden()->object_iterate(&cl);
695   from()->object_iterate(&cl);
696 }
697 
698 void DefNewGeneration::handle_promotion_failure(oop old) {
699   log_debug(gc, promotion)("Promotion failure size = %zu) ", old->size());
700 
701   _promotion_failed = true;
702   _promotion_failed_info.register_copy_failure(old->size());
703 
704   ContinuationGCSupport::transform_stack_chunk(old);
705 
706   // forward to self
707   old->forward_to_self();
708 
709   _promo_failure_scan_stack.push(old);
710 
711   if (!_promo_failure_drain_in_progress) {
712     // prevent recursion in copy_to_survivor_space()
713     _promo_failure_drain_in_progress = true;
714     drain_promo_failure_scan_stack();
715     _promo_failure_drain_in_progress = false;
716   }
717 }
718 
// Evacuate `old` out of from-space: into to-space if it is young enough,
// otherwise into the old generation. On success, returns the new copy and
// leaves a forwarding pointer in `old`'s header; on allocation failure,
// returns `old` itself (self-forwarded via handle_promotion_failure()).
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t old_size = old->size();
  // The copy may need more words than the original — NOTE(review): presumably
  // to append an identity-hash field under compact object headers; confirm
  // against the copy_size() implementation.
  size_t s = old->copy_size(old_size, old->mark());

  oop obj = nullptr;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = cast_to_oop(to()->allocate(s));
  }

  bool new_obj_is_tenured = false;
  // Otherwise try allocating obj tenured
  if (obj == nullptr) {
    obj = _old_gen->allocate_for_promotion(old, s);
    if (obj == nullptr) {
      // Neither space can hold the object: record the failure and return the
      // (self-forwarded) original so callers still hold a valid reference.
      handle_promotion_failure(old);
      return old;
    }

    new_obj_is_tenured = true;
  }

  // Prefetch beyond obj
  const intx interval = PrefetchCopyIntervalInBytes;
  Prefetch::write(obj, interval);

  // Copy obj — only the original old_size words; the copy was allocated with
  // s words, and any extra tail is filled in by initialize_hash_if_necessary()
  // below.
  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), cast_from_oop<HeapWord*>(obj), old_size);

  ContinuationGCSupport::transform_stack_chunk(obj);

  if (!new_obj_is_tenured) {
    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  obj->initialize_hash_if_necessary(old);

  // Done, insert forward pointer to obj in this header
  // (must happen after the copy above so the copy sees the original mark).
  old->forward_to(obj);

  if (SerialStringDedup::is_candidate_from_evacuation(obj, new_obj_is_tenured)) {
    // Record old; request adds a new weak reference, which reference
    // processing expects to refer to a from-space object.
    _string_dedup_requests.add(old);
  }
  return obj;
}
771 
772 void DefNewGeneration::drain_promo_failure_scan_stack() {
773   PromoteFailureClosure cl{this};
774   while (!_promo_failure_scan_stack.is_empty()) {
775      oop obj = _promo_failure_scan_stack.pop();
776      obj->oop_iterate(&cl);
777   }
778 }
779 
780 void DefNewGeneration::contribute_scratch(void*& scratch, size_t& num_words) {
< prev index next >