src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp

Old version:

template<bool promote_immediately>
inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o,
                                                               markWord test_mark) {
  assert(should_scavenge(&o), "Sanity");

  oop new_obj = nullptr;
  bool new_obj_is_tenured = false;

  // NOTE: With compact headers, it is not safe to load the Klass* from the old
  // object, because that would access the mark word, which concurrent workers
  // might change at any time. That mark word could refer to a forwardee that
  // has not yet finished copying. Therefore we must load the Klass* from the
  // mark word that we already loaded. This is safe, because we only enter here
  // if the object is not yet forwarded.
  assert(!test_mark.is_forwarded(), "precondition");
  Klass* klass = UseCompactObjectHeaders
      ? test_mark.klass()
      : o->klass();

  size_t new_obj_size = o->size_given_klass(klass);

  // Find the object's age, MT safe.
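  // If the header is displaced (e.g. the object is locked), the age bits live
  // in the displaced mark word, not in the installed one.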
  uint age = (test_mark.has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
      test_mark.displaced_mark_helper().age() : test_mark.age();

  if (!promote_immediately) {
    // Try allocating obj in to-space (unless too old)
    if (age < PSScavenge::tenuring_threshold()) {
      new_obj = cast_to_oop(_young_lab.allocate(new_obj_size));
      if (new_obj == nullptr && !_young_gen_is_full) {
        // Do we allocate directly, or flush and refill?
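        // Objects larger than half a PLAB are allocated directly from the
        // space, so that a freshly refilled PLAB is not mostly consumed by a
        // single large object.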
        if (new_obj_size > (YoungPLABSize / 2)) {
          // Allocate this object directly
          new_obj = cast_to_oop(young_space()->cas_allocate(new_obj_size));
          promotion_trace_event(new_obj, klass, new_obj_size, age, false, nullptr);
        } else {
          // Flush and fill
          _young_lab.flush();

          HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);

  // ... (intervening code omitted in this excerpt) ...

  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(o), cast_from_oop<HeapWord*>(new_obj), new_obj_size);

  // Now we have to CAS in the header.
  // Because the forwarding is done with memory_order_relaxed there is no
  // ordering with the above copy.  Clients that get the forwardee must not
  // examine its contents without other synchronization, since the contents
  // may not be up to date for them.
  oop forwardee = o->forward_to_atomic(new_obj, test_mark, memory_order_relaxed);
  if (forwardee == nullptr) {  // forwardee is null when forwarding is successful
    // We won any races, we "own" this object.
    assert(new_obj == o->forwardee(), "Sanity");

    // Increment age if obj still in new generation. Now that
    // we're dealing with a markWord that cannot change, it is
    // okay to use the non mt safe oop methods.
    if (!new_obj_is_tenured) {
      new_obj->incr_age();
      assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
    }

    ContinuationGCSupport::transform_stack_chunk(new_obj);

    // Do the size comparison first with new_obj_size, which we
    // already have. Hopefully, only a few objects are larger than
    // _min_array_size_for_chunking, and most of them will be arrays.
    // So, the is_objArray() test would be very infrequent.
    if (new_obj_size > _min_array_size_for_chunking &&
        new_obj->is_objArray() &&
        PSChunkLargeArrays) {
      push_objArray(o, new_obj);
    } else {
      // we'll just push its contents
      push_contents(new_obj);

      if (StringDedup::is_enabled() &&
          java_lang_String::is_instance(new_obj) &&
          psStringDedup::is_candidate_from_evacuation(new_obj, new_obj_is_tenured)) {
        _string_dedup_requests.add(o);
      }
    }

  // ... (rest of the function omitted in this excerpt) ...

New version:

template<bool promote_immediately>
inline oop PSPromotionManager::copy_unmarked_to_survivor_space(oop o,
                                                               markWord test_mark) {
  assert(should_scavenge(&o), "Sanity");

  oop new_obj = nullptr;
  bool new_obj_is_tenured = false;

  // NOTE: With compact headers, it is not safe to load the Klass* from the old
  // object, because that would access the mark word, which concurrent workers
  // might change at any time. That mark word could refer to a forwardee that
  // has not yet finished copying. Therefore we must load the Klass* from the
  // mark word that we already loaded. This is safe, because we only enter here
  // if the object is not yet forwarded.
  assert(!test_mark.is_forwarded(), "precondition");
  Klass* klass = UseCompactObjectHeaders
      ? test_mark.klass()
      : o->klass();

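  // With compact headers, the new copy may need to be larger than the old
  // object (see initialize_hash_if_necessary() below), so the old and new
  // sizes are computed separately.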
  size_t old_obj_size = o->size_given_mark_and_klass(test_mark, klass);
  size_t new_obj_size = o->copy_size(old_obj_size, test_mark);

  // Find the object's age, MT safe.
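  // If the header is displaced (e.g. the object is locked), the age bits live
  // in the displaced mark word, not in the installed one.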
  uint age = (test_mark.has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
      test_mark.displaced_mark_helper().age() : test_mark.age();

  if (!promote_immediately) {
    // Try allocating obj in to-space (unless too old)
    if (age < PSScavenge::tenuring_threshold()) {
      new_obj = cast_to_oop(_young_lab.allocate(new_obj_size));
      if (new_obj == nullptr && !_young_gen_is_full) {
        // Do we allocate directly, or flush and refill?
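        // Objects larger than half a PLAB are allocated directly from the
        // space, so that a freshly refilled PLAB is not mostly consumed by a
        // single large object.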
        if (new_obj_size > (YoungPLABSize / 2)) {
          // Allocate this object directly
          new_obj = cast_to_oop(young_space()->cas_allocate(new_obj_size));
          promotion_trace_event(new_obj, klass, new_obj_size, age, false, nullptr);
        } else {
          // Flush and fill
          _young_lab.flush();

          HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);

  // ... (intervening code omitted in this excerpt) ...

  Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(o), cast_from_oop<HeapWord*>(new_obj), new_obj_size);

  // Now we have to CAS in the header.
  // Because the forwarding is done with memory_order_relaxed there is no
  // ordering with the above copy.  Clients that get the forwardee must not
  // examine its contents without other synchronization, since the contents
  // may not be up to date for them.
  oop forwardee = o->forward_to_atomic(new_obj, test_mark, memory_order_relaxed);
  if (forwardee == nullptr) {  // forwardee is null when forwarding is successful
    // We won any races, we "own" this object.
    assert(new_obj == o->forwardee(), "Sanity");

    // Increment age if obj still in new generation. Now that
    // we're dealing with a markWord that cannot change, it is
    // okay to use the non mt safe oop methods.
    if (!new_obj_is_tenured) {
      new_obj->incr_age();
      assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
    }

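    // If the old object's header indicates a computed identity hash, install
    // it in the new copy (this is what copy_size() above may have reserved
    // extra space for).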
    new_obj->initialize_hash_if_necessary(o);

    ContinuationGCSupport::transform_stack_chunk(new_obj);

    // Do the size comparison first with new_obj_size, which we
    // already have. Hopefully, only a few objects are larger than
    // _min_array_size_for_chunking, and most of them will be arrays.
    // So, the is_objArray() test would be very infrequent.
    if (new_obj_size > _min_array_size_for_chunking &&
        new_obj->is_objArray() &&
        PSChunkLargeArrays) {
      push_objArray(o, new_obj);
    } else {
      // we'll just push its contents
      push_contents(new_obj);

      if (StringDedup::is_enabled() &&
          java_lang_String::is_instance(new_obj) &&
          psStringDedup::is_candidate_from_evacuation(new_obj, new_obj_is_tenured)) {
        _string_dedup_requests.add(o);
      }
    }

  // ... (rest of the function omitted in this excerpt) ...
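
A note on the forwarding protocol above, for review context: because
forward_to_atomic() uses memory_order_relaxed, a thread that loses the race
gets the winner's forwardee but has no ordering with the winner's copy, so it
must not examine the forwardee's contents without other synchronization. The
following is a minimal stand-alone sketch of that CAS pattern; the Obj struct,
the FWD_TAG encoding, and the function name are illustrative assumptions, not
HotSpot's actual types.

#include <atomic>
#include <cstdint>

// Illustrative stand-in for an object with an atomic mark word.
struct Obj {
  std::atomic<uintptr_t> mark;
};

// Assumed low-bit tag that marks a header as "forwarded" in this sketch.
constexpr uintptr_t FWD_TAG = 0x3;

// Try to install 'copy' as the forwardee of 'obj', expecting the mark word
// 'witnessed' that we read earlier. Returns nullptr if we won the race (we
// own 'copy'); otherwise returns the copy installed by the winning thread,
// whose contents are not yet guaranteed visible under relaxed ordering.
Obj* forward_to_atomic_sketch(Obj* obj, uintptr_t witnessed, Obj* copy) {
  uintptr_t fwd = reinterpret_cast<uintptr_t>(copy) | FWD_TAG;
  uintptr_t expected = witnessed;
  if (obj->mark.compare_exchange_strong(expected, fwd,
                                        std::memory_order_relaxed)) {
    return nullptr;  // success: 'copy' is now the forwardee
  }
  // Failure: 'expected' was updated to the current mark word, i.e. the
  // winner's forwarding word.
  return reinterpret_cast<Obj*>(expected & ~FWD_TAG);
}

In the full function (not shown in this excerpt), the losing thread gives its
speculative copy back to its allocation buffer and continues with the winner's
forwardee instead.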