src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp

1345     // This thread went through the OOM during evac protocol. It is safe to return
1346     // the forward pointer. It must not attempt to evacuate any other objects.
1347     return ShenandoahBarrierSet::resolve_forwarded(p);
1348   }
1349 
1350   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
1351 
1352   ShenandoahHeapRegion* r = heap_region_containing(p);
1353   assert(!r->is_humongous(), "never evacuate humongous objects");
1354 
1355   ShenandoahAffiliation target_gen = r->affiliation();
1356   return try_evacuate_object(p, thread, r, target_gen);
1357 }
1358 
1359 oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
1360                                                ShenandoahAffiliation target_gen) {
1361   assert(target_gen == YOUNG_GENERATION, "Only expect evacuations to young in this mode");
1362   assert(from_region->is_young(), "Only expect evacuations from young in this mode");
1363   bool alloc_from_lab = true;
1364   HeapWord* copy = nullptr;
1365   size_t size = ShenandoahForwarding::size(p);
1366 
1367 #ifdef ASSERT
1368   if (ShenandoahOOMDuringEvacALot &&
1369       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
1370     copy = nullptr;
1371   } else {
1372 #endif
1373     if (UseTLAB) {
1374       copy = allocate_from_gclab(thread, size);
1375     }
1376     if (copy == nullptr) {
1377       // If we failed to allocate in LAB, we'll try a shared allocation.
1378       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
1379       copy = allocate_memory(req);
1380       alloc_from_lab = false;
1381     }
1382 #ifdef ASSERT
1383   }
1384 #endif
1385 
1386   if (copy == nullptr) {
1387     control_thread()->handle_alloc_failure_evac(size);
1388 
1389     _oom_evac_handler.handle_out_of_memory_during_evacuation();
1390 
1391     return ShenandoahBarrierSet::resolve_forwarded(p);
1392   }
1393 
1394   // Copy the object:
1395   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, size);
1396 
1397   // Try to install the new forwarding pointer.
1398   oop copy_val = cast_to_oop(copy);
1399   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
1400   if (result == copy_val) {
1401     // Successfully evacuated. Our copy is now the public one!
1402     ContinuationGCSupport::relativize_stack_chunk(copy_val);
1403     shenandoah_assert_correct(nullptr, copy_val);
1404     return copy_val;
1405   } else {
1406     // Failed to evacuate. We need to deal with the object that is left behind. Since this
1407     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
1408     // But if it happens to contain references to evacuated regions, those references would
1409     // not get updated for this stale copy during this cycle, and we will crash while scanning
1410     // it the next cycle.
1411     if (alloc_from_lab) {
1412       // For LAB allocations, it is enough to roll back the allocation ptr. Either the next
1413       // object will overwrite this stale copy, or the filler object on LAB retirement will
1414       // do this.
1415       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
1416     } else {
1417       // For non-LAB allocations, we have no way to retract the allocation, and
1418       // have to explicitly overwrite the copy with the filler object. With that overwrite,
1419       // we have to keep the fwdptr initialized and pointing to our (stale) copy.
1420       assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
1421       fill_with_object(copy, size);

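The slow path above first tries the thread's GCLAB, then falls back to a shared GC allocation, copies the object, and only then publishes the copy by installing the forwarding pointer with a CAS; a thread that loses that race has to retract its now-stale copy (roll back the LAB bump, or overwrite a shared allocation with a filler object). Below is a minimal, self-contained analogue of that copy-then-CAS protocol using std::atomic. All names (SketchObject, evacuate) are invented for illustration; this is not HotSpot code, and LAB undo / filler handling is left to the caller.

#include <atomic>
#include <cstdio>

// Toy object: an atomic forwarding slot plus one payload word. In HotSpot the
// forwarding pointer lives in the mark word; this separate field is a stand-in.
struct SketchObject {
  std::atomic<SketchObject*> forwardee{nullptr};
  long payload = 0;
};

// Evacuate 'obj' into 'copy_storage' (pretend it came from a GCLAB or a shared
// allocation). Exactly one thread wins the CAS; every other caller gets the
// winner's copy back, mirroring try_update_forwardee() above.
SketchObject* evacuate(SketchObject* obj, SketchObject* copy_storage) {
  SketchObject* prior = obj->forwardee.load(std::memory_order_acquire);
  if (prior != nullptr) {
    return prior;                        // already evacuated by another thread
  }
  copy_storage->payload = obj->payload;  // copy the object first ...
  SketchObject* expected = nullptr;      // ... then try to publish the copy
  if (obj->forwardee.compare_exchange_strong(expected, copy_storage,
                                             std::memory_order_release,
                                             std::memory_order_acquire)) {
    return copy_storage;                 // success: our copy is the public one
  }
  // Lost the race: 'expected' now holds the winning copy. The caller must
  // retract or overwrite 'copy_storage' (undo_allocation / fill_with_object).
  return expected;
}

int main() {
  SketchObject from;
  from.payload = 42;
  SketchObject to_a, to_b;
  SketchObject* first  = evacuate(&from, &to_a);
  SketchObject* second = evacuate(&from, &to_b);  // sees the installed forwardee
  std::printf("same copy: %d, payload: %ld\n", first == second, first->payload);
  return 0;
}

The copy is fully written before the compare_exchange publishes it with release ordering, so any thread that observes a non-null forwardee also observes a complete copy; this mirrors why the real code finishes Copy::aligned_disjoint_words() before calling ShenandoahForwarding::try_update_forwardee(). The revised version of the same slow path, which adds an early mark-word check and splits the original object size from the copy size, follows.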
1345     // This thread went through the OOM during evac protocol. It is safe to return
1346     // the forward pointer. It must not attempt to evacuate any other objects.
1347     return ShenandoahBarrierSet::resolve_forwarded(p);
1348   }
1349 
1350   assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
1351 
1352   ShenandoahHeapRegion* r = heap_region_containing(p);
1353   assert(!r->is_humongous(), "never evacuate humongous objects");
1354 
1355   ShenandoahAffiliation target_gen = r->affiliation();
1356   return try_evacuate_object(p, thread, r, target_gen);
1357 }
1358 
1359 oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapRegion* from_region,
1360                                                ShenandoahAffiliation target_gen) {
1361   assert(target_gen == YOUNG_GENERATION, "Only expect evacuations to young in this mode");
1362   assert(from_region->is_young(), "Only expect evacuations from young in this mode");
1363   bool alloc_from_lab = true;
1364   HeapWord* copy = nullptr;
1365 
1366   markWord mark = p->mark();
1367   if (ShenandoahForwarding::is_forwarded(mark)) {
1368     return ShenandoahForwarding::get_forwardee(p);
1369   }
1370   size_t old_size = ShenandoahForwarding::size(p);
1371   size_t size = p->copy_size(old_size, mark);
1372 
1373 #ifdef ASSERT
1374   if (ShenandoahOOMDuringEvacALot &&
1375       (os::random() & 1) == 0) { // Simulate OOM every ~2nd slow-path call
1376     copy = nullptr;
1377   } else {
1378 #endif
1379     if (UseTLAB) {
1380       copy = allocate_from_gclab(thread, size);
1381     }
1382     if (copy == nullptr) {
1383       // If we failed to allocate in LAB, we'll try a shared allocation.
1384       ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared_gc(size, target_gen);
1385       copy = allocate_memory(req);
1386       alloc_from_lab = false;
1387     }
1388 #ifdef ASSERT
1389   }
1390 #endif
1391 
1392   if (copy == nullptr) {
1393     control_thread()->handle_alloc_failure_evac(size);
1394 
1395     _oom_evac_handler.handle_out_of_memory_during_evacuation();
1396 
1397     return ShenandoahBarrierSet::resolve_forwarded(p);
1398   }
1399 
1400   // Copy the object:
1401   Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(p), copy, old_size);
1402 
1403   // Try to install the new forwarding pointer.
1404   oop copy_val = cast_to_oop(copy);
1405   oop result = ShenandoahForwarding::try_update_forwardee(p, copy_val);
1406   if (result == copy_val) {
1407     // Successfully evacuated. Our copy is now the public one!
1408     copy_val->initialize_hash_if_necessary(p);
1409     ContinuationGCSupport::relativize_stack_chunk(copy_val);
1410     shenandoah_assert_correct(nullptr, copy_val);
1411     return copy_val;
1412   } else {
1413     // Failed to evacuate. We need to deal with the object that is left behind. Since this
1414     // new allocation is certainly after TAMS, it will be considered live in the next cycle.
1415     // But if it happens to contain references to evacuated regions, those references would
1416     // not get updated for this stale copy during this cycle, and we will crash while scanning
1417     // it the next cycle.
1418     if (alloc_from_lab) {
1419       // For LAB allocations, it is enough to roll back the allocation ptr. Either the next
1420       // object will overwrite this stale copy, or the filler object on LAB retirement will
1421       // do this.
1422       ShenandoahThreadLocalData::gclab(thread)->undo_allocation(copy, size);
1423     } else {
1424       // For non-LAB allocations, we have no way to retract the allocation, and
1425       // have to explicitly overwrite the copy with the filler object. With that overwrite,
1426       // we have to keep the fwdptr initialized and pointing to our (stale) copy.
1427       assert(size >= ShenandoahHeap::min_fill_size(), "previously allocated object known to be larger than min_size");
1428       fill_with_object(copy, size);
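
Compared with the earlier version, the updated slow path above first reads the mark word and returns the existing forwardee if the object is already forwarded, and it separates the words the original occupies (old_size, which is what Copy::aligned_disjoint_words() moves) from the words the copy needs (size = p->copy_size(old_size, mark), which is what gets allocated and, on failure, undone or filled). The extra per-copy state is materialized only on the success path, via copy_val->initialize_hash_if_necessary(p), once the CAS has made the copy public. The sketch below illustrates only the allocate-larger / copy-smaller mechanics; the names (EvacSizes, evacuate_copy) and the zero-initialized tail are invented for illustration and do not reflect the actual object layout.

#include <cstddef>
#include <cstdlib>
#include <cstring>

// Hypothetical illustration of the old_size/size split above: the copy may
// need more words than the original occupies (for example, to append state
// the original kept implicitly), so allocation uses the copy size while the
// block copy moves only the original's words, and the appended tail is
// initialized separately.
struct EvacSizes {
  size_t old_words;   // words occupied by the original object
  size_t copy_words;  // words the copy needs, copy_words >= old_words
};

long* evacuate_copy(const long* original, EvacSizes sz) {
  long* copy = static_cast<long*>(std::malloc(sz.copy_words * sizeof(long)));
  if (copy == nullptr) {
    return nullptr;                                           // caller handles evac OOM
  }
  std::memcpy(copy, original, sz.old_words * sizeof(long));   // only old_size words
  for (size_t i = sz.old_words; i < sz.copy_words; i++) {
    copy[i] = 0;                                              // initialize the appended tail
  }
  return copy;
}

Note that on the failure path the full allocation of size words is what gets undone or filled (undo_allocation(copy, size), fill_with_object(copy, size)), not just the old_size words that were actually copied.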