
src/hotspot/share/runtime/deoptimization.cpp


  33 #include "code/scopeDesc.hpp"
  34 #include "compiler/compilationPolicy.hpp"
  35 #include "compiler/compilerDefinitions.inline.hpp"
  36 #include "gc/shared/collectedHeap.hpp"
  37 #include "gc/shared/memAllocator.hpp"
  38 #include "interpreter/bytecode.hpp"
  39 #include "interpreter/bytecode.inline.hpp"
  40 #include "interpreter/bytecodeStream.hpp"
  41 #include "interpreter/interpreter.hpp"
  42 #include "interpreter/oopMapCache.hpp"
  43 #include "jvm.h"
  44 #include "logging/log.hpp"
  45 #include "logging/logLevel.hpp"
  46 #include "logging/logMessage.hpp"
  47 #include "logging/logStream.hpp"
  48 #include "memory/allocation.inline.hpp"
  49 #include "memory/oopFactory.hpp"
  50 #include "memory/resourceArea.hpp"
  51 #include "memory/universe.hpp"
  52 #include "oops/constantPool.hpp"


  53 #include "oops/fieldStreams.inline.hpp"
  54 #include "oops/method.hpp"
  55 #include "oops/objArrayKlass.hpp"
  56 #include "oops/objArrayOop.inline.hpp"
  57 #include "oops/oop.inline.hpp"

  58 #include "oops/typeArrayOop.inline.hpp"
  59 #include "oops/verifyOopClosure.hpp"
  60 #include "prims/jvmtiDeferredUpdates.hpp"
  61 #include "prims/jvmtiExport.hpp"
  62 #include "prims/jvmtiThreadState.hpp"
  63 #include "prims/methodHandles.hpp"
  64 #include "prims/vectorSupport.hpp"
  65 #include "runtime/atomic.hpp"
  66 #include "runtime/basicLock.inline.hpp"
  67 #include "runtime/continuation.hpp"
  68 #include "runtime/continuationEntry.inline.hpp"
  69 #include "runtime/deoptimization.hpp"
  70 #include "runtime/escapeBarrier.hpp"
  71 #include "runtime/fieldDescriptor.hpp"
  72 #include "runtime/fieldDescriptor.inline.hpp"
  73 #include "runtime/frame.inline.hpp"
  74 #include "runtime/handles.inline.hpp"
  75 #include "runtime/interfaceSupport.inline.hpp"
  76 #include "runtime/javaThread.hpp"
  77 #include "runtime/jniHandles.inline.hpp"

 333                                   frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
 334                                   bool& deoptimized_objects) {
 335   bool realloc_failures = false;
 336   assert(chunk->at(0)->scope() != nullptr, "expect only compiled java frames");
 337 
 338   JavaThread* deoptee_thread = chunk->at(0)->thread();
 339   assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
 340          "a frame can only be deoptimized by the owner thread");
 341 
 342   GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects_to_rematerialize(deoptee, map);
 343 
 344   // The flag return_oop() indicates call sites which return an oop
 345   // in compiled code. Such sites include Java method calls,
 346   // runtime calls (for example, used to allocate new objects/arrays
 347   // on the slow code path) and any other calls generated in compiled code.
 348   // This information cannot reliably be recovered here just by analyzing
 349   // the bytecode of the deoptimized frames, which is why the flag
 350   // is set during method compilation (see Compile::Process_OopMap_Node()).
 351   // If the previous frame was popped or if we are dispatching an exception,
 352   // we don't have an oop result.
 353   bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
 354   Handle return_value;











 355   if (save_oop_result) {
 356     // Reallocation may trigger GC. If deoptimization happened on return from
 357     // a call which returns an oop, we need to save it since it is not in the oopmap.
 358     oop result = deoptee.saved_oop_result(&map);
 359     assert(oopDesc::is_oop_or_null(result), "must be oop");
 360     return_value = Handle(thread, result);
 361     assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
 362     if (TraceDeoptimization) {
 363       tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
 364       tty->cr();
 365     }
 366   }
 367   if (objects != nullptr) {
 368     if (exec_mode == Deoptimization::Unpack_none) {
 369       assert(thread->thread_state() == _thread_in_vm, "assumption");
 370       JavaThread* THREAD = thread; // For exception macros.
 371       // Clear pending OOM if reallocation fails and return true indicating allocation failure
 372       realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));








 373       deoptimized_objects = true;
 374     } else {
 375       JavaThread* current = thread; // For JRT_BLOCK
 376       JRT_BLOCK
 377       realloc_failures = Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);








 378       JRT_END
 379     }
 380     guarantee(compiled_method != nullptr, "deopt must be associated with an nmethod");
 381     bool is_jvmci = compiled_method->is_compiled_by_jvmci();
 382     Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, is_jvmci);
 383     if (TraceDeoptimization) {
 384       print_objects(deoptee_thread, objects, realloc_failures);
 385     }
 386   }
 387   if (save_oop_result) {
 388     // Restore result.
 389     deoptee.set_saved_oop_result(&map, return_value());

 390   }
 391   return realloc_failures;
 392 }
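
The function above has to keep the un-oopmapped return value alive across a reallocation that may trigger GC, then write it back. A minimal stand-alone sketch of that save/realloc/restore shape (Oop, Handle, and Frame here are illustrative stand-ins, not the VM types):

#include <cstdio>

struct Oop    { int id; };                      // stand-in for a heap reference
struct Handle { Oop value; };                   // keeps the oop alive across "GC"

struct Frame {
  Oop saved;
  Oop  saved_oop_result() const    { return saved; }
  void set_saved_oop_result(Oop o) { saved = o; }
};

// Stand-in for Deoptimization::realloc_objects(); may "trigger GC".
static bool realloc_objects() { return false; } // no failure in this model

static bool rematerialize(Frame& deoptee, bool save_oop_result) {
  Handle return_value{};
  if (save_oop_result) {
    return_value = Handle{deoptee.saved_oop_result()};  // save before GC can run
  }
  bool realloc_failures = realloc_objects();
  if (save_oop_result) {
    deoptee.set_saved_oop_result(return_value.value);   // restore result
  }
  return realloc_failures;
}

int main() {
  Frame f{{42}};
  rematerialize(f, /*save_oop_result=*/true);
  std::printf("restored oop id = %d\n", f.saved_oop_result().id);
  return 0;
}
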
 393 
 394 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
 395                                      frame& deoptee, int exec_mode, bool& deoptimized_objects) {
 396   JavaThread* deoptee_thread = chunk->at(0)->thread();
 397   assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
 398   assert(thread == Thread::current(), "should be");
 399   HandleMark hm(thread);
 400 #ifndef PRODUCT
 401   bool first = true;
 402 #endif // !PRODUCT
 403   // Start locking from outermost/oldest frame
 404   for (int i = (chunk->length() - 1); i >= 0; i--) {
 405     compiledVFrame* cvf = chunk->at(i);
 406     assert(cvf->scope() != nullptr, "expect only compiled java frames");
 407     GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
 408     if (monitors->is_nonempty()) {
 409       bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,

 703   // its caller's stack by. If the caller is a compiled frame then
 704   // we pretend that the callee has no parameters so that the
 705   // extension counts for the full amount of locals and not just
 706   // locals-parms. This is because without a c2i adapter the parm
 707   // area as created by the compiled frame will not be usable by
 708   // the interpreter. (Depending on the calling convention there
 709   // may not even be enough space).
 710 
 711   // QQQ I'd rather see this pushed down into last_frame_adjust
 712   // and have it take the sender (aka caller).
 713 
 714   if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
 715     caller_adjustment = last_frame_adjust(0, callee_locals);
 716   } else if (callee_locals > callee_parameters) {
 717     // The caller frame may need extending to accommodate
 718     // non-parameter locals of the first unpacked interpreted frame.
 719     // Compute that adjustment.
 720     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 721   }
 722 
 723   // If the sender is deoptimized we must retrieve the address of the handler
 724   // since the frame will "magically" show the original pc before the deopt
 725   // and we'd undo the deopt.
 726 
 727   frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
 728   if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
 729     ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
 730   }
 731 
 732   assert(CodeCache::find_blob(frame_pcs[0]) != nullptr, "bad pc");
 733 
 734 #if INCLUDE_JVMCI
 735   if (exceptionObject() != nullptr) {
 736     current->set_exception_oop(exceptionObject());
 737     exec_mode = Unpack_exception;
 738   }
 739 #endif
 740 
 741   if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 742     assert(current->has_pending_exception(), "should have thrown OOME");
 743     current->set_exception_oop(current->pending_exception());

1208        case T_LONG:    return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1209        default:;
1210      }
1211    }
1212    return nullptr;
1213 }
1214 #endif // INCLUDE_JVMCI
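
The lookup tail shown above returns nullptr when the requested value falls outside the cached range, so the caller falls through to a fresh allocation. A simplified stand-alone model of that cache shape (not the HotSpot BoxCache API; the -128..127 range is an assumption borrowed from the usual autobox cache):

#include <cstdio>

struct Box { long value; };

// Returns a shared box for cached values, nullptr otherwise.
static Box* lookup_cached_long(long v) {
  static Box cache[256];
  static bool initialized = false;
  if (!initialized) {
    for (int i = 0; i < 256; i++) cache[i].value = i - 128;
    initialized = true;
  }
  if (v >= -128 && v <= 127) {
    return &cache[v + 128];
  }
  return nullptr;  // caller falls through and allocates a new box
}

int main() {
  std::printf("cached: %p, uncached: %p\n",
              (void*)lookup_cached_long(5), (void*)lookup_cached_long(5000));
  return 0;
}
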
1215 
1216 #if COMPILER2_OR_JVMCI
1217 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1218   Handle pending_exception(THREAD, thread->pending_exception());
1219   const char* exception_file = thread->exception_file();
1220   int exception_line = thread->exception_line();
1221   thread->clear_pending_exception();
1222 
1223   bool failures = false;
1224 
1225   for (int i = 0; i < objects->length(); i++) {
1226     assert(objects->at(i)->is_object(), "invalid debug information");
1227     ObjectValue* sv = (ObjectValue*) objects->at(i);
1228 
1229     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1230     oop obj = nullptr;
1231 











1232     bool cache_init_error = false;
1233     if (k->is_instance_klass()) {
1234 #if INCLUDE_JVMCI
1235       nmethod* nm = fr->cb()->as_nmethod_or_null();
1236       if (nm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1237         AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1238         obj = get_cached_box(abv, fr, reg_map, cache_init_error, THREAD);
1239         if (obj != nullptr) {
1240           // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1241           abv->set_cached(true);
1242         } else if (cache_init_error) {
1243           // Results in an OOME which is valid (as opposed to a class initialization error)
1244           // and is acceptable for the rare case of a cache initialization failing.
1245           failures = true;
1246         }
1247       }
1248 #endif // INCLUDE_JVMCI
1249 
1250       InstanceKlass* ik = InstanceKlass::cast(k);
1251       if (obj == nullptr && !cache_init_error) {
1252         InternalOOMEMark iom(THREAD);
1253         if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1254           obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1255         } else {
1256           obj = ik->allocate_instance(THREAD);
1257         }
1258       }




1259     } else if (k->is_typeArray_klass()) {
1260       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1261       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1262       int len = sv->field_size() / type2size[ak->element_type()];
1263       InternalOOMEMark iom(THREAD);
1264       obj = ak->allocate(len, THREAD);
1265     } else if (k->is_objArray_klass()) {
1266       ObjArrayKlass* ak = ObjArrayKlass::cast(k);
1267       InternalOOMEMark iom(THREAD);
1268       obj = ak->allocate(sv->field_size(), THREAD);
1269     }
1270 
1271     if (obj == nullptr) {
1272       failures = true;
1273     }
1274 
1275     assert(sv->value().is_null(), "redundant reallocation");
1276     assert(obj != nullptr || HAS_PENDING_EXCEPTION || cache_init_error, "allocation should succeed or we should get an exception");
1277     CLEAR_PENDING_EXCEPTION;
1278     sv->set_value(obj);
1279   }
1280 
1281   if (failures) {
1282     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1283   } else if (pending_exception.not_null()) {
1284     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1285   }
1286 
1287   return failures;
1288 }
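
realloc_objects() above saves the caller's pending exception, clears it so allocation can run with a clean slate, and reinstalls it afterwards unless the reallocation itself failed. A minimal stand-alone model of that save/clear/restore discipline (a string stands in for the pending exception oop):

#include <cstdio>
#include <string>

struct Thread {
  std::string pending;                       // empty means no pending exception
};

// Stand-in for the reallocation work; may set t.pending itself (e.g. OOME).
static bool do_reallocation(Thread& t) { (void)t; return false; }

static bool realloc_with_saved_exception(Thread& t) {
  std::string saved = t.pending;             // save the caller's exception
  t.pending.clear();                         // allocation runs without it
  bool failures = do_reallocation(t);
  if (failures) {
    t.pending = "OutOfMemoryError(realloc)"; // a realloc failure wins
  } else if (!saved.empty()) {
    t.pending = saved;                       // reinstall the original exception
  }
  return failures;
}

int main() {
  Thread t{"PendingFromCaller"};
  realloc_with_saved_exception(t);
  std::printf("pending after: %s\n", t.pending.c_str());
  return 0;
}
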
1289 















1290 #if INCLUDE_JVMCI
1291 /**
1292  * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1293  * we need some way to recover the actual kind so that we can write the correct
1294  * number of bytes.
1295  * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
1296  * the entries at index i + 1 to i + n - 1 are 'markers'.
1297  * For example, if we were writing a short at index 4 of a byte array of size 8, the
1298  * expected form of the array would be:
1299  *
1300  * {b0, b1, b2, b3, INT, marker, b6, b7}
1301  *
1302  * Thus, in order to get back the size of the entry, we simply need to count the
1303  * number of marked entries.
1304  *
1305  * @param virtualArray the virtualized byte array
1306  * @param i index of the virtual entry we are recovering
1307  * @return The number of bytes the entry spans
1308  */
1309 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {

1435       default:
1436         ShouldNotReachHere();
1437     }
1438     index++;
1439   }
1440 }
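
Given the marker scheme documented above, recovering an entry's width reduces to counting the marker slots that follow it. A stand-alone sketch of that counting step (the Slot enum and marker test are illustrative; the real code inspects ScopeValue markers in the virtualized array):

#include <cstdio>

enum Slot { BYTE, INT_HEAD, MARKER };  // illustrative stand-in for the entries

static int count_number_of_bytes_for_entry(const Slot* arr, int len, int i) {
  int n = 1;                           // the entry itself occupies one slot
  while (i + n < len && arr[i + n] == MARKER) {
    n++;                               // each trailing marker adds one byte
  }
  return n;
}

int main() {
  // {b0, b1, b2, b3, INT, marker, b6, b7}: a 2-byte short stored at index 4.
  Slot a[8] = {BYTE, BYTE, BYTE, BYTE, INT_HEAD, MARKER, BYTE, BYTE};
  std::printf("entry at index 4 spans %d bytes\n",
              count_number_of_bytes_for_entry(a, 8, 4));
  return 0;
}
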
1441 
1442 // restore fields of an eliminated object array
1443 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1444   for (int i = 0; i < sv->field_size(); i++) {
1445     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1446     assert(value->type() == T_OBJECT, "object element expected");
1447     obj->obj_at_put(i, value->get_obj()());
1448   }
1449 }
1450 
1451 class ReassignedField {
1452 public:
1453   int _offset;
1454   BasicType _type;



1455 public:
1456   ReassignedField() {
1457     _offset = 0;
1458     _type = T_ILLEGAL;
1459   }
1460 };
1461 





1462 // Gets the fields of `klass` that are eliminated by escape analysis and need to be reassigned
1463 static GrowableArray<ReassignedField>* get_reassigned_fields(InstanceKlass* klass, GrowableArray<ReassignedField>* fields, bool is_jvmci) {
1464   InstanceKlass* super = klass->superklass();
1465   if (super != nullptr) {
1466     get_reassigned_fields(super, fields, is_jvmci);
1467   }
1468   for (AllFieldStream fs(klass); !fs.done(); fs.next()) {
1469     if (!fs.access_flags().is_static() && (is_jvmci || !fs.field_flags().is_injected())) {
1470       ReassignedField field;
1471       field._offset = fs.offset();
1472       field._type = Signature::basic_type(fs.signature());






1473       fields->append(field);
1474     }
1475   }
1476   return fields;
1477 }
1478 
1479 // Restore fields of an eliminated instance object employing the same field order used by the compiler.
1480 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool is_jvmci) {
1481   GrowableArray<ReassignedField>* fields = get_reassigned_fields(klass, new GrowableArray<ReassignedField>(), is_jvmci);









1482   for (int i = 0; i < fields->length(); i++) {















1483     ScopeValue* scope_field = sv->field_at(svIndex);
1484     StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);
1485     int offset = fields->at(i)._offset;
1486     BasicType type = fields->at(i)._type;
1487     switch (type) {
1488       case T_OBJECT: case T_ARRAY:

1489         assert(value->type() == T_OBJECT, "Agreement.");
1490         obj->obj_field_put(offset, value->get_obj()());
1491         break;
1492 
1493       case T_INT: case T_FLOAT: { // 4 bytes.
1494         assert(value->type() == T_INT, "Agreement.");
1495         bool big_value = false;
1496         if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1497           if (scope_field->is_location()) {
1498             Location::Type type = ((LocationValue*) scope_field)->location().type();
1499             if (type == Location::dbl || type == Location::lng) {
1500               big_value = true;
1501             }
1502           }
1503           if (scope_field->is_constant_int()) {
1504             ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1505             if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1506               big_value = true;
1507             }
1508           }

1539       case T_CHAR:
1540         assert(value->type() == T_INT, "Agreement.");
1541         obj->char_field_put(offset, (jchar)value->get_jint());
1542         break;
1543 
1544       case T_BYTE:
1545         assert(value->type() == T_INT, "Agreement.");
1546         obj->byte_field_put(offset, (jbyte)value->get_jint());
1547         break;
1548 
1549       case T_BOOLEAN:
1550         assert(value->type() == T_INT, "Agreement.");
1551         obj->bool_field_put(offset, (jboolean)value->get_jint());
1552         break;
1553 
1554       default:
1555         ShouldNotReachHere();
1556     }
1557     svIndex++;
1558   }









1559   return svIndex;
1560 }
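
In the T_INT/T_FLOAT case above, a long or double appears as two consecutive 4-byte T_INT fields, and the code peeks at the first slot's location type (or at the constant kind of the next scope value) to detect the pair. A reduced stand-alone model of that check (FieldDesc and LocType are stand-ins for the debug-info classes):

#include <cstdio>

enum LocType { LOC_INT, LOC_LNG, LOC_DBL };  // stand-in for Location::Type
struct FieldDesc { bool is_int; };           // stand-in for ReassignedField

// Two adjacent int-sized fields whose first slot is typed lng/dbl really
// hold one 8-byte value that must be written as a unit.
static bool is_big_value(const FieldDesc* fields, int i, int len, LocType loc) {
  return i + 1 < len && fields[i].is_int && fields[i + 1].is_int &&
         (loc == LOC_LNG || loc == LOC_DBL);
}

int main() {
  FieldDesc fields[2] = {{true}, {true}};
  std::printf("pair holds one long: %s\n",
              is_big_value(fields, 0, 2, LOC_LNG) ? "yes" : "no");
  return 0;
}
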
1561 














1562 // restore fields of all eliminated objects and arrays
1563 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool is_jvmci) {
1564   for (int i = 0; i < objects->length(); i++) {
1565     assert(objects->at(i)->is_object(), "invalid debug information");
1566     ObjectValue* sv = (ObjectValue*) objects->at(i);
1567     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1568     Handle obj = sv->value();
1569     assert(obj.not_null() || realloc_failures, "reallocation was missed");
1570 #ifndef PRODUCT
1571     if (PrintDeoptimizationDetails) {
1572       tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1573     }
1574 #endif // !PRODUCT
1575 
1576     if (obj.is_null()) {
1577       continue;
1578     }
1579 
1580 #if INCLUDE_JVMCI
1581     // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1582     if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1583       continue;
1584     }
1585 #endif // INCLUDE_JVMCI
1586     if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1587       assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1588       ScopeValue* payload = sv->field_at(0);
1589       if (payload->is_location() &&
1590           payload->as_LocationValue()->location().type() == Location::vector) {
1591 #ifndef PRODUCT
1592         if (PrintDeoptimizationDetails) {
1593           tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1594           if (Verbose) {
1595             Handle obj = sv->value();
1596             k->oop_print_on(obj(), tty);
1597           }
1598         }
1599 #endif // !PRODUCT
1600         continue; // This vector's value was already restored in VectorSupport::allocate_vector().
1601       }
1602       // Otherwise fall through and reassign the scalar-replaced boxed vector representation,
1603       // which can be restored after the vector object allocation.
1604     }
1605     if (k->is_instance_klass()) {
1606       InstanceKlass* ik = InstanceKlass::cast(k);
1607       reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), is_jvmci);



1608     } else if (k->is_typeArray_klass()) {
1609       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1610       reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1611     } else if (k->is_objArray_klass()) {
1612       reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1613     }
1614   }
1615   // These objects may escape when we return to the interpreter after deoptimization.
1616   // We need a barrier so that stores that initialize these objects can't be reordered
1617   // with subsequent stores that make these objects accessible by other threads.
1618   OrderAccess::storestore();
1619 }
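
The OrderAccess::storestore() above keeps the initializing stores from being reordered past the stores that later publish the objects. In portable C++ the same shape can be sketched with a release fence (std::atomic stands in for the VM's OrderAccess; this is a model, not the HotSpot code):

#include <atomic>
#include <cstdio>

struct Obj { int field; };

static std::atomic<Obj*> published{nullptr};

static void publish(Obj* o) {
  o->field = 42;                                        // initializing store
  std::atomic_thread_fence(std::memory_order_release);  // ~ storestore barrier
  published.store(o, std::memory_order_relaxed);        // store making it reachable
}

int main() {
  static Obj o;
  publish(&o);
  std::printf("published field = %d\n", published.load()->field);
  return 0;
}
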
1620 
1621 
1622 // relock objects for which synchronization was eliminated
1623 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1624                                     JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1625   bool relocked_objects = false;
1626   for (int i = 0; i < monitors->length(); i++) {
1627     MonitorInfo* mon_info = monitors->at(i);

1777     xtty->begin_head("deoptimized thread='%zu' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1778     nm->log_identity(xtty);
1779     xtty->end_head();
1780     for (ScopeDesc* sd = nm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1781       xtty->begin_elem("jvms bci='%d'", sd->bci());
1782       xtty->method(sd->method());
1783       xtty->end_elem();
1784       if (sd->is_top())  break;
1785     }
1786     xtty->tail("deoptimized");
1787   }
1788 
1789   Continuation::notify_deopt(thread, fr.sp());
1790 
1791   // Patch the compiled method so that when execution returns to it we will
1792   // deopt the execution state and return to the interpreter.
1793   fr.deoptimize(thread);
1794 }
1795 
1796 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1797   // Deoptimize only if the frame comes from compiled code.
1798   // Do not deoptimize the frame which is already patched
1799   // during the execution of the loops below.
1800   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1801     return;
1802   }
1803   ResourceMark rm;
1804   deoptimize_single_frame(thread, fr, reason);
1805 }
1806 
1807 #if INCLUDE_JVMCI
1808 address Deoptimization::deoptimize_for_missing_exception_handler(nmethod* nm) {
1809   // there is no exception handler for this pc => deoptimize
1810   nm->make_not_entrant("missing exception handler");
1811 
1812   // Use Deoptimization::deoptimize for all of its side-effects:
1813   // gathering trap statistics, logging...
1814   // it also patches the return pc but we do not care about that
1815   // since we return a continuation to the deopt_blob below.
1816   JavaThread* thread = JavaThread::current();
1817   RegisterMap reg_map(thread,

  33 #include "code/scopeDesc.hpp"
  34 #include "compiler/compilationPolicy.hpp"
  35 #include "compiler/compilerDefinitions.inline.hpp"
  36 #include "gc/shared/collectedHeap.hpp"
  37 #include "gc/shared/memAllocator.hpp"
  38 #include "interpreter/bytecode.hpp"
  39 #include "interpreter/bytecode.inline.hpp"
  40 #include "interpreter/bytecodeStream.hpp"
  41 #include "interpreter/interpreter.hpp"
  42 #include "interpreter/oopMapCache.hpp"
  43 #include "jvm.h"
  44 #include "logging/log.hpp"
  45 #include "logging/logLevel.hpp"
  46 #include "logging/logMessage.hpp"
  47 #include "logging/logStream.hpp"
  48 #include "memory/allocation.inline.hpp"
  49 #include "memory/oopFactory.hpp"
  50 #include "memory/resourceArea.hpp"
  51 #include "memory/universe.hpp"
  52 #include "oops/constantPool.hpp"
  53 #include "oops/flatArrayKlass.hpp"
  54 #include "oops/flatArrayOop.hpp"
  55 #include "oops/fieldStreams.inline.hpp"
  56 #include "oops/method.hpp"
  57 #include "oops/objArrayKlass.hpp"
  58 #include "oops/objArrayOop.inline.hpp"
  59 #include "oops/oop.inline.hpp"
  60 #include "oops/inlineKlass.inline.hpp"
  61 #include "oops/typeArrayOop.inline.hpp"
  62 #include "oops/verifyOopClosure.hpp"
  63 #include "prims/jvmtiDeferredUpdates.hpp"
  64 #include "prims/jvmtiExport.hpp"
  65 #include "prims/jvmtiThreadState.hpp"
  66 #include "prims/methodHandles.hpp"
  67 #include "prims/vectorSupport.hpp"
  68 #include "runtime/atomic.hpp"
  69 #include "runtime/basicLock.inline.hpp"
  70 #include "runtime/continuation.hpp"
  71 #include "runtime/continuationEntry.inline.hpp"
  72 #include "runtime/deoptimization.hpp"
  73 #include "runtime/escapeBarrier.hpp"
  74 #include "runtime/fieldDescriptor.hpp"
  75 #include "runtime/fieldDescriptor.inline.hpp"
  76 #include "runtime/frame.inline.hpp"
  77 #include "runtime/handles.inline.hpp"
  78 #include "runtime/interfaceSupport.inline.hpp"
  79 #include "runtime/javaThread.hpp"
  80 #include "runtime/jniHandles.inline.hpp"

 336                                   frame& deoptee, RegisterMap& map, GrowableArray<compiledVFrame*>* chunk,
 337                                   bool& deoptimized_objects) {
 338   bool realloc_failures = false;
 339   assert(chunk->at(0)->scope() != nullptr, "expect only compiled java frames");
 340 
 341   JavaThread* deoptee_thread = chunk->at(0)->thread();
 342   assert(exec_mode == Deoptimization::Unpack_none || (deoptee_thread == thread),
 343          "a frame can only be deoptimized by the owner thread");
 344 
 345   GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects_to_rematerialize(deoptee, map);
 346 
 347   // The flag return_oop() indicates call sites which return an oop
 348   // in compiled code. Such sites include Java method calls,
 349   // runtime calls (for example, used to allocate new objects/arrays
 350   // on the slow code path) and any other calls generated in compiled code.
 351   // This information cannot reliably be recovered here just by analyzing
 352   // the bytecode of the deoptimized frames, which is why the flag
 353   // is set during method compilation (see Compile::Process_OopMap_Node()).
 354   // If the previous frame was popped or if we are dispatching an exception,
 355   // we don't have an oop result.
 356   ScopeDesc* scope = chunk->at(0)->scope();
 357   bool save_oop_result = scope->return_oop() && !thread->popframe_forcing_deopt_reexecution() && (exec_mode == Deoptimization::Unpack_deopt);
 358   // If multiple values are returned, we must take care of
 359   // all oop return values.
 360   GrowableArray<Handle> return_oops;
 361   InlineKlass* vk = nullptr;
 362   if (save_oop_result && scope->return_scalarized()) {
 363     vk = InlineKlass::returned_inline_klass(map);
 364     if (vk != nullptr) {
 365       vk->save_oop_fields(map, return_oops);
 366       save_oop_result = false;
 367     }
 368   }
 369   if (save_oop_result) {
 370     // Reallocation may trigger GC. If deoptimization happened on return from
 371     // a call which returns an oop, we need to save it since it is not in the oopmap.
 372     oop result = deoptee.saved_oop_result(&map);
 373     assert(oopDesc::is_oop_or_null(result), "must be oop");
 374     return_oops.push(Handle(thread, result));
 375     assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
 376     if (TraceDeoptimization) {
 377       tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, p2i(result), p2i(thread));
 378       tty->cr();
 379     }
 380   }
 381   if (objects != nullptr || vk != nullptr) {
 382     if (exec_mode == Deoptimization::Unpack_none) {
 383       assert(thread->thread_state() == _thread_in_vm, "assumption");
 384       JavaThread* THREAD = thread; // For exception macros.
 385       // Clear pending OOM if reallocation fails and return true indicating allocation failure
 386       if (vk != nullptr) {
 387         realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, CHECK_AND_CLEAR_(true));
 388       }
 389       if (objects != nullptr) {
 390         realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, CHECK_AND_CLEAR_(true));
 391         guarantee(compiled_method != nullptr, "deopt must be associated with an nmethod");
 392         bool is_jvmci = compiled_method->is_compiled_by_jvmci();
 393         Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, is_jvmci, CHECK_AND_CLEAR_(true));
 394       }
 395       deoptimized_objects = true;
 396     } else {
 397       JavaThread* current = thread; // For JRT_BLOCK
 398       JRT_BLOCK
 399       if (vk != nullptr) {
 400         realloc_failures = Deoptimization::realloc_inline_type_result(vk, map, return_oops, THREAD);
 401       }
 402       if (objects != nullptr) {
 403         realloc_failures = realloc_failures || Deoptimization::realloc_objects(thread, &deoptee, &map, objects, THREAD);
 404         guarantee(compiled_method != nullptr, "deopt must be associated with an nmethod");
 405         bool is_jvmci = compiled_method->is_compiled_by_jvmci();
 406         Deoptimization::reassign_fields(&deoptee, &map, objects, realloc_failures, is_jvmci, THREAD);
 407       }
 408       JRT_END
 409     }
 410     if (TraceDeoptimization && objects != nullptr) {



 411       print_objects(deoptee_thread, objects, realloc_failures);
 412     }
 413   }
 414   if (save_oop_result || vk != nullptr) {
 415     // Restore result.
 416     assert(return_oops.length() == 1, "no inline type");
 417     deoptee.set_saved_oop_result(&map, return_oops.pop()());
 418   }
 419   return realloc_failures;
 420 }
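
The branch added above distinguishes a plain oop return from a scalarized inline-type return: the latter leaves several oop fields in registers, all of which must be kept alive across reallocation. An illustrative stand-alone model of that decision (vectors of stand-in Oops replace Handles and the InlineKlass API):

#include <cstdio>
#include <vector>

struct Oop { int id; };

struct ReturnState {
  bool scalarized;              // true if fields were returned in registers
  std::vector<Oop> field_oops;  // oop fields of the scalarized return value
  Oop single_result;            // the one oop of a conventional return
};

static std::vector<Oop> collect_return_oops(const ReturnState& rs) {
  if (rs.scalarized) {
    return rs.field_oops;       // save every oop field before reallocation
  }
  return {rs.single_result};    // plain oop return: save exactly one
}

int main() {
  ReturnState rs{true, {{1}, {2}}, {0}};
  std::printf("oops to keep alive: %zu\n", collect_return_oops(rs).size());
  return 0;
}
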
 421 
 422 static void restore_eliminated_locks(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures,
 423                                      frame& deoptee, int exec_mode, bool& deoptimized_objects) {
 424   JavaThread* deoptee_thread = chunk->at(0)->thread();
 425   assert(!EscapeBarrier::objs_are_deoptimized(deoptee_thread, deoptee.id()), "must relock just once");
 426   assert(thread == Thread::current(), "should be");
 427   HandleMark hm(thread);
 428 #ifndef PRODUCT
 429   bool first = true;
 430 #endif // !PRODUCT
 431   // Start locking from outermost/oldest frame
 432   for (int i = (chunk->length() - 1); i >= 0; i--) {
 433     compiledVFrame* cvf = chunk->at(i);
 434     assert(cvf->scope() != nullptr, "expect only compiled java frames");
 435     GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
 436     if (monitors->is_nonempty()) {
 437       bool relocked = Deoptimization::relock_objects(thread, monitors, deoptee_thread, deoptee,

 731   // its caller's stack by. If the caller is a compiled frame then
 732   // we pretend that the callee has no parameters so that the
 733   // extension counts for the full amount of locals and not just
 734   // locals-parms. This is because without a c2i adapter the parm
 735   // area as created by the compiled frame will not be usable by
 736   // the interpreter. (Depending on the calling convention there
 737   // may not even be enough space).
 738 
 739   // QQQ I'd rather see this pushed down into last_frame_adjust
 740   // and have it take the sender (aka caller).
 741 
 742   if (!deopt_sender.is_interpreted_frame() || caller_was_method_handle) {
 743     caller_adjustment = last_frame_adjust(0, callee_locals);
 744   } else if (callee_locals > callee_parameters) {
 745     // The caller frame may need extending to accommodate
 746     // non-parameter locals of the first unpacked interpreted frame.
 747     // Compute that adjustment.
 748     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
 749   }
 750 
 751   // If the sender is deoptimized we must retrieve the address of the handler
 752   // since the frame will "magically" show the original pc before the deopt
 753   // and we'd undo the deopt.
 754 
 755   frame_pcs[0] = Continuation::is_cont_barrier_frame(deoptee) ? StubRoutines::cont_returnBarrier() : deopt_sender.raw_pc();
 756   if (Continuation::is_continuation_enterSpecial(deopt_sender)) {
 757     ContinuationEntry::from_frame(deopt_sender)->set_argsize(0);
 758   }
 759 
 760   assert(CodeCache::find_blob(frame_pcs[0]) != nullptr, "bad pc");
 761 
 762 #if INCLUDE_JVMCI
 763   if (exceptionObject() != nullptr) {
 764     current->set_exception_oop(exceptionObject());
 765     exec_mode = Unpack_exception;
 766   }
 767 #endif
 768 
 769   if (current->frames_to_pop_failed_realloc() > 0 && exec_mode != Unpack_uncommon_trap) {
 770     assert(current->has_pending_exception(), "should have thrown OOME");
 771     current->set_exception_oop(current->pending_exception());

1236        case T_LONG:    return LongBoxCache::singleton(THREAD)->lookup_raw(value->get_intptr(), cache_init_error);
1237        default:;
1238      }
1239    }
1240    return nullptr;
1241 }
1242 #endif // INCLUDE_JVMCI
1243 
1244 #if COMPILER2_OR_JVMCI
1245 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, TRAPS) {
1246   Handle pending_exception(THREAD, thread->pending_exception());
1247   const char* exception_file = thread->exception_file();
1248   int exception_line = thread->exception_line();
1249   thread->clear_pending_exception();
1250 
1251   bool failures = false;
1252 
1253   for (int i = 0; i < objects->length(); i++) {
1254     assert(objects->at(i)->is_object(), "invalid debug information");
1255     ObjectValue* sv = (ObjectValue*) objects->at(i);

1256     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());

1257 
1258     // Check if the object may be null and has an additional is_init input that needs
1259     // to be checked before using the field values. Skip re-allocation if it is null.
1260     if (sv->maybe_null()) {
1261       assert(k->is_inline_klass(), "must be an inline klass");
1262       jint is_init = StackValue::create_stack_value(fr, reg_map, sv->is_init())->get_jint();
1263       if (is_init == 0) {
1264         continue;
1265       }
1266     }
1267 
1268     oop obj = nullptr;
1269     bool cache_init_error = false;
1270     if (k->is_instance_klass()) {
1271 #if INCLUDE_JVMCI
1272       nmethod* nm = fr->cb()->as_nmethod_or_null();
1273       if (nm->is_compiled_by_jvmci() && sv->is_auto_box()) {
1274         AutoBoxObjectValue* abv = (AutoBoxObjectValue*) sv;
1275         obj = get_cached_box(abv, fr, reg_map, cache_init_error, THREAD);
1276         if (obj != nullptr) {
1277           // Set the flag to indicate the box came from a cache, so that we can skip the field reassignment for it.
1278           abv->set_cached(true);
1279         } else if (cache_init_error) {
1280           // Results in an OOME which is valid (as opposed to a class initialization error)
1281           // and is acceptable for the rare case of a cache initialization failing.
1282           failures = true;
1283         }
1284       }
1285 #endif // INCLUDE_JVMCI
1286 
1287       InstanceKlass* ik = InstanceKlass::cast(k);
1288       if (obj == nullptr && !cache_init_error) {
1289         InternalOOMEMark iom(THREAD);
1290         if (EnableVectorSupport && VectorSupport::is_vector(ik)) {
1291           obj = VectorSupport::allocate_vector(ik, fr, reg_map, sv, THREAD);
1292         } else {
1293           obj = ik->allocate_instance(THREAD);
1294         }
1295       }
1296     } else if (k->is_flatArray_klass()) {
1297       FlatArrayKlass* ak = FlatArrayKlass::cast(k);
1298       // Inline type array must be zeroed because not all memory is reassigned
1299       obj = ak->allocate(sv->field_size(), ak->layout_kind(), THREAD);
1300     } else if (k->is_typeArray_klass()) {
1301       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1302       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
1303       int len = sv->field_size() / type2size[ak->element_type()];
1304       InternalOOMEMark iom(THREAD);
1305       obj = ak->allocate(len, THREAD);
1306     } else if (k->is_objArray_klass()) {
1307       ObjArrayKlass* ak = ObjArrayKlass::cast(k);
1308       InternalOOMEMark iom(THREAD);
1309       obj = ak->allocate(sv->field_size(), THREAD);
1310     }
1311 
1312     if (obj == nullptr) {
1313       failures = true;
1314     }
1315 
1316     assert(sv->value().is_null(), "redundant reallocation");
1317     assert(obj != nullptr || HAS_PENDING_EXCEPTION || cache_init_error, "allocation should succeed or we should get an exception");
1318     CLEAR_PENDING_EXCEPTION;
1319     sv->set_value(obj);
1320   }
1321 
1322   if (failures) {
1323     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
1324   } else if (pending_exception.not_null()) {
1325     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
1326   }
1327 
1328   return failures;
1329 }
1330 
1331 // We're deoptimizing at the return of a call and the inline type
1332 // fields are in registers. When we go back to the interpreter, it will expect a
1333 // reference to an inline type instance. Allocate and initialize it from
1334 // the register values here.
1335 bool Deoptimization::realloc_inline_type_result(InlineKlass* vk, const RegisterMap& map, GrowableArray<Handle>& return_oops, TRAPS) {
1336   oop new_vt = vk->realloc_result(map, return_oops, THREAD);
1337   if (new_vt == nullptr) {
1338     CLEAR_PENDING_EXCEPTION;
1339     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), true);
1340   }
1341   return_oops.clear();
1342   return_oops.push(Handle(THREAD, new_vt));
1343   return false;
1344 }
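
A reduced model of the contract above: on success the saved field oops are replaced by a single reference to the freshly buffered instance, and on failure the retained realloc OOME takes over. Oop and the boolean failure flag are stand-ins for the VM types and the TRAPS machinery:

#include <cstdio>
#include <vector>

struct Oop { int id; };

static bool realloc_inline_type_result_model(std::vector<Oop>& return_oops,
                                             bool alloc_ok) {
  if (!alloc_ok) {
    // Real code: CLEAR_PENDING_EXCEPTION, then throw the retained realloc OOME.
    return true;                    // report the reallocation failure
  }
  Oop new_vt{99};                   // the reallocated, buffered instance
  return_oops.clear();              // the raw field oops are no longer needed
  return_oops.push_back(new_vt);    // interpreter expects a single reference
  return false;
}

int main() {
  std::vector<Oop> oops = {{1}, {2}};
  realloc_inline_type_result_model(oops, /*alloc_ok=*/true);
  std::printf("oops=%zu, id=%d\n", oops.size(), oops[0].id);
  return 0;
}
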
1345 
1346 #if INCLUDE_JVMCI
1347 /**
1348  * For primitive types whose kind gets "erased" at runtime (shorts become stack ints),
1349  * we need some way to recover the actual kind so that we can write the correct
1350  * number of bytes.
1351  * For that purpose, this method assumes that, for an entry spanning n bytes at index i,
1352  * the entries at index i + 1 to i + n - 1 are 'markers'.
1353  * For example, if we were writing a short at index 4 of a byte array of size 8, the
1354  * expected form of the array would be:
1355  *
1356  * {b0, b1, b2, b3, INT, marker, b6, b7}
1357  *
1358  * Thus, in order to get back the size of the entry, we simply need to count the
1359  * number of marked entries.
1360  *
1361  * @param virtualArray the virtualized byte array
1362  * @param i index of the virtual entry we are recovering
1363  * @return The number of bytes the entry spans
1364  */
1365 static int count_number_of_bytes_for_entry(ObjectValue *virtualArray, int i) {

1491       default:
1492         ShouldNotReachHere();
1493     }
1494     index++;
1495   }
1496 }
1497 
1498 // restore fields of an eliminated object array
1499 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
1500   for (int i = 0; i < sv->field_size(); i++) {
1501     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
1502     assert(value->type() == T_OBJECT, "object element expected");
1503     obj->obj_at_put(i, value->get_obj()());
1504   }
1505 }
1506 
1507 class ReassignedField {
1508 public:
1509   int _offset;
1510   BasicType _type;
1511   InstanceKlass* _klass;
1512   bool _is_flat;
1513   bool _is_null_free;
1514 public:
1515   ReassignedField() : _offset(0), _type(T_ILLEGAL), _klass(nullptr), _is_flat(false), _is_null_free(false) { }



1516 };
1517 
1518 static int compare(ReassignedField* left, ReassignedField* right) {
1519   return left->_offset - right->_offset;
1520 }
1521 
1522 
1523 // Gets the fields of `klass` that are eliminated by escape analysis and need to be reassigned
1524 static GrowableArray<ReassignedField>* get_reassigned_fields(InstanceKlass* klass, GrowableArray<ReassignedField>* fields, bool is_jvmci) {
1525   InstanceKlass* super = klass->superklass();
1526   if (super != nullptr) {
1527     get_reassigned_fields(super, fields, is_jvmci);
1528   }
1529   for (AllFieldStream fs(klass); !fs.done(); fs.next()) {
1530     if (!fs.access_flags().is_static() && (is_jvmci || !fs.field_flags().is_injected())) {
1531       ReassignedField field;
1532       field._offset = fs.offset();
1533       field._type = Signature::basic_type(fs.signature());
1534       if (fs.is_flat()) {
1535         field._is_flat = true;
1536         field._is_null_free = fs.is_null_free_inline_type();
1537         // Resolve klass of flat inline type field
1538         field._klass = InlineKlass::cast(klass->get_inline_type_field_klass(fs.index()));
1539       }
1540       fields->append(field);
1541     }
1542   }
1543   return fields;
1544 }
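
get_reassigned_fields() above gathers fields superclass-first, and the caller then orders them by offset via compare() so that values are written in the order the compiler recorded them. A small stand-alone model of that collect-then-sort step, with made-up offsets:

#include <algorithm>
#include <cstdio>
#include <vector>

struct ReassignedField { int offset; };

int main() {
  // Append order: superclass fields first, then the subclass's own fields.
  std::vector<ReassignedField> fields = {{16}, {8}, {12}};
  std::sort(fields.begin(), fields.end(),
            [](const ReassignedField& a, const ReassignedField& b) {
              return a.offset < b.offset;   // same ordering as compare() above
            });
  for (const ReassignedField& f : fields) {
    std::printf("%d ", f.offset);           // prints: 8 12 16
  }
  std::printf("\n");
  return 0;
}
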
1545 
1546 // Restore fields of an eliminated instance object employing the same field order used by the compiler.
1547 static int reassign_fields_by_klass(InstanceKlass* klass, frame* fr, RegisterMap* reg_map, ObjectValue* sv, int svIndex, oop obj, bool is_jvmci, int base_offset, GrowableArray<int>* null_marker_offsets, TRAPS) {
1548   GrowableArray<ReassignedField>* fields = get_reassigned_fields(klass, new GrowableArray<ReassignedField>(), is_jvmci);
1549   fields->sort(compare);
1550 
1551   // Keep track of null marker offsets for flat fields
1552   bool set_null_markers = false;
1553   if (null_marker_offsets == nullptr) {
1554     set_null_markers = true;
1555     null_marker_offsets = new GrowableArray<int>();
1556   }
1557 
1558   for (int i = 0; i < fields->length(); i++) {
1559     BasicType type = fields->at(i)._type;
1560     int offset = base_offset + fields->at(i)._offset;
1561     // Check for flat inline type field before accessing the ScopeValue because it might not have any fields
1562     if (fields->at(i)._is_flat) {
1563       // Recursively re-assign flat inline type fields
1564       InstanceKlass* vk = fields->at(i)._klass;
1565       assert(vk != nullptr, "must be resolved");
1566       offset -= InlineKlass::cast(vk)->payload_offset(); // Adjust offset to omit oop header
1567       svIndex = reassign_fields_by_klass(vk, fr, reg_map, sv, svIndex, obj, is_jvmci, offset, null_marker_offsets, CHECK_0);
1568       if (!fields->at(i)._is_null_free) {
1569         int nm_offset = offset + InlineKlass::cast(vk)->null_marker_offset();
1570         null_marker_offsets->append(nm_offset);
1571       }
1572       continue; // Continue because we don't need to increment svIndex
1573     }
1574     ScopeValue* scope_field = sv->field_at(svIndex);
1575     StackValue* value = StackValue::create_stack_value(fr, reg_map, scope_field);


1576     switch (type) {
1577       case T_OBJECT:
1578       case T_ARRAY:
1579         assert(value->type() == T_OBJECT, "Agreement.");
1580         obj->obj_field_put(offset, value->get_obj()());
1581         break;
1582 
1583       case T_INT: case T_FLOAT: { // 4 bytes.
1584         assert(value->type() == T_INT, "Agreement.");
1585         bool big_value = false;
1586         if (i+1 < fields->length() && fields->at(i+1)._type == T_INT) {
1587           if (scope_field->is_location()) {
1588             Location::Type type = ((LocationValue*) scope_field)->location().type();
1589             if (type == Location::dbl || type == Location::lng) {
1590               big_value = true;
1591             }
1592           }
1593           if (scope_field->is_constant_int()) {
1594             ScopeValue* next_scope_field = sv->field_at(svIndex + 1);
1595             if (next_scope_field->is_constant_long() || next_scope_field->is_constant_double()) {
1596               big_value = true;
1597             }
1598           }

1629       case T_CHAR:
1630         assert(value->type() == T_INT, "Agreement.");
1631         obj->char_field_put(offset, (jchar)value->get_jint());
1632         break;
1633 
1634       case T_BYTE:
1635         assert(value->type() == T_INT, "Agreement.");
1636         obj->byte_field_put(offset, (jbyte)value->get_jint());
1637         break;
1638 
1639       case T_BOOLEAN:
1640         assert(value->type() == T_INT, "Agreement.");
1641         obj->bool_field_put(offset, (jboolean)value->get_jint());
1642         break;
1643 
1644       default:
1645         ShouldNotReachHere();
1646     }
1647     svIndex++;
1648   }
1649   if (set_null_markers) {
1650     // The null marker values come after all the field values in the debug info
1651     assert(null_marker_offsets->length() == (sv->field_size() - svIndex), "Missing null marker(s) in debug info");
1652     for (int i = 0; i < null_marker_offsets->length(); ++i) {
1653       int offset = null_marker_offsets->at(i);
1654       jbyte is_init = (jbyte)StackValue::create_stack_value(fr, reg_map, sv->field_at(svIndex++))->get_jint();
1655       obj->byte_field_put(offset, is_init);
1656     }
1657   }
1658   return svIndex;
1659 }
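
The epilogue above consumes the null-marker bytes that trail all the field values in the debug info, writing each one at an offset collected during the recursive pass. A stand-alone model of that tail step, with made-up offsets and values:

#include <cstdio>
#include <vector>

int main() {
  // Offsets collected while recursing over nullable flat fields.
  std::vector<int> null_marker_offsets = {20, 33};
  // Debug-info stream: three field values followed by two null-marker bytes.
  std::vector<int> debug_values = {7, 8, 9, 1, 0};
  size_t svIndex = 3;                        // markers start after the fields
  unsigned char object[64] = {};             // stand-in for the heap object
  for (int off : null_marker_offsets) {
    object[off] = (unsigned char)debug_values[svIndex++];  // the is_init byte
  }
  std::printf("marker@20=%d marker@33=%d\n", object[20], object[33]);
  return 0;
}
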
1660 
1661 // restore fields of an eliminated inline type array
1662 void Deoptimization::reassign_flat_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, flatArrayOop obj, FlatArrayKlass* vak, bool is_jvmci, TRAPS) {
1663   InlineKlass* vk = vak->element_klass();
1664   assert(vk->flat_array(), "should only be used for flat inline type arrays");
1665   // Adjust offset to omit oop header
1666   int base_offset = arrayOopDesc::base_offset_in_bytes(T_FLAT_ELEMENT) - InlineKlass::cast(vk)->payload_offset();
1667   // Initialize all elements of the flat inline type array
1668   for (int i = 0; i < sv->field_size(); i++) {
1669     ScopeValue* val = sv->field_at(i);
1670     int offset = base_offset + (i << Klass::layout_helper_log2_element_size(vak->layout_helper()));
1671     reassign_fields_by_klass(vk, fr, reg_map, val->as_ObjectValue(), 0, (oop)obj, is_jvmci, offset, nullptr, CHECK);
1672   }
1673 }
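
A worked example of the element-offset arithmetic above, with made-up layout numbers (array base 16, payload offset 8, 16-byte flat elements): subtracting the payload offset from the array base lets the recursive field writer keep using payload-relative field offsets for every element.

#include <cstdio>

int main() {
  const int array_base_in_bytes = 16;  // assumed array base offset in bytes
  const int payload_offset      = 8;   // assumed InlineKlass payload offset
  const int log2_element_size   = 4;   // assumed 16-byte flat elements
  const int base_offset = array_base_in_bytes - payload_offset;
  for (int i = 0; i < 3; i++) {
    int offset = base_offset + (i << log2_element_size);
    std::printf("element %d -> payload offset %d\n", i, offset);  // 8, 24, 40
  }
  return 0;
}
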
1674 
1675 // restore fields of all eliminated objects and arrays
1676 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures, bool is_jvmci, TRAPS) {
1677   for (int i = 0; i < objects->length(); i++) {
1678     assert(objects->at(i)->is_object(), "invalid debug information");
1679     ObjectValue* sv = (ObjectValue*) objects->at(i);
1680     Klass* k = java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()());
1681     Handle obj = sv->value();
1682     assert(obj.not_null() || realloc_failures || sv->maybe_null(), "reallocation was missed");
1683 #ifndef PRODUCT
1684     if (PrintDeoptimizationDetails) {
1685       tty->print_cr("reassign fields for object of type %s!", k->name()->as_C_string());
1686     }
1687 #endif // !PRODUCT
1688 
1689     if (obj.is_null()) {
1690       continue;
1691     }
1692 
1693 #if INCLUDE_JVMCI
1694     // Don't reassign fields of boxes that came from a cache. Caches may be in CDS.
1695     if (sv->is_auto_box() && ((AutoBoxObjectValue*) sv)->is_cached()) {
1696       continue;
1697     }
1698 #endif // INCLUDE_JVMCI
1699     if (EnableVectorSupport && VectorSupport::is_vector(k)) {
1700       assert(sv->field_size() == 1, "%s not a vector", k->name()->as_C_string());
1701       ScopeValue* payload = sv->field_at(0);
1702       if (payload->is_location() &&
1703           payload->as_LocationValue()->location().type() == Location::vector) {
1704 #ifndef PRODUCT
1705         if (PrintDeoptimizationDetails) {
1706           tty->print_cr("skip field reassignment for this vector - it should be assigned already");
1707           if (Verbose) {
1708             Handle obj = sv->value();
1709             k->oop_print_on(obj(), tty);
1710           }
1711         }
1712 #endif // !PRODUCT
1713         continue; // This vector's value was already restored in VectorSupport::allocate_vector().
1714       }
1715       // Otherwise fall through and reassign the scalar-replaced boxed vector representation,
1716       // which can be restored after the vector object allocation.
1717     }
1718     if (k->is_instance_klass()) {
1719       InstanceKlass* ik = InstanceKlass::cast(k);
1720       reassign_fields_by_klass(ik, fr, reg_map, sv, 0, obj(), is_jvmci, 0, nullptr, CHECK);
1721     } else if (k->is_flatArray_klass()) {
1722       FlatArrayKlass* vak = FlatArrayKlass::cast(k);
1723       reassign_flat_array_elements(fr, reg_map, sv, (flatArrayOop) obj(), vak, is_jvmci, CHECK);
1724     } else if (k->is_typeArray_klass()) {
1725       TypeArrayKlass* ak = TypeArrayKlass::cast(k);
1726       reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
1727     } else if (k->is_objArray_klass()) {
1728       reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
1729     }
1730   }
1731   // These objects may escape when we return to the interpreter after deoptimization.
1732   // We need a barrier so that stores that initialize these objects can't be reordered
1733   // with subsequent stores that make these objects accessible by other threads.
1734   OrderAccess::storestore();
1735 }
1736 
1737 
1738 // relock objects for which synchronization was eliminated
1739 bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInfo*>* monitors,
1740                                     JavaThread* deoptee_thread, frame& fr, int exec_mode, bool realloc_failures) {
1741   bool relocked_objects = false;
1742   for (int i = 0; i < monitors->length(); i++) {
1743     MonitorInfo* mon_info = monitors->at(i);

1893     xtty->begin_head("deoptimized thread='%zu' reason='%s' pc='" INTPTR_FORMAT "'",(uintx)thread->osthread()->thread_id(), trap_reason_name(reason), p2i(fr.pc()));
1894     nm->log_identity(xtty);
1895     xtty->end_head();
1896     for (ScopeDesc* sd = nm->scope_desc_at(fr.pc()); ; sd = sd->sender()) {
1897       xtty->begin_elem("jvms bci='%d'", sd->bci());
1898       xtty->method(sd->method());
1899       xtty->end_elem();
1900       if (sd->is_top())  break;
1901     }
1902     xtty->tail("deoptimized");
1903   }
1904 
1905   Continuation::notify_deopt(thread, fr.sp());
1906 
1907   // Patch the compiled method so that when execution returns to it we will
1908   // deopt the execution state and return to the interpreter.
1909   fr.deoptimize(thread);
1910 }
1911 
1912 void Deoptimization::deoptimize(JavaThread* thread, frame fr, DeoptReason reason) {
1913   // Deoptimize only if the frame comes from compiled code.
1914   // Do not deoptimize the frame which is already patched
1915   // during the execution of the loops below.
1916   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
1917     return;
1918   }
1919   ResourceMark rm;
1920   deoptimize_single_frame(thread, fr, reason);
1921 }
1922 
1923 #if INCLUDE_JVMCI
1924 address Deoptimization::deoptimize_for_missing_exception_handler(nmethod* nm) {
1925   // there is no exception handler for this pc => deoptimize
1926   nm->make_not_entrant("missing exception handler");
1927 
1928   // Use Deoptimization::deoptimize for all of its side-effects:
1929   // gathering traps statistics, logging...
1930   // it also patches the return pc but we do not care about that
1931   // since we return a continuation to the deopt_blob below.
1932   JavaThread* thread = JavaThread::current();
1933   RegisterMap reg_map(thread,