
src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp


  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "compiler/compiler_globals.hpp"
  29 #include "gc/shared/barrierSet.hpp"
  30 #include "gc/shared/barrierSetAssembler.hpp"
  31 #include "interp_masm_aarch64.hpp"
  32 #include "interpreter/interpreter.hpp"
  33 #include "interpreter/interpreterRuntime.hpp"
  34 #include "logging/log.hpp"
  35 #include "oops/arrayOop.hpp"

  36 #include "oops/markWord.hpp"
  37 #include "oops/method.hpp"
  38 #include "oops/methodData.hpp"

  39 #include "oops/resolvedFieldEntry.hpp"
  40 #include "oops/resolvedIndyEntry.hpp"
  41 #include "oops/resolvedMethodEntry.hpp"
  42 #include "prims/jvmtiExport.hpp"
  43 #include "prims/jvmtiThreadState.hpp"
  44 #include "runtime/basicLock.hpp"
  45 #include "runtime/frame.inline.hpp"
  46 #include "runtime/javaThread.hpp"
  47 #include "runtime/safepointMechanism.hpp"
  48 #include "runtime/sharedRuntime.hpp"
  49 #include "utilities/powerOfTwo.hpp"
  50 
  51 void InterpreterMacroAssembler::narrow(Register result) {
  52 
  53   // Get method->_constMethod->_result_type
  54   ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
  55   ldr(rscratch1, Address(rscratch1, Method::const_offset()));
  56   ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));
  57 
  58   Label done, notBool, notByte, notChar;

 192     ldrw(index, Address(rbcp, bcp_offset));
 193   } else if (index_size == sizeof(u1)) {
 194     load_unsigned_byte(index, Address(rbcp, bcp_offset));
 195   } else {
 196     ShouldNotReachHere();
 197   }
 198 }
 199 
 200 void InterpreterMacroAssembler::get_method_counters(Register method,
 201                                                     Register mcs, Label& skip) {
 202   Label has_counters;
 203   ldr(mcs, Address(method, Method::method_counters_offset()));
 204   cbnz(mcs, has_counters);
 205   call_VM(noreg, CAST_FROM_FN_PTR(address,
 206           InterpreterRuntime::build_method_counters), method);
 207   ldr(mcs, Address(method, Method::method_counters_offset()));
 208   cbz(mcs, skip); // No MethodCounters allocated, OutOfMemory
 209   bind(has_counters);
 210 }
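// Editorial sketch (not part of the patch): the lazy-allocation pattern generated
// above, modeled as plain C++. MethodModel/build_method_counters_model are
// hypothetical stand-ins for Method::method_counters() and
// InterpreterRuntime::build_method_counters.
#include <new>
struct MethodCountersModel {};
struct MethodModel { MethodCountersModel* counters = nullptr; };

static void build_method_counters_model(MethodModel* m) {
  m->counters = new (std::nothrow) MethodCountersModel();  // may stay null on OOM
}

static MethodCountersModel* get_method_counters_model(MethodModel* m, bool& skip) {
  if (m->counters == nullptr) {        // cbnz(mcs, has_counters) not taken
    build_method_counters_model(m);    // call_VM(..., build_method_counters, method)
    skip = (m->counters == nullptr);   // cbz(mcs, skip): no MethodCounters, OutOfMemory
  }
  return m->counters;                  // has_counters
}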
 211 
 212 // Load object from cpool->resolved_references(index)
 213 void InterpreterMacroAssembler::load_resolved_reference_at_index(
 214                                            Register result, Register index, Register tmp) {
 215   assert_different_registers(result, index);
 216 
 217   get_constant_pool(result);
 218   // load pointer for resolved_references[] objArray
 219   ldr(result, Address(result, ConstantPool::cache_offset()));
 220   ldr(result, Address(result, ConstantPoolCache::resolved_references_offset()));
 221   resolve_oop_handle(result, tmp, rscratch2);
 222   // Add in the index
 223   add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
 224   load_heap_oop(result, Address(result, index, Address::uxtw(LogBytesPerHeapOop)), tmp, rscratch2);
 225 }
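// Editorial sketch (not part of the patch): why pre-biasing the index works.
// Adding (base_offset >> LogBytesPerHeapOop) to the index and then scaling by the
// element size yields base_offset + index * element_size, provided base_offset is
// a multiple of the element size (heap-oop array bases are). The concrete numbers
// here (16-byte array base, 4-byte compressed oops) are illustrative assumptions.
#include <cassert>
#include <cstddef>
int main() {
  const size_t base_offset = 16, log_elem = 2, elem = size_t(1) << log_elem;
  for (size_t index = 0; index < 8; index++) {
    size_t biased = index + (base_offset >> log_elem);           // add(index, index, base >> Log)
    assert((biased << log_elem) == base_offset + index * elem);  // Address(result, index, uxtw(Log))
  }
  return 0;
}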
 226 
 227 void InterpreterMacroAssembler::load_resolved_klass_at_offset(
 228                              Register cpool, Register index, Register klass, Register temp) {
 229   add(temp, cpool, index, LSL, LogBytesPerWord);
 230   ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
 231   ldr(klass, Address(cpool,  ConstantPool::resolved_klasses_offset())); // klass = cpool->_resolved_klasses
 232   add(klass, klass, temp, LSL, LogBytesPerWord);
 233   ldr(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
 234 }
 235 
 236 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a
 237 // subtype of super_klass.
 238 //
 239 // Args:
 240 //      r0: superklass
 241 //      Rsub_klass: subklass
 242 //
 243 // Kills:
 244 //      r2, r5
 245 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
 246                                                   Label& ok_is_subtype) {

 247   assert(Rsub_klass != r0, "r0 holds superklass");
 248   assert(Rsub_klass != r2, "r2 holds 2ndary super array length");
 249   assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");
 250 
 251   // Profile the not-null value's klass.
 252   profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5


 253 
 254   // Do the check.
 255   check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2
 256 }
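// Editorial sketch (not part of the patch, heavily simplified): the scan that
// motivates the r2/r5 register notes above. The real check_klass_subtype first
// tries the super_check_offset/primary-supers fast path; only the secondary-supers
// loop is modeled here, with a hypothetical KlassModel type.
struct KlassModel {
  KlassModel** secondary_supers;        // r5 scans this array
  int          secondary_count;         // r2 holds its length
  KlassModel*  secondary_super_cache;
};

static bool is_subtype_slow_path(KlassModel* sub, KlassModel* super) {
  for (int i = 0; i < sub->secondary_count; i++) {
    if (sub->secondary_supers[i] == super) {
      sub->secondary_super_cache = super;   // cache the hit for the next query
      return true;
    }
  }
  return false;
}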
 257 
 258 // Java Expression Stack
 259 
 260 void InterpreterMacroAssembler::pop_ptr(Register r) {
 261   ldr(r, post(esp, wordSize));
 262 }
 263 
 264 void InterpreterMacroAssembler::pop_i(Register r) {
 265   ldrw(r, post(esp, wordSize));
 266 }
 267 
 268 void InterpreterMacroAssembler::pop_l(Register r) {
 269   ldr(r, post(esp, 2 * Interpreter::stackElementSize));
 270 }
 271 
 272 void InterpreterMacroAssembler::push_ptr(Register r) {

 600 
 601     add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
 602     bind(entry);
 603     cmp(c_rarg1, r19); // check if bottom reached
 604     br(Assembler::NE, loop); // if not at bottom then check this entry
 605   }
 606 
 607   bind(no_unlock);
 608 
 609   // jvmti support
 610   if (notify_jvmdi) {
 611     notify_method_exit(state, NotifyJVMTI);    // preserve TOSCA
 612   } else {
 613     notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
 614   }
 615 
 616   // remove activation
 617   // get sender esp
 618   ldr(rscratch2,
 619       Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));

 620   if (StackReservedPages > 0) {
 621     // testing if reserved zone needs to be re-enabled
 622     Label no_reserved_zone_enabling;
 623 
 624     // check if already enabled - if so no re-enabling needed
 625     assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
 626     ldrw(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
 627     cmpw(rscratch1, (u1)StackOverflow::stack_guard_enabled);
 628     br(Assembler::EQ, no_reserved_zone_enabling);
 629 
 630     // look for an overflow into the stack reserved zone, i.e.
 631     // interpreter_frame_sender_sp <= JavaThread::reserved_stack_activation
 632     ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
 633     cmp(rscratch2, rscratch1);
 634     br(Assembler::LS, no_reserved_zone_enabling);
 635 
 636     call_VM_leaf(
 637       CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
 638     call_VM(noreg, CAST_FROM_FN_PTR(address,
 639                    InterpreterRuntime::throw_delayed_StackOverflowError));
 640     should_not_reach_here();
 641 
 642     bind(no_reserved_zone_enabling);
 643   }
 644 
 645   // restore sender esp
 646   mov(esp, rscratch2);
 647   // remove frame anchor
 648   leave();
 649   // If we're returning to interpreted code we will shortly be
 650   // adjusting SP to allow some space for ESP.  If we're returning to
 651   // compiled code the saved sender SP was saved in sender_sp, so this
 652   // restores it.
 653   andr(sp, esp, -16);
 654 }
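// Editorial sketch (not part of the patch): the final andr(sp, esp, -16) rounds
// the stack pointer down to a 16-byte boundary, as AArch64 requires of sp;
// e.g. esp = 0x...d138 gives sp = 0x...d130.
#include <cstdint>
static bool sp_realignment_ok(uint64_t esp) {
  uint64_t sp = esp & ~uint64_t(15);    // andr(sp, esp, -16)
  return sp % 16 == 0 && sp <= esp && esp - sp < 16;
}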
 655 
 656 // Lock object
 657 //
 658 // Args:
 659 //      c_rarg1: BasicObjectLock to be used for locking
 660 //
 661 // Kills:
 662 //      r0
 663 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, .. (param regs)
 664 //      rscratch1, rscratch2 (scratch regs)

 685 
 686     Label slow_case;
 687 
 688     // Load object pointer into obj_reg %c_rarg3
 689     ldr(obj_reg, Address(lock_reg, obj_offset));
 690 
 691     if (DiagnoseSyncOnValueBasedClasses != 0) {
 692       load_klass(tmp, obj_reg);
 693       ldrb(tmp, Address(tmp, Klass::misc_flags_offset()));
 694       tst(tmp, KlassFlags::_misc_is_value_based_class);
 695       br(Assembler::NE, slow_case);
 696     }
 697 
 698     if (LockingMode == LM_LIGHTWEIGHT) {
 699       lightweight_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case);
 700       b(count);
 701     } else if (LockingMode == LM_LEGACY) {
 702       // Load (object->mark() | 1) into swap_reg
 703       ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 704       orr(swap_reg, rscratch1, 1);




 705 
 706       // Save (object->mark() | 1) into BasicLock's displaced header
 707       str(swap_reg, Address(lock_reg, mark_offset));
 708 
 709       assert(lock_offset == 0,
  710              "displaced header must be first word in BasicObjectLock");
 711 
 712       Label fail;
 713       cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
 714 
 715       // Fast check for recursive lock.
 716       //
 717       // Can apply the optimization only if this is a stack lock
 718       // allocated in this thread. For efficiency, we can focus on
 719       // recently allocated stack locks (instead of reading the stack
 720       // base and checking whether 'mark' points inside the current
 721       // thread stack):
 722       //  1) (mark & 7) == 0, and
 723       //  2) sp <= mark < mark + os::pagesize()
 724       //
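// Editorial sketch (not part of the patch): the two conditions above as a plain
// predicate, reading the second bound as "mark < sp + page size" (the displaced
// mark must be a recently pushed stack-lock address just above sp).
#include <cstddef>
#include <cstdint>
static bool looks_like_recursive_stack_lock(uintptr_t mark, uintptr_t sp, size_t page_size) {
  return (mark & 7) == 0 && sp <= mark && mark < sp + page_size;
}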

1038     Address data(mdp, in_bytes(JumpData::taken_offset()));
1039     ldr(bumped_count, data);
1040     assert(DataLayout::counter_increment == 1,
1041             "flow-free idiom only works with 1");
1042     // Intel does this to catch overflow
1043     // addptr(bumped_count, DataLayout::counter_increment);
1044     // sbbptr(bumped_count, 0);
1045     // so we do this
1046     adds(bumped_count, bumped_count, DataLayout::counter_increment);
1047     Label L;
1048     br(Assembler::CS, L);       // skip store if counter overflow
1049     str(bumped_count, data);
1050     bind(L);
1051     // The method data pointer needs to be updated to reflect the new target.
1052     update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1053     bind(profile_continue);
1054   }
1055 }
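// Editorial sketch (not part of the patch): the flow-free saturating bump above,
// as plain C++. adds sets the carry flag when the 64-bit add wraps, and
// br(Assembler::CS, L) then skips the store, so a counter at its maximum stays there.
#include <cstdint>
static uint64_t bump_saturating(uint64_t counter) {
  uint64_t sum = counter + 1;          // adds(bumped_count, bumped_count, counter_increment)
  return (sum == 0) ? counter : sum;   // carry set (wrapped to zero) -> keep the old value
}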
1056 
1057 
1058 void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
1059   if (ProfileInterpreter) {
1060     Label profile_continue;
1061 
1062     // If no method data exists, go to profile_continue.
1063     test_method_data_pointer(mdp, profile_continue);
1064 
1065     // We are not taking the branch.  Increment the not taken count.
1066     increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));
1067 
1068     // The method data pointer needs to be updated to correspond to
1069     // the next bytecode
1070     update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
1071     bind(profile_continue);
1072   }
1073 }
1074 
1075 
1076 void InterpreterMacroAssembler::profile_call(Register mdp) {
1077   if (ProfileInterpreter) {
1078     Label profile_continue;
1079 
1080     // If no method data exists, go to profile_continue.
1081     test_method_data_pointer(mdp, profile_continue);
1082 
1083     // We are making a call.  Increment the count.
1084     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1085 
1086     // The method data pointer needs to be updated to reflect the new target.
1087     update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
1088     bind(profile_continue);
1089   }
1090 }

1373     // case_array_offset_in_bytes()
1374     movw(reg2, in_bytes(MultiBranchData::per_case_size()));
1375     movw(rscratch1, in_bytes(MultiBranchData::case_array_offset()));
1376     Assembler::maddw(index, index, reg2, rscratch1);
1377 
1378     // Update the case count
1379     increment_mdp_data_at(mdp,
1380                           index,
1381                           in_bytes(MultiBranchData::relative_count_offset()));
1382 
1383     // The method data pointer needs to be updated.
1384     update_mdp_by_offset(mdp,
1385                          index,
1386                          in_bytes(MultiBranchData::
1387                                   relative_displacement_offset()));
1388 
1389     bind(profile_continue);
1390   }
1391 }
1392 
1393 void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
1394   if (state == atos) {
1395     MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
1396   }
1397 }
1398 
1399 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; }
1400 
1401 
1402 void InterpreterMacroAssembler::notify_method_entry() {
1403   // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
1404   // track stack depth.  If it is possible to enter interp_only_mode we add
1405   // the code to check if the event should be sent.
1406   if (JvmtiExport::can_post_interpreter_events()) {
1407     Label L;
1408     ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
1409     cbzw(r3, L);
1410     call_VM(noreg, CAST_FROM_FN_PTR(address,
1411                                     InterpreterRuntime::post_method_entry));
1412     bind(L);

1625         profile_obj_type(tmp, mdo_arg_addr);
1626 
1627         int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
1628         off_to_args += to_add;
1629       }
1630 
1631       if (MethodData::profile_return()) {
1632         ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
1633         sub(tmp, tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
1634       }
1635 
1636       add(rscratch1, mdp, off_to_args);
1637       bind(done);
1638       mov(mdp, rscratch1);
1639 
1640       if (MethodData::profile_return()) {
1641         // We're right after the type profile for the last
1642         // argument. tmp is the number of cells left in the
1643         // CallTypeData/VirtualCallTypeData to reach its end. Non null
1644         // if there's a return to profile.
1645         assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
1646         add(mdp, mdp, tmp, LSL, exact_log2(DataLayout::cell_size));
1647       }
1648       str(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
1649     } else {
1650       assert(MethodData::profile_return(), "either profile call args or call ret");
1651       update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
1652     }
1653 
1654     // mdp points right after the end of the
1655     // CallTypeData/VirtualCallTypeData, right after the cells for the
1656     // return value type if there's one
1657 
1658     bind(profile_continue);
1659   }
1660 }
1661 
1662 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
1663   assert_different_registers(mdp, ret, tmp, rbcp);
1664   if (ProfileInterpreter && MethodData::profile_return()) {
1665     Label profile_continue, done;

1671 
1672       // If we don't profile all invoke bytecodes we must make sure
1673       // it's a bytecode we indeed profile. We can't go back to the
1674       // beginning of the ProfileData we intend to update to check its
1675       // type because we're right after it and we don't know its
1676       // length
1677       Label do_profile;
1678       ldrb(rscratch1, Address(rbcp, 0));
1679       cmp(rscratch1, (u1)Bytecodes::_invokedynamic);
1680       br(Assembler::EQ, do_profile);
1681       cmp(rscratch1, (u1)Bytecodes::_invokehandle);
1682       br(Assembler::EQ, do_profile);
1683       get_method(tmp);
1684       ldrh(rscratch1, Address(tmp, Method::intrinsic_id_offset()));
1685       subs(zr, rscratch1, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
1686       br(Assembler::NE, profile_continue);
1687 
1688       bind(do_profile);
1689     }
1690 
1691     Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
1692     mov(tmp, ret);
1693     profile_obj_type(tmp, mdo_ret_addr);
1694 
1695     bind(profile_continue);
1696   }
1697 }
1698 
1699 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
1700   assert_different_registers(rscratch1, rscratch2, mdp, tmp1, tmp2);
1701   if (ProfileInterpreter && MethodData::profile_parameters()) {
1702     Label profile_continue, done;
1703 
1704     test_method_data_pointer(mdp, profile_continue);
1705 
1706     // Load the offset of the area within the MDO used for
1707     // parameters. If it's negative we're not profiling any parameters
1708     ldrw(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
1709     tbnz(tmp1, 31, profile_continue);  // i.e. sign bit set
1710 
1711     // Compute a pointer to the area for parameters from the offset

  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "compiler/compiler_globals.hpp"
  29 #include "gc/shared/barrierSet.hpp"
  30 #include "gc/shared/barrierSetAssembler.hpp"
  31 #include "interp_masm_aarch64.hpp"
  32 #include "interpreter/interpreter.hpp"
  33 #include "interpreter/interpreterRuntime.hpp"
  34 #include "logging/log.hpp"
  35 #include "oops/arrayOop.hpp"
  36 #include "oops/constMethodFlags.hpp"
  37 #include "oops/markWord.hpp"
  38 #include "oops/method.hpp"
  39 #include "oops/methodData.hpp"
  40 #include "oops/inlineKlass.hpp"
  41 #include "oops/resolvedFieldEntry.hpp"
  42 #include "oops/resolvedIndyEntry.hpp"
  43 #include "oops/resolvedMethodEntry.hpp"
  44 #include "prims/jvmtiExport.hpp"
  45 #include "prims/jvmtiThreadState.hpp"
  46 #include "runtime/basicLock.hpp"
  47 #include "runtime/frame.inline.hpp"
  48 #include "runtime/javaThread.hpp"
  49 #include "runtime/safepointMechanism.hpp"
  50 #include "runtime/sharedRuntime.hpp"
  51 #include "utilities/powerOfTwo.hpp"
  52 
  53 void InterpreterMacroAssembler::narrow(Register result) {
  54 
  55   // Get method->_constMethod->_result_type
  56   ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
  57   ldr(rscratch1, Address(rscratch1, Method::const_offset()));
  58   ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));
  59 
  60   Label done, notBool, notByte, notChar;

 194     ldrw(index, Address(rbcp, bcp_offset));
 195   } else if (index_size == sizeof(u1)) {
 196     load_unsigned_byte(index, Address(rbcp, bcp_offset));
 197   } else {
 198     ShouldNotReachHere();
 199   }
 200 }
 201 
 202 void InterpreterMacroAssembler::get_method_counters(Register method,
 203                                                     Register mcs, Label& skip) {
 204   Label has_counters;
 205   ldr(mcs, Address(method, Method::method_counters_offset()));
 206   cbnz(mcs, has_counters);
 207   call_VM(noreg, CAST_FROM_FN_PTR(address,
 208           InterpreterRuntime::build_method_counters), method);
 209   ldr(mcs, Address(method, Method::method_counters_offset()));
 210   cbz(mcs, skip); // No MethodCounters allocated, OutOfMemory
 211   bind(has_counters);
 212 }
 213 
 214 void InterpreterMacroAssembler::allocate_instance(Register klass, Register new_obj,
 215                                                   Register t1, Register t2,
 216                                                   bool clear_fields, Label& alloc_failed) {
 217   MacroAssembler::allocate_instance(klass, new_obj, t1, t2, clear_fields, alloc_failed);
 218   if (DTraceMethodProbes) {
 219       // Trigger dtrace event for fastpath
 220     push(atos);
 221     call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), new_obj);
 222     pop(atos);
 223   }
 224 }
 225 
 226 void InterpreterMacroAssembler::read_flat_field(Register entry,
 227                                                 Register field_index, Register field_offset,
 228                                                 Register temp, Register obj) {
 229   Label alloc_failed, empty_value, done;
 230   const Register src = field_offset;
 231   const Register alloc_temp = r10;
 232   const Register dst_temp   = field_index;
 233   const Register layout_info = temp;
 234   assert_different_registers(obj, entry, field_index, field_offset, temp, alloc_temp);
 235 
 236   // Grab the inline field klass
 237   ldr(rscratch1, Address(entry, in_bytes(ResolvedFieldEntry::field_holder_offset())));
 238   inline_layout_info(rscratch1, field_index, layout_info);
 239 
 240   const Register field_klass = dst_temp;
 241   ldr(field_klass, Address(layout_info, in_bytes(InlineLayoutInfo::klass_offset())));
 242 
 243   // check for empty value klass
 244   test_klass_is_empty_inline_type(field_klass, rscratch1, empty_value);
 245 
 246   // allocate buffer
 247   push(obj); // save holder
 248   allocate_instance(field_klass, obj, alloc_temp, rscratch2, false, alloc_failed);
 249 
 250   // Have an oop instance buffer, copy into it
 251   data_for_oop(obj, dst_temp, field_klass);  // danger, uses rscratch1
 252   pop(alloc_temp);             // restore holder
 253   lea(src, Address(alloc_temp, field_offset));
 254   // call_VM_leaf, clobbers a few regs, save restore new obj
 255   push(obj);
 256   flat_field_copy(IS_DEST_UNINITIALIZED, src, dst_temp, layout_info);
 257   pop(obj);
 258   b(done);
 259 
 260   bind(empty_value);
 261   get_empty_inline_type_oop(field_klass, alloc_temp, obj);
 262   b(done);
 263 
 264   bind(alloc_failed);
 265   pop(obj);
 266   call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_flat_field),
 267           obj, entry);
 268 
 269   bind(done);
 270   membar(Assembler::StoreStore);
 271 }
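// Editorial sketch (not part of the patch; Valhalla-specific and simplified):
// the control flow of read_flat_field above. All callable parameters are
// hypothetical stand-ins for the assembler/runtime pieces named in the code.
#include <cstddef>
template <class Oop, class Klass, class IsEmpty, class EmptyInstance,
          class TryAlloc, class CopyPayload, class SlowPath>
Oop read_flat_field_model(Oop holder, Klass field_klass, size_t field_offset,
                          IsEmpty is_empty, EmptyInstance empty_instance,
                          TryAlloc try_alloc, CopyPayload copy_payload,
                          SlowPath slow_path) {
  if (is_empty(field_klass))                  // test_klass_is_empty_inline_type
    return empty_instance(field_klass);       // get_empty_inline_type_oop
  Oop buf = try_alloc(field_klass);           // allocate_instance
  if (buf == Oop())                           // alloc_failed
    return slow_path(holder);                 // call_VM(InterpreterRuntime::read_flat_field)
  copy_payload(buf, holder, field_offset);    // flat_field_copy(IS_DEST_UNINITIALIZED, ...)
  return buf;                                 // published after membar(StoreStore)
}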
 272 
 273 // Load object from cpool->resolved_references(index)
 274 void InterpreterMacroAssembler::load_resolved_reference_at_index(
 275                                            Register result, Register index, Register tmp) {
 276   assert_different_registers(result, index);
 277 
 278   get_constant_pool(result);
 279   // load pointer for resolved_references[] objArray
 280   ldr(result, Address(result, ConstantPool::cache_offset()));
 281   ldr(result, Address(result, ConstantPoolCache::resolved_references_offset()));
 282   resolve_oop_handle(result, tmp, rscratch2);
 283   // Add in the index
 284   add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
 285   load_heap_oop(result, Address(result, index, Address::uxtw(LogBytesPerHeapOop)), tmp, rscratch2);
 286 }
 287 
 288 void InterpreterMacroAssembler::load_resolved_klass_at_offset(
 289                              Register cpool, Register index, Register klass, Register temp) {
 290   add(temp, cpool, index, LSL, LogBytesPerWord);
 291   ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
 292   ldr(klass, Address(cpool,  ConstantPool::resolved_klasses_offset())); // klass = cpool->_resolved_klasses
 293   add(klass, klass, temp, LSL, LogBytesPerWord);
 294   ldr(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
 295 }
 296 
 297 // Generate a subtype check: branch to ok_is_subtype if sub_klass is a
 298 // subtype of super_klass.
 299 //
 300 // Args:
 301 //      r0: superklass
 302 //      Rsub_klass: subklass
 303 //
 304 // Kills:
 305 //      r2, r5
 306 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
 307                                                   Label& ok_is_subtype,
 308                                                   bool profile) {
 309   assert(Rsub_klass != r0, "r0 holds superklass");
 310   assert(Rsub_klass != r2, "r2 holds 2ndary super array length");
 311   assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");
 312 
 313   // Profile the not-null value's klass.
 314   if (profile) {
 315     profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5
 316   }
 317 
 318   // Do the check.
 319   check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2
 320 }
 321 
 322 // Java Expression Stack
 323 
 324 void InterpreterMacroAssembler::pop_ptr(Register r) {
 325   ldr(r, post(esp, wordSize));
 326 }
 327 
 328 void InterpreterMacroAssembler::pop_i(Register r) {
 329   ldrw(r, post(esp, wordSize));
 330 }
 331 
 332 void InterpreterMacroAssembler::pop_l(Register r) {
 333   ldr(r, post(esp, 2 * Interpreter::stackElementSize));
 334 }
 335 
 336 void InterpreterMacroAssembler::push_ptr(Register r) {

 664 
 665     add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
 666     bind(entry);
 667     cmp(c_rarg1, r19); // check if bottom reached
 668     br(Assembler::NE, loop); // if not at bottom then check this entry
 669   }
 670 
 671   bind(no_unlock);
 672 
 673   // jvmti support
 674   if (notify_jvmdi) {
 675     notify_method_exit(state, NotifyJVMTI);    // preserve TOSCA
 676   } else {
 677     notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
 678   }
 679 
 680   // remove activation
 681   // get sender esp
 682   ldr(rscratch2,
 683       Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
 684 
 685   if (StackReservedPages > 0) {
 686     // testing if reserved zone needs to be re-enabled
 687     Label no_reserved_zone_enabling;
 688 
 689     // check if already enabled - if so no re-enabling needed
 690     assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
 691     ldrw(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
 692     cmpw(rscratch1, (u1)StackOverflow::stack_guard_enabled);
 693     br(Assembler::EQ, no_reserved_zone_enabling);
 694 
 695     // look for an overflow into the stack reserved zone, i.e.
 696     // interpreter_frame_sender_sp <= JavaThread::reserved_stack_activation
 697     ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
 698     cmp(rscratch2, rscratch1);
 699     br(Assembler::LS, no_reserved_zone_enabling);
 700 
 701     call_VM_leaf(
 702       CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
 703     call_VM(noreg, CAST_FROM_FN_PTR(address,
 704                    InterpreterRuntime::throw_delayed_StackOverflowError));
 705     should_not_reach_here();
 706 
 707     bind(no_reserved_zone_enabling);
 708   }
 709 
 710   if (state == atos && InlineTypeReturnedAsFields) {
 711     // Check if we are returning a non-null inline type and load its fields into registers
 712     Label skip;
 713     test_oop_is_not_inline_type(r0, rscratch2, skip);
 714 
 715     // Load fields from a buffered value with an inline class specific handler
 716     load_klass(rscratch1 /*dst*/, r0 /*src*/);
 717     ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
 718     ldr(rscratch1, Address(rscratch1, InlineKlass::unpack_handler_offset()));
 719     // Unpack handler can be null if inline type is not scalarizable in returns
 720     cbz(rscratch1, skip);
 721 
 722     blr(rscratch1);
 723 #ifdef ASSERT
 724     // TODO 8284443 Enable
 725     if (StressCallingConvention && false) {
 726       Label skip_stress;
 727       ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
 728       ldrw(rscratch1, Address(rscratch1, Method::flags_offset()));
 729       tstw(rscratch1, MethodFlags::has_scalarized_return_flag());
 730       br(Assembler::EQ, skip_stress);
 731       load_klass(r0, r0);
 732       orr(r0, r0, 1);
 733       bind(skip_stress);
 734     }
 735 #endif
 736     bind(skip);
 737     // Check above kills sender esp in rscratch2. Reload it.
 738     ldr(rscratch2, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
 739   }
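// Editorial sketch (not part of the patch; Valhalla-specific): the decision made
// on the atos return path above; the sender esp clobbered by the check is then
// reloaded and restored below. The callable parameters are hypothetical stand-ins.
template <class Oop, class IsInlineType, class UnpackHandlerOf, class CallHandler>
void maybe_return_as_fields_model(Oop r0, IsInlineType is_inline_type,
                                  UnpackHandlerOf unpack_handler_of, CallHandler call) {
  if (!is_inline_type(r0)) return;       // test_oop_is_not_inline_type -> skip
  auto handler = unpack_handler_of(r0);  // klass -> inlineklass_fixed_block -> unpack_handler
  if (handler == nullptr) return;        // not scalarizable in returns -> cbz skip
  call(handler, r0);                     // blr: spread the buffered fields into registers
}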
 740 
 741   // restore sender esp
 742   mov(esp, rscratch2);
 743   // remove frame anchor
 744   leave();
 745   // If we're returning to interpreted code we will shortly be
 746   // adjusting SP to allow some space for ESP.  If we're returning to
 747   // compiled code the saved sender SP was saved in sender_sp, so this
 748   // restores it.
 749   andr(sp, esp, -16);
 750 }
 751 
 752 // Lock object
 753 //
 754 // Args:
 755 //      c_rarg1: BasicObjectLock to be used for locking
 756 //
 757 // Kills:
 758 //      r0
 759 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, .. (param regs)
 760 //      rscratch1, rscratch2 (scratch regs)

 781 
 782     Label slow_case;
 783 
 784     // Load object pointer into obj_reg %c_rarg3
 785     ldr(obj_reg, Address(lock_reg, obj_offset));
 786 
 787     if (DiagnoseSyncOnValueBasedClasses != 0) {
 788       load_klass(tmp, obj_reg);
 789       ldrb(tmp, Address(tmp, Klass::misc_flags_offset()));
 790       tst(tmp, KlassFlags::_misc_is_value_based_class);
 791       br(Assembler::NE, slow_case);
 792     }
 793 
 794     if (LockingMode == LM_LIGHTWEIGHT) {
 795       lightweight_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case);
 796       b(count);
 797     } else if (LockingMode == LM_LEGACY) {
 798       // Load (object->mark() | 1) into swap_reg
 799       ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 800       orr(swap_reg, rscratch1, 1);
 801       if (EnableValhalla) {
 802         // Mask inline_type bit such that we go to the slow path if object is an inline type
 803         andr(swap_reg, swap_reg, ~((int) markWord::inline_type_bit_in_place));
 804       }
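// Editorial sketch (not part of the patch): why clearing the inline_type bit
// steers value objects to the slow path. The cmpxchg below only succeeds when
// the object's current mark equals swap_reg; for an inline-type object the real
// mark keeps the bit set, so the compare fails. 0x4 is an illustrative bit
// value, not markWord's actual constant.
#include <cstdint>
static bool legacy_lock_cas_can_succeed(uint64_t mark, uint64_t inline_type_bit = 0x4) {
  uint64_t expected = (mark | 1) & ~inline_type_bit;  // orr ..., 1; andr ..., ~bit
  return expected == mark;  // true only for an unlocked, non-inline-type mark word
}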
 805 
 806       // Save (object->mark() | 1) into BasicLock's displaced header
 807       str(swap_reg, Address(lock_reg, mark_offset));
 808 
 809       assert(lock_offset == 0,
  810              "displaced header must be first word in BasicObjectLock");
 811 
 812       Label fail;
 813       cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
 814 
 815       // Fast check for recursive lock.
 816       //
 817       // Can apply the optimization only if this is a stack lock
 818       // allocated in this thread. For efficiency, we can focus on
 819       // recently allocated stack locks (instead of reading the stack
 820       // base and checking whether 'mark' points inside the current
 821       // thread stack):
 822       //  1) (mark & 7) == 0, and
 823       //  2) sp <= mark < mark + os::pagesize()
 824       //

1138     Address data(mdp, in_bytes(JumpData::taken_offset()));
1139     ldr(bumped_count, data);
1140     assert(DataLayout::counter_increment == 1,
1141             "flow-free idiom only works with 1");
1142     // Intel does this to catch overflow
1143     // addptr(bumped_count, DataLayout::counter_increment);
1144     // sbbptr(bumped_count, 0);
1145     // so we do this
1146     adds(bumped_count, bumped_count, DataLayout::counter_increment);
1147     Label L;
1148     br(Assembler::CS, L);       // skip store if counter overflow
1149     str(bumped_count, data);
1150     bind(L);
1151     // The method data pointer needs to be updated to reflect the new target.
1152     update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1153     bind(profile_continue);
1154   }
1155 }
1156 
1157 
1158 void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp, bool acmp) {
1159   if (ProfileInterpreter) {
1160     Label profile_continue;
1161 
1162     // If no method data exists, go to profile_continue.
1163     test_method_data_pointer(mdp, profile_continue);
1164 
1165     // We are not taking the branch.  Increment the not taken count.
1166     increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));
1167 
1168     // The method data pointer needs to be updated to correspond to
1169     // the next bytecode
1170     update_mdp_by_constant(mdp, acmp ? in_bytes(ACmpData::acmp_data_size()) : in_bytes(BranchData::branch_data_size()));
1171     bind(profile_continue);
1172   }
1173 }
1174 
1175 
1176 void InterpreterMacroAssembler::profile_call(Register mdp) {
1177   if (ProfileInterpreter) {
1178     Label profile_continue;
1179 
1180     // If no method data exists, go to profile_continue.
1181     test_method_data_pointer(mdp, profile_continue);
1182 
1183     // We are making a call.  Increment the count.
1184     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
1185 
1186     // The method data pointer needs to be updated to reflect the new target.
1187     update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
1188     bind(profile_continue);
1189   }
1190 }

1473     // case_array_offset_in_bytes()
1474     movw(reg2, in_bytes(MultiBranchData::per_case_size()));
1475     movw(rscratch1, in_bytes(MultiBranchData::case_array_offset()));
1476     Assembler::maddw(index, index, reg2, rscratch1);
1477 
1478     // Update the case count
1479     increment_mdp_data_at(mdp,
1480                           index,
1481                           in_bytes(MultiBranchData::relative_count_offset()));
1482 
1483     // The method data pointer needs to be updated.
1484     update_mdp_by_offset(mdp,
1485                          index,
1486                          in_bytes(MultiBranchData::
1487                                   relative_displacement_offset()));
1488 
1489     bind(profile_continue);
1490   }
1491 }
1492 
1493 template <class ArrayData> void InterpreterMacroAssembler::profile_array_type(Register mdp,
1494                                                                               Register array,
1495                                                                               Register tmp) {
1496   if (ProfileInterpreter) {
1497     Label profile_continue;
1498 
1499     // If no method data exists, go to profile_continue.
1500     test_method_data_pointer(mdp, profile_continue);
1501 
1502     mov(tmp, array);
1503     profile_obj_type(tmp, Address(mdp, in_bytes(ArrayData::array_offset())));
1504 
1505     Label not_flat;
1506     test_non_flat_array_oop(array, tmp, not_flat);
1507 
1508     set_mdp_flag_at(mdp, ArrayData::flat_array_byte_constant());
1509 
1510     bind(not_flat);
1511 
1512     Label not_null_free;
1513     test_non_null_free_array_oop(array, tmp, not_null_free);
1514 
1515     set_mdp_flag_at(mdp, ArrayData::null_free_array_byte_constant());
1516 
1517     bind(not_null_free);
1518 
1519     bind(profile_continue);
1520   }
1521 }
1522 
1523 template void InterpreterMacroAssembler::profile_array_type<ArrayLoadData>(Register mdp,
1524                                                                            Register array,
1525                                                                            Register tmp);
1526 template void InterpreterMacroAssembler::profile_array_type<ArrayStoreData>(Register mdp,
1527                                                                             Register array,
1528                                                                             Register tmp);
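// Editorial sketch (not part of the patch): the two conditional MDO flag updates
// in profile_array_type, written as a plain predicate pair. The callable
// parameters are stand-ins for the array-oop tests the assembler performs.
struct ArrayTypeProfileModel { bool flat_seen = false; bool null_free_seen = false; };

template <class ArrayOop, class IsFlat, class IsNullFree>
void profile_array_type_model(ArrayTypeProfileModel& p, ArrayOop a,
                              IsFlat is_flat, IsNullFree is_null_free) {
  if (is_flat(a))      p.flat_seen = true;       // set_mdp_flag_at(flat_array_byte_constant)
  if (is_null_free(a)) p.null_free_seen = true;  // set_mdp_flag_at(null_free_array_byte_constant)
}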
1529 
1530 void InterpreterMacroAssembler::profile_multiple_element_types(Register mdp, Register element, Register tmp, const Register tmp2) {
1531   if (ProfileInterpreter) {
1532     Label profile_continue;
1533 
1534     // If no method data exists, go to profile_continue.
1535     test_method_data_pointer(mdp, profile_continue);
1536 
1537     Label done, update;
1538     cbnz(element, update);
1539     set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());
1540     b(done);
1541 
1542     bind(update);
1543     load_klass(tmp, element);
1544 
1545     // Record the object type.
1546     record_klass_in_profile(tmp, mdp, tmp2);
1547 
1548     bind(done);
1549 
1550     // The method data pointer needs to be updated.
1551     update_mdp_by_constant(mdp, in_bytes(ArrayStoreData::array_store_data_size()));
1552 
1553     bind(profile_continue);
1554   }
1555 }
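// Editorial sketch (not part of the patch): the null-vs-klass split in
// profile_multiple_element_types. The callable parameters are stand-ins for
// load_klass and record_klass_in_profile.
template <class Oop, class KlassOf, class RecordKlass>
void profile_element_model(bool& null_seen, Oop element, KlassOf klass_of, RecordKlass record) {
  if (element == Oop()) {     // cbnz(element, update) not taken
    null_seen = true;         // set_mdp_flag_at(null_seen_byte_constant)
    return;
  }
  record(klass_of(element));  // load_klass + record_klass_in_profile
}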
1556 
1557 
1558 void InterpreterMacroAssembler::profile_element_type(Register mdp,
1559                                                      Register element,
1560                                                      Register tmp) {
1561   if (ProfileInterpreter) {
1562     Label profile_continue;
1563 
1564     // If no method data exists, go to profile_continue.
1565     test_method_data_pointer(mdp, profile_continue);
1566 
1567     mov(tmp, element);
1568     profile_obj_type(tmp, Address(mdp, in_bytes(ArrayLoadData::element_offset())));
1569 
1570     // The method data pointer needs to be updated.
1571     update_mdp_by_constant(mdp, in_bytes(ArrayLoadData::array_load_data_size()));
1572 
1573     bind(profile_continue);
1574   }
1575 }
1576 
1577 void InterpreterMacroAssembler::profile_acmp(Register mdp,
1578                                              Register left,
1579                                              Register right,
1580                                              Register tmp) {
1581   if (ProfileInterpreter) {
1582     Label profile_continue;
1583 
1584     // If no method data exists, go to profile_continue.
1585     test_method_data_pointer(mdp, profile_continue);
1586 
1587     mov(tmp, left);
1588     profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::left_offset())));
1589 
1590     Label left_not_inline_type;
1591     test_oop_is_not_inline_type(left, tmp, left_not_inline_type);
1592     set_mdp_flag_at(mdp, ACmpData::left_inline_type_byte_constant());
1593     bind(left_not_inline_type);
1594 
1595     mov(tmp, right);
1596     profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::right_offset())));
1597 
1598     Label right_not_inline_type;
1599     test_oop_is_not_inline_type(right, tmp, right_not_inline_type);
1600     set_mdp_flag_at(mdp, ACmpData::right_inline_type_byte_constant());
1601     bind(right_not_inline_type);
1602 
1603     bind(profile_continue);
1604   }
1605 }
1606 
1607 void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
1608   if (state == atos) {
1609     MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
1610   }
1611 }
1612 
1613 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) { ; }
1614 
1615 
1616 void InterpreterMacroAssembler::notify_method_entry() {
1617   // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
1618   // track stack depth.  If it is possible to enter interp_only_mode we add
1619   // the code to check if the event should be sent.
1620   if (JvmtiExport::can_post_interpreter_events()) {
1621     Label L;
1622     ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
1623     cbzw(r3, L);
1624     call_VM(noreg, CAST_FROM_FN_PTR(address,
1625                                     InterpreterRuntime::post_method_entry));
1626     bind(L);

1839         profile_obj_type(tmp, mdo_arg_addr);
1840 
1841         int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
1842         off_to_args += to_add;
1843       }
1844 
1845       if (MethodData::profile_return()) {
1846         ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
1847         sub(tmp, tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
1848       }
1849 
1850       add(rscratch1, mdp, off_to_args);
1851       bind(done);
1852       mov(mdp, rscratch1);
1853 
1854       if (MethodData::profile_return()) {
1855         // We're right after the type profile for the last
1856         // argument. tmp is the number of cells left in the
1857         // CallTypeData/VirtualCallTypeData to reach its end. Non null
1858         // if there's a return to profile.
1859         assert(SingleTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
1860         add(mdp, mdp, tmp, LSL, exact_log2(DataLayout::cell_size));
1861       }
1862       str(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
1863     } else {
1864       assert(MethodData::profile_return(), "either profile call args or call ret");
1865       update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
1866     }
1867 
1868     // mdp points right after the end of the
1869     // CallTypeData/VirtualCallTypeData, right after the cells for the
1870     // return value type if there's one
1871 
1872     bind(profile_continue);
1873   }
1874 }
1875 
1876 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
1877   assert_different_registers(mdp, ret, tmp, rbcp);
1878   if (ProfileInterpreter && MethodData::profile_return()) {
1879     Label profile_continue, done;

1885 
1886       // If we don't profile all invoke bytecodes we must make sure
1887       // it's a bytecode we indeed profile. We can't go back to the
1888       // beginning of the ProfileData we intend to update to check its
1889       // type because we're right after it and we don't know its
1890       // length
1891       Label do_profile;
1892       ldrb(rscratch1, Address(rbcp, 0));
1893       cmp(rscratch1, (u1)Bytecodes::_invokedynamic);
1894       br(Assembler::EQ, do_profile);
1895       cmp(rscratch1, (u1)Bytecodes::_invokehandle);
1896       br(Assembler::EQ, do_profile);
1897       get_method(tmp);
1898       ldrh(rscratch1, Address(tmp, Method::intrinsic_id_offset()));
1899       subs(zr, rscratch1, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
1900       br(Assembler::NE, profile_continue);
1901 
1902       bind(do_profile);
1903     }
1904 
1905     Address mdo_ret_addr(mdp, -in_bytes(SingleTypeEntry::size()));
1906     mov(tmp, ret);
1907     profile_obj_type(tmp, mdo_ret_addr);
1908 
1909     bind(profile_continue);
1910   }
1911 }
1912 
1913 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
1914   assert_different_registers(rscratch1, rscratch2, mdp, tmp1, tmp2);
1915   if (ProfileInterpreter && MethodData::profile_parameters()) {
1916     Label profile_continue, done;
1917 
1918     test_method_data_pointer(mdp, profile_continue);
1919 
1920     // Load the offset of the area within the MDO used for
1921     // parameters. If it's negative we're not profiling any parameters
1922     ldrw(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
1923     tbnz(tmp1, 31, profile_continue);  // i.e. sign bit set
1924 
1925     // Compute a pointer to the area for parameters from the offset