15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "asm/macroAssembler.inline.hpp"
27 #include "compiler/compiler_globals.hpp"
28 #include "gc/shared/barrierSet.hpp"
29 #include "gc/shared/barrierSetAssembler.hpp"
30 #include "interp_masm_aarch64.hpp"
31 #include "interpreter/interpreter.hpp"
32 #include "interpreter/interpreterRuntime.hpp"
33 #include "logging/log.hpp"
34 #include "oops/arrayOop.hpp"
35 #include "oops/markWord.hpp"
36 #include "oops/method.hpp"
37 #include "oops/methodData.hpp"
38 #include "oops/resolvedFieldEntry.hpp"
39 #include "oops/resolvedIndyEntry.hpp"
40 #include "oops/resolvedMethodEntry.hpp"
41 #include "prims/jvmtiExport.hpp"
42 #include "prims/jvmtiThreadState.hpp"
43 #include "runtime/basicLock.hpp"
44 #include "runtime/frame.inline.hpp"
45 #include "runtime/javaThread.hpp"
46 #include "runtime/safepointMechanism.hpp"
47 #include "runtime/sharedRuntime.hpp"
48 #include "utilities/powerOfTwo.hpp"
49
50 void InterpreterMacroAssembler::narrow(Register result) {
51
52 // Get method->_constMethod->_result_type
53 ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
54 ldr(rscratch1, Address(rscratch1, Method::const_offset()));
55 ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));
56
57 Label done, notBool, notByte, notChar;
191 ldrw(index, Address(rbcp, bcp_offset));
192 } else if (index_size == sizeof(u1)) {
193 load_unsigned_byte(index, Address(rbcp, bcp_offset));
194 } else {
195 ShouldNotReachHere();
196 }
197 }
198
// Load the MethodCounters of `method` into `mcs`, allocating them via the
// runtime on first use.  If allocation fails (OutOfMemory) this branches
// to `skip` with mcs == 0.
void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register mcs, Label& skip) {
  Label has_counters;
  ldr(mcs, Address(method, Method::method_counters_offset()));
  cbnz(mcs, has_counters);  // fast path: counters already allocated
  // Slow path: ask the runtime to build the MethodCounters, then reload.
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  ldr(mcs, Address(method, Method::method_counters_offset()));
  cbz(mcs, skip); // No MethodCounters allocated, OutOfMemory
  bind(has_counters);
}
210
// Load object from cpool->resolved_references(index).
// Kills: index (reused to hold the scaled element offset).
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                           Register result, Register index, Register tmp) {
  assert_different_registers(result, index);

  get_constant_pool(result);
  // load pointer for resolved_references[] objArray
  ldr(result, Address(result, ConstantPool::cache_offset()));
  ldr(result, Address(result, ConstantPoolCache::resolved_references_offset()));
  // resolved_references is held through an OopHandle; dereference it.
  resolve_oop_handle(result, tmp, rscratch2);
  // Add in the index (pre-scaled so a single uxtw-shifted address works below)
  add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
  load_heap_oop(result, Address(result, index, Address::uxtw(LogBytesPerHeapOop)), tmp, rscratch2);
}
225
// Load the resolved Klass* for constant-pool slot `index` into `klass`:
// klass = cpool->_resolved_klasses->at(resolved_klass_index), where the
// u2 resolved_klass_index is read from the CP slot area that follows the
// ConstantPool object itself.  Kills: temp.
void InterpreterMacroAssembler::load_resolved_klass_at_offset(
                                           Register cpool, Register index, Register klass, Register temp) {
  add(temp, cpool, index, LSL, LogBytesPerWord);
  ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
  ldr(klass, Address(cpool, ConstantPool::resolved_klasses_offset())); // klass = cpool->_resolved_klasses
  add(klass, klass, temp, LSL, LogBytesPerWord);
  ldr(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
}
234
// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass; otherwise fall through.
//
// Args:
//      r0: superklass
//      Rsub_klass: subklass
//
// Kills:
//      r2, r5
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Label& ok_is_subtype) {
  // The fixed-register contract below is what the asserts enforce.
  assert(Rsub_klass != r0, "r0 holds superklass");
  assert(Rsub_klass != r2, "r2 holds 2ndary super array length");
  assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");

  // Profile the not-null value's klass.
  profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5

  // Do the check.
  check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2
}
256
257 // Java Expression Stack
258
// Pop a reference (machine word) off the Java expression stack into r.
void InterpreterMacroAssembler::pop_ptr(Register r) {
  ldr(r, post(esp, wordSize));  // load, then bump esp by one word
}
262
// Pop a 32-bit int off the Java expression stack into r (one word slot).
void InterpreterMacroAssembler::pop_i(Register r) {
  ldrw(r, post(esp, wordSize));
}
266
// Pop a long off the Java expression stack into r; longs occupy two
// interpreter stack elements.
void InterpreterMacroAssembler::pop_l(Register r) {
  ldr(r, post(esp, 2 * Interpreter::stackElementSize));
}
270
271 void InterpreterMacroAssembler::push_ptr(Register r) {
605
606 add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
607 bind(entry);
608 cmp(c_rarg1, r19); // check if bottom reached
609 br(Assembler::NE, loop); // if not at bottom then check this entry
610 }
611
612 bind(no_unlock);
613
614 // jvmti support
615 if (notify_jvmdi) {
616 notify_method_exit(state, NotifyJVMTI); // preserve TOSCA
617 } else {
618 notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
619 }
620
621 // remove activation
622 // get sender esp
623 ldr(rscratch2,
624 Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
625 if (StackReservedPages > 0) {
626 // testing if reserved zone needs to be re-enabled
627 Label no_reserved_zone_enabling;
628
629 // check if already enabled - if so no re-enabling needed
630 assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
631 ldrw(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
632 cmpw(rscratch1, (u1)StackOverflow::stack_guard_enabled);
633 br(Assembler::EQ, no_reserved_zone_enabling);
634
635 // look for an overflow into the stack reserved zone, i.e.
636 // interpreter_frame_sender_sp <= JavaThread::reserved_stack_activation
637 ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
638 cmp(rscratch2, rscratch1);
639 br(Assembler::LS, no_reserved_zone_enabling);
640
641 call_VM_leaf(
642 CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
643 call_VM(noreg, CAST_FROM_FN_PTR(address,
644 InterpreterRuntime::throw_delayed_StackOverflowError));
645 should_not_reach_here();
646
647 bind(no_reserved_zone_enabling);
648 }
649
650 // restore sender esp
651 mov(esp, rscratch2);
652 // remove frame anchor
653 leave();
654 // If we're returning to interpreted code we will shortly be
655 // adjusting SP to allow some space for ESP. If we're returning to
656 // compiled code the saved sender SP was saved in sender_sp, so this
657 // restores it.
658 andr(sp, esp, -16);
659 }
660
661 // Lock object
662 //
663 // Args:
664 // c_rarg1: BasicObjectLock to be used for locking
665 //
666 // Kills:
667 // r0
668 // c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, .. (param regs)
669 // rscratch1, rscratch2 (scratch regs)
690
691 Label slow_case;
692
693 // Load object pointer into obj_reg %c_rarg3
694 ldr(obj_reg, Address(lock_reg, obj_offset));
695
696 if (DiagnoseSyncOnValueBasedClasses != 0) {
697 load_klass(tmp, obj_reg);
698 ldrb(tmp, Address(tmp, Klass::misc_flags_offset()));
699 tst(tmp, KlassFlags::_misc_is_value_based_class);
700 br(Assembler::NE, slow_case);
701 }
702
703 if (LockingMode == LM_LIGHTWEIGHT) {
704 lightweight_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case);
705 b(done);
706 } else if (LockingMode == LM_LEGACY) {
707 // Load (object->mark() | 1) into swap_reg
708 ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
709 orr(swap_reg, rscratch1, 1);
710
711 // Save (object->mark() | 1) into BasicLock's displaced header
712 str(swap_reg, Address(lock_reg, mark_offset));
713
714 assert(lock_offset == 0,
715 "displached header must be first word in BasicObjectLock");
716
717 Label fail;
718 cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
719
720 // Fast check for recursive lock.
721 //
722 // Can apply the optimization only if this is a stack lock
723 // allocated in this thread. For efficiency, we can focus on
724 // recently allocated stack locks (instead of reading the stack
725 // base and checking whether 'mark' points inside the current
726 // thread stack):
727 // 1) (mark & 7) == 0, and
728 // 2) sp <= mark < mark + os::pagesize()
729 //
1043 Address data(mdp, in_bytes(JumpData::taken_offset()));
1044 ldr(bumped_count, data);
1045 assert(DataLayout::counter_increment == 1,
1046 "flow-free idiom only works with 1");
1047 // Intel does this to catch overflow
1048 // addptr(bumped_count, DataLayout::counter_increment);
1049 // sbbptr(bumped_count, 0);
1050 // so we do this
1051 adds(bumped_count, bumped_count, DataLayout::counter_increment);
1052 Label L;
1053 br(Assembler::CS, L); // skip store if counter overflow
1054 str(bumped_count, data);
1055 bind(L);
1056 // The method data pointer needs to be updated to reflect the new target.
1057 update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1058 bind(profile_continue);
1059 }
1060 }
1061
1062
// Profile a branch that was NOT taken: bump the not-taken counter in the
// BranchData at mdp and advance mdp past it.
void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are not taking the branch. Increment the not taken count.
    increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));

    // The method data pointer needs to be updated to correspond to
    // the next bytecode
    update_mdp_by_constant(mdp, in_bytes(BranchData::branch_data_size()));
    bind(profile_continue);
  }
}
1079
1080
// Profile a (non-virtual) call site: bump the CounterData count at mdp and
// advance mdp past the CounterData cell.
void InterpreterMacroAssembler::profile_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call.  Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
    bind(profile_continue);
  }
}
1378 // case_array_offset_in_bytes()
1379 movw(reg2, in_bytes(MultiBranchData::per_case_size()));
1380 movw(rscratch1, in_bytes(MultiBranchData::case_array_offset()));
1381 Assembler::maddw(index, index, reg2, rscratch1);
1382
1383 // Update the case count
1384 increment_mdp_data_at(mdp,
1385 index,
1386 in_bytes(MultiBranchData::relative_count_offset()));
1387
1388 // The method data pointer needs to be updated.
1389 update_mdp_by_offset(mdp,
1390 index,
1391 in_bytes(MultiBranchData::
1392 relative_displacement_offset()));
1393
1394 bind(profile_continue);
1395 }
1396 }
1397
// Debug helper: verify that `reg` holds a valid oop, but only when the
// tos state says a reference is on top (atos); no-op for other states.
void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
  if (state == atos) {
    MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
  }
}
1403
1404 void InterpreterMacroAssembler::notify_method_entry() {
1405 // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
1406 // track stack depth. If it is possible to enter interp_only_mode we add
1407 // the code to check if the event should be sent.
1408 if (JvmtiExport::can_post_interpreter_events()) {
1409 Label L;
1410 ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
1411 cbzw(r3, L);
1412 call_VM(noreg, CAST_FROM_FN_PTR(address,
1413 InterpreterRuntime::post_method_entry));
1414 bind(L);
1415 }
1416
1417 if (DTraceMethodProbes) {
1676 profile_obj_type(tmp, mdo_arg_addr);
1677
1678 int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
1679 off_to_args += to_add;
1680 }
1681
1682 if (MethodData::profile_return()) {
1683 ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
1684 sub(tmp, tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
1685 }
1686
1687 add(rscratch1, mdp, off_to_args);
1688 bind(done);
1689 mov(mdp, rscratch1);
1690
1691 if (MethodData::profile_return()) {
1692 // We're right after the type profile for the last
1693 // argument. tmp is the number of cells left in the
1694 // CallTypeData/VirtualCallTypeData to reach its end. Non null
1695 // if there's a return to profile.
1696 assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
1697 add(mdp, mdp, tmp, LSL, exact_log2(DataLayout::cell_size));
1698 }
1699 str(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
1700 } else {
1701 assert(MethodData::profile_return(), "either profile call args or call ret");
1702 update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
1703 }
1704
1705 // mdp points right after the end of the
1706 // CallTypeData/VirtualCallTypeData, right after the cells for the
1707 // return value type if there's one
1708
1709 bind(profile_continue);
1710 }
1711 }
1712
1713 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
1714 assert_different_registers(mdp, ret, tmp, rbcp);
1715 if (ProfileInterpreter && MethodData::profile_return()) {
1716 Label profile_continue, done;
1722
1723 // If we don't profile all invoke bytecodes we must make sure
1724 // it's a bytecode we indeed profile. We can't go back to the
1725 // beginning of the ProfileData we intend to update to check its
1726 // type because we're right after it and we don't known its
1727 // length
1728 Label do_profile;
1729 ldrb(rscratch1, Address(rbcp, 0));
1730 cmp(rscratch1, (u1)Bytecodes::_invokedynamic);
1731 br(Assembler::EQ, do_profile);
1732 cmp(rscratch1, (u1)Bytecodes::_invokehandle);
1733 br(Assembler::EQ, do_profile);
1734 get_method(tmp);
1735 ldrh(rscratch1, Address(tmp, Method::intrinsic_id_offset()));
1736 subs(zr, rscratch1, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
1737 br(Assembler::NE, profile_continue);
1738
1739 bind(do_profile);
1740 }
1741
1742 Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
1743 mov(tmp, ret);
1744 profile_obj_type(tmp, mdo_ret_addr);
1745
1746 bind(profile_continue);
1747 }
1748 }
1749
1750 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
1751 assert_different_registers(rscratch1, rscratch2, mdp, tmp1, tmp2);
1752 if (ProfileInterpreter && MethodData::profile_parameters()) {
1753 Label profile_continue, done;
1754
1755 test_method_data_pointer(mdp, profile_continue);
1756
1757 // Load the offset of the area within the MDO used for
1758 // parameters. If it's negative we're not profiling any parameters
1759 ldrw(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
1760 tbnz(tmp1, 31, profile_continue); // i.e. sign bit set
1761
1762 // Compute a pointer to the area for parameters from the offset
|
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "asm/macroAssembler.inline.hpp"
27 #include "compiler/compiler_globals.hpp"
28 #include "gc/shared/barrierSet.hpp"
29 #include "gc/shared/barrierSetAssembler.hpp"
30 #include "interp_masm_aarch64.hpp"
31 #include "interpreter/interpreter.hpp"
32 #include "interpreter/interpreterRuntime.hpp"
33 #include "logging/log.hpp"
34 #include "oops/arrayOop.hpp"
35 #include "oops/constMethodFlags.hpp"
36 #include "oops/markWord.hpp"
37 #include "oops/method.hpp"
38 #include "oops/methodData.hpp"
39 #include "oops/inlineKlass.hpp"
40 #include "oops/resolvedFieldEntry.hpp"
41 #include "oops/resolvedIndyEntry.hpp"
42 #include "oops/resolvedMethodEntry.hpp"
43 #include "prims/jvmtiExport.hpp"
44 #include "prims/jvmtiThreadState.hpp"
45 #include "runtime/basicLock.hpp"
46 #include "runtime/frame.inline.hpp"
47 #include "runtime/javaThread.hpp"
48 #include "runtime/safepointMechanism.hpp"
49 #include "runtime/sharedRuntime.hpp"
50 #include "utilities/powerOfTwo.hpp"
51
52 void InterpreterMacroAssembler::narrow(Register result) {
53
54 // Get method->_constMethod->_result_type
55 ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
56 ldr(rscratch1, Address(rscratch1, Method::const_offset()));
57 ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));
58
59 Label done, notBool, notByte, notChar;
193 ldrw(index, Address(rbcp, bcp_offset));
194 } else if (index_size == sizeof(u1)) {
195 load_unsigned_byte(index, Address(rbcp, bcp_offset));
196 } else {
197 ShouldNotReachHere();
198 }
199 }
200
// Load the MethodCounters of `method` into `mcs`, allocating them via the
// runtime on first use.  If allocation fails (OutOfMemory) this branches
// to `skip` with mcs == 0.
void InterpreterMacroAssembler::get_method_counters(Register method,
                                                    Register mcs, Label& skip) {
  Label has_counters;
  ldr(mcs, Address(method, Method::method_counters_offset()));
  cbnz(mcs, has_counters);  // fast path: counters already allocated
  // Slow path: ask the runtime to build the MethodCounters, then reload.
  call_VM(noreg, CAST_FROM_FN_PTR(address,
          InterpreterRuntime::build_method_counters), method);
  ldr(mcs, Address(method, Method::method_counters_offset()));
  cbz(mcs, skip); // No MethodCounters allocated, OutOfMemory
  bind(has_counters);
}
212
// Allocate an instance of `klass` into `new_obj` using the shared
// MacroAssembler fast path (control continues at `alloc_failed` when the
// fast path cannot allocate).  Additionally fires the dtrace object-alloc
// probe when DTraceMethodProbes is enabled, preserving the atos value
// across the leaf call.
void InterpreterMacroAssembler::allocate_instance(Register klass, Register new_obj,
                                                  Register t1, Register t2,
                                                  bool clear_fields, Label& alloc_failed) {
  MacroAssembler::allocate_instance(klass, new_obj, t1, t2, clear_fields, alloc_failed);
  if (DTraceMethodProbes) {
    // Trigger dtrace event for fastpath
    push(atos);
    call_VM_leaf(CAST_FROM_FN_PTR(address, static_cast<int (*)(oopDesc*)>(SharedRuntime::dtrace_object_alloc)), new_obj);
    pop(atos);
  }
}
224
// Read a flat (inline-type) field of the holder in `obj` into a freshly
// allocated buffer, leaving the buffered value in `obj`.
//   entry        - ResolvedFieldEntry for the field
//   field_index  - field index (killed: aliased as dst_temp / field_klass)
//   field_offset - byte offset of the field payload (killed: aliased as src)
//   temp         - scratch (aliased as layout_info)
// If the fast-path buffer allocation fails, falls back to the
// InterpreterRuntime::read_flat_field VM call.
void InterpreterMacroAssembler::read_flat_field(Register entry,
                                                Register field_index, Register field_offset,
                                                Register temp, Register obj) {
  Label alloc_failed, done;
  const Register src = field_offset;
  const Register alloc_temp = r10;
  const Register dst_temp = field_index;
  const Register layout_info = temp;
  assert_different_registers(obj, entry, field_index, field_offset, temp, alloc_temp);

  // Grab the inline field klass: fetch the field holder from the resolved
  // entry, then the InlineLayoutInfo for this field index.
  ldr(rscratch1, Address(entry, in_bytes(ResolvedFieldEntry::field_holder_offset())));
  inline_layout_info(rscratch1, field_index, layout_info);

  const Register field_klass = dst_temp;
  ldr(field_klass, Address(layout_info, in_bytes(InlineLayoutInfo::klass_offset())));

  // allocate buffer
  push(obj); // save holder
  allocate_instance(field_klass, obj, alloc_temp, rscratch2, false, alloc_failed);

  // Have an oop instance buffer, copy into it
  payload_address(obj, dst_temp, field_klass); // danger, uses rscratch1
  pop(alloc_temp); // restore holder
  lea(src, Address(alloc_temp, field_offset));
  // call_VM_leaf, clobbers a few regs, save restore new obj
  push(obj);
  flat_field_copy(IS_DEST_UNINITIALIZED, src, dst_temp, layout_info);
  pop(obj);
  b(done);

  // Slow path: fast allocation failed; let the runtime do the read.
  bind(alloc_failed);
  pop(obj);
  call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_flat_field),
          obj, entry);

  bind(done);
  // Ensure the stores that initialized the buffer are visible before the
  // buffered oop is published.
  membar(Assembler::StoreStore);
}
264
// Load object from cpool->resolved_references(index).
// Kills: index (reused to hold the scaled element offset).
void InterpreterMacroAssembler::load_resolved_reference_at_index(
                                           Register result, Register index, Register tmp) {
  assert_different_registers(result, index);

  get_constant_pool(result);
  // load pointer for resolved_references[] objArray
  ldr(result, Address(result, ConstantPool::cache_offset()));
  ldr(result, Address(result, ConstantPoolCache::resolved_references_offset()));
  // resolved_references is held through an OopHandle; dereference it.
  resolve_oop_handle(result, tmp, rscratch2);
  // Add in the index (pre-scaled so a single uxtw-shifted address works below)
  add(index, index, arrayOopDesc::base_offset_in_bytes(T_OBJECT) >> LogBytesPerHeapOop);
  load_heap_oop(result, Address(result, index, Address::uxtw(LogBytesPerHeapOop)), tmp, rscratch2);
}
279
// Load the resolved Klass* for constant-pool slot `index` into `klass`:
// klass = cpool->_resolved_klasses->at(resolved_klass_index), where the
// u2 resolved_klass_index is read from the CP slot area that follows the
// ConstantPool object itself.  Kills: temp.
void InterpreterMacroAssembler::load_resolved_klass_at_offset(
                                           Register cpool, Register index, Register klass, Register temp) {
  add(temp, cpool, index, LSL, LogBytesPerWord);
  ldrh(temp, Address(temp, sizeof(ConstantPool))); // temp = resolved_klass_index
  ldr(klass, Address(cpool, ConstantPool::resolved_klasses_offset())); // klass = cpool->_resolved_klasses
  add(klass, klass, temp, LSL, LogBytesPerWord);
  ldr(klass, Address(klass, Array<Klass*>::base_offset_in_bytes()));
}
288
// Generate a subtype check: branch to ok_is_subtype if sub_klass is a
// subtype of super_klass; otherwise fall through.  When `profile` is
// false the typecheck profiling step is skipped.
//
// Args:
//      r0: superklass
//      Rsub_klass: subklass
//
// Kills:
//      r2, r5
void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
                                                  Label& ok_is_subtype,
                                                  bool profile) {
  // The fixed-register contract below is what the asserts enforce.
  assert(Rsub_klass != r0, "r0 holds superklass");
  assert(Rsub_klass != r2, "r2 holds 2ndary super array length");
  assert(Rsub_klass != r5, "r5 holds 2ndary super array scan ptr");

  // Profile the not-null value's klass.
  if (profile) {
    profile_typecheck(r2, Rsub_klass, r5); // blows r2, reloads r5
  }

  // Do the check.
  check_klass_subtype(Rsub_klass, r0, r2, ok_is_subtype); // blows r2
}
313
314 // Java Expression Stack
315
// Pop a reference (machine word) off the Java expression stack into r.
void InterpreterMacroAssembler::pop_ptr(Register r) {
  ldr(r, post(esp, wordSize));  // load, then bump esp by one word
}
319
// Pop a 32-bit int off the Java expression stack into r (one word slot).
void InterpreterMacroAssembler::pop_i(Register r) {
  ldrw(r, post(esp, wordSize));
}
323
// Pop a long off the Java expression stack into r; longs occupy two
// interpreter stack elements.
void InterpreterMacroAssembler::pop_l(Register r) {
  ldr(r, post(esp, 2 * Interpreter::stackElementSize));
}
327
328 void InterpreterMacroAssembler::push_ptr(Register r) {
662
663 add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
664 bind(entry);
665 cmp(c_rarg1, r19); // check if bottom reached
666 br(Assembler::NE, loop); // if not at bottom then check this entry
667 }
668
669 bind(no_unlock);
670
671 // jvmti support
672 if (notify_jvmdi) {
673 notify_method_exit(state, NotifyJVMTI); // preserve TOSCA
674 } else {
675 notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
676 }
677
678 // remove activation
679 // get sender esp
680 ldr(rscratch2,
681 Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
682
683 if (StackReservedPages > 0) {
684 // testing if reserved zone needs to be re-enabled
685 Label no_reserved_zone_enabling;
686
687 // check if already enabled - if so no re-enabling needed
688 assert(sizeof(StackOverflow::StackGuardState) == 4, "unexpected size");
689 ldrw(rscratch1, Address(rthread, JavaThread::stack_guard_state_offset()));
690 cmpw(rscratch1, (u1)StackOverflow::stack_guard_enabled);
691 br(Assembler::EQ, no_reserved_zone_enabling);
692
693 // look for an overflow into the stack reserved zone, i.e.
694 // interpreter_frame_sender_sp <= JavaThread::reserved_stack_activation
695 ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
696 cmp(rscratch2, rscratch1);
697 br(Assembler::LS, no_reserved_zone_enabling);
698
699 call_VM_leaf(
700 CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
701 call_VM(noreg, CAST_FROM_FN_PTR(address,
702 InterpreterRuntime::throw_delayed_StackOverflowError));
703 should_not_reach_here();
704
705 bind(no_reserved_zone_enabling);
706 }
707
708 if (state == atos && InlineTypeReturnedAsFields) {
709 // Check if we are returning an non-null inline type and load its fields into registers
710 Label skip;
711 test_oop_is_not_inline_type(r0, rscratch2, skip);
712
713 // Load fields from a buffered value with an inline class specific handler
714 load_klass(rscratch1 /*dst*/, r0 /*src*/);
715 ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
716 ldr(rscratch1, Address(rscratch1, InlineKlass::unpack_handler_offset()));
717 // Unpack handler can be null if inline type is not scalarizable in returns
718 cbz(rscratch1, skip);
719
720 blr(rscratch1);
721 #ifdef ASSERT
722 // TODO 8284443 Enable
723 if (StressCallingConvention && false) {
724 Label skip_stress;
725 ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
726 ldrw(rscratch1, Address(rscratch1, Method::flags_offset()));
727 tstw(rscratch1, MethodFlags::has_scalarized_return_flag());
728 br(Assembler::EQ, skip_stress);
729 load_klass(r0, r0);
730 orr(r0, r0, 1);
731 bind(skip_stress);
732 }
733 #endif
734 bind(skip);
735 // Check above kills sender esp in rscratch2. Reload it.
736 ldr(rscratch2, Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
737 }
738
739 // restore sender esp
740 mov(esp, rscratch2);
741 // remove frame anchor
742 leave();
743 // If we're returning to interpreted code we will shortly be
744 // adjusting SP to allow some space for ESP. If we're returning to
745 // compiled code the saved sender SP was saved in sender_sp, so this
746 // restores it.
747 andr(sp, esp, -16);
748 }
749
750 // Lock object
751 //
752 // Args:
753 // c_rarg1: BasicObjectLock to be used for locking
754 //
755 // Kills:
756 // r0
757 // c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, .. (param regs)
758 // rscratch1, rscratch2 (scratch regs)
779
780 Label slow_case;
781
782 // Load object pointer into obj_reg %c_rarg3
783 ldr(obj_reg, Address(lock_reg, obj_offset));
784
785 if (DiagnoseSyncOnValueBasedClasses != 0) {
786 load_klass(tmp, obj_reg);
787 ldrb(tmp, Address(tmp, Klass::misc_flags_offset()));
788 tst(tmp, KlassFlags::_misc_is_value_based_class);
789 br(Assembler::NE, slow_case);
790 }
791
792 if (LockingMode == LM_LIGHTWEIGHT) {
793 lightweight_lock(lock_reg, obj_reg, tmp, tmp2, tmp3, slow_case);
794 b(done);
795 } else if (LockingMode == LM_LEGACY) {
796 // Load (object->mark() | 1) into swap_reg
797 ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
798 orr(swap_reg, rscratch1, 1);
799 if (EnableValhalla) {
800 // Mask inline_type bit such that we go to the slow path if object is an inline type
801 andr(swap_reg, swap_reg, ~((int) markWord::inline_type_bit_in_place));
802 }
803
804 // Save (object->mark() | 1) into BasicLock's displaced header
805 str(swap_reg, Address(lock_reg, mark_offset));
806
807 assert(lock_offset == 0,
808 "displached header must be first word in BasicObjectLock");
809
810 Label fail;
811 cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, count, /*fallthrough*/nullptr);
812
813 // Fast check for recursive lock.
814 //
815 // Can apply the optimization only if this is a stack lock
816 // allocated in this thread. For efficiency, we can focus on
817 // recently allocated stack locks (instead of reading the stack
818 // base and checking whether 'mark' points inside the current
819 // thread stack):
820 // 1) (mark & 7) == 0, and
821 // 2) sp <= mark < mark + os::pagesize()
822 //
1136 Address data(mdp, in_bytes(JumpData::taken_offset()));
1137 ldr(bumped_count, data);
1138 assert(DataLayout::counter_increment == 1,
1139 "flow-free idiom only works with 1");
1140 // Intel does this to catch overflow
1141 // addptr(bumped_count, DataLayout::counter_increment);
1142 // sbbptr(bumped_count, 0);
1143 // so we do this
1144 adds(bumped_count, bumped_count, DataLayout::counter_increment);
1145 Label L;
1146 br(Assembler::CS, L); // skip store if counter overflow
1147 str(bumped_count, data);
1148 bind(L);
1149 // The method data pointer needs to be updated to reflect the new target.
1150 update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
1151 bind(profile_continue);
1152 }
1153 }
1154
1155
// Profile a branch that was NOT taken: bump the not-taken counter at mdp
// and advance mdp past the profiling cell.  `acmp` selects the larger
// ACmpData layout (used for acmp bytecodes) instead of plain BranchData.
void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp, bool acmp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are not taking the branch. Increment the not taken count.
    increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));

    // The method data pointer needs to be updated to correspond to
    // the next bytecode
    update_mdp_by_constant(mdp, acmp ? in_bytes(ACmpData::acmp_data_size()) : in_bytes(BranchData::branch_data_size()));
    bind(profile_continue);
  }
}
1172
1173
// Profile a (non-virtual) call site: bump the CounterData count at mdp and
// advance mdp past the CounterData cell.
void InterpreterMacroAssembler::profile_call(Register mdp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    // We are making a call.  Increment the count.
    increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));

    // The method data pointer needs to be updated to reflect the new target.
    update_mdp_by_constant(mdp, in_bytes(CounterData::counter_data_size()));
    bind(profile_continue);
  }
}
1471 // case_array_offset_in_bytes()
1472 movw(reg2, in_bytes(MultiBranchData::per_case_size()));
1473 movw(rscratch1, in_bytes(MultiBranchData::case_array_offset()));
1474 Assembler::maddw(index, index, reg2, rscratch1);
1475
1476 // Update the case count
1477 increment_mdp_data_at(mdp,
1478 index,
1479 in_bytes(MultiBranchData::relative_count_offset()));
1480
1481 // The method data pointer needs to be updated.
1482 update_mdp_by_offset(mdp,
1483 index,
1484 in_bytes(MultiBranchData::
1485 relative_displacement_offset()));
1486
1487 bind(profile_continue);
1488 }
1489 }
1490
// Profile the klass of `array` into the ArrayData (ArrayLoadData or
// ArrayStoreData) at mdp, and record via mdp flags whether the array is
// flat and/or null-free.  Kills: tmp.
template <class ArrayData> void InterpreterMacroAssembler::profile_array_type(Register mdp,
                                                                              Register array,
                                                                              Register tmp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    mov(tmp, array);
    profile_obj_type(tmp, Address(mdp, in_bytes(ArrayData::array_offset())));

    // Record the flat-array property, if set.
    Label not_flat;
    test_non_flat_array_oop(array, tmp, not_flat);

    set_mdp_flag_at(mdp, ArrayData::flat_array_byte_constant());

    bind(not_flat);

    // Record the null-free-array property, if set.
    Label not_null_free;
    test_non_null_free_array_oop(array, tmp, not_null_free);

    set_mdp_flag_at(mdp, ArrayData::null_free_array_byte_constant());

    bind(not_null_free);

    bind(profile_continue);
  }
}
1520
// Explicit instantiations for the two array-profiling data kinds.
template void InterpreterMacroAssembler::profile_array_type<ArrayLoadData>(Register mdp,
                                                                           Register array,
                                                                           Register tmp);
template void InterpreterMacroAssembler::profile_array_type<ArrayStoreData>(Register mdp,
                                                                            Register array,
                                                                            Register tmp);
1527
// Profile the element of an array store when multiple element types may be
// seen: records null_seen when `element` is null, otherwise records the
// element's klass in the receiver-type rows; then advances mdp past the
// ArrayStoreData.  Kills: tmp, tmp2.
void InterpreterMacroAssembler::profile_multiple_element_types(Register mdp, Register element, Register tmp, const Register tmp2) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    Label done, update;
    cbnz(element, update);
    // Null element: only mark the null_seen flag.
    set_mdp_flag_at(mdp, BitData::null_seen_byte_constant());
    b(done);

    bind(update);
    load_klass(tmp, element);

    // Record the object type.
    record_klass_in_profile(tmp, mdp, tmp2);

    bind(done);

    // The method data pointer needs to be updated.
    update_mdp_by_constant(mdp, in_bytes(ArrayStoreData::array_store_data_size()));

    bind(profile_continue);
  }
}
1554
1555
// Profile the type of an array-load result: record `element`'s type in the
// ArrayLoadData at mdp, then advance mdp past it.  Kills: tmp.
void InterpreterMacroAssembler::profile_element_type(Register mdp,
                                                     Register element,
                                                     Register tmp) {
  if (ProfileInterpreter) {
    Label profile_continue;

    // If no method data exists, go to profile_continue.
    test_method_data_pointer(mdp, profile_continue);

    mov(tmp, element);
    profile_obj_type(tmp, Address(mdp, in_bytes(ArrayLoadData::element_offset())));

    // The method data pointer needs to be updated.
    update_mdp_by_constant(mdp, in_bytes(ArrayLoadData::array_load_data_size()));

    bind(profile_continue);
  }
}
1574
1575 void InterpreterMacroAssembler::profile_acmp(Register mdp,
1576 Register left,
1577 Register right,
1578 Register tmp) {
1579 if (ProfileInterpreter) {
1580 Label profile_continue;
1581
1582 // If no method data exists, go to profile_continue.
1583 test_method_data_pointer(mdp, profile_continue);
1584
1585 mov(tmp, left);
1586 profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::left_offset())));
1587
1588 Label left_not_inline_type;
1589 test_oop_is_not_inline_type(left, tmp, left_not_inline_type);
1590 set_mdp_flag_at(mdp, ACmpData::left_inline_type_byte_constant());
1591 bind(left_not_inline_type);
1592
1593 mov(tmp, right);
1594 profile_obj_type(tmp, Address(mdp, in_bytes(ACmpData::right_offset())));
1595
1596 Label right_not_inline_type;
1597 test_oop_is_not_inline_type(right, tmp, right_not_inline_type);
1598 set_mdp_flag_at(mdp, ACmpData::right_inline_type_byte_constant());
1599 bind(right_not_inline_type);
1600
1601 bind(profile_continue);
1602 }
1603 }
1604
1605 void InterpreterMacroAssembler::_interp_verify_oop(Register reg, TosState state, const char* file, int line) {
1606 if (state == atos) {
1607 MacroAssembler::_verify_oop_checked(reg, "broken oop", file, line);
1608 }
1609 }
1610
1611 void InterpreterMacroAssembler::notify_method_entry() {
1612 // Whenever JVMTI is interp_only_mode, method entry/exit events are sent to
1613 // track stack depth. If it is possible to enter interp_only_mode we add
1614 // the code to check if the event should be sent.
1615 if (JvmtiExport::can_post_interpreter_events()) {
1616 Label L;
1617 ldrw(r3, Address(rthread, JavaThread::interp_only_mode_offset()));
1618 cbzw(r3, L);
1619 call_VM(noreg, CAST_FROM_FN_PTR(address,
1620 InterpreterRuntime::post_method_entry));
1621 bind(L);
1622 }
1623
1624 if (DTraceMethodProbes) {
1883 profile_obj_type(tmp, mdo_arg_addr);
1884
1885 int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
1886 off_to_args += to_add;
1887 }
1888
1889 if (MethodData::profile_return()) {
1890 ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
1891 sub(tmp, tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
1892 }
1893
1894 add(rscratch1, mdp, off_to_args);
1895 bind(done);
1896 mov(mdp, rscratch1);
1897
1898 if (MethodData::profile_return()) {
1899 // We're right after the type profile for the last
1900 // argument. tmp is the number of cells left in the
1901 // CallTypeData/VirtualCallTypeData to reach its end. Non null
1902 // if there's a return to profile.
1903 assert(SingleTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
1904 add(mdp, mdp, tmp, LSL, exact_log2(DataLayout::cell_size));
1905 }
1906 str(mdp, Address(rfp, frame::interpreter_frame_mdp_offset * wordSize));
1907 } else {
1908 assert(MethodData::profile_return(), "either profile call args or call ret");
1909 update_mdp_by_constant(mdp, in_bytes(TypeEntriesAtCall::return_only_size()));
1910 }
1911
1912 // mdp points right after the end of the
1913 // CallTypeData/VirtualCallTypeData, right after the cells for the
1914 // return value type if there's one
1915
1916 bind(profile_continue);
1917 }
1918 }
1919
1920 void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
1921 assert_different_registers(mdp, ret, tmp, rbcp);
1922 if (ProfileInterpreter && MethodData::profile_return()) {
1923 Label profile_continue, done;
1929
1930 // If we don't profile all invoke bytecodes we must make sure
1931 // it's a bytecode we indeed profile. We can't go back to the
1932 // beginning of the ProfileData we intend to update to check its
1933 // type because we're right after it and we don't known its
1934 // length
1935 Label do_profile;
1936 ldrb(rscratch1, Address(rbcp, 0));
1937 cmp(rscratch1, (u1)Bytecodes::_invokedynamic);
1938 br(Assembler::EQ, do_profile);
1939 cmp(rscratch1, (u1)Bytecodes::_invokehandle);
1940 br(Assembler::EQ, do_profile);
1941 get_method(tmp);
1942 ldrh(rscratch1, Address(tmp, Method::intrinsic_id_offset()));
1943 subs(zr, rscratch1, static_cast<int>(vmIntrinsics::_compiledLambdaForm));
1944 br(Assembler::NE, profile_continue);
1945
1946 bind(do_profile);
1947 }
1948
1949 Address mdo_ret_addr(mdp, -in_bytes(SingleTypeEntry::size()));
1950 mov(tmp, ret);
1951 profile_obj_type(tmp, mdo_ret_addr);
1952
1953 bind(profile_continue);
1954 }
1955 }
1956
1957 void InterpreterMacroAssembler::profile_parameters_type(Register mdp, Register tmp1, Register tmp2) {
1958 assert_different_registers(rscratch1, rscratch2, mdp, tmp1, tmp2);
1959 if (ProfileInterpreter && MethodData::profile_parameters()) {
1960 Label profile_continue, done;
1961
1962 test_method_data_pointer(mdp, profile_continue);
1963
1964 // Load the offset of the area within the MDO used for
1965 // parameters. If it's negative we're not profiling any parameters
1966 ldrw(tmp1, Address(mdp, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset())));
1967 tbnz(tmp1, 31, profile_continue); // i.e. sign bit set
1968
1969 // Compute a pointer to the area for parameters from the offset
|