15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "asm/macroAssembler.inline.hpp"
27 #include "asm/assembler.hpp"
28 #include "c1/c1_CodeStubs.hpp"
29 #include "c1/c1_Compilation.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_MacroAssembler.hpp"
32 #include "c1/c1_Runtime1.hpp"
33 #include "c1/c1_ValueStack.hpp"
34 #include "ci/ciArrayKlass.hpp"
35 #include "ci/ciInstance.hpp"
36 #include "code/compiledIC.hpp"
37 #include "gc/shared/collectedHeap.hpp"
38 #include "gc/shared/gc_globals.hpp"
39 #include "nativeInst_aarch64.hpp"
40 #include "oops/objArrayKlass.hpp"
41 #include "runtime/frame.inline.hpp"
42 #include "runtime/sharedRuntime.hpp"
43 #include "runtime/stubRoutines.hpp"
44 #include "utilities/powerOfTwo.hpp"
45 #include "vmreg_aarch64.inline.hpp"
46
47
48 #ifndef PRODUCT
49 #define COMMENT(x) do { __ block_comment(x); } while (0)
50 #else
51 #define COMMENT(x)
52 #endif
53
54 NEEDS_CLEANUP // remove these definitions?
55 const Register SYNC_header = r0; // synchronization header
56 const Register SHIFT_count = r0; // where count for shift operations must be
57
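// Within this file, __ abbreviates _masm-> so that the emission code
// below reads like assembly source.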
58 #define __ _masm->
59
60
413 if (LockingMode == LM_MONITOR) {
414 __ b(*stub->entry());
415 } else {
416 __ unlock_object(r5, r4, r0, r6, *stub->entry());
417 }
418 __ bind(*stub->continuation());
419 }
420
421 if (compilation()->env()->dtrace_method_probes()) {
422 __ mov(c_rarg0, rthread);
423 __ mov_metadata(c_rarg1, method()->constant_encoding());
424 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
425 }
426
427 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
428 __ mov(r0, r19); // Restore the exception
429 }
430
431 // remove the activation and dispatch to the unwind handler
432 __ block_comment("remove_frame and dispatch to the unwind handler");
433 __ remove_frame(initial_frame_size_in_bytes());
434 __ far_jump(RuntimeAddress(Runtime1::entry_for(C1StubId::unwind_exception_id)));
435
436 // Emit the slow path assembly
437 if (stub != nullptr) {
438 stub->emit_code(this);
439 }
440
441 return offset;
442 }
443
444
445 int LIR_Assembler::emit_deopt_handler() {
446 // generate code for the deopt handler
447 address handler_base = __ start_a_stub(deopt_handler_size());
448 if (handler_base == nullptr) {
449 // not enough space left for the handler
450 bailout("deopt handler overflow");
451 return -1;
452 }
453
457 __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
458 guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
459 __ end_a_stub();
460
461 return offset;
462 }
463
464 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
465 _masm->code_section()->relocate(adr, relocInfo::poll_type);
466 int pc_offset = code_offset();
467 flush_debug_info(pc_offset);
468 info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
469 if (info->exception_handlers() != nullptr) {
470 compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
471 }
472 }
473
474 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
475 assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0");
476
477 // Pop the stack before the safepoint code
478 __ remove_frame(initial_frame_size_in_bytes());
479
480 if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
481 __ reserved_stack_check();
482 }
483
484 code_stub->set_safepoint_offset(__ offset());
485 __ relocate(relocInfo::poll_return_type);
486 __ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */);
487 __ ret(lr);
488 }
489
490 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
491 guarantee(info != nullptr, "Shouldn't be null");
492 __ get_polling_page(rscratch1, relocInfo::poll_type);
493 add_debug_info_for_branch(info); // This isn't just debug info:
494 // it's the oop map
495 __ read_polling_page(rscratch1, relocInfo::poll_type);
496 return __ offset();
497 }
498
499
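// In LIR the stack pointer appears as r31_sp; translate it to the
// assembler's sp on both sides before emitting the move.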
500 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
501 if (from_reg == r31_sp)
502 from_reg = sp;
503 if (to_reg == r31_sp)
504 to_reg = sp;
505 __ mov(to_reg, from_reg);
506 }
507
508 void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
509
516 switch (c->type()) {
517 case T_INT: {
518 assert(patch_code == lir_patch_none, "no patching handled here");
519 __ movw(dest->as_register(), c->as_jint());
520 break;
521 }
522
523 case T_ADDRESS: {
524 assert(patch_code == lir_patch_none, "no patching handled here");
525 __ mov(dest->as_register(), c->as_jint());
526 break;
527 }
528
529 case T_LONG: {
530 assert(patch_code == lir_patch_none, "no patching handled here");
531 __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
532 break;
533 }
534
535 case T_OBJECT: {
536 if (patch_code == lir_patch_none) {
537 jobject2reg(c->as_jobject(), dest->as_register());
538 } else {
539 jobject2reg_with_patching(dest->as_register(), info);
540 }
541 break;
542 }
543
544 case T_METADATA: {
545 if (patch_code != lir_patch_none) {
546 klass2reg_with_patching(dest->as_register(), info);
547 } else {
548 __ mov_metadata(dest->as_register(), c->as_metadata());
549 }
550 break;
551 }
552
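// Floats: use an fmov immediate when the constant is encodable as such;
// otherwise materialize it from the constant area via adr + ldrs.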
553 case T_FLOAT: {
554 if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
555 __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
556 } else {
557 __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
558 __ ldrs(dest->as_float_reg(), Address(rscratch1));
559 }
629 LIR_Const* c = src->as_constant_ptr();
630 LIR_Address* to_addr = dest->as_address_ptr();
631
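// The constant being stored is zero (or null) in every supported case;
// the switch below only selects the store width, emitted through a
// member-function pointer.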
632 void (Assembler::* insn)(Register Rt, const Address &adr);
633
634 switch (type) {
635 case T_ADDRESS:
636 assert(c->as_jint() == 0, "should be");
637 insn = &Assembler::str;
638 break;
639 case T_LONG:
640 assert(c->as_jlong() == 0, "should be");
641 insn = &Assembler::str;
642 break;
643 case T_INT:
644 assert(c->as_jint() == 0, "should be");
645 insn = &Assembler::strw;
646 break;
647 case T_OBJECT:
648 case T_ARRAY:
649 assert(c->as_jobject() == nullptr, "should be");
650 if (UseCompressedOops && !wide) {
651 insn = &Assembler::strw;
652 } else {
653 insn = &Assembler::str;
654 }
655 break;
656 case T_CHAR:
657 case T_SHORT:
658 assert(c->as_jint() == 0, "should be");
659 insn = &Assembler::strh;
660 break;
661 case T_BOOLEAN:
662 case T_BYTE:
663 assert(c->as_jint() == 0, "should be");
664 insn = &Assembler::strb;
665 break;
666 default:
667 ShouldNotReachHere();
668 insn = &Assembler::str; // unreachable
976 case T_CHAR:
977 __ ldrh(dest->as_register(), as_Address(from_addr));
978 break;
979 case T_SHORT:
980 __ ldrsh(dest->as_register(), as_Address(from_addr));
981 break;
982
983 default:
984 ShouldNotReachHere();
985 }
986
987 if (is_reference_type(type)) {
988 if (UseCompressedOops && !wide) {
989 __ decode_heap_oop(dest->as_register());
990 }
991
992 __ verify_oop(dest->as_register());
993 }
994 }
995
996
997 int LIR_Assembler::array_element_size(BasicType type) const {
998 int elem_size = type2aelembytes(type);
999 return exact_log2(elem_size);
1000 }
1001
1002
1003 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1004 switch (op->code()) {
1005 case lir_idiv:
1006 case lir_irem:
1007 arithmetic_idiv(op->code(),
1008 op->in_opr1(),
1009 op->in_opr2(),
1010 op->in_opr3(),
1011 op->result_opr(),
1012 op->info());
1013 break;
1014 case lir_fmad:
1015 __ fmaddd(op->result_opr()->as_double_reg(),
1167 __ lea(rscratch1, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
1168 __ ldarb(rscratch1, rscratch1);
1169 __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1170 add_debug_info_for_null_check_here(op->stub()->info());
1171 __ br(Assembler::NE, *op->stub()->entry());
1172 }
1173 __ allocate_object(op->obj()->as_register(),
1174 op->tmp1()->as_register(),
1175 op->tmp2()->as_register(),
1176 op->header_size(),
1177 op->object_size(),
1178 op->klass()->as_register(),
1179 *op->stub()->entry());
1180 __ bind(*op->stub()->continuation());
1181 }
1182
1183 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1184 Register len = op->len()->as_register();
1185 __ uxtw(len, len);
1186
1187 if (UseSlowPath ||
1188 (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1189 (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
1190 __ b(*op->stub()->entry());
1191 } else {
1192 Register tmp1 = op->tmp1()->as_register();
1193 Register tmp2 = op->tmp2()->as_register();
1194 Register tmp3 = op->tmp3()->as_register();
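// Keep the length value alive for the slow path: if len aliases one of
// the temps, retarget that temp to tmp3 so len is not clobbered;
// otherwise stash a copy of len in tmp3.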
1195 if (len == tmp1) {
1196 tmp1 = tmp3;
1197 } else if (len == tmp2) {
1198 tmp2 = tmp3;
1199 } else if (len == tmp3) {
1200 // len already lives in tmp3; nothing more to do
1201 } else {
1202 __ mov(tmp3, len);
1203 }
1204 __ allocate_array(op->obj()->as_register(),
1205 len,
1206 tmp1,
1207 tmp2,
1279 assert(data != nullptr, "need data for type check");
1280 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1281 }
1282 Label* success_target = success;
1283 Label* failure_target = failure;
1284
1285 if (obj == k_RInfo) {
1286 k_RInfo = dst;
1287 } else if (obj == klass_RInfo) {
1288 klass_RInfo = dst;
1289 }
1290 if (k->is_loaded() && !UseCompressedClassPointers) {
1291 select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1292 } else {
1293 Rtmp1 = op->tmp3()->as_register();
1294 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1295 }
1296
1297 assert_different_registers(obj, k_RInfo, klass_RInfo);
1298
1299 if (should_profile) {
1300 Register mdo = klass_RInfo;
1301 __ mov_metadata(mdo, md->constant_encoding());
1302 Label not_null;
1303 __ cbnz(obj, not_null);
1304 // Object is null; update MDO and exit
1305 Address data_addr
1306 = __ form_address(rscratch2, mdo,
1307 md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1308 0);
1309 __ ldrb(rscratch1, data_addr);
1310 __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1311 __ strb(rscratch1, data_addr);
1312 __ b(*obj_is_null);
1313 __ bind(not_null);
1314
1315 Label update_done;
1316 Register recv = k_RInfo;
1317 __ load_klass(recv, obj);
1318 type_profile_helper(mdo, md, data, recv, &update_done);
1319 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1320 __ addptr(counter_addr, DataLayout::counter_increment);
1321
1322 __ bind(update_done);
1323 } else {
1324 __ cbz(obj, *obj_is_null);
1325 }
1326
1327 if (!k->is_loaded()) {
1328 klass2reg_with_patching(k_RInfo, op->info_for_patch());
1329 } else {
1330 __ mov_metadata(k_RInfo, k->constant_encoding());
1331 }
1332 __ verify_oop(obj);
1333
1334 if (op->fast_check()) {
1335 // get object class
1336 // not a safepoint as obj null check happens earlier
1337 __ load_klass(rscratch1, obj);
1338 __ cmp(rscratch1, k_RInfo);
1339
1340 __ br(Assembler::NE, *failure_target);
1341 // successful cast, fall through to profile or jump
1342 } else {
1343 // get object class
1344 // not a safepoint as obj null check happens earlier
1462 __ bind(success);
1463 if (dst != obj) {
1464 __ mov(dst, obj);
1465 }
1466 } else if (code == lir_instanceof) {
1467 Register obj = op->object()->as_register();
1468 Register dst = op->result_opr()->as_register();
1469 Label success, failure, done;
1470 emit_typecheck_helper(op, &success, &failure, &failure);
1471 __ bind(failure);
1472 __ mov(dst, zr);
1473 __ b(done);
1474 __ bind(success);
1475 __ mov(dst, 1);
1476 __ bind(done);
1477 } else {
1478 ShouldNotReachHere();
1479 }
1480 }
1481
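// Compare-and-swap helpers: cmpxchg leaves the condition flags set (EQ on
// success), cset materializes the inverse in rscratch1 (0 = success,
// 1 = failure), and the trailing AnyAny barrier supplies the full two-way
// fence expected of a CAS.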
1482 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
1483 __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1484 __ cset(rscratch1, Assembler::NE);
1485 __ membar(__ AnyAny);
1486 }
1487
1488 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
1489 __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1490 __ cset(rscratch1, Assembler::NE);
1491 __ membar(__ AnyAny);
1492 }
1493
1494
1495 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1496 Register addr;
1497 if (op->addr()->is_register()) {
1498 addr = as_reg(op->addr());
1499 } else {
1500 assert(op->addr()->is_address(), "what else?");
1501 LIR_Address* addr_ptr = op->addr()->as_address_ptr();
1975 __ cmp(left->as_register_lo(), right->as_register_lo());
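// Three-way long compare: dst = -1 if less, 0 if equal, 1 if greater.
// csinc(dst, zr, zr, EQ) yields zr (0) when EQ and zr + 1 == 1 otherwise.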
1976 __ mov(dst->as_register(), (uint64_t)-1L);
1977 __ br(Assembler::LT, done);
1978 __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
1979 __ bind(done);
1980 } else {
1981 ShouldNotReachHere();
1982 }
1983 }
1984
1985
1986 void LIR_Assembler::align_call(LIR_Code code) { }
1987
1988
1989 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
1990 address call = __ trampoline_call(Address(op->addr(), rtype));
1991 if (call == nullptr) {
1992 bailout("trampoline stub overflow");
1993 return;
1994 }
1995 add_call_info(code_offset(), op->info());
1996 __ post_call_nop();
1997 }
1998
1999
2000 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2001 address call = __ ic_call(op->addr());
2002 if (call == nullptr) {
2003 bailout("trampoline stub overflow");
2004 return;
2005 }
2006 add_call_info(code_offset(), op->info());
2007 __ post_call_nop();
2008 }
2009
2010 void LIR_Assembler::emit_static_call_stub() {
2011 address call_pc = __ pc();
2012 address stub = __ start_a_stub(call_stub_size());
2013 if (stub == nullptr) {
2014 bailout("static call stub overflow");
2015 return;
2016 }
2017
2018 int start = __ offset();
2019
2020 __ relocate(static_stub_Relocation::spec(call_pc));
2021 __ emit_static_call_stub();
2022
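// The size check must allow for the trampoline stub that a far-away
// callee may later require, hence the to_trampoline_stub_size() term.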
2023 assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
2024 <= call_stub_size(), "stub too big");
2025 __ end_a_stub();
2026 }
2149
2150
2151 void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
2152 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2153 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2154 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2155 __ mov (rscratch1, c);
2156 __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2157 }
2158
2159
2160 void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
2161 ShouldNotReachHere();
2162 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2163 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2164 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2165 __ lea(rscratch1, __ constant_oop_address(o));
2166 __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
2167 }
2168
2169
2170 // This code replaces a call to arraycopy; no exceptions may
2171 // be thrown in this code. They must be thrown in the System.arraycopy
2172 // activation frame; we could save some checks if that were not the case.
2173 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2174 ciArrayKlass* default_type = op->expected_type();
2175 Register src = op->src()->as_register();
2176 Register dst = op->dst()->as_register();
2177 Register src_pos = op->src_pos()->as_register();
2178 Register dst_pos = op->dst_pos()->as_register();
2179 Register length = op->length()->as_register();
2180 Register tmp = op->tmp()->as_register();
2181
2182 CodeStub* stub = op->stub();
2183 int flags = op->flags();
2184 BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
2185 if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2186
2187 // if we don't know anything, just go through the generic arraycopy
2188 if (default_type == nullptr // || basic_type == T_OBJECT
2189 ) {
2190 Label done;
2191 assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2192
2193 // Save the arguments in case the generic arraycopy fails and we
2194 // have to fall back to the JNI stub
2195 __ stp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2196 __ stp(length, src_pos, Address(sp, 2*BytesPerWord));
2197 __ str(src, Address(sp, 4*BytesPerWord));
2198
2199 address copyfunc_addr = StubRoutines::generic_arraycopy();
2200 assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
2201
2202 // The arguments are in the Java calling convention, so we shift them
2203 // to the C convention
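// Each assert below checks that the move's destination does not clobber
// a Java argument register that has not been copied yet.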
2204 assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2205 __ mov(c_rarg0, j_rarg0);
2206 assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
2220 __ cbz(r0, *stub->continuation());
2221
2222 // Reload values from the stack so they are where the stub
2223 // expects them.
2224 __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2225 __ ldp(length, src_pos, Address(sp, 2*BytesPerWord));
2226 __ ldr(src, Address(sp, 4*BytesPerWord));
2227
2228 // r0 is -1^K (the bitwise NOT of K) where K == partial copied count
2229 __ eonw(rscratch1, r0, zr); // rscratch1 = ~r0 = K
2230 // adjust length down and src/dst pos up by the partial copied count
2231 __ subw(length, length, rscratch1);
2232 __ addw(src_pos, src_pos, rscratch1);
2233 __ addw(dst_pos, dst_pos, rscratch1);
2234 __ b(*stub->entry());
2235
2236 __ bind(*stub->continuation());
2237 return;
2238 }
2239
2240 assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2241
2242 int elem_size = type2aelembytes(basic_type);
2243 int scale = exact_log2(elem_size);
2244
2245 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2246 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2247
2248 // test for null
2249 if (flags & LIR_OpArrayCopy::src_null_check) {
2250 __ cbz(src, *stub->entry());
2251 }
2252 if (flags & LIR_OpArrayCopy::dst_null_check) {
2253 __ cbz(dst, *stub->entry());
2254 }
2255
2256 // If the compiler was not able to prove that exact type of the source or the destination
2257 // of the arraycopy is an array type, check at runtime if the source or the destination is
2258 // an instance type.
2259 if (flags & LIR_OpArrayCopy::type_check) {
2774 __ verify_klass_ptr(tmp);
2775 #endif
2776 } else {
2777 assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
2778 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2779
2780 __ ldr(tmp, mdo_addr);
2781 __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2782
2783 __ orr(tmp, tmp, TypeEntries::type_unknown);
2784 __ str(tmp, mdo_addr);
2785 // FIXME: Write barrier needed here?
2786 }
2787 }
2788
2789 __ bind(next);
2790 }
2791 COMMENT("} emit_profile_type");
2792 }
2793
2794
2795 void LIR_Assembler::align_backward_branch_target() {
2796 }
2797
2798
2799 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
2800 // tmp must be unused
2801 assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
2802
2803 if (left->is_single_cpu()) {
2804 assert(dest->is_single_cpu(), "expect single result reg");
2805 __ negw(dest->as_register(), left->as_register());
2806 } else if (left->is_double_cpu()) {
2807 assert(dest->is_double_cpu(), "expect double result reg");
2808 __ neg(dest->as_register_lo(), left->as_register_lo());
2809 } else if (left->is_single_fpu()) {
2810 assert(dest->is_single_fpu(), "expect single float result reg");
2811 __ fnegs(dest->as_float_reg(), left->as_float_reg());
2812 } else {
2813 assert(left->is_double_fpu(), "expect double float operand reg");
2913 void LIR_Assembler::membar_loadload() {
2914 __ membar(Assembler::LoadLoad);
2915 }
2916
2917 void LIR_Assembler::membar_storestore() {
2918 __ membar(MacroAssembler::StoreStore);
2919 }
2920
2921 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
2922
2923 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
2924
2925 void LIR_Assembler::on_spin_wait() {
2926 __ spin_wait();
2927 }
2928
2929 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
2930 __ mov(result_reg->as_register(), rthread);
2931 }
2932
2933
2934 void LIR_Assembler::peephole(LIR_List *lir) {
2935 #if 0
2936 if (tableswitch_count >= max_tableswitches)
2937 return;
2938
2939 /*
2940 This finite-state automaton recognizes sequences of compare-and-
2941 branch instructions. We will turn them into a tableswitch. You
2942 could argue that C1 really shouldn't be doing this sort of
2943 optimization, but without it the code is really horrible.
2944 */
2945
2946 enum { start_s, cmp1_s, beq_s, cmp_s } state;
2947 int first_key, last_key = -2147483648;
2948 int next_key = 0;
2949 int start_insn = -1;
2950 int last_insn = -1;
2951 Register reg = noreg;
2952 LIR_Opr reg_opr;
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #include "asm/macroAssembler.inline.hpp"
27 #include "asm/assembler.hpp"
28 #include "c1/c1_CodeStubs.hpp"
29 #include "c1/c1_Compilation.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_MacroAssembler.hpp"
32 #include "c1/c1_Runtime1.hpp"
33 #include "c1/c1_ValueStack.hpp"
34 #include "ci/ciArrayKlass.hpp"
35 #include "ci/ciInlineKlass.hpp"
36 #include "ci/ciInstance.hpp"
37 #include "code/compiledIC.hpp"
38 #include "gc/shared/collectedHeap.hpp"
39 #include "gc/shared/gc_globals.hpp"
40 #include "nativeInst_aarch64.hpp"
41 #include "oops/objArrayKlass.hpp"
42 #include "oops/oop.inline.hpp"
43 #include "runtime/frame.inline.hpp"
44 #include "runtime/sharedRuntime.hpp"
45 #include "runtime/stubRoutines.hpp"
46 #include "utilities/powerOfTwo.hpp"
47 #include "vmreg_aarch64.inline.hpp"
48
49
50 #ifndef PRODUCT
51 #define COMMENT(x) do { __ block_comment(x); } while (0)
52 #else
53 #define COMMENT(x)
54 #endif
55
56 NEEDS_CLEANUP // remove these definitions?
57 const Register SYNC_header = r0; // synchronization header
58 const Register SHIFT_count = r0; // where count for shift operations must be
59
60 #define __ _masm->
61
62
415 if (LockingMode == LM_MONITOR) {
416 __ b(*stub->entry());
417 } else {
418 __ unlock_object(r5, r4, r0, r6, *stub->entry());
419 }
420 __ bind(*stub->continuation());
421 }
422
423 if (compilation()->env()->dtrace_method_probes()) {
424 __ mov(c_rarg0, rthread);
425 __ mov_metadata(c_rarg1, method()->constant_encoding());
426 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
427 }
428
429 if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
430 __ mov(r0, r19); // Restore the exception
431 }
432
433 // remove the activation and dispatch to the unwind handler
434 __ block_comment("remove_frame and dispatch to the unwind handler");
435 __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
436 __ far_jump(RuntimeAddress(Runtime1::entry_for(C1StubId::unwind_exception_id)));
437
438 // Emit the slow path assembly
439 if (stub != nullptr) {
440 stub->emit_code(this);
441 }
442
443 return offset;
444 }
445
446
447 int LIR_Assembler::emit_deopt_handler() {
448 // generate code for the deopt handler
449 address handler_base = __ start_a_stub(deopt_handler_size());
450 if (handler_base == nullptr) {
451 // not enough space left for the handler
452 bailout("deopt handler overflow");
453 return -1;
454 }
455
459 __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
460 guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
461 __ end_a_stub();
462
463 return offset;
464 }
465
466 void LIR_Assembler::add_debug_info_for_branch(address adr, CodeEmitInfo* info) {
467 _masm->code_section()->relocate(adr, relocInfo::poll_type);
468 int pc_offset = code_offset();
469 flush_debug_info(pc_offset);
470 info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
471 if (info->exception_handlers() != nullptr) {
472 compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
473 }
474 }
475
476 void LIR_Assembler::return_op(LIR_Opr result, C1SafepointPollStub* code_stub) {
477 assert(result->is_illegal() || !result->is_single_cpu() || result->as_register() == r0, "word returns are in r0");
478
479 if (InlineTypeReturnedAsFields) {
480 // Check if we are returning a non-null inline type and, if so, load its fields into registers
481 ciType* return_type = compilation()->method()->return_type();
482 if (return_type->is_inlinetype()) {
483 ciInlineKlass* vk = return_type->as_inline_klass();
484 if (vk->can_be_returned_as_fields()) {
485 address unpack_handler = vk->unpack_handler();
486 assert(unpack_handler != nullptr, "must be");
487 __ far_call(RuntimeAddress(unpack_handler));
488 }
489 } else if (return_type->is_instance_klass() && (!return_type->is_loaded() || StressCallingConvention)) {
490 Label skip;
491 __ test_oop_is_not_inline_type(r0, rscratch2, skip);
492
493 // Load fields from a buffered value with an inline-class-specific handler
494 __ load_klass(rscratch1 /*dst*/, r0 /*src*/);
495 __ ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_inlineklass_fixed_block_offset()));
496 __ ldr(rscratch1, Address(rscratch1, InlineKlass::unpack_handler_offset()));
497 // The unpack handler can be null if the inline type is not scalarizable in returns
498 __ cbz(rscratch1, skip);
499 __ blr(rscratch1);
500
501 __ bind(skip);
502 }
503 // At this point, r0 points to the value object (for interpreter or C1 caller).
504 // The fields of the object are copied into registers (for C2 caller).
505 }
506
507 // Pop the stack before the safepoint code
508 __ remove_frame(initial_frame_size_in_bytes(), needs_stack_repair());
509
510 if (StackReservedPages > 0 && compilation()->has_reserved_stack_access()) {
511 __ reserved_stack_check();
512 }
513
514 code_stub->set_safepoint_offset(__ offset());
515 __ relocate(relocInfo::poll_return_type);
516 __ safepoint_poll(*code_stub->entry(), true /* at_return */, false /* acquire */, true /* in_nmethod */);
517 __ ret(lr);
518 }
519
520 int LIR_Assembler::store_inline_type_fields_to_buf(ciInlineKlass* vk) {
521 return (__ store_inline_type_fields_to_buf(vk, false));
522 }
523
524 int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
525 guarantee(info != nullptr, "Shouldn't be null");
526 __ get_polling_page(rscratch1, relocInfo::poll_type);
527 add_debug_info_for_branch(info); // This isn't just debug info:
528 // it's the oop map
529 __ read_polling_page(rscratch1, relocInfo::poll_type);
530 return __ offset();
531 }
532
533
534 void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
535 if (from_reg == r31_sp)
536 from_reg = sp;
537 if (to_reg == r31_sp)
538 to_reg = sp;
539 __ mov(to_reg, from_reg);
540 }
541
542 void LIR_Assembler::swap_reg(Register a, Register b) { Unimplemented(); }
543
550 switch (c->type()) {
551 case T_INT: {
552 assert(patch_code == lir_patch_none, "no patching handled here");
553 __ movw(dest->as_register(), c->as_jint());
554 break;
555 }
556
557 case T_ADDRESS: {
558 assert(patch_code == lir_patch_none, "no patching handled here");
559 __ mov(dest->as_register(), c->as_jint());
560 break;
561 }
562
563 case T_LONG: {
564 assert(patch_code == lir_patch_none, "no patching handled here");
565 __ mov(dest->as_register_lo(), (intptr_t)c->as_jlong());
566 break;
567 }
568
569 case T_OBJECT: {
570 if (patch_code != lir_patch_none) {
571 jobject2reg_with_patching(dest->as_register(), info);
572 } else {
573 jobject2reg(c->as_jobject(), dest->as_register());
574 }
575 break;
576 }
577
578 case T_METADATA: {
579 if (patch_code != lir_patch_none) {
580 klass2reg_with_patching(dest->as_register(), info);
581 } else {
582 __ mov_metadata(dest->as_register(), c->as_metadata());
583 }
584 break;
585 }
586
587 case T_FLOAT: {
588 if (__ operand_valid_for_float_immediate(c->as_jfloat())) {
589 __ fmovs(dest->as_float_reg(), (c->as_jfloat()));
590 } else {
591 __ adr(rscratch1, InternalAddress(float_constant(c->as_jfloat())));
592 __ ldrs(dest->as_float_reg(), Address(rscratch1));
593 }
663 LIR_Const* c = src->as_constant_ptr();
664 LIR_Address* to_addr = dest->as_address_ptr();
665
666 void (Assembler::* insn)(Register Rt, const Address &adr);
667
668 switch (type) {
669 case T_ADDRESS:
670 assert(c->as_jint() == 0, "should be");
671 insn = &Assembler::str;
672 break;
673 case T_LONG:
674 assert(c->as_jlong() == 0, "should be");
675 insn = &Assembler::str;
676 break;
677 case T_INT:
678 assert(c->as_jint() == 0, "should be");
679 insn = &Assembler::strw;
680 break;
681 case T_OBJECT:
682 case T_ARRAY:
683 // Non-null case is not handled on aarch64 but handled on x86
684 // FIXME: do we need to add it here?
685 assert(c->as_jobject() == nullptr, "should be");
686 if (UseCompressedOops && !wide) {
687 insn = &Assembler::strw;
688 } else {
689 insn = &Assembler::str;
690 }
691 break;
692 case T_CHAR:
693 case T_SHORT:
694 assert(c->as_jint() == 0, "should be");
695 insn = &Assembler::strh;
696 break;
697 case T_BOOLEAN:
698 case T_BYTE:
699 assert(c->as_jint() == 0, "should be");
700 insn = &Assembler::strb;
701 break;
702 default:
703 ShouldNotReachHere();
704 insn = &Assembler::str; // unreachable
1012 case T_CHAR:
1013 __ ldrh(dest->as_register(), as_Address(from_addr));
1014 break;
1015 case T_SHORT:
1016 __ ldrsh(dest->as_register(), as_Address(from_addr));
1017 break;
1018
1019 default:
1020 ShouldNotReachHere();
1021 }
1022
1023 if (is_reference_type(type)) {
1024 if (UseCompressedOops && !wide) {
1025 __ decode_heap_oop(dest->as_register());
1026 }
1027
1028 __ verify_oop(dest->as_register());
1029 }
1030 }
1031
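// A generic operand move (register, stack slot, or constant source) used,
// among other places, to write the results of the substitutability check.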
1032 void LIR_Assembler::move(LIR_Opr src, LIR_Opr dst) {
1033 assert(dst->is_cpu_register(), "must be");
1034 assert(dst->type() == src->type(), "must be");
1035
1036 if (src->is_cpu_register()) {
1037 reg2reg(src, dst);
1038 } else if (src->is_stack()) {
1039 stack2reg(src, dst, dst->type());
1040 } else if (src->is_constant()) {
1041 const2reg(src, dst, lir_patch_none, nullptr);
1042 } else {
1043 ShouldNotReachHere();
1044 }
1045 }
1046
1047 int LIR_Assembler::array_element_size(BasicType type) const {
1048 int elem_size = type2aelembytes(type);
1049 return exact_log2(elem_size);
1050 }
1051
1052
1053 void LIR_Assembler::emit_op3(LIR_Op3* op) {
1054 switch (op->code()) {
1055 case lir_idiv:
1056 case lir_irem:
1057 arithmetic_idiv(op->code(),
1058 op->in_opr1(),
1059 op->in_opr2(),
1060 op->in_opr3(),
1061 op->result_opr(),
1062 op->info());
1063 break;
1064 case lir_fmad:
1065 __ fmaddd(op->result_opr()->as_double_reg(),
1217 __ lea(rscratch1, Address(op->klass()->as_register(), InstanceKlass::init_state_offset()));
1218 __ ldarb(rscratch1, rscratch1);
1219 __ cmpw(rscratch1, InstanceKlass::fully_initialized);
1220 add_debug_info_for_null_check_here(op->stub()->info());
1221 __ br(Assembler::NE, *op->stub()->entry());
1222 }
1223 __ allocate_object(op->obj()->as_register(),
1224 op->tmp1()->as_register(),
1225 op->tmp2()->as_register(),
1226 op->header_size(),
1227 op->object_size(),
1228 op->klass()->as_register(),
1229 *op->stub()->entry());
1230 __ bind(*op->stub()->continuation());
1231 }
1232
1233 void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
1234 Register len = op->len()->as_register();
1235 __ uxtw(len, len);
1236
1237 if (UseSlowPath || op->is_null_free() ||
1238 (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1239 (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
1240 __ b(*op->stub()->entry());
1241 } else {
1242 Register tmp1 = op->tmp1()->as_register();
1243 Register tmp2 = op->tmp2()->as_register();
1244 Register tmp3 = op->tmp3()->as_register();
1245 if (len == tmp1) {
1246 tmp1 = tmp3;
1247 } else if (len == tmp2) {
1248 tmp2 = tmp3;
1249 } else if (len == tmp3) {
1250 // len already lives in tmp3; nothing more to do
1251 } else {
1252 __ mov(tmp3, len);
1253 }
1254 __ allocate_array(op->obj()->as_register(),
1255 len,
1256 tmp1,
1257 tmp2,
1329 assert(data != nullptr, "need data for type check");
1330 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
1331 }
1332 Label* success_target = success;
1333 Label* failure_target = failure;
1334
1335 if (obj == k_RInfo) {
1336 k_RInfo = dst;
1337 } else if (obj == klass_RInfo) {
1338 klass_RInfo = dst;
1339 }
1340 if (k->is_loaded() && !UseCompressedClassPointers) {
1341 select_different_registers(obj, dst, k_RInfo, klass_RInfo);
1342 } else {
1343 Rtmp1 = op->tmp3()->as_register();
1344 select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
1345 }
1346
1347 assert_different_registers(obj, k_RInfo, klass_RInfo);
1348
1349 if (op->need_null_check()) {
1350 if (should_profile) {
1351 Register mdo = klass_RInfo;
1352 __ mov_metadata(mdo, md->constant_encoding());
1353 Label not_null;
1354 __ cbnz(obj, not_null);
1355 // Object is null; update MDO and exit
1356 Address data_addr
1357 = __ form_address(rscratch2, mdo,
1358 md->byte_offset_of_slot(data, DataLayout::flags_offset()),
1359 0);
1360 __ ldrb(rscratch1, data_addr);
1361 __ orr(rscratch1, rscratch1, BitData::null_seen_byte_constant());
1362 __ strb(rscratch1, data_addr);
1363 __ b(*obj_is_null);
1364 __ bind(not_null);
1365
1366 Label update_done;
1367 Register recv = k_RInfo;
1368 __ load_klass(recv, obj);
1369 type_profile_helper(mdo, md, data, recv, &update_done);
1370 Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
1371 __ addptr(counter_addr, DataLayout::counter_increment);
1372
1373 __ bind(update_done);
1374 } else {
1375 __ cbz(obj, *obj_is_null);
1376 }
1377 }
1378
1379 if (!k->is_loaded()) {
1380 klass2reg_with_patching(k_RInfo, op->info_for_patch());
1381 } else {
1382 __ mov_metadata(k_RInfo, k->constant_encoding());
1383 }
1384 __ verify_oop(obj);
1385
1386 if (op->fast_check()) {
1387 // get object class
1388 // not a safepoint as obj null check happens earlier
1389 __ load_klass(rscratch1, obj);
1390 __ cmp(rscratch1, k_RInfo);
1391
1392 __ br(Assembler::NE, *failure_target);
1393 // successful cast, fall through to profile or jump
1394 } else {
1395 // get object class
1396 // not a safepoint as obj null check happens earlier
1514 __ bind(success);
1515 if (dst != obj) {
1516 __ mov(dst, obj);
1517 }
1518 } else if (code == lir_instanceof) {
1519 Register obj = op->object()->as_register();
1520 Register dst = op->result_opr()->as_register();
1521 Label success, failure, done;
1522 emit_typecheck_helper(op, &success, &failure, &failure);
1523 __ bind(failure);
1524 __ mov(dst, zr);
1525 __ b(done);
1526 __ bind(success);
1527 __ mov(dst, 1);
1528 __ bind(done);
1529 } else {
1530 ShouldNotReachHere();
1531 }
1532 }
1533
1534 void LIR_Assembler::emit_opFlattenedArrayCheck(LIR_OpFlattenedArrayCheck* op) {
1535 // We are loading/storing from/to an array that *may* be a flat array (the
1536 // declared type is Object[], abstract[], interface[] or VT.ref[]).
1537 // If this array is a flat array, take the slow path.
1538 __ test_flat_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
1539 if (!op->value()->is_illegal()) {
1540 // The array is not a flat array, but it might be null-free. If we are storing
1541 // a null into a null-free array, take the slow path (which will throw NPE).
1542 Label skip;
1543 __ cbnz(op->value()->as_register(), skip);
1544 __ test_null_free_array_oop(op->array()->as_register(), op->tmp()->as_register(), *op->stub()->entry());
1545 __ bind(skip);
1546 }
1547 }
1548
1549 void LIR_Assembler::emit_opNullFreeArrayCheck(LIR_OpNullFreeArrayCheck* op) {
1550 // We are storing into an array that *may* be null-free (the declared type is
1551 // Object[], abstract[], interface[] or VT.ref[]).
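// The null-free bit is tested in the mark word. If the mark is not in its
// unlocked form, the bit is read from the klass' prototype header instead.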
1552 Label test_mark_word;
1553 Register tmp = op->tmp()->as_register();
1554 __ ldr(tmp, Address(op->array()->as_register(), oopDesc::mark_offset_in_bytes()));
1555 __ tst(tmp, markWord::unlocked_value);
1556 __ br(Assembler::NE, test_mark_word);
1557 __ load_prototype_header(tmp, op->array()->as_register());
1558 __ bind(test_mark_word);
1559 __ tst(tmp, markWord::null_free_array_bit_in_place);
1560 }
1561
1562 void LIR_Assembler::emit_opSubstitutabilityCheck(LIR_OpSubstitutabilityCheck* op) {
1563 Label L_oops_equal;
1564 Label L_oops_not_equal;
1565 Label L_end;
1566
1567 Register left = op->left()->as_register();
1568 Register right = op->right()->as_register();
1569
1570 __ cmp(left, right);
1571 __ br(Assembler::EQ, L_oops_equal);
1572
1573 // (1) Null check -- if one of the operands is null, the other must not be null (because
1574 // the two references are not equal), so they are not substitutable.
1575 // FIXME: do the null check only if the operand is nullable
1576 {
1577 __ cbz(left, L_oops_not_equal);
1578 __ cbz(right, L_oops_not_equal);
1579 }
1580
1581 ciKlass* left_klass = op->left_klass();
1582 ciKlass* right_klass = op->right_klass();
1583
1584 // (2) Inline type check -- if either of the operands is not an inline type,
1585 // they are not substitutable. We do this only if we are not sure that the
1586 // operands are inline types.
1587 if ((left_klass == nullptr || right_klass == nullptr) || // The klass is still unloaded, or came from a Phi node.
1588 !left_klass->is_inlinetype() || !right_klass->is_inlinetype()) {
1589 Register tmp1 = op->tmp1()->as_register();
1590 __ mov(tmp1, markWord::inline_type_pattern);
1591 __ ldr(rscratch1, Address(left, oopDesc::mark_offset_in_bytes()));
1592 __ andr(tmp1, tmp1, rscratch1);
1593 __ ldr(rscratch1, Address(right, oopDesc::mark_offset_in_bytes()));
1594 __ andr(tmp1, tmp1, rscratch1);
1595 __ cmp(tmp1, (u1)markWord::inline_type_pattern);
1596 __ br(Assembler::NE, L_oops_not_equal);
1597 }
1598
1599 // (3) Same klass check: if the operands are of different klasses, they are not substitutable.
1600 if (left_klass != nullptr && left_klass->is_inlinetype() && left_klass == right_klass) {
1601 // No need to load klass -- the operands are statically known to be the same inline klass.
1602 __ b(*op->stub()->entry());
1603 } else {
1604 Register left_klass_op = op->left_klass_op()->as_register();
1605 Register right_klass_op = op->right_klass_op()->as_register();
1606
1607 if (UseCompressedClassPointers) {
1608 __ ldrw(left_klass_op, Address(left, oopDesc::klass_offset_in_bytes()));
1609 __ ldrw(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
1610 __ cmpw(left_klass_op, right_klass_op);
1611 } else {
1612 __ ldr(left_klass_op, Address(left, oopDesc::klass_offset_in_bytes()));
1613 __ ldr(right_klass_op, Address(right, oopDesc::klass_offset_in_bytes()));
1614 __ cmp(left_klass_op, right_klass_op);
1615 }
1616
1617 __ br(Assembler::EQ, *op->stub()->entry()); // same klass -> do slow check
1618 // fall through to L_oops_not_equal
1619 }
1620
1621 __ bind(L_oops_not_equal);
1622 move(op->not_equal_result(), op->result_opr());
1623 __ b(L_end);
1624
1625 __ bind(L_oops_equal);
1626 move(op->equal_result(), op->result_opr());
1627 __ b(L_end);
1628
1629 // We've returned from the stub. r0 contains 0x0 iff the two
1630 // operands are not substitutable. (Don't compare against 0x1 in case the
1631 // C compiler is naughty.)
1632 __ bind(*op->stub()->continuation());
1633 __ cbz(r0, L_oops_not_equal); // (call_stub() == 0x0) -> not_equal
1634 move(op->equal_result(), op->result_opr()); // (call_stub() != 0x0) -> equal
1635 // fall-through
1636 __ bind(L_end);
1637 }
1638
1639
1640 void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
1641 __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1642 __ cset(rscratch1, Assembler::NE);
1643 __ membar(__ AnyAny);
1644 }
1645
1646 void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
1647 __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
1648 __ cset(rscratch1, Assembler::NE);
1649 __ membar(__ AnyAny);
1650 }
1651
1652
1653 void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
1654 Register addr;
1655 if (op->addr()->is_register()) {
1656 addr = as_reg(op->addr());
1657 } else {
1658 assert(op->addr()->is_address(), "what else?");
1659 LIR_Address* addr_ptr = op->addr()->as_address_ptr();
2133 __ cmp(left->as_register_lo(), right->as_register_lo());
2134 __ mov(dst->as_register(), (uint64_t)-1L);
2135 __ br(Assembler::LT, done);
2136 __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
2137 __ bind(done);
2138 } else {
2139 ShouldNotReachHere();
2140 }
2141 }
2142
2143
2144 void LIR_Assembler::align_call(LIR_Code code) { }
2145
2146
2147 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2148 address call = __ trampoline_call(Address(op->addr(), rtype));
2149 if (call == nullptr) {
2150 bailout("trampoline stub overflow");
2151 return;
2152 }
2153 add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
2154 __ post_call_nop();
2155 }
2156
2157
2158 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2159 address call = __ ic_call(op->addr());
2160 if (call == nullptr) {
2161 bailout("trampoline stub overflow");
2162 return;
2163 }
2164 add_call_info(code_offset(), op->info(), op->maybe_return_as_fields());
2165 __ post_call_nop();
2166 }
2167
2168 void LIR_Assembler::emit_static_call_stub() {
2169 address call_pc = __ pc();
2170 address stub = __ start_a_stub(call_stub_size());
2171 if (stub == nullptr) {
2172 bailout("static call stub overflow");
2173 return;
2174 }
2175
2176 int start = __ offset();
2177
2178 __ relocate(static_stub_Relocation::spec(call_pc));
2179 __ emit_static_call_stub();
2180
2181 assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
2182 <= call_stub_size(), "stub too big");
2183 __ end_a_stub();
2184 }
2307
2308
2309 void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
2310 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2311 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2312 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2313 __ mov (rscratch1, c);
2314 __ str (rscratch1, Address(sp, offset_from_rsp_in_bytes));
2315 }
2316
2317
2318 void LIR_Assembler::store_parameter(jobject o, int offset_from_rsp_in_words) {
2319 ShouldNotReachHere();
2320 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
2321 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
2322 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
2323 __ lea(rscratch1, __ constant_oop_address(o));
2324 __ str(rscratch1, Address(sp, offset_from_rsp_in_bytes));
2325 }
2326
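// Route an arraycopy operand to the slow path when inline type arrays are
// involved: a flat source or destination always needs it, and a null-free
// destination does too, since the copy might have to store nulls.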
2327 void LIR_Assembler::arraycopy_inlinetype_check(Register obj, Register tmp, CodeStub* slow_path, bool is_dest, bool null_check) {
2328 if (null_check) {
2329 __ cbz(obj, *slow_path->entry());
2330 }
2331 if (is_dest) {
2332 __ test_null_free_array_oop(obj, tmp, *slow_path->entry());
2333 // TODO 8350865 Flat no longer implies null-free, so we need to check for flat dest. Can we do better here?
2334 __ test_flat_array_oop(obj, tmp, *slow_path->entry());
2335 } else {
2336 __ test_flat_array_oop(obj, tmp, *slow_path->entry());
2337 }
2338 }
2339
2340 // This code replaces a call to arraycopy; no exceptions may
2341 // be thrown in this code. They must be thrown in the System.arraycopy
2342 // activation frame; we could save some checks if that were not the case.
2343 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
2344 ciArrayKlass* default_type = op->expected_type();
2345 Register src = op->src()->as_register();
2346 Register dst = op->dst()->as_register();
2347 Register src_pos = op->src_pos()->as_register();
2348 Register dst_pos = op->dst_pos()->as_register();
2349 Register length = op->length()->as_register();
2350 Register tmp = op->tmp()->as_register();
2351
2352 CodeStub* stub = op->stub();
2353 int flags = op->flags();
2354 BasicType basic_type = default_type != nullptr ? default_type->element_type()->basic_type() : T_ILLEGAL;
2355 if (is_reference_type(basic_type)) basic_type = T_OBJECT;
2356
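// Some copies can never take a fast path (the compiler has already decided
// this, e.g. for certain inline type array copies); branch straight to the
// slow-path stub.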
2357 if (flags & LIR_OpArrayCopy::always_slow_path) {
2358 __ b(*stub->entry());
2359 __ bind(*stub->continuation());
2360 return;
2361 }
2362
2363 // if we don't know anything, just go through the generic arraycopy
2364 if (default_type == nullptr // || basic_type == T_OBJECT
2365 ) {
2366 Label done;
2367 assert(src == r1 && src_pos == r2, "mismatch in calling convention");
2368
2369 // Save the arguments in case the generic arraycopy fails and we
2370 // have to fall back to the JNI stub
2371 __ stp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2372 __ stp(length, src_pos, Address(sp, 2*BytesPerWord));
2373 __ str(src, Address(sp, 4*BytesPerWord));
2374
2375 address copyfunc_addr = StubRoutines::generic_arraycopy();
2376 assert(copyfunc_addr != nullptr, "generic arraycopy stub required");
2377
2378 // The arguments are in the Java calling convention, so we shift them
2379 // to the C convention
2380 assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
2381 __ mov(c_rarg0, j_rarg0);
2382 assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
2396 __ cbz(r0, *stub->continuation());
2397
2398 // Reload values from the stack so they are where the stub
2399 // expects them.
2400 __ ldp(dst, dst_pos, Address(sp, 0*BytesPerWord));
2401 __ ldp(length, src_pos, Address(sp, 2*BytesPerWord));
2402 __ ldr(src, Address(sp, 4*BytesPerWord));
2403
2404 // r0 is -1^K (the bitwise NOT of K) where K == partial copied count
2405 __ eonw(rscratch1, r0, zr); // rscratch1 = ~r0 = K
2406 // adjust length down and src/dst pos up by the partial copied count
2407 __ subw(length, length, rscratch1);
2408 __ addw(src_pos, src_pos, rscratch1);
2409 __ addw(dst_pos, dst_pos, rscratch1);
2410 __ b(*stub->entry());
2411
2412 __ bind(*stub->continuation());
2413 return;
2414 }
2415
2416 // Handle inline type arrays
2417 if (flags & LIR_OpArrayCopy::src_inlinetype_check) {
2418 arraycopy_inlinetype_check(src, tmp, stub, false, (flags & LIR_OpArrayCopy::src_null_check));
2419 }
2420 if (flags & LIR_OpArrayCopy::dst_inlinetype_check) {
2421 arraycopy_inlinetype_check(dst, tmp, stub, true, (flags & LIR_OpArrayCopy::dst_null_check));
2422 }
2423
2424 assert(default_type != nullptr && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");
2425
2426 int elem_size = type2aelembytes(basic_type);
2427 int scale = exact_log2(elem_size);
2428
2429 Address src_length_addr = Address(src, arrayOopDesc::length_offset_in_bytes());
2430 Address dst_length_addr = Address(dst, arrayOopDesc::length_offset_in_bytes());
2431
2432 // test for null
2433 if (flags & LIR_OpArrayCopy::src_null_check) {
2434 __ cbz(src, *stub->entry());
2435 }
2436 if (flags & LIR_OpArrayCopy::dst_null_check) {
2437 __ cbz(dst, *stub->entry());
2438 }
2439
2440 // If the compiler was not able to prove that exact type of the source or the destination
2441 // of the arraycopy is an array type, check at runtime if the source or the destination is
2442 // an instance type.
2443 if (flags & LIR_OpArrayCopy::type_check) {
2958 __ verify_klass_ptr(tmp);
2959 #endif
2960 } else {
2961 assert(ciTypeEntries::valid_ciklass(current_klass) != nullptr &&
2962 ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
2963
2964 __ ldr(tmp, mdo_addr);
2965 __ tbnz(tmp, exact_log2(TypeEntries::type_unknown), next); // already unknown. Nothing to do anymore.
2966
2967 __ orr(tmp, tmp, TypeEntries::type_unknown);
2968 __ str(tmp, mdo_addr);
2969 // FIXME: Write barrier needed here?
2970 }
2971 }
2972
2973 __ bind(next);
2974 }
2975 COMMENT("} emit_profile_type");
2976 }
2977
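// Record in the MDO whether the value seen here is an inline type: nulls
// (unless statically known non-null) and non-inline-type values skip the
// update; inline types OR `flag` into the profile flag byte.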
2978 void LIR_Assembler::emit_profile_inline_type(LIR_OpProfileInlineType* op) {
2979 Register obj = op->obj()->as_register();
2980 Register tmp = op->tmp()->as_pointer_register();
2981 bool not_null = op->not_null();
2982 int flag = op->flag();
2983
2984 Label not_inline_type;
2985 if (!not_null) {
2986 __ cbz(obj, not_inline_type);
2987 }
2988
2989 __ test_oop_is_not_inline_type(obj, tmp, not_inline_type);
2990
2991 Address mdo_addr = as_Address(op->mdp()->as_address_ptr(), rscratch2);
2992 __ ldrb(rscratch1, mdo_addr);
2993 __ orr(rscratch1, rscratch1, flag);
2994 __ strb(rscratch1, mdo_addr);
2995
2996 __ bind(not_inline_type);
2997 }
2998
2999 void LIR_Assembler::align_backward_branch_target() {
3000 }
3001
3002
3003 void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest, LIR_Opr tmp) {
3004 // tmp must be unused
3005 assert(tmp->is_illegal(), "wasting a register if tmp is allocated");
3006
3007 if (left->is_single_cpu()) {
3008 assert(dest->is_single_cpu(), "expect single result reg");
3009 __ negw(dest->as_register(), left->as_register());
3010 } else if (left->is_double_cpu()) {
3011 assert(dest->is_double_cpu(), "expect double result reg");
3012 __ neg(dest->as_register_lo(), left->as_register_lo());
3013 } else if (left->is_single_fpu()) {
3014 assert(dest->is_single_fpu(), "expect single float result reg");
3015 __ fnegs(dest->as_float_reg(), left->as_float_reg());
3016 } else {
3017 assert(left->is_double_fpu(), "expect double float operand reg");
3117 void LIR_Assembler::membar_loadload() {
3118 __ membar(Assembler::LoadLoad);
3119 }
3120
3121 void LIR_Assembler::membar_storestore() {
3122 __ membar(MacroAssembler::StoreStore);
3123 }
3124
3125 void LIR_Assembler::membar_loadstore() { __ membar(MacroAssembler::LoadStore); }
3126
3127 void LIR_Assembler::membar_storeload() { __ membar(MacroAssembler::StoreLoad); }
3128
3129 void LIR_Assembler::on_spin_wait() {
3130 __ spin_wait();
3131 }
3132
3133 void LIR_Assembler::get_thread(LIR_Opr result_reg) {
3134 __ mov(result_reg->as_register(), rthread);
3135 }
3136
3137 void LIR_Assembler::check_orig_pc() {
3138 __ ldr(rscratch2, frame_map()->address_for_orig_pc_addr());
3139 __ cmp(rscratch2, (u1)NULL_WORD);
3140 }
3141
3142 void LIR_Assembler::peephole(LIR_List *lir) {
3143 #if 0
3144 if (tableswitch_count >= max_tableswitches)
3145 return;
3146
3147 /*
3148 This finite-state automaton recognizes sequences of compare-and-
3149 branch instructions. We will turn them into a tableswitch. You
3150 could argue that C1 really shouldn't be doing this sort of
3151 optimization, but without it the code is really horrible.
3152 */
3153
3154 enum { start_s, cmp1_s, beq_s, cmp_s } state;
3155 int first_key, last_key = -2147483648;
3156 int next_key = 0;
3157 int start_insn = -1;
3158 int last_insn = -1;
3159 Register reg = noreg;
3160 LIR_Opr reg_opr;