 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
// ...

    uint32_t insn2 = insn_at(insn_addr, 1);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
    guarantee(((dest >> size) << size) == dest, "misaligned target");
    return 2;
  }
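  // For illustration (a sketch, not part of the patcher): for a 64-bit
  // ldr/str the size field is 0b11, so the immediate patched into bits
  // 21..10 is offset_lo >> 3, and the guarantee above rejects any target
  // that is not 8-byte aligned.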
  static int adrpAdd_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
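  // For illustration, the adrp-based pairs patched above (a sketch; x8 and
  // sym are placeholders):
  //   adrp x8, sym ; ldr  x8, [x8, #:lo12:sym]   -- scaled 12-bit offset
  //   adrp x8, sym ; add  x8, x8, #:lo12:sym     -- plain low 12 bits
  //   adrp x8, sym ; movk x8, #bits_47_32, lsl #32
  // The movk form yields a full 48-bit target: adrp supplies bits 31..12
  // page-relatively and the patched movk overwrites bits 47..32.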
  virtual int immediate(address insn_addr, address &target) {
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
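  // For example (a sketch): patching in the target 0x7f12_3456_789a rewrites
  // the sequence to
  //   movz x8, #0x789a
  //   movk x8, #0x3456, lsl #16
  //   movk x8, #0x7f12, lsl #32
  // (x8 is a placeholder; only the imm16 fields, bits 20..5, are patched,
  // the destination register bits are left untouched).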
  virtual void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (!(address_is == target)) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);
      assert(address_is == target, "should be");
    }
#endif
  }

// ...

    uint32_t insn2 = insn_at(insn_addr, 1);
    uint64_t dest = uint64_t(target);
    dest = (dest & 0xffff0000ffffffff) |
           ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
    target = address(dest);

    // We know the destination 4k page. Maybe we have a third
    // instruction.
    uint32_t insn = insn_at(insn_addr, 0);
    uint32_t insn3 = insn_at(insn_addr, 2);
    ptrdiff_t byte_offset;
    if (offset_for(insn, insn3, byte_offset)) {
      target += byte_offset;
      return 3;
    } else {
      return 2;
    }
  }
  virtual int immediate(address insn_addr, address &target) {
    uint32_t *insns = (uint32_t *)insn_addr;
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    // Move wide constant: movz, movk, movk. See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    target = address(uint64_t(Instruction_aarch64::extract(_insn, 20, 5))
                     + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                     + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
  }
};

address MacroAssembler::target_addr_for_insn(address insn_addr, uint32_t insn) {
  AArch64Decoder decoder(insn_addr, insn);
  address target;
  decoder.run(insn_addr, target);
  return target;

// ...

                                  JavaThread::frame_anchor_offset()
                                  + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}
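// Typical use (a hypothetical sketch): pass an unbound label naming a return
// address that only becomes known once the call site has been emitted:
//   Label retaddr;
//   __ set_last_Java_frame(sp, rfp, retaddr, rscratch1);
//   __ blr(target);
//   __ bind(retaddr);   // binding back-patches the pc recorded above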

static inline bool target_needs_far_branch(address addr) {
  // codecache size <= 128M
  if (!MacroAssembler::far_branches()) {
    return false;
  }
  // codecache size > 240M
  if (MacroAssembler::codestub_branch_needs_far_jump()) {
    return true;
  }
  // codecache size: 128M..240M
  return !CodeCache::is_non_nmethod(addr);
}
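// For context (a sketch of the arithmetic): an AArch64 B/BL immediate spans
// +/-128M. A code cache of at most 128M therefore never needs far branches;
// above 240M even the centrally placed non-nmethod segment can be out of
// range; in between, only targets outside the non-nmethod segment may be.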

void MacroAssembler::far_call(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  if (target_needs_far_branch(entry.target())) {

// ...

    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

// Check that the entry target is always reachable from any branch.
static bool is_always_within_branch_range(Address entry) {
  const address target = entry.target();

  if (!CodeCache::contains(target)) {
    // We always use trampolines for callees outside CodeCache.
    assert(entry.rspec().type() == relocInfo::runtime_call_type, "non-runtime call of an external target");
    return false;
  }

  if (!MacroAssembler::far_branches()) {
    return true;
  }

  if (entry.rspec().type() == relocInfo::runtime_call_type) {
    // Runtime calls are calls of a non-compiled method (stubs, adapters).
    // Non-compiled methods stay forever in CodeCache.
    // We check whether the longest possible branch is within the branch range.
    assert(CodeCache::find_blob(target) != nullptr &&
           !CodeCache::find_blob(target)->is_nmethod(),
           "runtime call of compiled method");
    const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;

// ...

  int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
  assert(offset1 - offset == stackElementSize, "correct arithmetic");
#endif
  if (arg_slot.is_constant()) {
    return Address(esp, arg_slot.as_constant() * stackElementSize
                   + offset);
  } else {
    add(rscratch1, esp, arg_slot.as_register(),
        ext::uxtx, exact_log2(stackElementSize));
    return Address(rscratch1, offset);
  }
}
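// For example (hypothetical values): with arg_slot as the constant 2 this
// returns Address(esp, 2 * stackElementSize + offset). The register case
// detours through rscratch1 because an AArch64 addressing mode cannot
// combine a scaled register offset with an immediate displacement.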

void MacroAssembler::call_VM_leaf_base(address entry_point,
                                       int number_of_arguments,
                                       Label *retaddr) {
  Label E, L;

  stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize)));

  mov(rscratch1, entry_point);
  blr(rscratch1);
  if (retaddr)
    bind(*retaddr);

  ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize)));
}

void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
  call_VM_leaf_base(entry_point, number_of_arguments);
}
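// Typical use (a hypothetical sketch, using the two-register overload below):
//   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
//                rthread, rmethod);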

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
  assert_different_registers(arg_1, c_rarg0);
  pass_arg0(this, arg_0);
  pass_arg1(this, arg_1);

// ...

#ifdef ASSERT
  {
    STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10);
    Label valid_global_tag;
    tbnz(value, 1, valid_global_tag); // Test for global tag
    stop("non global jobject using resolve_global_jobject");
    bind(valid_global_tag);
  }
#endif

  // Resolve global handle
  access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2);
  verify_oop(value);

  bind(done);
}

void MacroAssembler::stop(const char* msg) {
  BLOCK_COMMENT(msg);
  dcps1(0xdeae);
  emit_int64((uintptr_t)msg);
}

void MacroAssembler::unimplemented(const char* what) {
  const char* buf = nullptr;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("unimplemented: %s", what);
    buf = code_string(ss.as_string());
  }
  stop(buf);
}

void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) {
#ifdef ASSERT
  Label OK;
  br(cc, OK);
  stop(msg);
  bind(OK);
#endif

// ...

void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) {
  if (decrement.is_register()) {
    sub(Rd, Rn, decrement.as_register());
  } else {
    sub(Rd, Rn, decrement.as_constant());
  }
}

void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) {
  if (decrement.is_register()) {
    subw(Rd, Rn, decrement.as_register());
  } else {
    subw(Rd, Rn, decrement.as_constant());
  }
}

void MacroAssembler::reinit_heapbase()
{
  if (UseCompressedOops) {
    if (Universe::is_fully_initialized()) {
      mov(rheapbase, CompressedOops::base());
    } else {
      lea(rheapbase, ExternalAddress(CompressedOops::base_addr()));
      ldr(rheapbase, Address(rheapbase));
    }
  }
}

// this simulates the behaviour of the x86 cmpxchg instruction using a
// load linked/store conditional pair. we use the acquire/release
// versions of these instructions so that we flush pending writes as
// per Java semantics.

// n.b the x86 version assumes the old value to be compared against is
// in rax and updates rax with the value located in memory if the
// cmpxchg fails. we supply a register for the old value explicitly

// the aarch64 load linked/store conditional instructions do not
// accept an offset. so, unlike x86, we must provide a plain register
// to identify the memory word to be compared/exchanged rather than a
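//
// A minimal sketch of the resulting retry loop (the usual AArch64
// exclusive-access idiom; labels and registers are placeholders):
//   retry:
//     ldaxr x8, [addr]         ; load-acquire exclusive of the current value
//     cmp   x8, oldv
//     b.ne  done               ; values differ: fail without storing
//     stlxr w9, newv, [addr]   ; store-release exclusive, w9 = status
//     cbnz  w9, retry          ; lost the reservation: try again
//   done: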
// ...

  // the code cache so that if it is relocated we know it will still reach
  if (offset_high >= -(1<<20) && offset_low < (1<<20)) {
    _adrp(reg1, dest.target());
  } else {
    uint64_t target = (uint64_t)dest.target();
    uint64_t adrp_target
      = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL);

    _adrp(reg1, (address)adrp_target);
    movk(reg1, target >> 32, 32);
  }
  byte_offset = (uint64_t)dest.target() & 0xfff;
}

void MacroAssembler::load_byte_map_base(Register reg) {
  CardTable::CardValue* byte_map_base =
    ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();

  // Strictly speaking the byte_map_base isn't an address at all, and it might
  // even be negative. It is thus materialised as a constant.
  mov(reg, (uint64_t)byte_map_base);
}
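// A minimal sketch of a card mark using the loaded base (assuming the usual
// card-shift granularity; registers are placeholders):
//   load_byte_map_base(rscratch2);
//   lsr(rscratch1, obj, CardTable::card_shift());
//   strb(zr, Address(rscratch2, rscratch1));   // dirty obj's card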

void MacroAssembler::build_frame(int framesize) {
  assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
  assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
  protect_return_address();
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    sub(sp, sp, framesize);
    stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
    if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
  } else {
    stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
    if (PreserveFramePointer) mov(rfp, sp);
    if (framesize < ((1 << 12) + 2 * wordSize))
      sub(sp, sp, framesize - 2 * wordSize);
    else {
      mov(rscratch1, framesize - 2 * wordSize);
      sub(sp, sp, rscratch1);
    }
  }
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "ci/ciUtilities.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
// ...

    uint32_t insn2 = insn_at(insn_addr, 1);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
    guarantee(((dest >> size) << size) == dest, "misaligned target");
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
  virtual int immediate(address insn_addr, address &target) {
    // Metadata pointers are either narrow (32 bits) or wide (48 bits).
    // We encode narrow ones by setting the upper 16 bits in the first
    // instruction.
    if (Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010101) {
      assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
      narrowKlass nk = CompressedKlassPointers::encode((Klass*)target);
      Instruction_aarch64::patch(insn_addr, 20, 5, nk >> 16);
      Instruction_aarch64::patch(insn_addr+4, 20, 5, nk & 0xffff);
      return 2;
    }
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
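  // For example (a sketch): a narrowKlass value 0x00123456 is re-encoded as
  //   movz x8, #0x0012, lsl #16
  //   movk x8, #0x3456
  // hence the opcode test above: 0b11010010101 is movz with hw == 0b01 (a
  // 16-bit-shifted immediate), versus 0b11010010100 for the wide form.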
  virtual void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (!(address_is == target)) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);
      assert(address_is == target, "should be");
    }
#endif
  }

// ...

    uint32_t insn2 = insn_at(insn_addr, 1);
    uint64_t dest = uint64_t(target);
    dest = (dest & 0xffff0000ffffffff) |
           ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
    target = address(dest);

    // We know the destination 4k page. Maybe we have a third
    // instruction.
    uint32_t insn = insn_at(insn_addr, 0);
    uint32_t insn3 = insn_at(insn_addr, 2);
    ptrdiff_t byte_offset;
    if (offset_for(insn, insn3, byte_offset)) {
      target += byte_offset;
      return 3;
    } else {
      return 2;
    }
  }
  virtual int immediate(address insn_addr, address &target) {
    uint32_t *insns = (uint32_t *)insn_addr;
    // Metadata pointers are either narrow (32 bits) or wide (48 bits).
    // We encode narrow ones by setting the upper 16 bits in the first
    // instruction.
    if (Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010101) {
      assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
      narrowKlass nk = (narrowKlass)((uint32_t(Instruction_aarch64::extract(_insn, 20, 5)) << 16)
                                     + uint32_t(Instruction_aarch64::extract(insns[1], 20, 5)));
      target = (address)CompressedKlassPointers::decode(nk);
      return 2;
    }
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    // Move wide constant: movz, movk, movk. See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    target = address(uint64_t(Instruction_aarch64::extract(_insn, 20, 5))
                     + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                     + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
  }
};

address MacroAssembler::target_addr_for_insn(address insn_addr, uint32_t insn) {
  AArch64Decoder decoder(insn_addr, insn);
  address target;
  decoder.run(insn_addr, target);
  return target;

// ...

                                  JavaThread::frame_anchor_offset()
                                  + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}

static inline bool target_needs_far_branch(address addr) {
  if (AOTCodeCache::is_on_for_write()) {
    return true;
  }
  // codecache size <= 128M
  if (!MacroAssembler::far_branches()) {
    return false;
  }
  // codecache size > 240M
  if (MacroAssembler::codestub_branch_needs_far_jump()) {
    return true;
  }
  // codecache size: 128M..240M
  return !CodeCache::is_non_nmethod(addr);
}

void MacroAssembler::far_call(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  if (target_needs_far_branch(entry.target())) {
// ...

    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

// Check that the entry target is always reachable from any branch.
static bool is_always_within_branch_range(Address entry) {
  if (AOTCodeCache::is_on_for_write()) {
    return false;
  }
  const address target = entry.target();

  if (!CodeCache::contains(target)) {
    // We always use trampolines for callees outside CodeCache.
    assert(entry.rspec().type() == relocInfo::runtime_call_type, "non-runtime call of an external target");
    return false;
  }

  if (!MacroAssembler::far_branches()) {
    return true;
  }

  if (entry.rspec().type() == relocInfo::runtime_call_type) {
    // Runtime calls are calls of a non-compiled method (stubs, adapters).
    // Non-compiled methods stay forever in CodeCache.
    // We check whether the longest possible branch is within the branch range.
    assert(CodeCache::find_blob(target) != nullptr &&
           !CodeCache::find_blob(target)->is_nmethod(),
           "runtime call of compiled method");
    const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;
// ...

  int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
  assert(offset1 - offset == stackElementSize, "correct arithmetic");
#endif
  if (arg_slot.is_constant()) {
    return Address(esp, arg_slot.as_constant() * stackElementSize
                   + offset);
  } else {
    add(rscratch1, esp, arg_slot.as_register(),
        ext::uxtx, exact_log2(stackElementSize));
    return Address(rscratch1, offset);
  }
}

void MacroAssembler::call_VM_leaf_base(address entry_point,
                                       int number_of_arguments,
                                       Label *retaddr) {
  Label E, L;

  stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize)));

  mov(rscratch1, RuntimeAddress(entry_point));
  blr(rscratch1);
  if (retaddr)
    bind(*retaddr);

  ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize)));
}

void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
  call_VM_leaf_base(entry_point, number_of_arguments);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
  assert_different_registers(arg_1, c_rarg0);
  pass_arg0(this, arg_0);
  pass_arg1(this, arg_1);
// ...

#ifdef ASSERT
  {
    STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10);
    Label valid_global_tag;
    tbnz(value, 1, valid_global_tag); // Test for global tag
    stop("non global jobject using resolve_global_jobject");
    bind(valid_global_tag);
  }
#endif

  // Resolve global handle
  access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2);
  verify_oop(value);

  bind(done);
}

void MacroAssembler::stop(const char* msg) {
  BLOCK_COMMENT(msg);
  // load msg into r0 so we can access it from the signal handler
  // ExternalAddress enables saving and restoring via the code cache
  lea(c_rarg0, ExternalAddress((address) msg));
  dcps1(0xdeae);
  AOTCodeCache::add_C_string(msg);
}

void MacroAssembler::unimplemented(const char* what) {
  const char* buf = nullptr;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("unimplemented: %s", what);
    buf = code_string(ss.as_string());
  }
  stop(buf);
}

void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) {
#ifdef ASSERT
  Label OK;
  br(cc, OK);
  stop(msg);
  bind(OK);
#endif
// ...

void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) {
  if (decrement.is_register()) {
    sub(Rd, Rn, decrement.as_register());
  } else {
    sub(Rd, Rn, decrement.as_constant());
  }
}

void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) {
  if (decrement.is_register()) {
    subw(Rd, Rn, decrement.as_register());
  } else {
    subw(Rd, Rn, decrement.as_constant());
  }
}

void MacroAssembler::reinit_heapbase()
{
  if (UseCompressedOops) {
    if (Universe::is_fully_initialized() && !AOTCodeCache::is_on_for_write()) {
      mov(rheapbase, CompressedOops::base());
    } else {
      lea(rheapbase, ExternalAddress(CompressedOops::base_addr()));
      ldr(rheapbase, Address(rheapbase));
    }
  }
}

// this simulates the behaviour of the x86 cmpxchg instruction using a
// load linked/store conditional pair. we use the acquire/release
// versions of these instructions so that we flush pending writes as
// per Java semantics.

// n.b the x86 version assumes the old value to be compared against is
// in rax and updates rax with the value located in memory if the
// cmpxchg fails. we supply a register for the old value explicitly

// the aarch64 load linked/store conditional instructions do not
// accept an offset. so, unlike x86, we must provide a plain register
// to identify the memory word to be compared/exchanged rather than a
// ...

  // the code cache so that if it is relocated we know it will still reach
  if (offset_high >= -(1<<20) && offset_low < (1<<20)) {
    _adrp(reg1, dest.target());
  } else {
    uint64_t target = (uint64_t)dest.target();
    uint64_t adrp_target
      = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL);

    _adrp(reg1, (address)adrp_target);
    movk(reg1, target >> 32, 32);
  }
  byte_offset = (uint64_t)dest.target() & 0xfff;
}

void MacroAssembler::load_byte_map_base(Register reg) {
  CardTable::CardValue* byte_map_base =
    ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base();

  // Strictly speaking the byte_map_base isn't an address at all, and it might
  // even be negative. It is thus materialised as a constant.
#if INCLUDE_CDS
  if (AOTCodeCache::is_on_for_write()) {
    // AOT code needs relocation info for card table base
    lea(reg, ExternalAddress(reinterpret_cast<address>(byte_map_base)));
  } else {
#endif
    mov(reg, (uint64_t)byte_map_base);
#if INCLUDE_CDS
  }
#endif
}

void MacroAssembler::load_aotrc_address(Register reg, address a) {
#if INCLUDE_CDS
  assert(AOTRuntimeConstants::contains(a), "address out of range for data area");
  if (AOTCodeCache::is_on_for_write()) {
    // all aotrc field addresses should be registered in the AOTCodeCache address table
    lea(reg, ExternalAddress(a));
  } else {
    mov(reg, (uint64_t)a);
  }
#else
  ShouldNotReachHere();
#endif
}
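// Typical use (a hypothetical sketch; the accessor name is illustrative):
//   __ load_aotrc_address(rscratch1, AOTRuntimeConstants::grain_shift_address());
// AOT-compiled code then records a relocation for the constant's slot rather
// than baking in an absolute address.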

void MacroAssembler::build_frame(int framesize) {
  assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
  assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
  protect_return_address();
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    sub(sp, sp, framesize);
    stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
    if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
  } else {
    stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
    if (PreserveFramePointer) mov(rfp, sp);
    if (framesize < ((1 << 12) + 2 * wordSize))
      sub(sp, sp, framesize - 2 * wordSize);
    else {
      mov(rscratch1, framesize - 2 * wordSize);
      sub(sp, sp, rscratch1);
    }
  }
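// For illustration (a sketch): framesize == 64 takes the small-frame path,
//   sub sp, sp, #64
//   stp x29, x30, [sp, #48]
// while a 64K frame exceeds the 4K immediate range and subtracts through
// rscratch1 after the pre-indexed stp.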