/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2024, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "ci/ciUtilities.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/tlab_globals.hpp"
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "jvm.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/accessDecorators.hpp"
#include "oops/compressedKlass.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "runtime/continuation.hpp"
#include "runtime/icache.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#endif
#ifdef COMPILER2
#include "oops/oop.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#include "opto/output.hpp"
#endif

#include <sys/types.h>

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
#define BLOCK_COMMENT(str) block_comment(str)
#endif
#define STOP(str) stop(str);
#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
extern "C" void disnm(intptr_t p);
#endif
// Target-dependent relocation processing
//
// Instruction sequences whose target may need to be retrieved or
// patched are distinguished by their leading instruction, sorting
// them into three main instruction groups and related subgroups.
//
// 1) Branch, Exception and System (insn count = 1)
//    1a) Unconditional branch (immediate):
//      b/bl imm26
//    1b) Compare & branch (immediate):
//      cbz/cbnz Rt imm19
//    1c) Test & branch (immediate):
//      tbz/tbnz Rt imm14
//    1d) Conditional branch (immediate):
//      b.cond imm19
//
// 2) Loads and Stores (insn count = 1)
//    2a) Load register literal:
//      ldr Rt imm19
//
// 3) Data Processing Immediate (insn count = 2 or 3)
//    3a) PC-rel. addressing
//      adr/adrp Rx imm21; ldr/str Ry Rx #imm12
//      adr/adrp Rx imm21; add Ry Rx #imm12
//      adr/adrp Rx imm21; movk Rx #imm16<<32; ldr/str Ry, [Rx, #offset_in_page]
//      adr/adrp Rx imm21
//      adr/adrp Rx imm21; movk Rx #imm16<<32
//      adr/adrp Rx imm21; movk Rx #imm16<<32; add Ry, Rx, #offset_in_page
//      The latter form can only happen when the target is an
//      ExternalAddress, and (by definition) ExternalAddresses don't
//      move. Because of that property, there is never any need to
//      patch the last of the three instructions. However,
//      MacroAssembler::target_addr_for_insn takes all three
//      instructions into account and returns the correct address.
//    3b) Move wide (immediate)
//      movz Rx #imm16; movk Rx #imm16 << 16; movk Rx #imm16 << 32;
//
// A switch on a subset of the instruction's bits provides an
// efficient dispatch to these subcases.
//
// insn[28:26] -> main group ('x' == don't care)
//   00x -> UNALLOCATED
//   100 -> Data Processing Immediate
//   101 -> Branch, Exception and System
//   x1x -> Loads and Stores
//
// insn[30:25] -> subgroup ('_' == group, 'x' == don't care).
// n.b. in some cases extra bits need to be checked to verify the
// instruction is as expected
//
// 1) ... xx101x Branch, Exception and System
//   1a)  00___x Unconditional branch (immediate)
//   1b)  01___0 Compare & branch (immediate)
//   1c)  01___1 Test & branch (immediate)
//   1d)  10___0 Conditional branch (immediate)
//       other   Should not happen
//
// 2) ... xxx1x0 Loads and Stores
//   2a)  xx1__00 Load/Store register (insn[28] == 1 && insn[24] == 0)
//   2aa) x01__00 Load register literal (i.e. requires insn[29] == 0)
//        strictly should be 64 bit non-FP/SIMD i.e.
//        0101_1000 (i.e. requires insn[31:24] == 01011000)
//
// 3) ... xx100x Data Processing Immediate
//   3a)  xx___00 PC-rel. addressing (n.b. requires insn[24] == 0)
//   3b)  xx___101 Move wide (immediate) (n.b. requires insn[24:23] == 01)
//        strictly should be 64 bit movz #imm16<<0
//        110___10100 (i.e. requires insn[31:21] == 11010010100)
//
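// For example (illustrative): a 48-bit constant materialized by
// movptr() is emitted as
//   movz Rx, #imm16          ; bits 15:0
//   movk Rx, #imm16, lsl 16  ; bits 31:16
//   movk Rx, #imm16, lsl 32  ; bits 47:32
// The leading movz has insn[30:25] == 0b101001, so RelocActions::run()
// below dispatches the sequence to immediate(), which decodes or
// patches all three instructions as a group.
//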
class RelocActions {
protected:
  typedef int (*reloc_insn)(address insn_addr, address &target);

  virtual reloc_insn adrpMem() = 0;
  virtual reloc_insn adrpAdd() = 0;
  virtual reloc_insn adrpMovk() = 0;

  const address _insn_addr;
  const uint32_t _insn;

  static uint32_t insn_at(address insn_addr, int n) {
    return ((uint32_t*)insn_addr)[n];
  }
  uint32_t insn_at(int n) const {
    return insn_at(_insn_addr, n);
  }

public:

  RelocActions(address insn_addr) : _insn_addr(insn_addr), _insn(insn_at(insn_addr, 0)) {}
  RelocActions(address insn_addr, uint32_t insn)
    : _insn_addr(insn_addr), _insn(insn) {}

  virtual int unconditionalBranch(address insn_addr, address &target) = 0;
  virtual int conditionalBranch(address insn_addr, address &target) = 0;
  virtual int testAndBranch(address insn_addr, address &target) = 0;
  virtual int loadStore(address insn_addr, address &target) = 0;
  virtual int adr(address insn_addr, address &target) = 0;
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) = 0;
  virtual int immediate(address insn_addr, address &target) = 0;
  virtual void verify(address insn_addr, address &target) = 0;

  int ALWAYSINLINE run(address insn_addr, address &target) {
    int instructions = 1;

    uint32_t dispatch = Instruction_aarch64::extract(_insn, 30, 25);
    switch(dispatch) {
      case 0b001010:
      case 0b001011: {
        instructions = unconditionalBranch(insn_addr, target);
        break;
      }
      case 0b101010:   // Conditional branch (immediate)
      case 0b011010: { // Compare & branch (immediate)
        instructions = conditionalBranch(insn_addr, target);
        break;
      }
      case 0b011011: {
        instructions = testAndBranch(insn_addr, target);
        break;
      }
      case 0b001100:
      case 0b001110:
      case 0b011100:
      case 0b011110:
      case 0b101100:
      case 0b101110:
      case 0b111100:
      case 0b111110: {
        // load/store
        if ((Instruction_aarch64::extract(_insn, 29, 24) & 0b111011) == 0b011000) {
          // Load register (literal)
          instructions = loadStore(insn_addr, target);
          break;
        } else {
          // nothing to do
          assert(target == nullptr, "did not expect to relocate target for polling page load");
        }
        break;
      }
      case 0b001000:
      case 0b011000:
      case 0b101000:
      case 0b111000: {
        // adr/adrp
        assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
        int shift = Instruction_aarch64::extract(_insn, 31, 31);
        if (shift) {
          uint32_t insn2 = insn_at(1);
          if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
              Instruction_aarch64::extract(_insn, 4, 0) ==
              Instruction_aarch64::extract(insn2, 9, 5)) {
            instructions = adrp(insn_addr, target, adrpMem());
          } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
                     Instruction_aarch64::extract(_insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = adrp(insn_addr, target, adrpAdd());
          } else if (Instruction_aarch64::extract(insn2, 31, 21) == 0b11110010110 &&
                     Instruction_aarch64::extract(_insn, 4, 0) ==
                     Instruction_aarch64::extract(insn2, 4, 0)) {
            instructions = adrp(insn_addr, target, adrpMovk());
          } else {
            ShouldNotReachHere();
          }
        } else {
          instructions = adr(insn_addr, target);
        }
        break;
      }
      case 0b001001:
      case 0b011001:
      case 0b101001:
      case 0b111001: {
        instructions = immediate(insn_addr, target);
        break;
      }
      default: {
        ShouldNotReachHere();
      }
    }

    verify(insn_addr, target);
    return instructions * NativeInstruction::instruction_size;
  }
};
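
// A Patcher rewrites the immediate fields of an already-emitted
// instruction sequence so that the sequence refers to a new target
// address.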
class Patcher : public RelocActions {
  virtual reloc_insn adrpMem() { return &Patcher::adrpMem_impl; }
  virtual reloc_insn adrpAdd() { return &Patcher::adrpAdd_impl; }
  virtual reloc_insn adrpMovk() { return &Patcher::adrpMovk_impl; }

public:
  Patcher(address insn_addr) : RelocActions(insn_addr) {}

  virtual int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 25, 0, offset);
    return 1;
  }
  virtual int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  virtual int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 18, 5, offset);
    return 1;
  }
  virtual int loadStore(address insn_addr, address &target) {
    intptr_t offset = (target - insn_addr) >> 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    return 1;
  }
  virtual int adr(address insn_addr, address &target) {
#ifdef ASSERT
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
#endif
    // PC-rel. addressing
    ptrdiff_t offset = target - insn_addr;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return 1;
  }
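  // adrp-led sequences: the adrp immediate encodes a signed 4k-page
  // delta, which is patched here; the inner handler fixes up the low
  // 12 bits of the target (ldr/str and add forms) or bits 47:32 (movk
  // form).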
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
    int instructions = 1;
#ifdef ASSERT
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
#endif
    ptrdiff_t offset = target - insn_addr;
    instructions = 2;
    precond(inner != nullptr);
    // Give the inner reloc a chance to modify the target.
    address adjusted_target = target;
    instructions = (*inner)(insn_addr, adjusted_target);
    uintptr_t pc_page = (uintptr_t)insn_addr >> 12;
    uintptr_t adr_page = (uintptr_t)adjusted_target >> 12;
    offset = adr_page - pc_page;
    int offset_lo = offset & 3;
    offset >>= 2;
    Instruction_aarch64::spatch(insn_addr, 23, 5, offset);
    Instruction_aarch64::patch(insn_addr, 30, 29, offset_lo);
    return instructions;
  }
  static int adrpMem_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo >> size);
    guarantee(((dest >> size) << size) == dest, "misaligned target");
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uintptr_t dest = (uintptr_t)target;
    int offset_lo = dest & 0xfff;
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 21, 10, offset_lo);
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uintptr_t dest = uintptr_t(target);
    Instruction_aarch64::patch(insn_addr + sizeof (uint32_t), 20, 5, (uintptr_t)target >> 32);
    dest = (dest & 0xffffffffULL) | (uintptr_t(insn_addr) & 0xffff00000000ULL);
    target = address(dest);
    return 2;
  }
  virtual int immediate(address insn_addr, address &target) {
    // Metadata pointers are either narrow (32 bits) or wide (48 bits).
    // We encode narrow ones by setting the upper 16 bits in the first
    // instruction.
    if (Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010101) {
      assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
      narrowKlass nk = CompressedKlassPointers::encode((Klass*)target);
      Instruction_aarch64::patch(insn_addr, 20, 5, nk >> 16);
      Instruction_aarch64::patch(insn_addr+4, 20, 5, nk & 0xffff);
      return 2;
    }
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    uint64_t dest = (uint64_t)target;
    // Move wide constant
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
#ifdef ASSERT
    address address_is = MacroAssembler::target_addr_for_insn(insn_addr);
    if (!(address_is == target)) {
      tty->print_cr("%p at %p should be %p", address_is, insn_addr, target);
      disnm((intptr_t)insn_addr);
      assert(address_is == target, "should be");
    }
#endif
  }
};

// If insn1 and insn2 use the same register to form an address, either
// by an offsetted LDR or a simple ADD, return the offset. If the
// second instruction is an LDR, the offset may be scaled.
static bool offset_for(uint32_t insn1, uint32_t insn2, ptrdiff_t &byte_offset) {
  if (Instruction_aarch64::extract(insn2, 29, 24) == 0b111001 &&
      Instruction_aarch64::extract(insn1, 4, 0) ==
      Instruction_aarch64::extract(insn2, 9, 5)) {
    // Load/store register (unsigned immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    return true;
  } else if (Instruction_aarch64::extract(insn2, 31, 22) == 0b1001000100 &&
             Instruction_aarch64::extract(insn1, 4, 0) ==
             Instruction_aarch64::extract(insn2, 4, 0)) {
    // add (immediate)
    byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    return true;
  }
  return false;
}
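
// An AArch64Decoder is the read-only counterpart of Patcher: instead
// of rewriting an instruction sequence, it recovers the target address
// that the sequence currently encodes.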
class AArch64Decoder : public RelocActions {
  virtual reloc_insn adrpMem() { return &AArch64Decoder::adrpMem_impl; }
  virtual reloc_insn adrpAdd() { return &AArch64Decoder::adrpAdd_impl; }
  virtual reloc_insn adrpMovk() { return &AArch64Decoder::adrpMovk_impl; }

public:
  AArch64Decoder(address insn_addr, uint32_t insn) : RelocActions(insn_addr, insn) {}

  virtual int loadStore(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
    target = insn_addr + (offset << 2);
    return 1;
  }
  virtual int unconditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 25, 0);
    target = insn_addr + (offset << 2);
    return 1;
  }
  virtual int conditionalBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 23, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
  virtual int testAndBranch(address insn_addr, address &target) {
    intptr_t offset = Instruction_aarch64::sextract(_insn, 18, 5);
    target = address(((uint64_t)insn_addr + (offset << 2)));
    return 1;
  }
  virtual int adr(address insn_addr, address &target) {
    // PC-rel. addressing
    intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
    offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
    target = address((uint64_t)insn_addr + offset);
    return 1;
  }
  virtual int adrp(address insn_addr, address &target, reloc_insn inner) {
    assert(Instruction_aarch64::extract(_insn, 28, 24) == 0b10000, "must be");
    intptr_t offset = Instruction_aarch64::extract(_insn, 30, 29);
    offset |= Instruction_aarch64::sextract(_insn, 23, 5) << 2;
    int shift = 12;
    offset <<= shift;
    uint64_t target_page = ((uint64_t)insn_addr) + offset;
    target_page &= ((uint64_t)-1) << shift;
    uint32_t insn2 = insn_at(1);
    target = address(target_page);
    precond(inner != nullptr);
    (*inner)(insn_addr, target);
    return 2;
  }
  static int adrpMem_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // Load/store register (unsigned immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    uint32_t size = Instruction_aarch64::extract(insn2, 31, 30);
    byte_offset <<= size;
    target += byte_offset;
    return 2;
  }
  static int adrpAdd_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    // add (immediate)
    ptrdiff_t byte_offset = Instruction_aarch64::extract(insn2, 21, 10);
    target += byte_offset;
    return 2;
  }
  static int adrpMovk_impl(address insn_addr, address &target) {
    uint32_t insn2 = insn_at(insn_addr, 1);
    uint64_t dest = uint64_t(target);
    dest = (dest & 0xffff0000ffffffff) |
           ((uint64_t)Instruction_aarch64::extract(insn2, 20, 5) << 32);
    target = address(dest);

    // We know the destination 4k page. Maybe we have a third
    // instruction.
    uint32_t insn = insn_at(insn_addr, 0);
    uint32_t insn3 = insn_at(insn_addr, 2);
    ptrdiff_t byte_offset;
    if (offset_for(insn, insn3, byte_offset)) {
      target += byte_offset;
      return 3;
    } else {
      return 2;
    }
  }
  virtual int immediate(address insn_addr, address &target) {
    uint32_t *insns = (uint32_t *)insn_addr;
    // Metadata pointers are either narrow (32 bits) or wide (48 bits).
    // We encode narrow ones by setting the upper 16 bits in the first
    // instruction.
    if (Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010101) {
      assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
      narrowKlass nk = (narrowKlass)((uint32_t(Instruction_aarch64::extract(_insn, 20, 5)) << 16)
                                     + uint32_t(Instruction_aarch64::extract(insns[1], 20, 5)));
      target = (address)CompressedKlassPointers::decode(nk);
      return 2;
    }
    assert(Instruction_aarch64::extract(_insn, 31, 21) == 0b11010010100, "must be");
    // Move wide constant: movz, movk, movk. See movptr().
    assert(nativeInstruction_at(insns+1)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insns+2)->is_movk(), "wrong insns in patch");
    target = address(uint64_t(Instruction_aarch64::extract(_insn, 20, 5))
                     + (uint64_t(Instruction_aarch64::extract(insns[1], 20, 5)) << 16)
                     + (uint64_t(Instruction_aarch64::extract(insns[2], 20, 5)) << 32));
    assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    return 3;
  }
  virtual void verify(address insn_addr, address &target) {
  }
};
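
// Return the target address that the instruction sequence starting at
// insn_addr encodes, where insn is the first instruction word of that
// sequence.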
address MacroAssembler::target_addr_for_insn(address insn_addr, uint32_t insn) {
  AArch64Decoder decoder(insn_addr, insn);
  address target;
  decoder.run(insn_addr, target);
  return target;
}

// Patch any kind of instruction; there may be several instructions.
// Return the total length (in bytes) of the instructions.
int MacroAssembler::pd_patch_instruction_size(address insn_addr, address target) {
  Patcher patcher(insn_addr);
  return patcher.run(insn_addr, target);
}
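
// Patch a movz/movk sequence that materializes an oop constant. Narrow
// oops take two instructions, wide (48-bit) oops three; the return
// value is the length of the patched sequence in bytes.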
int MacroAssembler::patch_oop(address insn_addr, address o) {
  int instructions;
  unsigned insn = *(unsigned*)insn_addr;
  assert(nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  // OOPs are either narrow (32 bits) or wide (48 bits). We encode
  // narrow OOPs by setting the upper 16 bits in the first
  // instruction.
  if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
    // Move narrow OOP
    uint32_t n = CompressedOops::narrow_oop_value(cast_to_oop(o));
    Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
    instructions = 2;
  } else {
    // Move wide OOP
    assert(nativeInstruction_at(insn_addr+8)->is_movk(), "wrong insns in patch");
    uintptr_t dest = (uintptr_t)o;
    Instruction_aarch64::patch(insn_addr, 20, 5, dest & 0xffff);
    Instruction_aarch64::patch(insn_addr+4, 20, 5, (dest >>= 16) & 0xffff);
    Instruction_aarch64::patch(insn_addr+8, 20, 5, (dest >>= 16) & 0xffff);
    instructions = 3;
  }
  return instructions * NativeInstruction::instruction_size;
}

int MacroAssembler::patch_narrow_klass(address insn_addr, narrowKlass n) {
  // Metadata pointers are either narrow (32 bits) or wide (48 bits).
  // We encode narrow ones by setting the upper 16 bits in the first
  // instruction.
  NativeInstruction *insn = nativeInstruction_at(insn_addr);
  assert(Instruction_aarch64::extract(insn->encoding(), 31, 21) == 0b11010010101 &&
         nativeInstruction_at(insn_addr+4)->is_movk(), "wrong insns in patch");

  Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
  Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
  return 2 * NativeInstruction::instruction_size;
}

address MacroAssembler::target_addr_for_insn_or_null(address insn_addr, unsigned insn) {
  if (NativeInstruction::is_ldrw_to_zr(address(&insn))) {
    return nullptr;
  }
  return MacroAssembler::target_addr_for_insn(insn_addr, insn);
}

void MacroAssembler::safepoint_poll(Label& slow_path, bool at_return, bool acquire, bool in_nmethod, Register tmp) {
  if (acquire) {
    lea(tmp, Address(rthread, JavaThread::polling_word_offset()));
    ldar(tmp, tmp);
  } else {
    ldr(tmp, Address(rthread, JavaThread::polling_word_offset()));
  }
  if (at_return) {
    // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore,
    // we may safely use the sp instead to perform the stack watermark check.
    cmp(in_nmethod ? sp : rfp, tmp);
    br(Assembler::HI, slow_path);
  } else {
    tbnz(tmp, log2i_exact(SafepointMechanism::poll_bit()), slow_path);
  }
}

void MacroAssembler::rt_call(address dest, Register tmp) {
  CodeBlob *cb = CodeCache::find_blob(dest);
  if (cb) {
    far_call(RuntimeAddress(dest));
  } else {
    lea(tmp, RuntimeAddress(dest));
    blr(tmp);
  }
}

void MacroAssembler::push_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LS, done);
  mov(rscratch1, sp); // we can't use sp as the source in str
  str(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::pop_cont_fastpath(Register java_thread) {
  if (!Continuations::enabled()) return;
  Label done;
  ldr(rscratch1, Address(java_thread, JavaThread::cont_fastpath_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, done);
  str(zr, Address(java_thread, JavaThread::cont_fastpath_offset()));
  bind(done);
}

void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
  // we must set sp to zero to clear frame
  str(zr, Address(rthread, JavaThread::last_Java_sp_offset()));

  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    str(zr, Address(rthread, JavaThread::last_Java_fp_offset()));
  }

  // Always clear the pc because it could have been set by make_walkable()
  str(zr, Address(rthread, JavaThread::last_Java_pc_offset()));
}

// Calls to C land
//
// When entering C land, the rfp and sp of the last Java frame have to
// be recorded in the (thread-local) JavaThread object. When leaving C
// land, the last Java fp has to be reset to 0. This is required to
// allow proper stack traversal.
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Register last_java_pc,
                                         Register scratch) {

  if (last_java_pc->is_valid()) {
    str(last_java_pc, Address(rthread,
                              JavaThread::frame_anchor_offset()
                              + JavaFrameAnchor::last_Java_pc_offset()));
  }

  // determine last_java_sp register
  if (last_java_sp == sp) {
    mov(scratch, sp);
    last_java_sp = scratch;
  } else if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset()));
  }
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address last_java_pc,
                                         Register scratch) {
  assert(last_java_pc != nullptr, "must provide a valid PC");

  adr(scratch, last_java_pc);
  str(scratch, Address(rthread,
                       JavaThread::frame_anchor_offset()
                       + JavaFrameAnchor::last_Java_pc_offset()));

  set_last_Java_frame(last_java_sp, last_java_fp, noreg, scratch);
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         Label &L,
                                         Register scratch) {
  if (L.is_bound()) {
    set_last_Java_frame(last_java_sp, last_java_fp, target(L), scratch);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    set_last_Java_frame(last_java_sp, last_java_fp, pc() /* Patched later */, scratch);
  }
}

static inline bool target_needs_far_branch(address addr) {
  if (AOTCodeCache::is_on_for_write()) {
    return true;
  }
  // codecache size <= 128M
  if (!MacroAssembler::far_branches()) {
    return false;
  }
  // codecache size > 240M
  if (MacroAssembler::codestub_branch_needs_far_jump()) {
    return true;
  }
  // codecache size: 128M..240M
  return !CodeCache::is_non_nmethod(addr);
}

void MacroAssembler::far_call(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    blr(tmp);
  } else {
    bl(entry);
  }
}

int MacroAssembler::far_jump(Address entry, Register tmp) {
  assert(ReservedCodeCacheSize < 4*G, "branch out of range");
  assert(CodeCache::find_blob(entry.target()) != nullptr,
         "destination of far call not found in code cache");
  assert(entry.rspec().type() == relocInfo::external_word_type
         || entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::none, "wrong entry relocInfo type");
  address start = pc();
  if (target_needs_far_branch(entry.target())) {
    uint64_t offset;
    // We can use ADRP here because we know that the total size of
    // the code cache cannot exceed 2Gb (ADRP limit is 4GB).
    adrp(tmp, entry, offset);
    add(tmp, tmp, offset);
    br(tmp);
  } else {
    b(entry);
  }
  return pc() - start;
}

void MacroAssembler::reserved_stack_check() {
  // testing if reserved zone needs to be enabled
  Label no_reserved_zone_enabling;

  ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
  cmp(sp, rscratch1);
  br(Assembler::LO, no_reserved_zone_enabling);

  enter();   // LR and FP are live.
  lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone)));
  mov(c_rarg0, rthread);
  blr(rscratch1);
  leave();

  // We have already removed our own frame.
  // throw_delayed_StackOverflowError will think that it's been
  // called by our caller.
  lea(rscratch1, RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry()));
  br(rscratch1);
  should_not_reach_here();

  bind(no_reserved_zone_enabling);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg) {
    masm->mov(c_rarg3, arg);
  }
}

static bool is_preemptable(address entry_point) {
  return entry_point == CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter);
}
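
// The common base of all call_VM variants: record the last Java frame,
// pass rthread as the implicit first C argument, make the call, reload
// lr (it is not callee-saved and may have been clobbered), clear the
// frame anchor, and optionally forward any pending exception.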
void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address entry_point,
                                  int number_of_arguments,
                                  bool check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rthread;
  }

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = esp;
  }

  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");
  assert(java_thread == rthread, "unexpected register");
#ifdef ASSERT
  // TraceBytecodes does not use r12 but saves it over the call, so don't verify
  // if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");
#endif // ASSERT

  assert(java_thread != oop_result,   "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  mov(c_rarg0, java_thread);

  // set last Java frame before call
  assert(last_java_sp != rfp, "can't use rfp");

  Label l;
  if (is_preemptable(entry_point)) {
    // skip setting last_pc since we already set it to desired value.
    set_last_Java_frame(last_java_sp, rfp, noreg, rscratch1);
  } else {
    set_last_Java_frame(last_java_sp, rfp, l, rscratch1);
  }

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments, &l);

  // lr could be poisoned with PAC signature during throw_pending_exception
  // if it was tail-call optimized by compiler, since lr is not callee-saved
  // reload it with proper value
  adr(lr, l);

  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    ldr(rscratch1, Address(java_thread, in_bytes(Thread::pending_exception_offset())));
    Label ok;
    cbz(rscratch1, ok);
    lea(rscratch1, RuntimeAddress(StubRoutines::forward_exception_entry()));
    br(rscratch1);
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

// Check the entry target is always reachable from any branch.
static bool is_always_within_branch_range(Address entry) {
  if (AOTCodeCache::is_on_for_write()) {
    return false;
  }
  const address target = entry.target();

  if (!CodeCache::contains(target)) {
    // We always use trampolines for callees outside CodeCache.
    assert(entry.rspec().type() == relocInfo::runtime_call_type, "non-runtime call of an external target");
    return false;
  }

  if (!MacroAssembler::far_branches()) {
    return true;
  }

  if (entry.rspec().type() == relocInfo::runtime_call_type) {
    // Runtime calls are calls of a non-compiled method (stubs, adapters).
    // Non-compiled methods stay forever in CodeCache.
    // We check whether the longest possible branch is within the branch range.
    assert(CodeCache::find_blob(target) != nullptr &&
           !CodeCache::find_blob(target)->is_nmethod(),
           "runtime call of compiled method");
    const address right_longest_branch_start = CodeCache::high_bound() - NativeInstruction::instruction_size;
    const address left_longest_branch_start = CodeCache::low_bound();
    const bool is_reachable = Assembler::reachable_from_branch_at(left_longest_branch_start, target) &&
                              Assembler::reachable_from_branch_at(right_longest_branch_start, target);
    return is_reachable;
  }

  return false;
}

// Maybe emit a call via a trampoline. If the code cache is small
// trampolines won't be emitted.
address MacroAssembler::trampoline_call(Address entry) {
  assert(entry.rspec().type() == relocInfo::runtime_call_type
         || entry.rspec().type() == relocInfo::opt_virtual_call_type
         || entry.rspec().type() == relocInfo::static_call_type
         || entry.rspec().type() == relocInfo::virtual_call_type, "wrong reloc type");

  address target = entry.target();

  if (!is_always_within_branch_range(entry)) {
    if (!in_scratch_emit_size()) {
      // We don't want to emit a trampoline if C2 is generating dummy
      // code during its branch shortening phase.
      if (entry.rspec().type() == relocInfo::runtime_call_type) {
        assert(CodeBuffer::supports_shared_stubs(), "must support shared stubs");
        code()->share_trampoline_for(entry.target(), offset());
      } else {
        address stub = emit_trampoline_stub(offset(), target);
        if (stub == nullptr) {
          postcond(pc() == badAddress);
          return nullptr; // CodeCache is full
        }
      }
    }
    target = pc();
  }

  address call_pc = pc();
  relocate(entry.rspec());
  bl(target);

  postcond(pc() != badAddress);
  return call_pc;
}

// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
//
// call-site:
//   branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call site in the stub section:
//   load the call target from the constant pool
//   branch (LR still points to the call site above)

address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
                                             address dest) {
  // Max stub size: alignment nop, TrampolineStub.
  address stub = start_a_stub(max_trampoline_stub_size());
  if (stub == nullptr) {
    return nullptr; // CodeBuffer::expand failed
  }

  // Create a trampoline stub relocation which relates this trampoline stub
  // with the call instruction at insts_call_instruction_offset in the
  // instructions code-section.
  align(wordSize);
  relocate(trampoline_stub_Relocation::spec(code()->insts()->start()
                                            + insts_call_instruction_offset));
  const int stub_start_offset = offset();

  // Now, create the trampoline stub's code:
  // - load the call target
  // - branch to it
  Label target;
  ldr(rscratch1, target);
  br(rscratch1);
  bind(target);
  assert(offset() - stub_start_offset == NativeCallTrampolineStub::data_offset,
         "should be");
  emit_int64((int64_t)dest);

  const address stub_start_addr = addr_at(stub_start_offset);

  assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");

  end_a_stub();
  return stub_start_addr;
}

int MacroAssembler::max_trampoline_stub_size() {
  // Max stub size: alignment nop, TrampolineStub.
  return NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size;
}

void MacroAssembler::emit_static_call_stub() {
  // CompiledDirectCall::set_to_interpreted knows the
  // exact layout of this stub.

  isb();
  mov_metadata(rmethod, nullptr);

  // Jump to the entry point of the c2i stub.
  movptr(rscratch1, 0);
  br(rscratch1);
}

int MacroAssembler::static_call_stub_size() {
  // isb; movk; movz; movz; movk; movz; movz; br
  return 8 * NativeInstruction::instruction_size;
}

void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  tst(x, 0xff);
  cset(x, Assembler::NE);
}

address MacroAssembler::ic_call(address entry, jint method_index) {
  RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
  // address const_ptr = long_constant((jlong)Universe::non_oop_word());
  // uintptr_t offset;
  // ldr_constant(rscratch2, const_ptr);
  movptr(rscratch2, (intptr_t)Universe::non_oop_word());
  return trampoline_call(Address(entry, rh));
}

int MacroAssembler::ic_check_size() {
  int extra_instructions = UseCompactObjectHeaders ? 1 : 0;
  if (target_needs_far_branch(CAST_FROM_FN_PTR(address, SharedRuntime::get_ic_miss_stub()))) {
    return NativeInstruction::instruction_size * (7 + extra_instructions);
  } else {
    return NativeInstruction::instruction_size * (5 + extra_instructions);
  }
}

int MacroAssembler::ic_check(int end_alignment) {
  Register receiver = j_rarg0;
  Register data = rscratch2;
  Register tmp1 = rscratch1;
  Register tmp2 = r10;

  // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
  // before the inline cache check, so we don't have to execute any nop instructions when dispatching
  // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
  // before the inline cache check here, and not after.
  align(end_alignment, offset() + ic_check_size());

  int uep_offset = offset();

  if (UseCompactObjectHeaders) {
    load_narrow_klass_compact(tmp1, receiver);
    ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmpw(tmp1, tmp2);
  } else if (UseCompressedClassPointers) {
    ldrw(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
    ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmpw(tmp1, tmp2);
  } else {
    ldr(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
    ldr(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
    cmp(tmp1, tmp2);
  }

  Label dont;
  br(Assembler::EQ, dont);
  far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  bind(dont);
  assert((offset() % end_alignment) == 0, "Misaligned verified entry point");

  return uep_offset;
}
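
// A typical caller (illustrative) emits the check at the unverified
// entry point, e.g.
//   int uep_offset = ic_check(CodeEntryAlignment);
// so that the verified entry point that follows it ends up aligned.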

// Implementation of call_VM versions

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);

  pass_arg2(this, arg_2);

  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  call_VM_base(oop_result, rthread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  assert_different_registers(arg_1, c_rarg2);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert_different_registers(arg_1, c_rarg2, c_rarg3);
  assert_different_registers(arg_2, c_rarg3);
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}


void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  ldr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_offset()));
  verify_oop_msg(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
  ldr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
  str(zr, Address(java_thread, JavaThread::vm_result_2_offset()));
}

void MacroAssembler::align(int modulus) {
  align(modulus, offset());
}

// Ensure that the code at the buffer offset given by target (i.e.
// target - offset() bytes ahead of the current position) is aligned
// according to modulus.
void MacroAssembler::align(int modulus, int target) {
  int delta = target - offset();
  while ((offset() + delta) % modulus != 0) nop();
}
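
// (For example, ic_check() above calls align(end_alignment,
// offset() + ic_check_size()) so that it is the end of the inline
// cache check, i.e. the verified entry point, that gets aligned.)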
void MacroAssembler::post_call_nop() {
  if (!Continuations::enabled()) {
    return;
  }
  InstructionMark im(this);
  relocate(post_call_nop_Relocation::spec());
  InlineSkippedInstructionsCounter skipCounter(this);
  nop();
  movk(zr, 0);
  movk(zr, 0);
}

// these are no-ops overridden by InterpreterMacroAssembler

void MacroAssembler::check_and_handle_earlyret(Register java_thread) { }

void MacroAssembler::check_and_handle_popframe(Register java_thread) { }

// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface,
                                             bool return_method) {
  assert_different_registers(recv_klass, intf_klass, scan_temp);
  assert_different_registers(method_result, intf_klass, scan_temp);
  assert(recv_klass != method_result || !return_method,
         "recv_klass can be destroyed when method isn't needed");
  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = in_bytes(Klass::vtable_start_offset());
  int itentry_off = in_bytes(itableMethodEntry::method_offset());
  int scan_step = itableOffsetEntry::size() * wordSize;
  int vte_size = vtableEntry::size_in_bytes();
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));

  // Could store the aligned, prescaled offset in the klass.
  // lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(3)));
  add(scan_temp, scan_temp, vtable_base);

  if (return_method) {
    // Adjust recv_klass by scaled itable_index, so we can free itable_index.
    assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
    // lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
    lea(recv_klass, Address(recv_klass, itable_index, Address::lsl(3)));
    if (itentry_off)
      add(recv_klass, recv_klass, itentry_off);
  }

  // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  cmp(intf_klass, method_result);
  br(Assembler::EQ, found_method);
  bind(search);
  // Check that the previous entry is non-null. A null entry means that
  // the receiver class doesn't implement the interface, and wasn't the
  // same as when the caller was compiled.
  cbz(method_result, L_no_such_interface);
  if (itableOffsetEntry::interface_offset() != 0) {
    add(scan_temp, scan_temp, scan_step);
    ldr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset()));
  } else {
    ldr(method_result, Address(pre(scan_temp, scan_step)));
  }
  cmp(intf_klass, method_result);
  br(Assembler::NE, search);

  bind(found_method);

  // Got a hit.
  if (return_method) {
    ldrw(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset()));
    ldr(method_result, Address(recv_klass, scan_temp, Address::uxtw(0)));
  }
}

// Look up the method for a megamorphic invokeinterface call in a single pass over itable:
// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
// - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
// The target method is determined by <holder_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method_stub(Register recv_klass,
                                                  Register holder_klass,
                                                  Register resolved_klass,
                                                  Register method_result,
                                                  Register temp_itbl_klass,
                                                  Register scan_temp,
                                                  int itable_index,
                                                  Label& L_no_such_interface) {
  // 'method_result' is only used as output register at the very end of this method.
  // Until then we can reuse it as 'holder_offset'.
  Register holder_offset = method_result;
  assert_different_registers(resolved_klass, recv_klass, holder_klass, temp_itbl_klass, scan_temp, holder_offset);

  int vtable_start_offset = in_bytes(Klass::vtable_start_offset());
  int itable_offset_entry_size = itableOffsetEntry::size() * wordSize;
  int ioffset = in_bytes(itableOffsetEntry::interface_offset());
  int ooffset = in_bytes(itableOffsetEntry::offset_offset());

  Label L_loop_search_resolved_entry, L_resolved_found, L_holder_found;

  ldrw(scan_temp, Address(recv_klass, Klass::vtable_length_offset()));
  add(recv_klass, recv_klass, vtable_start_offset + ioffset);
  // itableOffsetEntry[] itable = recv_klass + Klass::vtable_start_offset() + sizeof(vtableEntry) * recv_klass->_vtable_len;
  // temp_itbl_klass = itable[0]._interface;
  int vtblEntrySize = vtableEntry::size_in_bytes();
  assert(vtblEntrySize == wordSize, "ldr lsl shift amount must be 3");
  ldr(temp_itbl_klass, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));
  mov(holder_offset, zr);
  // scan_temp = &(itable[0]._interface)
  lea(scan_temp, Address(recv_klass, scan_temp, Address::lsl(exact_log2(vtblEntrySize))));

  // Initial checks:
  //   - if (holder_klass != resolved_klass), go to "scan for resolved"
  //   - if (itable[0] == holder_klass), shortcut to "holder found"
  //   - if (itable[0] == 0), no such interface
  cmp(resolved_klass, holder_klass);
  br(Assembler::NE, L_loop_search_resolved_entry);
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::EQ, L_holder_found);
  cbz(temp_itbl_klass, L_no_such_interface);

  // Loop: Look for holder_klass record in itable
  //   do {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == holder_klass) {
  //       goto L_holder_found; // Found!
  //     }
  //   } while (temp_itbl_klass != 0);
  //   goto L_no_such_interface // Not found.
  Label L_search_holder;
  bind(L_search_holder);
  ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::EQ, L_holder_found);
  cbnz(temp_itbl_klass, L_search_holder);

  b(L_no_such_interface);

  // Loop: Look for resolved_class record in itable
  //   while (true) {
  //     temp_itbl_klass = *(scan_temp += itable_offset_entry_size);
  //     if (temp_itbl_klass == 0) {
  //       goto L_no_such_interface;
  //     }
  //     if (temp_itbl_klass == resolved_klass) {
  //       goto L_resolved_found; // Found!
  //     }
  //     if (temp_itbl_klass == holder_klass) {
  //       holder_offset = scan_temp;
  //     }
  //   }
  //
  Label L_loop_search_resolved;
  bind(L_loop_search_resolved);
  ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size)));
  bind(L_loop_search_resolved_entry);
  cbz(temp_itbl_klass, L_no_such_interface);
  cmp(resolved_klass, temp_itbl_klass);
  br(Assembler::EQ, L_resolved_found);
  cmp(holder_klass, temp_itbl_klass);
  br(Assembler::NE, L_loop_search_resolved);
  mov(holder_offset, scan_temp);
  b(L_loop_search_resolved);

  // See if we already have a holder klass. If not, go and scan for it.
  bind(L_resolved_found);
  cbz(holder_offset, L_search_holder);
  mov(scan_temp, holder_offset);

  // Finally, scan_temp contains holder_klass vtable offset
  bind(L_holder_found);
  ldrw(method_result, Address(scan_temp, ooffset - ioffset));
  add(recv_klass, recv_klass, itable_index * wordSize + in_bytes(itableMethodEntry::method_offset())
                              - vtable_start_offset - ioffset); // subtract offsets to restore the original value of recv_klass
  ldr(method_result, Address(recv_klass, method_result, Address::uxtw(0)));
}
1363 // } 1364 // if (temp_itbl_klass == holder_klass) { 1365 // holder_offset = scan_temp; 1366 // } 1367 // } 1368 // 1369 Label L_loop_search_resolved; 1370 bind(L_loop_search_resolved); 1371 ldr(temp_itbl_klass, Address(pre(scan_temp, itable_offset_entry_size))); 1372 bind(L_loop_search_resolved_entry); 1373 cbz(temp_itbl_klass, L_no_such_interface); 1374 cmp(resolved_klass, temp_itbl_klass); 1375 br(Assembler::EQ, L_resolved_found); 1376 cmp(holder_klass, temp_itbl_klass); 1377 br(Assembler::NE, L_loop_search_resolved); 1378 mov(holder_offset, scan_temp); 1379 b(L_loop_search_resolved); 1380 1381 // See if we already have a holder klass. If not, go and scan for it. 1382 bind(L_resolved_found); 1383 cbz(holder_offset, L_search_holder); 1384 mov(scan_temp, holder_offset); 1385 1386 // Finally, scan_temp contains holder_klass vtable offset 1387 bind(L_holder_found); 1388 ldrw(method_result, Address(scan_temp, ooffset - ioffset)); 1389 add(recv_klass, recv_klass, itable_index * wordSize + in_bytes(itableMethodEntry::method_offset()) 1390 - vtable_start_offset - ioffset); // substract offsets to restore the original value of recv_klass 1391 ldr(method_result, Address(recv_klass, method_result, Address::uxtw(0))); 1392 } 1393 1394 // virtual method calling 1395 void MacroAssembler::lookup_virtual_method(Register recv_klass, 1396 RegisterOrConstant vtable_index, 1397 Register method_result) { 1398 assert(vtableEntry::size() * wordSize == 8, 1399 "adjust the scaling in the code below"); 1400 int64_t vtable_offset_in_bytes = in_bytes(Klass::vtable_start_offset() + vtableEntry::method_offset()); 1401 1402 if (vtable_index.is_register()) { 1403 lea(method_result, Address(recv_klass, 1404 vtable_index.as_register(), 1405 Address::lsl(LogBytesPerWord))); 1406 ldr(method_result, Address(method_result, vtable_offset_in_bytes)); 1407 } else { 1408 vtable_offset_in_bytes += vtable_index.as_constant() * wordSize; 1409 ldr(method_result, 1410 form_address(rscratch1, recv_klass, vtable_offset_in_bytes, 0)); 1411 } 1412 } 1413 1414 void MacroAssembler::check_klass_subtype(Register sub_klass, 1415 Register super_klass, 1416 Register temp_reg, 1417 Label& L_success) { 1418 Label L_failure; 1419 check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr); 1420 check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr); 1421 bind(L_failure); 1422 } 1423 1424 1425 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, 1426 Register super_klass, 1427 Register temp_reg, 1428 Label* L_success, 1429 Label* L_failure, 1430 Label* L_slow_path, 1431 Register super_check_offset) { 1432 assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset); 1433 bool must_load_sco = ! super_check_offset->is_valid(); 1434 if (must_load_sco) { 1435 assert(temp_reg != noreg, "supply either a temp or a register offset"); 1436 } 1437 1438 Label L_fallthrough; 1439 int label_nulls = 0; 1440 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 1441 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 1442 if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; } 1443 assert(label_nulls <= 1, "at most one null in the batch"); 1444 1445 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 1446 Address super_check_offset_addr(super_klass, sco_offset); 1447 1448 // Hacked jmp, which may only be used just before L_fallthrough. 
void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                                   Register super_check_offset) {
  assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset);
  bool must_load_sco = !super_check_offset->is_valid();
  if (must_load_sco) {
    assert(temp_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == nullptr)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == nullptr)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one null in the batch");

  int sco_offset = in_bytes(Klass::super_check_offset_offset());
  Address super_check_offset_addr(super_klass, sco_offset);

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                                \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
  else                            b(label)                /*omit semi*/

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface. Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmp(sub_klass, super_klass);
  br(Assembler::EQ, *L_success);

  // Check the supertype display:
  if (must_load_sco) {
    ldrw(temp_reg, super_check_offset_addr);
    super_check_offset = temp_reg;
  }

  Address super_check_addr(sub_klass, super_check_offset);
  ldr(rscratch1, super_check_addr);
  cmp(super_klass, rscratch1); // load displayed supertype
  br(Assembler::EQ, *L_success);

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  sub(rscratch1, super_check_offset, in_bytes(Klass::secondary_super_cache_offset()));
  if (L_failure == &L_fallthrough) {
    cbz(rscratch1, *L_slow_path);
  } else {
    cbnz(rscratch1, *L_failure);
    final_jmp(*L_slow_path);
  }

  bind(L_fallthrough);

#undef final_jmp
}

// These two are taken from x86, but they look generally useful

// scans count pointer sized words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scan(Register addr, Register value, Register count,
                                Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldr(scratch, post(addr, wordSize));
  cmp(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}

// scans count 4 byte words at [addr] for occurrence of value,
// generic
void MacroAssembler::repne_scanw(Register addr, Register value, Register count,
                                 Register scratch) {
  Label Lloop, Lexit;
  cbz(count, Lexit);
  bind(Lloop);
  ldrw(scratch, post(addr, wordSize));
  cmpw(value, scratch);
  br(EQ, Lexit);
  sub(count, count, 1);
  cbnz(count, Lloop);
  bind(Lexit);
}
1539
1540 assert_different_registers(sub_klass, super_klass, temp_reg);
1541 if (temp2_reg != noreg)
1542 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1);
1543 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
1544
1545 Label L_fallthrough;
1546 int label_nulls = 0;
1547 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
1548 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
1549 assert(label_nulls <= 1, "at most one null in the batch");
1550
1551 // a couple of useful fields in sub_klass:
1552 int ss_offset = in_bytes(Klass::secondary_supers_offset());
1553 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
1554 Address secondary_supers_addr(sub_klass, ss_offset);
1555 Address super_cache_addr( sub_klass, sc_offset);
1556
1557 BLOCK_COMMENT("check_klass_subtype_slow_path");
1558
1559 // Do a linear scan of the secondary super-klass chain.
1560 // This code is rarely used, so simplicity is a virtue here.
1561 // The repne_scan instruction uses fixed registers, which we must spill.
1562 // Don't worry too much about pre-existing connections with the input regs.
1563
1564 assert(sub_klass != r0, "killed reg"); // killed by mov(r0, super)
1565 assert(sub_klass != r2, "killed reg"); // killed by lea(r2, &pst_counter)
1566
1567 RegSet pushed_registers;
1568 if (!IS_A_TEMP(r2)) pushed_registers += r2;
1569 if (!IS_A_TEMP(r5)) pushed_registers += r5;
1570
1571 if (super_klass != r0) {
1572 if (!IS_A_TEMP(r0)) pushed_registers += r0;
1573 }
1574
1575 push(pushed_registers, sp);
1576
1577 // Get super_klass value into r0 (even if it was in r5 or r2).
1578 if (super_klass != r0) {
1579 mov(r0, super_klass);
1580 }
1581
1582 #ifndef PRODUCT
1583 incrementw(ExternalAddress((address)&SharedRuntime::_partial_subtype_ctr));
1584 #endif //PRODUCT
1585
1586 // We will consult the secondary-super array.
1587 ldr(r5, secondary_supers_addr);
1588 // Load the array length.
1589 ldrw(r2, Address(r5, Array<Klass*>::length_offset_in_bytes()));
1590 // Skip to start of data.
1591 add(r5, r5, Array<Klass*>::base_offset_in_bytes());
1592
1593 cmp(sp, zr); // Clear Z flag; SP is never zero
1594 // Scan R2 words at [R5] for an occurrence of R0.
1595 // Set NZ/Z based on last compare.
1596 repne_scan(r5, r0, r2, rscratch1);
1597
1598 // Unspill the temp. registers:
1599 pop(pushed_registers, sp);
1600
1601 br(Assembler::NE, *L_failure);
1602
1603 // Success. Cache the super we found and proceed in triumph.
1604
1605 if (UseSecondarySupersCache) {
1606 str(super_klass, super_cache_addr);
1607 }
1608
1609 if (L_success != &L_fallthrough) {
1610 b(*L_success);
1611 }
1612
1613 #undef IS_A_TEMP
1614
1615 bind(L_fallthrough);
1616 }
1617
1618 // If Register r is invalid, take the next register from
1619 // available_regs and add it to regs_to_push.
1620 Register MacroAssembler::allocate_if_noreg(Register r,
1621 RegSetIterator<Register> &available_regs,
1622 RegSet &regs_to_push) {
1623 if (!r->is_valid()) {
1624 r = *available_regs++;
1625 regs_to_push += r;
1626 }
1627 return r;
1628 }
1629
1630 // check_klass_subtype_slow_path_table() looks for super_klass in the
1631 // hash table belonging to sub_klass, branching to L_success or
1632 // L_failure as appropriate. This is essentially a shim which
1633 // allocates registers as necessary then calls
1634 // lookup_secondary_supers_table() to do the work.
Any of the temp
1635 // regs may be noreg, in which case this logic will choose some
1636 // registers and push and pop them from the stack.
1637 void MacroAssembler::check_klass_subtype_slow_path_table(Register sub_klass,
1638 Register super_klass,
1639 Register temp_reg,
1640 Register temp2_reg,
1641 Register temp3_reg,
1642 Register result_reg,
1643 FloatRegister vtemp,
1644 Label* L_success,
1645 Label* L_failure,
1646 bool set_cond_codes) {
1647 RegSet temps = RegSet::of(temp_reg, temp2_reg, temp3_reg);
1648
1649 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, rscratch1);
1650
1651 Label L_fallthrough;
1652 int label_nulls = 0;
1653 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; }
1654 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; }
1655 assert(label_nulls <= 1, "at most one null in the batch");
1656
1657 BLOCK_COMMENT("check_klass_subtype_slow_path");
1658
1659 RegSetIterator<Register> available_regs
1660 = (RegSet::range(r0, r15) - temps - sub_klass - super_klass).begin();
1661
1662 RegSet pushed_regs;
1663
1664 temp_reg = allocate_if_noreg(temp_reg, available_regs, pushed_regs);
1665 temp2_reg = allocate_if_noreg(temp2_reg, available_regs, pushed_regs);
1666 temp3_reg = allocate_if_noreg(temp3_reg, available_regs, pushed_regs);
1667 result_reg = allocate_if_noreg(result_reg, available_regs, pushed_regs);
1668
1669 push(pushed_regs, sp);
1670
1671 lookup_secondary_supers_table_var(sub_klass,
1672 super_klass,
1673 temp_reg, temp2_reg, temp3_reg, vtemp, result_reg,
1674 nullptr);
1675 cmp(result_reg, zr);
1676
1677 // Unspill the temp. registers:
1678 pop(pushed_regs, sp);
1679
1680 // NB! Callers may assume that, when set_cond_codes is true, this
1681 // code sets temp2_reg to a nonzero value.
1682 if (set_cond_codes) {
1683 mov(temp2_reg, 1);
1684 }
1685
1686 br(Assembler::NE, *L_failure);
1687
1688 if (L_success != &L_fallthrough) {
1689 b(*L_success);
1690 }
1691
1692 bind(L_fallthrough);
1693 }
1694
1695 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
1696 Register super_klass,
1697 Register temp_reg,
1698 Register temp2_reg,
1699 Label* L_success,
1700 Label* L_failure,
1701 bool set_cond_codes) {
1702 if (UseSecondarySupersTable) {
1703 check_klass_subtype_slow_path_table
1704 (sub_klass, super_klass, temp_reg, temp2_reg, /*temp3*/noreg, /*result*/noreg,
1705 /*vtemp*/fnoreg,
1706 L_success, L_failure, set_cond_codes);
1707 } else {
1708 check_klass_subtype_slow_path_linear
1709 (sub_klass, super_klass, temp_reg, temp2_reg, L_success, L_failure, set_cond_codes);
1710 }
1711 }
1712
1713
1714 // Ensure that the inline code and the stub are using the same registers.
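// For reference, the fixed assignment enforced by the macro (and by
// aarch64.ad) is:
//   r_super_klass -> r0, r_array_base -> r1, r_array_length -> r2,
//   r_array_index -> r3, r_sub_klass -> r4, r_bitmap -> rscratch2,
//   result -> r5,
// where the last four may instead be noreg at a given use site.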
1715 #define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS \ 1716 do { \ 1717 assert(r_super_klass == r0 && \ 1718 r_array_base == r1 && \ 1719 r_array_length == r2 && \ 1720 (r_array_index == r3 || r_array_index == noreg) && \ 1721 (r_sub_klass == r4 || r_sub_klass == noreg) && \ 1722 (r_bitmap == rscratch2 || r_bitmap == noreg) && \ 1723 (result == r5 || result == noreg), "registers must match aarch64.ad"); \ 1724 } while(0) 1725 1726 bool MacroAssembler::lookup_secondary_supers_table_const(Register r_sub_klass, 1727 Register r_super_klass, 1728 Register temp1, 1729 Register temp2, 1730 Register temp3, 1731 FloatRegister vtemp, 1732 Register result, 1733 u1 super_klass_slot, 1734 bool stub_is_near) { 1735 assert_different_registers(r_sub_klass, temp1, temp2, temp3, result, rscratch1, rscratch2); 1736 1737 Label L_fallthrough; 1738 1739 BLOCK_COMMENT("lookup_secondary_supers_table {"); 1740 1741 const Register 1742 r_array_base = temp1, // r1 1743 r_array_length = temp2, // r2 1744 r_array_index = temp3, // r3 1745 r_bitmap = rscratch2; 1746 1747 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS; 1748 1749 u1 bit = super_klass_slot; 1750 1751 // Make sure that result is nonzero if the TBZ below misses. 1752 mov(result, 1); 1753 1754 // We're going to need the bitmap in a vector reg and in a core reg, 1755 // so load both now. 1756 ldr(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset())); 1757 if (bit != 0) { 1758 ldrd(vtemp, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset())); 1759 } 1760 // First check the bitmap to see if super_klass might be present. If 1761 // the bit is zero, we are certain that super_klass is not one of 1762 // the secondary supers. 1763 tbz(r_bitmap, bit, L_fallthrough); 1764 1765 // Get the first array index that can contain super_klass into r_array_index. 1766 if (bit != 0) { 1767 shld(vtemp, vtemp, Klass::SECONDARY_SUPERS_TABLE_MASK - bit); 1768 cnt(vtemp, T8B, vtemp); 1769 addv(vtemp, T8B, vtemp); 1770 fmovd(r_array_index, vtemp); 1771 } else { 1772 mov(r_array_index, (u1)1); 1773 } 1774 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word. 1775 1776 // We will consult the secondary-super array. 1777 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 1778 1779 // The value i in r_array_index is >= 1, so even though r_array_base 1780 // points to the length, we don't need to adjust it to point to the 1781 // data. 1782 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code"); 1783 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code"); 1784 1785 ldr(result, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord))); 1786 eor(result, result, r_super_klass); 1787 cbz(result, L_fallthrough); // Found a match 1788 1789 // Is there another entry to check? Consult the bitmap. 1790 tbz(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK, L_fallthrough); 1791 1792 // Linear probe. 1793 if (bit != 0) { 1794 ror(r_bitmap, r_bitmap, bit); 1795 } 1796 1797 // The slot we just inspected is at secondary_supers[r_array_index - 1]. 1798 // The next slot to be inspected, by the stub we're about to call, 1799 // is secondary_supers[r_array_index]. Bits 0 and 1 in the bitmap 1800 // have been checked. 
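// What follows is the hand-off to the shared slow-path stub. A sketch
// of the choice made below: if the caller guarantees the stub is within
// direct branch range (stub_is_near), a plain bl is emitted; otherwise
// a trampoline call is used, and a failed trampoline allocation is
// reported by returning false.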
1801 Address stub = RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub()); 1802 if (stub_is_near) { 1803 bl(stub); 1804 } else { 1805 address call = trampoline_call(stub); 1806 if (call == nullptr) { 1807 return false; // trampoline allocation failed 1808 } 1809 } 1810 1811 BLOCK_COMMENT("} lookup_secondary_supers_table"); 1812 1813 bind(L_fallthrough); 1814 1815 if (VerifySecondarySupers) { 1816 verify_secondary_supers_table(r_sub_klass, r_super_klass, // r4, r0 1817 temp1, temp2, result); // r1, r2, r5 1818 } 1819 return true; 1820 } 1821 1822 // At runtime, return 0 in result if r_super_klass is a superclass of 1823 // r_sub_klass, otherwise return nonzero. Use this version of 1824 // lookup_secondary_supers_table() if you don't know ahead of time 1825 // which superclass will be searched for. Used by interpreter and 1826 // runtime stubs. It is larger and has somewhat greater latency than 1827 // the version above, which takes a constant super_klass_slot. 1828 void MacroAssembler::lookup_secondary_supers_table_var(Register r_sub_klass, 1829 Register r_super_klass, 1830 Register temp1, 1831 Register temp2, 1832 Register temp3, 1833 FloatRegister vtemp, 1834 Register result, 1835 Label *L_success) { 1836 assert_different_registers(r_sub_klass, temp1, temp2, temp3, result, rscratch1, rscratch2); 1837 1838 Label L_fallthrough; 1839 1840 BLOCK_COMMENT("lookup_secondary_supers_table {"); 1841 1842 const Register 1843 r_array_index = temp3, 1844 slot = rscratch1, 1845 r_bitmap = rscratch2; 1846 1847 ldrb(slot, Address(r_super_klass, Klass::hash_slot_offset())); 1848 1849 // Make sure that result is nonzero if the test below misses. 1850 mov(result, 1); 1851 1852 ldr(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset())); 1853 1854 // First check the bitmap to see if super_klass might be present. If 1855 // the bit is zero, we are certain that super_klass is not one of 1856 // the secondary supers. 1857 1858 // This next instruction is equivalent to: 1859 // mov(tmp_reg, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1)); 1860 // sub(temp2, tmp_reg, slot); 1861 eor(temp2, slot, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1)); 1862 lslv(temp2, r_bitmap, temp2); 1863 tbz(temp2, Klass::SECONDARY_SUPERS_TABLE_SIZE - 1, L_fallthrough); 1864 1865 bool must_save_v0 = (vtemp == fnoreg); 1866 if (must_save_v0) { 1867 // temp1 and result are free, so use them to preserve vtemp 1868 vtemp = v0; 1869 mov(temp1, vtemp, D, 0); 1870 mov(result, vtemp, D, 1); 1871 } 1872 1873 // Get the first array index that can contain super_klass into r_array_index. 1874 mov(vtemp, D, 0, temp2); 1875 cnt(vtemp, T8B, vtemp); 1876 addv(vtemp, T8B, vtemp); 1877 mov(r_array_index, vtemp, D, 0); 1878 1879 if (must_save_v0) { 1880 mov(vtemp, D, 0, temp1 ); 1881 mov(vtemp, D, 1, result); 1882 } 1883 1884 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word. 1885 1886 const Register 1887 r_array_base = temp1, 1888 r_array_length = temp2; 1889 1890 // The value i in r_array_index is >= 1, so even though r_array_base 1891 // points to the length, we don't need to adjust it to point to the 1892 // data. 1893 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code"); 1894 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code"); 1895 1896 // We will consult the secondary-super array. 
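// First probe, roughly (pseudocode; the off-by-one of r_array_index and
// the not-yet-adjusted r_array_base cancel each other out, as noted above):
//   result = r_array_base[r_array_index] ^ r_super_klass; // 0 iff match
//   if (result == 0) goto success;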
1897 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 1898 1899 ldr(result, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord))); 1900 eor(result, result, r_super_klass); 1901 cbz(result, L_success ? *L_success : L_fallthrough); // Found a match 1902 1903 // Is there another entry to check? Consult the bitmap. 1904 rorv(r_bitmap, r_bitmap, slot); 1905 // rol(r_bitmap, r_bitmap, 1); 1906 tbz(r_bitmap, 1, L_fallthrough); 1907 1908 // The slot we just inspected is at secondary_supers[r_array_index - 1]. 1909 // The next slot to be inspected, by the logic we're about to call, 1910 // is secondary_supers[r_array_index]. Bits 0 and 1 in the bitmap 1911 // have been checked. 1912 lookup_secondary_supers_table_slow_path(r_super_klass, r_array_base, r_array_index, 1913 r_bitmap, r_array_length, result, /*is_stub*/false); 1914 1915 BLOCK_COMMENT("} lookup_secondary_supers_table"); 1916 1917 bind(L_fallthrough); 1918 1919 if (VerifySecondarySupers) { 1920 verify_secondary_supers_table(r_sub_klass, r_super_klass, // r4, r0 1921 temp1, temp2, result); // r1, r2, r5 1922 } 1923 1924 if (L_success) { 1925 cbz(result, *L_success); 1926 } 1927 } 1928 1929 // Called by code generated by check_klass_subtype_slow_path 1930 // above. This is called when there is a collision in the hashed 1931 // lookup in the secondary supers array. 1932 void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass, 1933 Register r_array_base, 1934 Register r_array_index, 1935 Register r_bitmap, 1936 Register temp1, 1937 Register result, 1938 bool is_stub) { 1939 assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, result, rscratch1); 1940 1941 const Register 1942 r_array_length = temp1, 1943 r_sub_klass = noreg; // unused 1944 1945 if (is_stub) { 1946 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS; 1947 } 1948 1949 Label L_fallthrough, L_huge; 1950 1951 // Load the array length. 1952 ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes())); 1953 // And adjust the array base to point to the data. 1954 // NB! Effectively increments current slot index by 1. 1955 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, ""); 1956 add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes()); 1957 1958 // The bitmap is full to bursting. 1959 // Implicit invariant: BITMAP_FULL implies (length > 0) 1960 assert(Klass::SECONDARY_SUPERS_BITMAP_FULL == ~uintx(0), ""); 1961 cmpw(r_array_length, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 2)); 1962 br(GT, L_huge); 1963 1964 // NB! Our caller has checked bits 0 and 1 in the bitmap. The 1965 // current slot (at secondary_supers[r_array_index]) has not yet 1966 // been inspected, and r_array_index may be out of bounds if we 1967 // wrapped around the end of the array. 1968 1969 { // This is conventional linear probing, but instead of terminating 1970 // when a null entry is found in the table, we maintain a bitmap 1971 // in which a 0 indicates missing entries. 1972 // As long as the bitmap is not completely full, 1973 // array_length == popcount(bitmap). The array_length check above 1974 // guarantees there are 0s in the bitmap, so the loop eventually 1975 // terminates. 1976 Label L_loop; 1977 bind(L_loop); 1978 1979 // Check for wraparound. 
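// i.e. (a sketch of the cmp/csel pair below):
//   if (r_array_index >= r_array_length) r_array_index = 0;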
1980 cmp(r_array_index, r_array_length);
1981 csel(r_array_index, zr, r_array_index, GE);
1982
1983 ldr(rscratch1, Address(r_array_base, r_array_index, Address::lsl(LogBytesPerWord)));
1984 eor(result, rscratch1, r_super_klass);
1985 cbz(result, L_fallthrough);
1986
1987 tbz(r_bitmap, 2, L_fallthrough); // look-ahead check (Bit 2); result is non-zero
1988
1989 ror(r_bitmap, r_bitmap, 1);
1990 add(r_array_index, r_array_index, 1);
1991 b(L_loop);
1992 }
1993
1994 { // Degenerate case: more than 64 secondary supers.
1995 // FIXME: We could do something smarter here, maybe a vectorized
1996 // comparison or a binary search, but is that worth any added
1997 // complexity?
1998 bind(L_huge);
1999 cmp(sp, zr); // Clear Z flag; SP is never zero
2000 repne_scan(r_array_base, r_super_klass, r_array_length, rscratch1);
2001 cset(result, NE); // result == 0 iff we got a match.
2002 }
2003
2004 bind(L_fallthrough);
2005 }
2006
2007 // Make sure that the hashed lookup and a linear scan agree.
2008 void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass,
2009 Register r_super_klass,
2010 Register temp1,
2011 Register temp2,
2012 Register result) {
2013 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, result, rscratch1);
2014
2015 const Register
2016 r_array_base = temp1,
2017 r_array_length = temp2,
2018 r_array_index = noreg, // unused
2019 r_bitmap = noreg; // unused
2020
2021 BLOCK_COMMENT("verify_secondary_supers_table {");
2022
2023 // We will consult the secondary-super array.
2024 ldr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset())));
2025
2026 // Load the array length.
2027 ldrw(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes()));
2028 // And adjust the array base to point to the data.
2029 add(r_array_base, r_array_base, Array<Klass*>::base_offset_in_bytes());
2030
2031 cmp(sp, zr); // Clear Z flag; SP is never zero
2032 // Scan r_array_length words at [r_array_base] for an occurrence of r_super_klass.
2033 // Set NZ/Z based on last compare.
2034 repne_scan(/*addr*/r_array_base, /*value*/r_super_klass, /*count*/r_array_length, rscratch2);
2035 // The cset below leaves rscratch1 == 0 iff we got a match.
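// In effect (a sketch of the comparison that follows):
//   linear = scan_found_match ? 0 : 1; // materialized into rscratch1
//   hashed = (result == 0) ? 0 : 1;    // normalized hashed-lookup result
//   if (linear != hashed) report the mismatch and stop (the block below).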
2036 cset(rscratch1, NE); 2037 2038 Label passed; 2039 cmp(result, zr); 2040 cset(result, NE); // normalize result to 0/1 for comparison 2041 2042 cmp(rscratch1, result); 2043 br(EQ, passed); 2044 { 2045 mov(r0, r_super_klass); // r0 <- r0 2046 mov(r1, r_sub_klass); // r1 <- r4 2047 mov(r2, /*expected*/rscratch1); // r2 <- r8 2048 mov(r3, result); // r3 <- r5 2049 mov(r4, (address)("mismatch")); // r4 <- const 2050 rt_call(CAST_FROM_FN_PTR(address, Klass::on_secondary_supers_verification_failure), rscratch2); 2051 should_not_reach_here(); 2052 } 2053 bind(passed); 2054 2055 BLOCK_COMMENT("} verify_secondary_supers_table"); 2056 } 2057 2058 void MacroAssembler::clinit_barrier(Register klass, Register scratch, Label* L_fast_path, Label* L_slow_path) { 2059 assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required"); 2060 assert_different_registers(klass, rthread, scratch); 2061 2062 Label L_fallthrough, L_tmp; 2063 if (L_fast_path == nullptr) { 2064 L_fast_path = &L_fallthrough; 2065 } else if (L_slow_path == nullptr) { 2066 L_slow_path = &L_fallthrough; 2067 } 2068 // Fast path check: class is fully initialized 2069 lea(scratch, Address(klass, InstanceKlass::init_state_offset())); 2070 ldarb(scratch, scratch); 2071 subs(zr, scratch, InstanceKlass::fully_initialized); 2072 br(Assembler::EQ, *L_fast_path); 2073 2074 // Fast path check: current thread is initializer thread 2075 ldr(scratch, Address(klass, InstanceKlass::init_thread_offset())); 2076 cmp(rthread, scratch); 2077 2078 if (L_slow_path == &L_fallthrough) { 2079 br(Assembler::EQ, *L_fast_path); 2080 bind(*L_slow_path); 2081 } else if (L_fast_path == &L_fallthrough) { 2082 br(Assembler::NE, *L_slow_path); 2083 bind(*L_fast_path); 2084 } else { 2085 Unimplemented(); 2086 } 2087 } 2088 2089 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) { 2090 if (!VerifyOops) return; 2091 2092 // Pass register number to verify_oop_subroutine 2093 const char* b = nullptr; 2094 { 2095 ResourceMark rm; 2096 stringStream ss; 2097 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line); 2098 b = code_string(ss.as_string()); 2099 } 2100 BLOCK_COMMENT("verify_oop {"); 2101 2102 strip_return_address(); // This might happen within a stack frame. 2103 protect_return_address(); 2104 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize))); 2105 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize))); 2106 2107 mov(r0, reg); 2108 movptr(rscratch1, (uintptr_t)(address)b); 2109 2110 // call indirectly to solve generation ordering problem 2111 lea(rscratch2, RuntimeAddress(StubRoutines::verify_oop_subroutine_entry_address())); 2112 ldr(rscratch2, Address(rscratch2)); 2113 blr(rscratch2); 2114 2115 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize))); 2116 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize))); 2117 authenticate_return_address(); 2118 2119 BLOCK_COMMENT("} verify_oop"); 2120 } 2121 2122 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) { 2123 if (!VerifyOops) return; 2124 2125 const char* b = nullptr; 2126 { 2127 ResourceMark rm; 2128 stringStream ss; 2129 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line); 2130 b = code_string(ss.as_string()); 2131 } 2132 BLOCK_COMMENT("verify_oop_addr {"); 2133 2134 strip_return_address(); // This might happen within a stack frame. 
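// (Return-address protection housekeeping, assuming the usual PAC
// helpers: strip_return_address above removed any existing signature
// from lr, protect_return_address re-signs it before lr is spilled, and
// authenticate_return_address below checks it after the reload.)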
2135 protect_return_address(); 2136 stp(r0, rscratch1, Address(pre(sp, -2 * wordSize))); 2137 stp(rscratch2, lr, Address(pre(sp, -2 * wordSize))); 2138 2139 // addr may contain sp so we will have to adjust it based on the 2140 // pushes that we just did. 2141 if (addr.uses(sp)) { 2142 lea(r0, addr); 2143 ldr(r0, Address(r0, 4 * wordSize)); 2144 } else { 2145 ldr(r0, addr); 2146 } 2147 movptr(rscratch1, (uintptr_t)(address)b); 2148 2149 // call indirectly to solve generation ordering problem 2150 lea(rscratch2, RuntimeAddress(StubRoutines::verify_oop_subroutine_entry_address())); 2151 ldr(rscratch2, Address(rscratch2)); 2152 blr(rscratch2); 2153 2154 ldp(rscratch2, lr, Address(post(sp, 2 * wordSize))); 2155 ldp(r0, rscratch1, Address(post(sp, 2 * wordSize))); 2156 authenticate_return_address(); 2157 2158 BLOCK_COMMENT("} verify_oop_addr"); 2159 } 2160 2161 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 2162 int extra_slot_offset) { 2163 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 2164 int stackElementSize = Interpreter::stackElementSize; 2165 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0); 2166 #ifdef ASSERT 2167 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1); 2168 assert(offset1 - offset == stackElementSize, "correct arithmetic"); 2169 #endif 2170 if (arg_slot.is_constant()) { 2171 return Address(esp, arg_slot.as_constant() * stackElementSize 2172 + offset); 2173 } else { 2174 add(rscratch1, esp, arg_slot.as_register(), 2175 ext::uxtx, exact_log2(stackElementSize)); 2176 return Address(rscratch1, offset); 2177 } 2178 } 2179 2180 void MacroAssembler::call_VM_leaf_base(address entry_point, 2181 int number_of_arguments, 2182 Label *retaddr) { 2183 Label E, L; 2184 2185 stp(rscratch1, rmethod, Address(pre(sp, -2 * wordSize))); 2186 2187 mov(rscratch1, RuntimeAddress(entry_point)); 2188 blr(rscratch1); 2189 if (retaddr) 2190 bind(*retaddr); 2191 2192 ldp(rscratch1, rmethod, Address(post(sp, 2 * wordSize))); 2193 } 2194 2195 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) { 2196 call_VM_leaf_base(entry_point, number_of_arguments); 2197 } 2198 2199 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) { 2200 pass_arg0(this, arg_0); 2201 call_VM_leaf_base(entry_point, 1); 2202 } 2203 2204 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 2205 assert_different_registers(arg_1, c_rarg0); 2206 pass_arg0(this, arg_0); 2207 pass_arg1(this, arg_1); 2208 call_VM_leaf_base(entry_point, 2); 2209 } 2210 2211 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, 2212 Register arg_1, Register arg_2) { 2213 assert_different_registers(arg_1, c_rarg0); 2214 assert_different_registers(arg_2, c_rarg0, c_rarg1); 2215 pass_arg0(this, arg_0); 2216 pass_arg1(this, arg_1); 2217 pass_arg2(this, arg_2); 2218 call_VM_leaf_base(entry_point, 3); 2219 } 2220 2221 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) { 2222 pass_arg0(this, arg_0); 2223 MacroAssembler::call_VM_leaf_base(entry_point, 1); 2224 } 2225 2226 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) { 2227 2228 assert_different_registers(arg_0, c_rarg1); 2229 pass_arg1(this, arg_1); 2230 pass_arg0(this, arg_0); 2231 MacroAssembler::call_VM_leaf_base(entry_point, 2); 2232 } 2233 2234 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) { 2235 
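// Arguments are marshalled into the c_rarg registers in reverse order
// (arg_2 first), so each remaining source register must not alias a
// c_rarg that has already been written; the asserts below check this.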
assert_different_registers(arg_0, c_rarg1, c_rarg2); 2236 assert_different_registers(arg_1, c_rarg2); 2237 pass_arg2(this, arg_2); 2238 pass_arg1(this, arg_1); 2239 pass_arg0(this, arg_0); 2240 MacroAssembler::call_VM_leaf_base(entry_point, 3); 2241 } 2242 2243 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) { 2244 assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3); 2245 assert_different_registers(arg_1, c_rarg2, c_rarg3); 2246 assert_different_registers(arg_2, c_rarg3); 2247 pass_arg3(this, arg_3); 2248 pass_arg2(this, arg_2); 2249 pass_arg1(this, arg_1); 2250 pass_arg0(this, arg_0); 2251 MacroAssembler::call_VM_leaf_base(entry_point, 4); 2252 } 2253 2254 void MacroAssembler::null_check(Register reg, int offset) { 2255 if (needs_explicit_null_check(offset)) { 2256 // provoke OS null exception if reg is null by 2257 // accessing M[reg] w/o changing any registers 2258 // NOTE: this is plenty to provoke a segv 2259 ldr(zr, Address(reg)); 2260 } else { 2261 // nothing to do, (later) access of M[reg + offset] 2262 // will provoke OS null exception if reg is null 2263 } 2264 } 2265 2266 // MacroAssembler protected routines needed to implement 2267 // public methods 2268 2269 void MacroAssembler::mov(Register r, Address dest) { 2270 code_section()->relocate(pc(), dest.rspec()); 2271 uint64_t imm64 = (uint64_t)dest.target(); 2272 movptr(r, imm64); 2273 } 2274 2275 // Move a constant pointer into r. In AArch64 mode the virtual 2276 // address space is 48 bits in size, so we only need three 2277 // instructions to create a patchable instruction sequence that can 2278 // reach anywhere. 2279 void MacroAssembler::movptr(Register r, uintptr_t imm64) { 2280 #ifndef PRODUCT 2281 { 2282 char buffer[64]; 2283 snprintf(buffer, sizeof(buffer), "0x%" PRIX64, (uint64_t)imm64); 2284 block_comment(buffer); 2285 } 2286 #endif 2287 assert(imm64 < (1ull << 48), "48-bit overflow in address constant"); 2288 movz(r, imm64 & 0xffff); 2289 imm64 >>= 16; 2290 movk(r, imm64 & 0xffff, 16); 2291 imm64 >>= 16; 2292 movk(r, imm64 & 0xffff, 32); 2293 } 2294 2295 // Macro to mov replicated immediate to vector register. 2296 // imm64: only the lower 8/16/32 bits are considered for B/H/S type. That is, 2297 // the upper 56/48/32 bits must be zeros for B/H/S type. 
2298 // Vd will get the following values for different arrangements in T 2299 // imm64 == hex 000000gh T8B: Vd = ghghghghghghghgh 2300 // imm64 == hex 000000gh T16B: Vd = ghghghghghghghghghghghghghghghgh 2301 // imm64 == hex 0000efgh T4H: Vd = efghefghefghefgh 2302 // imm64 == hex 0000efgh T8H: Vd = efghefghefghefghefghefghefghefgh 2303 // imm64 == hex abcdefgh T2S: Vd = abcdefghabcdefgh 2304 // imm64 == hex abcdefgh T4S: Vd = abcdefghabcdefghabcdefghabcdefgh 2305 // imm64 == hex abcdefgh T1D: Vd = 00000000abcdefgh 2306 // imm64 == hex abcdefgh T2D: Vd = 00000000abcdefgh00000000abcdefgh 2307 // Clobbers rscratch1 2308 void MacroAssembler::mov(FloatRegister Vd, SIMD_Arrangement T, uint64_t imm64) { 2309 assert(T != T1Q, "unsupported"); 2310 if (T == T1D || T == T2D) { 2311 int imm = operand_valid_for_movi_immediate(imm64, T); 2312 if (-1 != imm) { 2313 movi(Vd, T, imm); 2314 } else { 2315 mov(rscratch1, imm64); 2316 dup(Vd, T, rscratch1); 2317 } 2318 return; 2319 } 2320 2321 #ifdef ASSERT 2322 if (T == T8B || T == T16B) assert((imm64 & ~0xff) == 0, "extraneous bits (T8B/T16B)"); 2323 if (T == T4H || T == T8H) assert((imm64 & ~0xffff) == 0, "extraneous bits (T4H/T8H)"); 2324 if (T == T2S || T == T4S) assert((imm64 & ~0xffffffff) == 0, "extraneous bits (T2S/T4S)"); 2325 #endif 2326 int shift = operand_valid_for_movi_immediate(imm64, T); 2327 uint32_t imm32 = imm64 & 0xffffffffULL; 2328 if (shift >= 0) { 2329 movi(Vd, T, (imm32 >> shift) & 0xff, shift); 2330 } else { 2331 movw(rscratch1, imm32); 2332 dup(Vd, T, rscratch1); 2333 } 2334 } 2335 2336 void MacroAssembler::mov_immediate64(Register dst, uint64_t imm64) 2337 { 2338 #ifndef PRODUCT 2339 { 2340 char buffer[64]; 2341 snprintf(buffer, sizeof(buffer), "0x%" PRIX64, imm64); 2342 block_comment(buffer); 2343 } 2344 #endif 2345 if (operand_valid_for_logical_immediate(false, imm64)) { 2346 orr(dst, zr, imm64); 2347 } else { 2348 // we can use a combination of MOVZ or MOVN with 2349 // MOVK to build up the constant 2350 uint64_t imm_h[4]; 2351 int zero_count = 0; 2352 int neg_count = 0; 2353 int i; 2354 for (i = 0; i < 4; i++) { 2355 imm_h[i] = ((imm64 >> (i * 16)) & 0xffffL); 2356 if (imm_h[i] == 0) { 2357 zero_count++; 2358 } else if (imm_h[i] == 0xffffL) { 2359 neg_count++; 2360 } 2361 } 2362 if (zero_count == 4) { 2363 // one MOVZ will do 2364 movz(dst, 0); 2365 } else if (neg_count == 4) { 2366 // one MOVN will do 2367 movn(dst, 0); 2368 } else if (zero_count == 3) { 2369 for (i = 0; i < 4; i++) { 2370 if (imm_h[i] != 0L) { 2371 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2372 break; 2373 } 2374 } 2375 } else if (neg_count == 3) { 2376 // one MOVN will do 2377 for (int i = 0; i < 4; i++) { 2378 if (imm_h[i] != 0xffffL) { 2379 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2380 break; 2381 } 2382 } 2383 } else if (zero_count == 2) { 2384 // one MOVZ and one MOVK will do 2385 for (i = 0; i < 3; i++) { 2386 if (imm_h[i] != 0L) { 2387 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2388 i++; 2389 break; 2390 } 2391 } 2392 for (;i < 4; i++) { 2393 if (imm_h[i] != 0L) { 2394 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2395 } 2396 } 2397 } else if (neg_count == 2) { 2398 // one MOVN and one MOVK will do 2399 for (i = 0; i < 4; i++) { 2400 if (imm_h[i] != 0xffffL) { 2401 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2402 i++; 2403 break; 2404 } 2405 } 2406 for (;i < 4; i++) { 2407 if (imm_h[i] != 0xffffL) { 2408 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2409 } 2410 } 2411 } else if (zero_count == 1) { 2412 // one MOVZ and two MOVKs will do 2413 for (i = 
0; i < 4; i++) { 2414 if (imm_h[i] != 0L) { 2415 movz(dst, (uint32_t)imm_h[i], (i << 4)); 2416 i++; 2417 break; 2418 } 2419 } 2420 for (;i < 4; i++) { 2421 if (imm_h[i] != 0x0L) { 2422 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2423 } 2424 } 2425 } else if (neg_count == 1) { 2426 // one MOVN and two MOVKs will do 2427 for (i = 0; i < 4; i++) { 2428 if (imm_h[i] != 0xffffL) { 2429 movn(dst, (uint32_t)imm_h[i] ^ 0xffffL, (i << 4)); 2430 i++; 2431 break; 2432 } 2433 } 2434 for (;i < 4; i++) { 2435 if (imm_h[i] != 0xffffL) { 2436 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2437 } 2438 } 2439 } else { 2440 // use a MOVZ and 3 MOVKs (makes it easier to debug) 2441 movz(dst, (uint32_t)imm_h[0], 0); 2442 for (i = 1; i < 4; i++) { 2443 movk(dst, (uint32_t)imm_h[i], (i << 4)); 2444 } 2445 } 2446 } 2447 } 2448 2449 void MacroAssembler::mov_immediate32(Register dst, uint32_t imm32) 2450 { 2451 #ifndef PRODUCT 2452 { 2453 char buffer[64]; 2454 snprintf(buffer, sizeof(buffer), "0x%" PRIX32, imm32); 2455 block_comment(buffer); 2456 } 2457 #endif 2458 if (operand_valid_for_logical_immediate(true, imm32)) { 2459 orrw(dst, zr, imm32); 2460 } else { 2461 // we can use MOVZ, MOVN or two calls to MOVK to build up the 2462 // constant 2463 uint32_t imm_h[2]; 2464 imm_h[0] = imm32 & 0xffff; 2465 imm_h[1] = ((imm32 >> 16) & 0xffff); 2466 if (imm_h[0] == 0) { 2467 movzw(dst, imm_h[1], 16); 2468 } else if (imm_h[0] == 0xffff) { 2469 movnw(dst, imm_h[1] ^ 0xffff, 16); 2470 } else if (imm_h[1] == 0) { 2471 movzw(dst, imm_h[0], 0); 2472 } else if (imm_h[1] == 0xffff) { 2473 movnw(dst, imm_h[0] ^ 0xffff, 0); 2474 } else { 2475 // use a MOVZ and MOVK (makes it easier to debug) 2476 movzw(dst, imm_h[0], 0); 2477 movkw(dst, imm_h[1], 16); 2478 } 2479 } 2480 } 2481 2482 // Form an address from base + offset in Rd. Rd may or may 2483 // not actually be used: you must use the Address that is returned. 2484 // It is up to you to ensure that the shift provided matches the size 2485 // of your data. 2486 Address MacroAssembler::form_address(Register Rd, Register base, int64_t byte_offset, int shift) { 2487 if (Address::offset_ok_for_immed(byte_offset, shift)) 2488 // It fits; no need for any heroics 2489 return Address(base, byte_offset); 2490 2491 // Don't do anything clever with negative or misaligned offsets 2492 unsigned mask = (1 << shift) - 1; 2493 if (byte_offset < 0 || byte_offset & mask) { 2494 mov(Rd, byte_offset); 2495 add(Rd, base, Rd); 2496 return Address(Rd); 2497 } 2498 2499 // See if we can do this with two 12-bit offsets 2500 { 2501 uint64_t word_offset = byte_offset >> shift; 2502 uint64_t masked_offset = word_offset & 0xfff000; 2503 if (Address::offset_ok_for_immed(word_offset - masked_offset, 0) 2504 && Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) { 2505 add(Rd, base, masked_offset << shift); 2506 word_offset -= masked_offset; 2507 return Address(Rd, word_offset << shift); 2508 } 2509 } 2510 2511 // Do it the hard way 2512 mov(Rd, byte_offset); 2513 add(Rd, base, Rd); 2514 return Address(Rd); 2515 } 2516 2517 int MacroAssembler::corrected_idivl(Register result, Register ra, Register rb, 2518 bool want_remainder, Register scratch) 2519 { 2520 // Full implementation of Java idiv and irem. The function 2521 // returns the (pc) offset of the div instruction - may be needed 2522 // for implicit exceptions. 
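// In Java terms, a sketch of the two cases handled below:
//   quotient = ra / rb;
//   remainder = ra - (ra / rb) * rb; // the msub below: ra - quotient * rb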
2523 //
2524 // constraint : ra/rb =/= scratch
2525 // normal case
2526 //
2527 // input : ra: dividend
2528 // rb: divisor
2529 //
2530 // result: either
2531 // quotient (= ra idiv rb)
2532 // remainder (= ra irem rb)
2533
2534 assert(ra != scratch && rb != scratch, "reg cannot be scratch");
2535
2536 int idivl_offset = offset();
2537 if (! want_remainder) {
2538 sdivw(result, ra, rb);
2539 } else {
2540 sdivw(scratch, ra, rb);
2541 Assembler::msubw(result, scratch, rb, ra);
2542 }
2543
2544 return idivl_offset;
2545 }
2546
2547 int MacroAssembler::corrected_idivq(Register result, Register ra, Register rb,
2548 bool want_remainder, Register scratch)
2549 {
2550 // Full implementation of Java ldiv and lrem. The function
2551 // returns the (pc) offset of the div instruction - may be needed
2552 // for implicit exceptions.
2553 //
2554 // constraint : ra/rb =/= scratch
2555 // normal case
2556 //
2557 // input : ra: dividend
2558 // rb: divisor
2559 //
2560 // result: either
2561 // quotient (= ra idiv rb)
2562 // remainder (= ra irem rb)
2563
2564 assert(ra != scratch && rb != scratch, "reg cannot be scratch");
2565
2566 int idivq_offset = offset();
2567 if (! want_remainder) {
2568 sdiv(result, ra, rb);
2569 } else {
2570 sdiv(scratch, ra, rb);
2571 Assembler::msub(result, scratch, rb, ra);
2572 }
2573
2574 return idivq_offset;
2575 }
2576
2577 void MacroAssembler::membar(Membar_mask_bits order_constraint) {
2578 address prev = pc() - NativeMembar::instruction_size;
2579 address last = code()->last_insn();
2580 if (last != nullptr && nativeInstruction_at(last)->is_Membar() && prev == last) {
2581 NativeMembar *bar = NativeMembar_at(prev);
2582 if (AlwaysMergeDMB) {
2583 bar->set_kind(bar->get_kind() | order_constraint);
2584 BLOCK_COMMENT("merged membar(always)");
2585 return;
2586 }
2587 // Don't promote DMB ST|DMB LD to DMB (a full barrier) because
2588 // doing so would introduce a StoreLoad which the caller did not
2589 // intend
2590 if (bar->get_kind() == order_constraint
2591 || bar->get_kind() == AnyAny
2592 || order_constraint == AnyAny) {
2593 // We are merging two memory barrier instructions. On AArch64 we
2594 // can do this simply by ORing them together.
2595 bar->set_kind(bar->get_kind() | order_constraint);
2596 BLOCK_COMMENT("merged membar");
2597 return;
2598 } else {
2599 // In a special case like "DMB ST; DMB LD; DMB ST", the last DMB can be skipped.
2600 // We need to check the last 2 instructions.
2601 address prev2 = prev - NativeMembar::instruction_size;
2602 if (last != code()->last_label() && nativeInstruction_at(prev2)->is_Membar()) {
2603 NativeMembar *bar2 = NativeMembar_at(prev2);
2604 assert(bar2->get_kind() == order_constraint, "it should have been merged before");
2605 BLOCK_COMMENT("merged membar(elided)");
2606 return;
2607 }
2608 }
2609 }
2610 code()->set_last_insn(pc());
2611 dmb(Assembler::barrier(order_constraint));
2612 }
2613
2614 bool MacroAssembler::try_merge_ldst(Register rt, const Address &adr, size_t size_in_bytes, bool is_store) {
2615 if (ldst_can_merge(rt, adr, size_in_bytes, is_store)) {
2616 merge_ldst(rt, adr, size_in_bytes, is_store);
2617 code()->clear_last_insn();
2618 return true;
2619 } else {
2620 assert(size_in_bytes == 8 || size_in_bytes == 4, "only 8-byte or 4-byte loads/stores are supported.");
2621 const uint64_t mask = size_in_bytes - 1;
2622 if (adr.getMode() == Address::base_plus_offset &&
2623 (adr.offset() & mask) == 0) { // only supports base_plus_offset.
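// Record this naturally-aligned base+offset access so the next
// load/store emitted can try to pair with it.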
2624 code()->set_last_insn(pc()); 2625 } 2626 return false; 2627 } 2628 } 2629 2630 void MacroAssembler::ldr(Register Rx, const Address &adr) { 2631 // We always try to merge two adjacent loads into one ldp. 2632 if (!try_merge_ldst(Rx, adr, 8, false)) { 2633 Assembler::ldr(Rx, adr); 2634 } 2635 } 2636 2637 void MacroAssembler::ldrw(Register Rw, const Address &adr) { 2638 // We always try to merge two adjacent loads into one ldp. 2639 if (!try_merge_ldst(Rw, adr, 4, false)) { 2640 Assembler::ldrw(Rw, adr); 2641 } 2642 } 2643 2644 void MacroAssembler::str(Register Rx, const Address &adr) { 2645 // We always try to merge two adjacent stores into one stp. 2646 if (!try_merge_ldst(Rx, adr, 8, true)) { 2647 Assembler::str(Rx, adr); 2648 } 2649 } 2650 2651 void MacroAssembler::strw(Register Rw, const Address &adr) { 2652 // We always try to merge two adjacent stores into one stp. 2653 if (!try_merge_ldst(Rw, adr, 4, true)) { 2654 Assembler::strw(Rw, adr); 2655 } 2656 } 2657 2658 // MacroAssembler routines found actually to be needed 2659 2660 void MacroAssembler::push(Register src) 2661 { 2662 str(src, Address(pre(esp, -1 * wordSize))); 2663 } 2664 2665 void MacroAssembler::pop(Register dst) 2666 { 2667 ldr(dst, Address(post(esp, 1 * wordSize))); 2668 } 2669 2670 // Note: load_unsigned_short used to be called load_unsigned_word. 2671 int MacroAssembler::load_unsigned_short(Register dst, Address src) { 2672 int off = offset(); 2673 ldrh(dst, src); 2674 return off; 2675 } 2676 2677 int MacroAssembler::load_unsigned_byte(Register dst, Address src) { 2678 int off = offset(); 2679 ldrb(dst, src); 2680 return off; 2681 } 2682 2683 int MacroAssembler::load_signed_short(Register dst, Address src) { 2684 int off = offset(); 2685 ldrsh(dst, src); 2686 return off; 2687 } 2688 2689 int MacroAssembler::load_signed_byte(Register dst, Address src) { 2690 int off = offset(); 2691 ldrsb(dst, src); 2692 return off; 2693 } 2694 2695 int MacroAssembler::load_signed_short32(Register dst, Address src) { 2696 int off = offset(); 2697 ldrshw(dst, src); 2698 return off; 2699 } 2700 2701 int MacroAssembler::load_signed_byte32(Register dst, Address src) { 2702 int off = offset(); 2703 ldrsbw(dst, src); 2704 return off; 2705 } 2706 2707 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) { 2708 switch (size_in_bytes) { 2709 case 8: ldr(dst, src); break; 2710 case 4: ldrw(dst, src); break; 2711 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break; 2712 case 1: is_signed ? 
load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break; 2713 default: ShouldNotReachHere(); 2714 } 2715 } 2716 2717 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes) { 2718 switch (size_in_bytes) { 2719 case 8: str(src, dst); break; 2720 case 4: strw(src, dst); break; 2721 case 2: strh(src, dst); break; 2722 case 1: strb(src, dst); break; 2723 default: ShouldNotReachHere(); 2724 } 2725 } 2726 2727 void MacroAssembler::decrementw(Register reg, int value) 2728 { 2729 if (value < 0) { incrementw(reg, -value); return; } 2730 if (value == 0) { return; } 2731 if (value < (1 << 12)) { subw(reg, reg, value); return; } 2732 /* else */ { 2733 guarantee(reg != rscratch2, "invalid dst for register decrement"); 2734 movw(rscratch2, (unsigned)value); 2735 subw(reg, reg, rscratch2); 2736 } 2737 } 2738 2739 void MacroAssembler::decrement(Register reg, int value) 2740 { 2741 if (value < 0) { increment(reg, -value); return; } 2742 if (value == 0) { return; } 2743 if (value < (1 << 12)) { sub(reg, reg, value); return; } 2744 /* else */ { 2745 assert(reg != rscratch2, "invalid dst for register decrement"); 2746 mov(rscratch2, (uint64_t)value); 2747 sub(reg, reg, rscratch2); 2748 } 2749 } 2750 2751 void MacroAssembler::decrementw(Address dst, int value) 2752 { 2753 assert(!dst.uses(rscratch1), "invalid dst for address decrement"); 2754 if (dst.getMode() == Address::literal) { 2755 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2756 lea(rscratch2, dst); 2757 dst = Address(rscratch2); 2758 } 2759 ldrw(rscratch1, dst); 2760 decrementw(rscratch1, value); 2761 strw(rscratch1, dst); 2762 } 2763 2764 void MacroAssembler::decrement(Address dst, int value) 2765 { 2766 assert(!dst.uses(rscratch1), "invalid address for decrement"); 2767 if (dst.getMode() == Address::literal) { 2768 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2769 lea(rscratch2, dst); 2770 dst = Address(rscratch2); 2771 } 2772 ldr(rscratch1, dst); 2773 decrement(rscratch1, value); 2774 str(rscratch1, dst); 2775 } 2776 2777 void MacroAssembler::incrementw(Register reg, int value) 2778 { 2779 if (value < 0) { decrementw(reg, -value); return; } 2780 if (value == 0) { return; } 2781 if (value < (1 << 12)) { addw(reg, reg, value); return; } 2782 /* else */ { 2783 assert(reg != rscratch2, "invalid dst for register increment"); 2784 movw(rscratch2, (unsigned)value); 2785 addw(reg, reg, rscratch2); 2786 } 2787 } 2788 2789 void MacroAssembler::increment(Register reg, int value) 2790 { 2791 if (value < 0) { decrement(reg, -value); return; } 2792 if (value == 0) { return; } 2793 if (value < (1 << 12)) { add(reg, reg, value); return; } 2794 /* else */ { 2795 assert(reg != rscratch2, "invalid dst for register increment"); 2796 movw(rscratch2, (unsigned)value); 2797 add(reg, reg, rscratch2); 2798 } 2799 } 2800 2801 void MacroAssembler::incrementw(Address dst, int value) 2802 { 2803 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 2804 if (dst.getMode() == Address::literal) { 2805 assert(abs(value) < (1 << 12), "invalid value and address mode combination"); 2806 lea(rscratch2, dst); 2807 dst = Address(rscratch2); 2808 } 2809 ldrw(rscratch1, dst); 2810 incrementw(rscratch1, value); 2811 strw(rscratch1, dst); 2812 } 2813 2814 void MacroAssembler::increment(Address dst, int value) 2815 { 2816 assert(!dst.uses(rscratch1), "invalid dst for address increment"); 2817 if (dst.getMode() == Address::literal) { 2818 assert(abs(value) < (1 << 12), 
"invalid value and address mode combination"); 2819 lea(rscratch2, dst); 2820 dst = Address(rscratch2); 2821 } 2822 ldr(rscratch1, dst); 2823 increment(rscratch1, value); 2824 str(rscratch1, dst); 2825 } 2826 2827 // Push lots of registers in the bit set supplied. Don't push sp. 2828 // Return the number of words pushed 2829 int MacroAssembler::push(unsigned int bitset, Register stack) { 2830 int words_pushed = 0; 2831 2832 // Scan bitset to accumulate register pairs 2833 unsigned char regs[32]; 2834 int count = 0; 2835 for (int reg = 0; reg <= 30; reg++) { 2836 if (1 & bitset) 2837 regs[count++] = reg; 2838 bitset >>= 1; 2839 } 2840 regs[count++] = zr->raw_encoding(); 2841 count &= ~1; // Only push an even number of regs 2842 2843 if (count) { 2844 stp(as_Register(regs[0]), as_Register(regs[1]), 2845 Address(pre(stack, -count * wordSize))); 2846 words_pushed += 2; 2847 } 2848 for (int i = 2; i < count; i += 2) { 2849 stp(as_Register(regs[i]), as_Register(regs[i+1]), 2850 Address(stack, i * wordSize)); 2851 words_pushed += 2; 2852 } 2853 2854 assert(words_pushed == count, "oops, pushed != count"); 2855 2856 return count; 2857 } 2858 2859 int MacroAssembler::pop(unsigned int bitset, Register stack) { 2860 int words_pushed = 0; 2861 2862 // Scan bitset to accumulate register pairs 2863 unsigned char regs[32]; 2864 int count = 0; 2865 for (int reg = 0; reg <= 30; reg++) { 2866 if (1 & bitset) 2867 regs[count++] = reg; 2868 bitset >>= 1; 2869 } 2870 regs[count++] = zr->raw_encoding(); 2871 count &= ~1; 2872 2873 for (int i = 2; i < count; i += 2) { 2874 ldp(as_Register(regs[i]), as_Register(regs[i+1]), 2875 Address(stack, i * wordSize)); 2876 words_pushed += 2; 2877 } 2878 if (count) { 2879 ldp(as_Register(regs[0]), as_Register(regs[1]), 2880 Address(post(stack, count * wordSize))); 2881 words_pushed += 2; 2882 } 2883 2884 assert(words_pushed == count, "oops, pushed != count"); 2885 2886 return count; 2887 } 2888 2889 // Push lots of registers in the bit set supplied. Don't push sp. 
2890 // Return the number of dwords pushed 2891 int MacroAssembler::push_fp(unsigned int bitset, Register stack, FpPushPopMode mode) { 2892 int words_pushed = 0; 2893 bool use_sve = false; 2894 int sve_vector_size_in_bytes = 0; 2895 2896 #ifdef COMPILER2 2897 use_sve = Matcher::supports_scalable_vector(); 2898 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); 2899 #endif 2900 2901 // Scan bitset to accumulate register pairs 2902 unsigned char regs[32]; 2903 int count = 0; 2904 for (int reg = 0; reg <= 31; reg++) { 2905 if (1 & bitset) 2906 regs[count++] = reg; 2907 bitset >>= 1; 2908 } 2909 2910 if (count == 0) { 2911 return 0; 2912 } 2913 2914 if (mode == PushPopFull) { 2915 if (use_sve && sve_vector_size_in_bytes > 16) { 2916 mode = PushPopSVE; 2917 } else { 2918 mode = PushPopNeon; 2919 } 2920 } 2921 2922 #ifndef PRODUCT 2923 { 2924 char buffer[48]; 2925 if (mode == PushPopSVE) { 2926 snprintf(buffer, sizeof(buffer), "push_fp: %d SVE registers", count); 2927 } else if (mode == PushPopNeon) { 2928 snprintf(buffer, sizeof(buffer), "push_fp: %d Neon registers", count); 2929 } else { 2930 snprintf(buffer, sizeof(buffer), "push_fp: %d fp registers", count); 2931 } 2932 block_comment(buffer); 2933 } 2934 #endif 2935 2936 if (mode == PushPopSVE) { 2937 sub(stack, stack, sve_vector_size_in_bytes * count); 2938 for (int i = 0; i < count; i++) { 2939 sve_str(as_FloatRegister(regs[i]), Address(stack, i)); 2940 } 2941 return count * sve_vector_size_in_bytes / 8; 2942 } 2943 2944 if (mode == PushPopNeon) { 2945 if (count == 1) { 2946 strq(as_FloatRegister(regs[0]), Address(pre(stack, -wordSize * 2))); 2947 return 2; 2948 } 2949 2950 bool odd = (count & 1) == 1; 2951 int push_slots = count + (odd ? 1 : 0); 2952 2953 // Always pushing full 128 bit registers. 2954 stpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize * 2))); 2955 words_pushed += 2; 2956 2957 for (int i = 2; i + 1 < count; i += 2) { 2958 stpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2)); 2959 words_pushed += 2; 2960 } 2961 2962 if (odd) { 2963 strq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2)); 2964 words_pushed++; 2965 } 2966 2967 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count); 2968 return count * 2; 2969 } 2970 2971 if (mode == PushPopFp) { 2972 bool odd = (count & 1) == 1; 2973 int push_slots = count + (odd ? 
1 : 0); 2974 2975 if (count == 1) { 2976 // Stack pointer must be 16 bytes aligned 2977 strd(as_FloatRegister(regs[0]), Address(pre(stack, -push_slots * wordSize))); 2978 return 1; 2979 } 2980 2981 stpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(pre(stack, -push_slots * wordSize))); 2982 words_pushed += 2; 2983 2984 for (int i = 2; i + 1 < count; i += 2) { 2985 stpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize)); 2986 words_pushed += 2; 2987 } 2988 2989 if (odd) { 2990 // Stack pointer must be 16 bytes aligned 2991 strd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize)); 2992 words_pushed++; 2993 } 2994 2995 assert(words_pushed == count, "oops, pushed != count"); 2996 2997 return count; 2998 } 2999 3000 return 0; 3001 } 3002 3003 // Return the number of dwords popped 3004 int MacroAssembler::pop_fp(unsigned int bitset, Register stack, FpPushPopMode mode) { 3005 int words_pushed = 0; 3006 bool use_sve = false; 3007 int sve_vector_size_in_bytes = 0; 3008 3009 #ifdef COMPILER2 3010 use_sve = Matcher::supports_scalable_vector(); 3011 sve_vector_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE); 3012 #endif 3013 // Scan bitset to accumulate register pairs 3014 unsigned char regs[32]; 3015 int count = 0; 3016 for (int reg = 0; reg <= 31; reg++) { 3017 if (1 & bitset) 3018 regs[count++] = reg; 3019 bitset >>= 1; 3020 } 3021 3022 if (count == 0) { 3023 return 0; 3024 } 3025 3026 if (mode == PushPopFull) { 3027 if (use_sve && sve_vector_size_in_bytes > 16) { 3028 mode = PushPopSVE; 3029 } else { 3030 mode = PushPopNeon; 3031 } 3032 } 3033 3034 #ifndef PRODUCT 3035 { 3036 char buffer[48]; 3037 if (mode == PushPopSVE) { 3038 snprintf(buffer, sizeof(buffer), "pop_fp: %d SVE registers", count); 3039 } else if (mode == PushPopNeon) { 3040 snprintf(buffer, sizeof(buffer), "pop_fp: %d Neon registers", count); 3041 } else { 3042 snprintf(buffer, sizeof(buffer), "pop_fp: %d fp registers", count); 3043 } 3044 block_comment(buffer); 3045 } 3046 #endif 3047 3048 if (mode == PushPopSVE) { 3049 for (int i = count - 1; i >= 0; i--) { 3050 sve_ldr(as_FloatRegister(regs[i]), Address(stack, i)); 3051 } 3052 add(stack, stack, sve_vector_size_in_bytes * count); 3053 return count * sve_vector_size_in_bytes / 8; 3054 } 3055 3056 if (mode == PushPopNeon) { 3057 if (count == 1) { 3058 ldrq(as_FloatRegister(regs[0]), Address(post(stack, wordSize * 2))); 3059 return 2; 3060 } 3061 3062 bool odd = (count & 1) == 1; 3063 int push_slots = count + (odd ? 1 : 0); 3064 3065 if (odd) { 3066 ldrq(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize * 2)); 3067 words_pushed++; 3068 } 3069 3070 for (int i = 2; i + 1 < count; i += 2) { 3071 ldpq(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize * 2)); 3072 words_pushed += 2; 3073 } 3074 3075 ldpq(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize * 2))); 3076 words_pushed += 2; 3077 3078 assert(words_pushed == count, "oops, pushed(%d) != count(%d)", words_pushed, count); 3079 3080 return count * 2; 3081 } 3082 3083 if (mode == PushPopFp) { 3084 bool odd = (count & 1) == 1; 3085 int push_slots = count + (odd ? 
1 : 0); 3086 3087 if (count == 1) { 3088 ldrd(as_FloatRegister(regs[0]), Address(post(stack, push_slots * wordSize))); 3089 return 1; 3090 } 3091 3092 if (odd) { 3093 ldrd(as_FloatRegister(regs[count - 1]), Address(stack, (count - 1) * wordSize)); 3094 words_pushed++; 3095 } 3096 3097 for (int i = 2; i + 1 < count; i += 2) { 3098 ldpd(as_FloatRegister(regs[i]), as_FloatRegister(regs[i+1]), Address(stack, i * wordSize)); 3099 words_pushed += 2; 3100 } 3101 3102 ldpd(as_FloatRegister(regs[0]), as_FloatRegister(regs[1]), Address(post(stack, push_slots * wordSize))); 3103 words_pushed += 2; 3104 3105 assert(words_pushed == count, "oops, pushed != count"); 3106 3107 return count; 3108 } 3109 3110 return 0; 3111 } 3112 3113 // Return the number of dwords pushed 3114 int MacroAssembler::push_p(unsigned int bitset, Register stack) { 3115 bool use_sve = false; 3116 int sve_predicate_size_in_slots = 0; 3117 3118 #ifdef COMPILER2 3119 use_sve = Matcher::supports_scalable_vector(); 3120 if (use_sve) { 3121 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots(); 3122 } 3123 #endif 3124 3125 if (!use_sve) { 3126 return 0; 3127 } 3128 3129 unsigned char regs[PRegister::number_of_registers]; 3130 int count = 0; 3131 for (int reg = 0; reg < PRegister::number_of_registers; reg++) { 3132 if (1 & bitset) 3133 regs[count++] = reg; 3134 bitset >>= 1; 3135 } 3136 3137 if (count == 0) { 3138 return 0; 3139 } 3140 3141 int total_push_bytes = align_up(sve_predicate_size_in_slots * 3142 VMRegImpl::stack_slot_size * count, 16); 3143 sub(stack, stack, total_push_bytes); 3144 for (int i = 0; i < count; i++) { 3145 sve_str(as_PRegister(regs[i]), Address(stack, i)); 3146 } 3147 return total_push_bytes / 8; 3148 } 3149 3150 // Return the number of dwords popped 3151 int MacroAssembler::pop_p(unsigned int bitset, Register stack) { 3152 bool use_sve = false; 3153 int sve_predicate_size_in_slots = 0; 3154 3155 #ifdef COMPILER2 3156 use_sve = Matcher::supports_scalable_vector(); 3157 if (use_sve) { 3158 sve_predicate_size_in_slots = Matcher::scalable_predicate_reg_slots(); 3159 } 3160 #endif 3161 3162 if (!use_sve) { 3163 return 0; 3164 } 3165 3166 unsigned char regs[PRegister::number_of_registers]; 3167 int count = 0; 3168 for (int reg = 0; reg < PRegister::number_of_registers; reg++) { 3169 if (1 & bitset) 3170 regs[count++] = reg; 3171 bitset >>= 1; 3172 } 3173 3174 if (count == 0) { 3175 return 0; 3176 } 3177 3178 int total_pop_bytes = align_up(sve_predicate_size_in_slots * 3179 VMRegImpl::stack_slot_size * count, 16); 3180 for (int i = count - 1; i >= 0; i--) { 3181 sve_ldr(as_PRegister(regs[i]), Address(stack, i)); 3182 } 3183 add(stack, stack, total_pop_bytes); 3184 return total_pop_bytes / 8; 3185 } 3186 3187 #ifdef ASSERT 3188 void MacroAssembler::verify_heapbase(const char* msg) { 3189 #if 0 3190 assert (UseCompressedOops || UseCompressedClassPointers, "should be compressed"); 3191 assert (Universe::heap() != nullptr, "java heap should be initialized"); 3192 if (!UseCompressedOops || Universe::ptr_base() == nullptr) { 3193 // rheapbase is allocated as general register 3194 return; 3195 } 3196 if (CheckCompressedOops) { 3197 Label ok; 3198 push(1 << rscratch1->encoding(), sp); // cmpptr trashes rscratch1 3199 cmpptr(rheapbase, ExternalAddress(CompressedOops::base_addr())); 3200 br(Assembler::EQ, ok); 3201 stop(msg); 3202 bind(ok); 3203 pop(1 << rscratch1->encoding(), sp); 3204 } 3205 #endif 3206 } 3207 #endif 3208 3209 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) { 
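// Tag dispatch, roughly (tag encodings per the JNIHandles::TypeTag
// STATIC_ASSERTs in this file: weak_global == 0b01, global == 0b10):
//   if (value == nullptr) -> leave null as-is
//   else if ((value & tag_mask) == 0) -> local handle: value = *value
//   else if (value & 0b01) -> jweak: load with phantom-ref barrier
//   else -> global handle: load from value minus the global tag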
3210 assert_different_registers(value, tmp1, tmp2); 3211 Label done, tagged, weak_tagged; 3212 3213 cbz(value, done); // Use null as-is. 3214 tst(value, JNIHandles::tag_mask); // Test for tag. 3215 br(Assembler::NE, tagged); 3216 3217 // Resolve local handle 3218 access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp1, tmp2); 3219 verify_oop(value); 3220 b(done); 3221 3222 bind(tagged); 3223 STATIC_ASSERT(JNIHandles::TypeTag::weak_global == 0b1); 3224 tbnz(value, 0, weak_tagged); // Test for weak tag. 3225 3226 // Resolve global handle 3227 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2); 3228 verify_oop(value); 3229 b(done); 3230 3231 bind(weak_tagged); 3232 // Resolve jweak. 3233 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 3234 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp1, tmp2); 3235 verify_oop(value); 3236 3237 bind(done); 3238 } 3239 3240 void MacroAssembler::resolve_global_jobject(Register value, Register tmp1, Register tmp2) { 3241 assert_different_registers(value, tmp1, tmp2); 3242 Label done; 3243 3244 cbz(value, done); // Use null as-is. 3245 3246 #ifdef ASSERT 3247 { 3248 STATIC_ASSERT(JNIHandles::TypeTag::global == 0b10); 3249 Label valid_global_tag; 3250 tbnz(value, 1, valid_global_tag); // Test for global tag 3251 stop("non global jobject using resolve_global_jobject"); 3252 bind(valid_global_tag); 3253 } 3254 #endif 3255 3256 // Resolve global handle 3257 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp1, tmp2); 3258 verify_oop(value); 3259 3260 bind(done); 3261 } 3262 3263 void MacroAssembler::stop(const char* msg) { 3264 BLOCK_COMMENT(msg); 3265 // load msg into r0 so we can access it from the signal handler 3266 // ExternalAddress enables saving and restoring via the code cache 3267 lea(c_rarg0, ExternalAddress((address) msg)); 3268 dcps1(0xdeae); 3269 AOTCodeCache::add_C_string(msg); 3270 } 3271 3272 void MacroAssembler::unimplemented(const char* what) { 3273 const char* buf = nullptr; 3274 { 3275 ResourceMark rm; 3276 stringStream ss; 3277 ss.print("unimplemented: %s", what); 3278 buf = code_string(ss.as_string()); 3279 } 3280 stop(buf); 3281 } 3282 3283 void MacroAssembler::_assert_asm(Assembler::Condition cc, const char* msg) { 3284 #ifdef ASSERT 3285 Label OK; 3286 br(cc, OK); 3287 stop(msg); 3288 bind(OK); 3289 #endif 3290 } 3291 3292 // If a constant does not fit in an immediate field, generate some 3293 // number of MOV instructions and then perform the operation. 3294 void MacroAssembler::wrap_add_sub_imm_insn(Register Rd, Register Rn, uint64_t imm, 3295 add_sub_imm_insn insn1, 3296 add_sub_reg_insn insn2, 3297 bool is32) { 3298 assert(Rd != zr, "Rd = zr and not setting flags?"); 3299 bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm); 3300 if (fits) { 3301 (this->*insn1)(Rd, Rn, imm); 3302 } else { 3303 if (uabs(imm) < (1 << 24)) { 3304 (this->*insn1)(Rd, Rn, imm & -(1 << 12)); 3305 (this->*insn1)(Rd, Rd, imm & ((1 << 12)-1)); 3306 } else { 3307 assert_different_registers(Rd, Rn); 3308 mov(Rd, imm); 3309 (this->*insn2)(Rd, Rn, Rd, LSL, 0); 3310 } 3311 } 3312 } 3313 3314 // Separate vsn which sets the flags. Optimisations are more restricted 3315 // because we must set the flags correctly. 
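// In particular, the two-instruction split used above (adding
// imm & -(1 << 12) and then imm & 0xfff) is not available here: the
// flags would describe only the second partial addition rather than
// the whole operation.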
3316 void MacroAssembler::wrap_adds_subs_imm_insn(Register Rd, Register Rn, uint64_t imm, 3317 add_sub_imm_insn insn1, 3318 add_sub_reg_insn insn2, 3319 bool is32) { 3320 bool fits = operand_valid_for_add_sub_immediate(is32 ? (int32_t)imm : imm); 3321 if (fits) { 3322 (this->*insn1)(Rd, Rn, imm); 3323 } else { 3324 assert_different_registers(Rd, Rn); 3325 assert(Rd != zr, "overflow in immediate operand"); 3326 mov(Rd, imm); 3327 (this->*insn2)(Rd, Rn, Rd, LSL, 0); 3328 } 3329 } 3330 3331 3332 void MacroAssembler::add(Register Rd, Register Rn, RegisterOrConstant increment) { 3333 if (increment.is_register()) { 3334 add(Rd, Rn, increment.as_register()); 3335 } else { 3336 add(Rd, Rn, increment.as_constant()); 3337 } 3338 } 3339 3340 void MacroAssembler::addw(Register Rd, Register Rn, RegisterOrConstant increment) { 3341 if (increment.is_register()) { 3342 addw(Rd, Rn, increment.as_register()); 3343 } else { 3344 addw(Rd, Rn, increment.as_constant()); 3345 } 3346 } 3347 3348 void MacroAssembler::sub(Register Rd, Register Rn, RegisterOrConstant decrement) { 3349 if (decrement.is_register()) { 3350 sub(Rd, Rn, decrement.as_register()); 3351 } else { 3352 sub(Rd, Rn, decrement.as_constant()); 3353 } 3354 } 3355 3356 void MacroAssembler::subw(Register Rd, Register Rn, RegisterOrConstant decrement) { 3357 if (decrement.is_register()) { 3358 subw(Rd, Rn, decrement.as_register()); 3359 } else { 3360 subw(Rd, Rn, decrement.as_constant()); 3361 } 3362 } 3363 3364 void MacroAssembler::reinit_heapbase() 3365 { 3366 if (UseCompressedOops) { 3367 if (Universe::is_fully_initialized() && !AOTCodeCache::is_on_for_write()) { 3368 mov(rheapbase, CompressedOops::base()); 3369 } else { 3370 lea(rheapbase, ExternalAddress(CompressedOops::base_addr())); 3371 ldr(rheapbase, Address(rheapbase)); 3372 } 3373 } 3374 } 3375 3376 // this simulates the behaviour of the x86 cmpxchg instruction using a 3377 // load linked/store conditional pair. we use the acquire/release 3378 // versions of these instructions so that we flush pending writes as 3379 // per Java semantics. 3380 3381 // n.b the x86 version assumes the old value to be compared against is 3382 // in rax and updates rax with the value located in memory if the 3383 // cmpxchg fails. we supply a register for the old value explicitly 3384 3385 // the aarch64 load linked/store conditional instructions do not 3386 // accept an offset. so, unlike x86, we must provide a plain register 3387 // to identify the memory word to be compared/exchanged rather than a 3388 // register+offset Address. 3389 3390 void MacroAssembler::cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp, 3391 Label &succeed, Label *fail) { 3392 // oldv holds comparison value 3393 // newv holds value to write in exchange 3394 // addr identifies memory word to compare against/update 3395 if (UseLSE) { 3396 mov(tmp, oldv); 3397 casal(Assembler::xword, oldv, newv, addr); 3398 cmp(tmp, oldv); 3399 br(Assembler::EQ, succeed); 3400 membar(AnyAny); 3401 } else { 3402 Label retry_load, nope; 3403 prfm(Address(addr), PSTL1STRM); 3404 bind(retry_load); 3405 // flush and load exclusive from the memory location 3406 // and fail if it is not what we expect 3407 ldaxr(tmp, addr); 3408 cmp(tmp, oldv); 3409 br(Assembler::NE, nope); 3410 // if we store+flush with no intervening write tmp will be zero 3411 stlxr(tmp, newv, addr); 3412 cbzw(tmp, succeed); 3413 // retry so we only ever return after a load fails to compare 3414 // ensures we don't return a stale value after a failed write. 
3415     b(retry_load);
3416     // if the memory word differs we return it in oldv and signal a fail
3417     bind(nope);
3418     membar(AnyAny);
3419     mov(oldv, tmp);
3420   }
3421   if (fail)
3422     b(*fail);
3423 }
3424 
3425 void MacroAssembler::cmpxchg_obj_header(Register oldv, Register newv, Register obj, Register tmp,
3426                                         Label &succeed, Label *fail) {
3427   assert(oopDesc::mark_offset_in_bytes() == 0, "assumption");
3428   cmpxchgptr(oldv, newv, obj, tmp, succeed, fail);
3429 }
3430 
3431 void MacroAssembler::cmpxchgw(Register oldv, Register newv, Register addr, Register tmp,
3432                               Label &succeed, Label *fail) {
3433   // oldv holds comparison value
3434   // newv holds value to write in exchange
3435   // addr identifies memory word to compare against/update
3436   // tmp returns 0/1 for success/failure
3437   if (UseLSE) {
3438     mov(tmp, oldv);
3439     casal(Assembler::word, oldv, newv, addr);
3440     cmp(tmp, oldv);
3441     br(Assembler::EQ, succeed);
3442     membar(AnyAny);
3443   } else {
3444     Label retry_load, nope;
3445     prfm(Address(addr), PSTL1STRM);
3446     bind(retry_load);
3447     // flush and load exclusive from the memory location
3448     // and fail if it is not what we expect
3449     ldaxrw(tmp, addr);
3450     cmp(tmp, oldv);
3451     br(Assembler::NE, nope);
3452     // if we store+flush with no intervening write tmp will be zero
3453     stlxrw(tmp, newv, addr);
3454     cbzw(tmp, succeed);
3455     // retry so we only ever return after a load fails to compare
3456     // ensures we don't return a stale value after a failed write.
3457     b(retry_load);
3458     // if the memory word differs we return it in oldv and signal a fail
3459     bind(nope);
3460     membar(AnyAny);
3461     mov(oldv, tmp);
3462   }
3463   if (fail)
3464     b(*fail);
3465 }
3466 
3467 // A generic CAS; success or failure is in the EQ flag. A weak CAS
3468 // doesn't retry and may fail spuriously. If the oldval is wanted,
3469 // pass a register for the result; otherwise pass noreg.
3470 
3471 // Clobbers rscratch1
3472 void MacroAssembler::cmpxchg(Register addr, Register expected,
3473                              Register new_val,
3474                              enum operand_size size,
3475                              bool acquire, bool release,
3476                              bool weak,
3477                              Register result) {
3478   if (result == noreg) result = rscratch1;
3479   BLOCK_COMMENT("cmpxchg {");
3480   if (UseLSE) {
3481     mov(result, expected);
3482     lse_cas(result, new_val, addr, size, acquire, release, /*not_pair*/ true);
3483     compare_eq(result, expected, size);
3484 #ifdef ASSERT
3485     // Poison rscratch1 which is written on !UseLSE branch
3486     mov(rscratch1, 0x1f1f1f1f1f1f1f1f);
3487 #endif
3488   } else {
3489     Label retry_load, done;
3490     prfm(Address(addr), PSTL1STRM);
3491     bind(retry_load);
3492     load_exclusive(result, addr, size, acquire);
3493     compare_eq(result, expected, size);
3494     br(Assembler::NE, done);
3495     store_exclusive(rscratch1, new_val, addr, size, release);
3496     if (weak) {
3497       cmpw(rscratch1, 0u); // If the store fails, return NE to our caller.
3498     } else {
3499       cbnzw(rscratch1, retry_load);
3500     }
3501     bind(done);
3502   }
3503   BLOCK_COMMENT("} cmpxchg");
3504 }
3505 
3506 // A generic comparison. Only compares for equality, clobbers rscratch1.
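// For sub-word sizes there is no cmp variant that ignores the unused upper
// bits of the registers, so equality is computed by eor-ing the operands and
// testing only the low 16 (halfword) or 8 (byte) bits of the result.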
3507 void MacroAssembler::compare_eq(Register rm, Register rn, enum operand_size size) { 3508 if (size == xword) { 3509 cmp(rm, rn); 3510 } else if (size == word) { 3511 cmpw(rm, rn); 3512 } else if (size == halfword) { 3513 eorw(rscratch1, rm, rn); 3514 ands(zr, rscratch1, 0xffff); 3515 } else if (size == byte) { 3516 eorw(rscratch1, rm, rn); 3517 ands(zr, rscratch1, 0xff); 3518 } else { 3519 ShouldNotReachHere(); 3520 } 3521 } 3522 3523 3524 static bool different(Register a, RegisterOrConstant b, Register c) { 3525 if (b.is_constant()) 3526 return a != c; 3527 else 3528 return a != b.as_register() && a != c && b.as_register() != c; 3529 } 3530 3531 #define ATOMIC_OP(NAME, LDXR, OP, IOP, AOP, STXR, sz) \ 3532 void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \ 3533 if (UseLSE) { \ 3534 prev = prev->is_valid() ? prev : zr; \ 3535 if (incr.is_register()) { \ 3536 AOP(sz, incr.as_register(), prev, addr); \ 3537 } else { \ 3538 mov(rscratch2, incr.as_constant()); \ 3539 AOP(sz, rscratch2, prev, addr); \ 3540 } \ 3541 return; \ 3542 } \ 3543 Register result = rscratch2; \ 3544 if (prev->is_valid()) \ 3545 result = different(prev, incr, addr) ? prev : rscratch2; \ 3546 \ 3547 Label retry_load; \ 3548 prfm(Address(addr), PSTL1STRM); \ 3549 bind(retry_load); \ 3550 LDXR(result, addr); \ 3551 OP(rscratch1, result, incr); \ 3552 STXR(rscratch2, rscratch1, addr); \ 3553 cbnzw(rscratch2, retry_load); \ 3554 if (prev->is_valid() && prev != result) { \ 3555 IOP(prev, rscratch1, incr); \ 3556 } \ 3557 } 3558 3559 ATOMIC_OP(add, ldxr, add, sub, ldadd, stxr, Assembler::xword) 3560 ATOMIC_OP(addw, ldxrw, addw, subw, ldadd, stxrw, Assembler::word) 3561 ATOMIC_OP(addal, ldaxr, add, sub, ldaddal, stlxr, Assembler::xword) 3562 ATOMIC_OP(addalw, ldaxrw, addw, subw, ldaddal, stlxrw, Assembler::word) 3563 3564 #undef ATOMIC_OP 3565 3566 #define ATOMIC_XCHG(OP, AOP, LDXR, STXR, sz) \ 3567 void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \ 3568 if (UseLSE) { \ 3569 prev = prev->is_valid() ? prev : zr; \ 3570 AOP(sz, newv, prev, addr); \ 3571 return; \ 3572 } \ 3573 Register result = rscratch2; \ 3574 if (prev->is_valid()) \ 3575 result = different(prev, newv, addr) ? 
prev : rscratch2; \
3576  \
3577   Label retry_load; \
3578   prfm(Address(addr), PSTL1STRM); \
3579   bind(retry_load); \
3580   LDXR(result, addr); \
3581   STXR(rscratch1, newv, addr); \
3582   cbnzw(rscratch1, retry_load); \
3583   if (prev->is_valid() && prev != result) \
3584     mov(prev, result); \
3585 }
3586 
3587 ATOMIC_XCHG(xchg, swp, ldxr, stxr, Assembler::xword)
3588 ATOMIC_XCHG(xchgw, swp, ldxrw, stxrw, Assembler::word)
3589 ATOMIC_XCHG(xchgl, swpl, ldxr, stlxr, Assembler::xword)
3590 ATOMIC_XCHG(xchglw, swpl, ldxrw, stlxrw, Assembler::word)
3591 ATOMIC_XCHG(xchgal, swpal, ldaxr, stlxr, Assembler::xword)
3592 ATOMIC_XCHG(xchgalw, swpal, ldaxrw, stlxrw, Assembler::word)
3593 
3594 #undef ATOMIC_XCHG
3595 
3596 #ifndef PRODUCT
3597 extern "C" void findpc(intptr_t x);
3598 #endif
3599 
3600 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[])
3601 {
3602   // In order to get locks to work, we need to fake an in_VM state
3603   if (ShowMessageBoxOnError) {
3604     JavaThread* thread = JavaThread::current();
3605     JavaThreadState saved_state = thread->thread_state();
3606     thread->set_thread_state(_thread_in_vm);
3607 #ifndef PRODUCT
3608     if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
3609       ttyLocker ttyl;
3610       BytecodeCounter::print();
3611     }
3612 #endif
3613     if (os::message_box(msg, "Execution stopped, print registers?")) {
3614       ttyLocker ttyl;
3615       tty->print_cr(" pc = 0x%016" PRIx64, pc);
3616 #ifndef PRODUCT
3617       tty->cr();
3618       findpc(pc);
3619       tty->cr();
3620 #endif
3621       tty->print_cr(" r0 = 0x%016" PRIx64, regs[0]);
3622       tty->print_cr(" r1 = 0x%016" PRIx64, regs[1]);
3623       tty->print_cr(" r2 = 0x%016" PRIx64, regs[2]);
3624       tty->print_cr(" r3 = 0x%016" PRIx64, regs[3]);
3625       tty->print_cr(" r4 = 0x%016" PRIx64, regs[4]);
3626       tty->print_cr(" r5 = 0x%016" PRIx64, regs[5]);
3627       tty->print_cr(" r6 = 0x%016" PRIx64, regs[6]);
3628       tty->print_cr(" r7 = 0x%016" PRIx64, regs[7]);
3629       tty->print_cr(" r8 = 0x%016" PRIx64, regs[8]);
3630       tty->print_cr(" r9 = 0x%016" PRIx64, regs[9]);
3631       tty->print_cr("r10 = 0x%016" PRIx64, regs[10]);
3632       tty->print_cr("r11 = 0x%016" PRIx64, regs[11]);
3633       tty->print_cr("r12 = 0x%016" PRIx64, regs[12]);
3634       tty->print_cr("r13 = 0x%016" PRIx64, regs[13]);
3635       tty->print_cr("r14 = 0x%016" PRIx64, regs[14]);
3636       tty->print_cr("r15 = 0x%016" PRIx64, regs[15]);
3637       tty->print_cr("r16 = 0x%016" PRIx64, regs[16]);
3638       tty->print_cr("r17 = 0x%016" PRIx64, regs[17]);
3639       tty->print_cr("r18 = 0x%016" PRIx64, regs[18]);
3640       tty->print_cr("r19 = 0x%016" PRIx64, regs[19]);
3641       tty->print_cr("r20 = 0x%016" PRIx64, regs[20]);
3642       tty->print_cr("r21 = 0x%016" PRIx64, regs[21]);
3643       tty->print_cr("r22 = 0x%016" PRIx64, regs[22]);
3644       tty->print_cr("r23 = 0x%016" PRIx64, regs[23]);
3645       tty->print_cr("r24 = 0x%016" PRIx64, regs[24]);
3646       tty->print_cr("r25 = 0x%016" PRIx64, regs[25]);
3647       tty->print_cr("r26 = 0x%016" PRIx64, regs[26]);
3648       tty->print_cr("r27 = 0x%016" PRIx64, regs[27]);
3649       tty->print_cr("r28 = 0x%016" PRIx64, regs[28]);
3650       tty->print_cr("r30 = 0x%016" PRIx64, regs[30]);
3651       tty->print_cr("r31 = 0x%016" PRIx64, regs[31]);
3652       BREAKPOINT;
3653     }
3654   }
3655   fatal("DEBUG MESSAGE: %s", msg);
3656 }
3657 
3658 RegSet MacroAssembler::call_clobbered_gp_registers() {
3659   RegSet regs = RegSet::range(r0, r17) - RegSet::of(rscratch1, rscratch2);
3660 #ifndef R18_RESERVED
3661   regs += r18_tls;
3662 #endif
3663   return regs;
3664 }
3665 
3666 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude) {
3667   int step = 4 * wordSize;
3668   push(call_clobbered_gp_registers() - exclude, sp);
3669   sub(sp, sp, step);
3670   mov(rscratch1, -step);
3671   // Push v0-v7, v16-v31.
3672   for (int i = 31; i >= 4; i -= 4) {
3673     if (i <= v7->encoding() || i >= v16->encoding())
3674       st1(as_FloatRegister(i-3), as_FloatRegister(i-2), as_FloatRegister(i-1),
3675           as_FloatRegister(i), T1D, Address(post(sp, rscratch1)));
3676   }
3677   st1(as_FloatRegister(0), as_FloatRegister(1), as_FloatRegister(2),
3678       as_FloatRegister(3), T1D, Address(sp));
3679 }
3680 
3681 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude) {
3682   for (int i = 0; i < 32; i += 4) {
3683     if (i <= v7->encoding() || i >= v16->encoding())
3684       ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
3685           as_FloatRegister(i+3), T1D, Address(post(sp, 4 * wordSize)));
3686   }
3687 
3688   reinitialize_ptrue();
3689 
3690   pop(call_clobbered_gp_registers() - exclude, sp);
3691 }
3692 
3693 void MacroAssembler::push_CPU_state(bool save_vectors, bool use_sve,
3694                                     int sve_vector_size_in_bytes, int total_predicate_in_bytes) {
3695   push(RegSet::range(r0, r29), sp); // integer registers except lr & sp
3696   if (save_vectors && use_sve && sve_vector_size_in_bytes > 16) {
3697     sub(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers);
3698     for (int i = 0; i < FloatRegister::number_of_registers; i++) {
3699       sve_str(as_FloatRegister(i), Address(sp, i));
3700     }
3701   } else {
3702     int step = (save_vectors ? 8 : 4) * wordSize;
3703     mov(rscratch1, -step);
3704     sub(sp, sp, step);
3705     for (int i = 28; i >= 4; i -= 4) {
3706       st1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
3707           as_FloatRegister(i+3), save_vectors ? T2D : T1D, Address(post(sp, rscratch1)));
3708     }
3709     st1(v0, v1, v2, v3, save_vectors ? T2D : T1D, sp);
3710   }
3711   if (save_vectors && use_sve && total_predicate_in_bytes > 0) {
3712     sub(sp, sp, total_predicate_in_bytes);
3713     for (int i = 0; i < PRegister::number_of_registers; i++) {
3714       sve_str(as_PRegister(i), Address(sp, i));
3715     }
3716   }
3717 }
3718 
3719 void MacroAssembler::pop_CPU_state(bool restore_vectors, bool use_sve,
3720                                    int sve_vector_size_in_bytes, int total_predicate_in_bytes) {
3721   if (restore_vectors && use_sve && total_predicate_in_bytes > 0) {
3722     for (int i = PRegister::number_of_registers - 1; i >= 0; i--) {
3723       sve_ldr(as_PRegister(i), Address(sp, i));
3724     }
3725     add(sp, sp, total_predicate_in_bytes);
3726   }
3727   if (restore_vectors && use_sve && sve_vector_size_in_bytes > 16) {
3728     for (int i = FloatRegister::number_of_registers - 1; i >= 0; i--) {
3729       sve_ldr(as_FloatRegister(i), Address(sp, i));
3730     }
3731     add(sp, sp, sve_vector_size_in_bytes * FloatRegister::number_of_registers);
3732   } else {
3733     int step = (restore_vectors ? 8 : 4) * wordSize;
3734     for (int i = 0; i <= 28; i += 4)
3735       ld1(as_FloatRegister(i), as_FloatRegister(i+1), as_FloatRegister(i+2),
3736           as_FloatRegister(i+3), restore_vectors ? T2D : T1D, Address(post(sp, step)));
3737   }
3738 
3739   // We may use predicate registers and rely on ptrue with SVE,
3740   // regardless of whether wide vectors (> 8 bytes) are used or not.
3741   if (use_sve) {
3742     reinitialize_ptrue();
3743   }
3744 
3745   // integer registers except lr & sp
3746   pop(RegSet::range(r0, r17), sp);
3747 #ifdef R18_RESERVED
3748   ldp(zr, r19, Address(post(sp, 2 * wordSize)));
3749   pop(RegSet::range(r20, r29), sp);
3750 #else
3751   pop(RegSet::range(r18_tls, r29), sp);
3752 #endif
3753 }
3754 
3755 /**
3756  * Helpers for multiply_to_len().
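 *
 * add2_with_carry() accumulates src1 and src2 into the 128-bit value
 * dest_hi:dest_lo, propagating the carries; the resulting high word is
 * left in final_dest_hi.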
3757  */
3758 void MacroAssembler::add2_with_carry(Register final_dest_hi, Register dest_hi, Register dest_lo,
3759                                      Register src1, Register src2) {
3760   adds(dest_lo, dest_lo, src1);
3761   adc(dest_hi, dest_hi, zr);
3762   adds(dest_lo, dest_lo, src2);
3763   adc(final_dest_hi, dest_hi, zr);
3764 }
3765 
3766 // Generate an address from (r + r1 extend offset). "size" is the
3767 // size of the operand. The result may be in rscratch2.
3768 Address MacroAssembler::offsetted_address(Register r, Register r1,
3769                                           Address::extend ext, int offset, int size) {
3770   if (offset || (ext.shift() % size != 0)) {
3771     lea(rscratch2, Address(r, r1, ext));
3772     return Address(rscratch2, offset);
3773   } else {
3774     return Address(r, r1, ext);
3775   }
3776 }
3777 
3778 Address MacroAssembler::spill_address(int size, int offset, Register tmp)
3779 {
3780   assert(offset >= 0, "spill to negative address?");
3781   // Offset reachable ?
3782   //   Not aligned - 9 bits signed offset
3783   //   Aligned - 12 bits unsigned offset shifted
3784   Register base = sp;
3785   if ((offset & (size-1)) && offset >= (1<<8)) {
3786     add(tmp, base, offset & ((1<<12)-1));
3787     base = tmp;
3788     offset &= -1u<<12;
3789   }
3790 
3791   if (offset >= (1<<12) * size) {
3792     add(tmp, base, offset & (((1<<12)-1)<<12));
3793     base = tmp;
3794     offset &= ~(((1<<12)-1)<<12);
3795   }
3796 
3797   return Address(base, offset);
3798 }
3799 
3800 Address MacroAssembler::sve_spill_address(int sve_reg_size_in_bytes, int offset, Register tmp) {
3801   assert(offset >= 0, "spill to negative address?");
3802 
3803   Register base = sp;
3804 
3805   // An immediate offset in the range 0 to 255 which is multiplied
3806   // by the current vector or predicate register size in bytes.
3807   if (offset % sve_reg_size_in_bytes == 0 && offset < ((1<<8)*sve_reg_size_in_bytes)) {
3808     return Address(base, offset / sve_reg_size_in_bytes);
3809   }
3810 
3811   add(tmp, base, offset);
3812   return Address(tmp);
3813 }
3814 
3815 // Checks whether offset is aligned.
3816 // Returns true if it is, else false.
3817 bool MacroAssembler::merge_alignment_check(Register base,
3818                                            size_t size,
3819                                            int64_t cur_offset,
3820                                            int64_t prev_offset) const {
3821   if (AvoidUnalignedAccesses) {
3822     if (base == sp) {
3823       // Checks whether the low offset is aligned to a pair of registers.
3824       int64_t pair_mask = size * 2 - 1;
3825       int64_t offset = prev_offset > cur_offset ? cur_offset : prev_offset;
3826       return (offset & pair_mask) == 0;
3827     } else { // If base is not sp, we can't guarantee the access is aligned.
3828       return false;
3829     }
3830   } else {
3831     int64_t mask = size - 1;
3832     // Load/store pair instructions only support element-size-aligned offsets.
3833     return (cur_offset & mask) == 0 && (prev_offset & mask) == 0;
3834   }
3835 }
3836 
3837 // Checks whether current and previous loads/stores can be merged.
3838 // Returns true if it can be merged, else false.
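// For example (illustrative), the adjacent same-size accesses
//   ldr w0, [sp, #8]
//   ldr w1, [sp, #12]
// satisfy these checks (same base, same size, offsets differing by exactly
// one element, low offset pair-aligned) and are rewritten by merge_ldst()
// below as
//   ldp w0, w1, [sp, #8]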
3839 bool MacroAssembler::ldst_can_merge(Register rt,
3840                                     const Address &adr,
3841                                     size_t cur_size_in_bytes,
3842                                     bool is_store) const {
3843   address prev = pc() - NativeInstruction::instruction_size;
3844   address last = code()->last_insn();
3845 
3846   if (last == nullptr || !nativeInstruction_at(last)->is_Imm_LdSt()) {
3847     return false;
3848   }
3849 
3850   if (adr.getMode() != Address::base_plus_offset || prev != last) {
3851     return false;
3852   }
3853 
3854   NativeLdSt* prev_ldst = NativeLdSt_at(prev);
3855   size_t prev_size_in_bytes = prev_ldst->size_in_bytes();
3856 
3857   assert(prev_size_in_bytes == 4 || prev_size_in_bytes == 8, "only supports 64/32bit merging.");
3858   assert(cur_size_in_bytes == 4 || cur_size_in_bytes == 8, "only supports 64/32bit merging.");
3859 
3860   if (cur_size_in_bytes != prev_size_in_bytes || is_store != prev_ldst->is_store()) {
3861     return false;
3862   }
3863 
3864   int64_t max_offset = 63 * prev_size_in_bytes;
3865   int64_t min_offset = -64 * prev_size_in_bytes;
3866 
3867   assert(prev_ldst->is_not_pre_post_index(), "pre-index or post-index is not supported to be merged.");
3868 
3869   // Only accesses with the same base register can be merged.
3870   if (adr.base() != prev_ldst->base()) {
3871     return false;
3872   }
3873 
3874   int64_t cur_offset = adr.offset();
3875   int64_t prev_offset = prev_ldst->offset();
3876   size_t diff = abs(cur_offset - prev_offset);
3877   if (diff != prev_size_in_bytes) {
3878     return false;
3879   }
3880 
3881   // The following cases cannot be merged:
3882   //   ldr x2, [x2, #8]
3883   //   ldr x3, [x2, #16]
3884   // or:
3885   //   ldr x2, [x3, #8]
3886   //   ldr x2, [x3, #16]
3887   // If t1 and t2 are the same in "ldp t1, t2, [xn, #imm]", we'll get SIGILL.
3888   if (!is_store && (adr.base() == prev_ldst->target() || rt == prev_ldst->target())) {
3889     return false;
3890   }
3891 
3892   int64_t low_offset = prev_offset > cur_offset ? cur_offset : prev_offset;
3893   // The offset must be within the ldp/stp instructions' offset range.
3894   if (low_offset > max_offset || low_offset < min_offset) {
3895     return false;
3896   }
3897 
3898   if (merge_alignment_check(adr.base(), prev_size_in_bytes, cur_offset, prev_offset)) {
3899     return true;
3900   }
3901 
3902   return false;
3903 }
3904 
3905 // Merge current load/store with previous load/store into ldp/stp.
3906 void MacroAssembler::merge_ldst(Register rt,
3907                                 const Address &adr,
3908                                 size_t cur_size_in_bytes,
3909                                 bool is_store) {
3910 
3911   assert(ldst_can_merge(rt, adr, cur_size_in_bytes, is_store) == true, "cur and prev must be able to be merged.");
3912 
3913   Register rt_low, rt_high;
3914   address prev = pc() - NativeInstruction::instruction_size;
3915   NativeLdSt* prev_ldst = NativeLdSt_at(prev);
3916 
3917   int64_t offset;
3918 
3919   if (adr.offset() < prev_ldst->offset()) {
3920     offset = adr.offset();
3921     rt_low = rt;
3922     rt_high = prev_ldst->target();
3923   } else {
3924     offset = prev_ldst->offset();
3925     rt_low = prev_ldst->target();
3926     rt_high = rt;
3927   }
3928 
3929   Address adr_p = Address(prev_ldst->base(), offset);
3930   // Overwrite the previously generated binary.
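  // set_end(prev) rewinds the code buffer over the previously emitted
  // ldr/str, so the single ldp/stp emitted below replaces it and covers
  // both that access and the one currently being requested.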
3931   code_section()->set_end(prev);
3932 
3933   const size_t sz = prev_ldst->size_in_bytes();
3934   assert(sz == 8 || sz == 4, "only supports 64/32bit merging.");
3935   if (!is_store) {
3936     BLOCK_COMMENT("merged ldr pair");
3937     if (sz == 8) {
3938       ldp(rt_low, rt_high, adr_p);
3939     } else {
3940       ldpw(rt_low, rt_high, adr_p);
3941     }
3942   } else {
3943     BLOCK_COMMENT("merged str pair");
3944     if (sz == 8) {
3945       stp(rt_low, rt_high, adr_p);
3946     } else {
3947       stpw(rt_low, rt_high, adr_p);
3948     }
3949   }
3950 }
3951 
3952 /**
3953  * Multiply 64 bit by 64 bit first loop.
3954  */
3955 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
3956                                            Register y, Register y_idx, Register z,
3957                                            Register carry, Register product,
3958                                            Register idx, Register kdx) {
3959   //
3960   //  jlong carry, x[], y[], z[];
3961   //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
3962   //    huge_128 product = y[idx] * x[xstart] + carry;
3963   //    z[kdx] = (jlong)product;
3964   //    carry  = (jlong)(product >>> 64);
3965   //  }
3966   //  z[xstart] = carry;
3967   //
3968 
3969   Label L_first_loop, L_first_loop_exit;
3970   Label L_one_x, L_one_y, L_multiply;
3971 
3972   subsw(xstart, xstart, 1);
3973   br(Assembler::MI, L_one_x);
3974 
3975   lea(rscratch1, Address(x, xstart, Address::lsl(LogBytesPerInt)));
3976   ldr(x_xstart, Address(rscratch1));
3977   ror(x_xstart, x_xstart, 32); // convert big-endian to little-endian
3978 
3979   bind(L_first_loop);
3980   subsw(idx, idx, 1);
3981   br(Assembler::MI, L_first_loop_exit);
3982   subsw(idx, idx, 1);
3983   br(Assembler::MI, L_one_y);
3984   lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt)));
3985   ldr(y_idx, Address(rscratch1));
3986   ror(y_idx, y_idx, 32); // convert big-endian to little-endian
3987   bind(L_multiply);
3988 
3989   // AArch64 has a multiply-accumulate instruction that we can't use
3990   // here because it has no way to process carries, so we have to use
3991   // separate add and adc instructions. Bah.
3992   umulh(rscratch1, x_xstart, y_idx); // x_xstart * y_idx -> rscratch1:product
3993   mul(product, x_xstart, y_idx);
3994   adds(product, product, carry);
3995   adc(carry, rscratch1, zr); // x_xstart * y_idx + carry -> carry:product
3996 
3997   subw(kdx, kdx, 2);
3998   ror(product, product, 32); // back to big-endian
3999   str(product, offsetted_address(z, kdx, Address::uxtw(LogBytesPerInt), 0, BytesPerLong));
4000 
4001   b(L_first_loop);
4002 
4003   bind(L_one_y);
4004   ldrw(y_idx, Address(y, 0));
4005   b(L_multiply);
4006 
4007   bind(L_one_x);
4008   ldrw(x_xstart, Address(x, 0));
4009   b(L_first_loop);
4010 
4011   bind(L_first_loop_exit);
4012 }
4013 
4014 /**
4015  * Multiply 128 bit by 128 bit. Unrolled inner loop.
4016 * 4017 */ 4018 void MacroAssembler::multiply_128_x_128_loop(Register y, Register z, 4019 Register carry, Register carry2, 4020 Register idx, Register jdx, 4021 Register yz_idx1, Register yz_idx2, 4022 Register tmp, Register tmp3, Register tmp4, 4023 Register tmp6, Register product_hi) { 4024 4025 // jlong carry, x[], y[], z[]; 4026 // int kdx = ystart+1; 4027 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 4028 // huge_128 tmp3 = (y[idx+1] * product_hi) + z[kdx+idx+1] + carry; 4029 // jlong carry2 = (jlong)(tmp3 >>> 64); 4030 // huge_128 tmp4 = (y[idx] * product_hi) + z[kdx+idx] + carry2; 4031 // carry = (jlong)(tmp4 >>> 64); 4032 // z[kdx+idx+1] = (jlong)tmp3; 4033 // z[kdx+idx] = (jlong)tmp4; 4034 // } 4035 // idx += 2; 4036 // if (idx > 0) { 4037 // yz_idx1 = (y[idx] * product_hi) + z[kdx+idx] + carry; 4038 // z[kdx+idx] = (jlong)yz_idx1; 4039 // carry = (jlong)(yz_idx1 >>> 64); 4040 // } 4041 // 4042 4043 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 4044 4045 lsrw(jdx, idx, 2); 4046 4047 bind(L_third_loop); 4048 4049 subsw(jdx, jdx, 1); 4050 br(Assembler::MI, L_third_loop_exit); 4051 subw(idx, idx, 4); 4052 4053 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 4054 4055 ldp(yz_idx2, yz_idx1, Address(rscratch1, 0)); 4056 4057 lea(tmp6, Address(z, idx, Address::uxtw(LogBytesPerInt))); 4058 4059 ror(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian 4060 ror(yz_idx2, yz_idx2, 32); 4061 4062 ldp(rscratch2, rscratch1, Address(tmp6, 0)); 4063 4064 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 4065 umulh(tmp4, product_hi, yz_idx1); 4066 4067 ror(rscratch1, rscratch1, 32); // convert big-endian to little-endian 4068 ror(rscratch2, rscratch2, 32); 4069 4070 mul(tmp, product_hi, yz_idx2); // yz_idx2 * product_hi -> carry2:tmp 4071 umulh(carry2, product_hi, yz_idx2); 4072 4073 // propagate sum of both multiplications into carry:tmp4:tmp3 4074 adds(tmp3, tmp3, carry); 4075 adc(tmp4, tmp4, zr); 4076 adds(tmp3, tmp3, rscratch1); 4077 adcs(tmp4, tmp4, tmp); 4078 adc(carry, carry2, zr); 4079 adds(tmp4, tmp4, rscratch2); 4080 adc(carry, carry, zr); 4081 4082 ror(tmp3, tmp3, 32); // convert little-endian to big-endian 4083 ror(tmp4, tmp4, 32); 4084 stp(tmp4, tmp3, Address(tmp6, 0)); 4085 4086 b(L_third_loop); 4087 bind (L_third_loop_exit); 4088 4089 andw (idx, idx, 0x3); 4090 cbz(idx, L_post_third_loop_done); 4091 4092 Label L_check_1; 4093 subsw(idx, idx, 2); 4094 br(Assembler::MI, L_check_1); 4095 4096 lea(rscratch1, Address(y, idx, Address::uxtw(LogBytesPerInt))); 4097 ldr(yz_idx1, Address(rscratch1, 0)); 4098 ror(yz_idx1, yz_idx1, 32); 4099 mul(tmp3, product_hi, yz_idx1); // yz_idx1 * product_hi -> tmp4:tmp3 4100 umulh(tmp4, product_hi, yz_idx1); 4101 lea(rscratch1, Address(z, idx, Address::uxtw(LogBytesPerInt))); 4102 ldr(yz_idx2, Address(rscratch1, 0)); 4103 ror(yz_idx2, yz_idx2, 32); 4104 4105 add2_with_carry(carry, tmp4, tmp3, carry, yz_idx2); 4106 4107 ror(tmp3, tmp3, 32); 4108 str(tmp3, Address(rscratch1, 0)); 4109 4110 bind (L_check_1); 4111 4112 andw (idx, idx, 0x1); 4113 subsw(idx, idx, 1); 4114 br(Assembler::MI, L_post_third_loop_done); 4115 ldrw(tmp4, Address(y, idx, Address::uxtw(LogBytesPerInt))); 4116 mul(tmp3, tmp4, product_hi); // tmp4 * product_hi -> carry2:tmp3 4117 umulh(carry2, tmp4, product_hi); 4118 ldrw(tmp4, Address(z, idx, Address::uxtw(LogBytesPerInt))); 4119 4120 add2_with_carry(carry2, tmp3, tmp4, carry); 4121 4122 strw(tmp3, Address(z, idx, Address::uxtw(LogBytesPerInt))); 4123 
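  // extr extracts 64 bits starting at bit 32 of the 128-bit pair
  // carry2:tmp3, forming the carry for the next word: the low half of
  // carry2 becomes the high word and the high half of tmp3 the low word.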
extr(carry, carry2, tmp3, 32);
4124 
4125   bind(L_post_third_loop_done);
4126 }
4127 
4128 /**
4129  * Code for BigInteger::multiplyToLen() intrinsic.
4130  *
4131  * r0: x
4132  * r1: xlen
4133  * r2: y
4134  * r3: ylen
4135  * r4: z
4136  * r5: tmp0
4137  * r10: tmp1
4138  * r11: tmp2
4139  * r12: tmp3
4140  * r13: tmp4
4141  * r14: tmp5
4142  * r15: tmp6
4143  * r16: tmp7
4144  *
4145  */
4146 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen,
4147                                      Register z, Register tmp0,
4148                                      Register tmp1, Register tmp2, Register tmp3, Register tmp4,
4149                                      Register tmp5, Register tmp6, Register product_hi) {
4150 
4151   assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, product_hi);
4152 
4153   const Register idx = tmp1;
4154   const Register kdx = tmp2;
4155   const Register xstart = tmp3;
4156 
4157   const Register y_idx = tmp4;
4158   const Register carry = tmp5;
4159   const Register product = xlen;
4160   const Register x_xstart = tmp0;
4161 
4162   // First Loop.
4163   //
4164   //  final static long LONG_MASK = 0xffffffffL;
4165   //  int xstart = xlen - 1;
4166   //  int ystart = ylen - 1;
4167   //  long carry = 0;
4168   //  for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
4169   //    long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
4170   //    z[kdx] = (int)product;
4171   //    carry = product >>> 32;
4172   //  }
4173   //  z[xstart] = (int)carry;
4174   //
4175 
4176   movw(idx, ylen);       // idx = ylen;
4177   addw(kdx, xlen, ylen); // kdx = xlen+ylen;
4178   mov(carry, zr);        // carry = 0;
4179 
4180   Label L_done;
4181 
4182   movw(xstart, xlen);
4183   subsw(xstart, xstart, 1);
4184   br(Assembler::MI, L_done);
4185 
4186   multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
4187 
4188   Label L_second_loop;
4189   cbzw(kdx, L_second_loop);
4190 
4191   Label L_carry;
4192   subw(kdx, kdx, 1);
4193   cbzw(kdx, L_carry);
4194 
4195   strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
4196   lsr(carry, carry, 32);
4197   subw(kdx, kdx, 1);
4198 
4199   bind(L_carry);
4200   strw(carry, Address(z, kdx, Address::uxtw(LogBytesPerInt)));
4201 
4202   // Second and third (nested) loops.
4203 // 4204 // for (int i = xstart-1; i >= 0; i--) { // Second loop 4205 // carry = 0; 4206 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop 4207 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + 4208 // (z[k] & LONG_MASK) + carry; 4209 // z[k] = (int)product; 4210 // carry = product >>> 32; 4211 // } 4212 // z[i] = (int)carry; 4213 // } 4214 // 4215 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = product_hi 4216 4217 const Register jdx = tmp1; 4218 4219 bind(L_second_loop); 4220 mov(carry, zr); // carry = 0; 4221 movw(jdx, ylen); // j = ystart+1 4222 4223 subsw(xstart, xstart, 1); // i = xstart-1; 4224 br(Assembler::MI, L_done); 4225 4226 str(z, Address(pre(sp, -4 * wordSize))); 4227 4228 Label L_last_x; 4229 lea(z, offsetted_address(z, xstart, Address::uxtw(LogBytesPerInt), 4, BytesPerInt)); // z = z + k - j 4230 subsw(xstart, xstart, 1); // i = xstart-1; 4231 br(Assembler::MI, L_last_x); 4232 4233 lea(rscratch1, Address(x, xstart, Address::uxtw(LogBytesPerInt))); 4234 ldr(product_hi, Address(rscratch1)); 4235 ror(product_hi, product_hi, 32); // convert big-endian to little-endian 4236 4237 Label L_third_loop_prologue; 4238 bind(L_third_loop_prologue); 4239 4240 str(ylen, Address(sp, wordSize)); 4241 stp(x, xstart, Address(sp, 2 * wordSize)); 4242 multiply_128_x_128_loop(y, z, carry, x, jdx, ylen, product, 4243 tmp2, x_xstart, tmp3, tmp4, tmp6, product_hi); 4244 ldp(z, ylen, Address(post(sp, 2 * wordSize))); 4245 ldp(x, xlen, Address(post(sp, 2 * wordSize))); // copy old xstart -> xlen 4246 4247 addw(tmp3, xlen, 1); 4248 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 4249 subsw(tmp3, tmp3, 1); 4250 br(Assembler::MI, L_done); 4251 4252 lsr(carry, carry, 32); 4253 strw(carry, Address(z, tmp3, Address::uxtw(LogBytesPerInt))); 4254 b(L_second_loop); 4255 4256 // Next infrequent code is moved outside loops. 4257 bind(L_last_x); 4258 ldrw(product_hi, Address(x, 0)); 4259 b(L_third_loop_prologue); 4260 4261 bind(L_done); 4262 } 4263 4264 // Code for BigInteger::mulAdd intrinsic 4265 // out = r0 4266 // in = r1 4267 // offset = r2 (already out.length-offset) 4268 // len = r3 4269 // k = r4 4270 // 4271 // pseudo code from java implementation: 4272 // carry = 0; 4273 // offset = out.length-offset - 1; 4274 // for (int j=len-1; j >= 0; j--) { 4275 // product = (in[j] & LONG_MASK) * kLong + (out[offset] & LONG_MASK) + carry; 4276 // out[offset--] = (int)product; 4277 // carry = product >>> 32; 4278 // } 4279 // return (int)carry; 4280 void MacroAssembler::mul_add(Register out, Register in, Register offset, 4281 Register len, Register k) { 4282 Label LOOP, END; 4283 // pre-loop 4284 cmp(len, zr); // cmp, not cbz/cbnz: to use condition twice => less branches 4285 csel(out, zr, out, Assembler::EQ); 4286 br(Assembler::EQ, END); 4287 add(in, in, len, LSL, 2); // in[j+1] address 4288 add(offset, out, offset, LSL, 2); // out[offset + 1] address 4289 mov(out, zr); // used to keep carry now 4290 BIND(LOOP); 4291 ldrw(rscratch1, Address(pre(in, -4))); 4292 madd(rscratch1, rscratch1, k, out); 4293 ldrw(rscratch2, Address(pre(offset, -4))); 4294 add(rscratch1, rscratch1, rscratch2); 4295 strw(rscratch1, Address(offset)); 4296 lsr(out, rscratch1, 32); 4297 subs(len, len, 1); 4298 br(Assembler::NE, LOOP); 4299 BIND(END); 4300 } 4301 4302 /** 4303 * Emits code to update CRC-32 with a byte value according to constants in table 4304 * 4305 * @param [in,out]crc Register containing the crc. 
4306  * @param [in]val       Register containing the byte to fold into the CRC.
4307  * @param [in]table     Register containing the table of crc constants.
4308  *
4309  * uint32_t crc;
4310  *   val = crc_table[(val ^ crc) & 0xFF];
4311  *   crc = val ^ (crc >> 8);
4312  *
4313  */
4314 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
4315   eor(val, val, crc);
4316   andr(val, val, 0xff);
4317   ldrw(val, Address(table, val, Address::lsl(2)));
4318   eor(crc, val, crc, Assembler::LSR, 8);
4319 }
4320 
4321 /**
4322  * Emits code to update CRC-32 with a 32-bit value according to tables 0 to 3
4323  *
4324  * @param [in,out]crc   Register containing the crc.
4325  * @param [in]v         Register containing the 32-bit value to fold into the CRC.
4326  * @param [in]table0    Register containing table 0 of crc constants.
4327  * @param [in]table1    Register containing table 1 of crc constants.
4328  * @param [in]table2    Register containing table 2 of crc constants.
4329  * @param [in]table3    Register containing table 3 of crc constants.
4330  *
4331  * uint32_t crc;
4332  *   v = crc ^ v
4333  *   crc = table3[v&0xff]^table2[(v>>8)&0xff]^table1[(v>>16)&0xff]^table0[v>>24]
4334  *
4335  */
4336 void MacroAssembler::update_word_crc32(Register crc, Register v, Register tmp,
4337                                        Register table0, Register table1, Register table2, Register table3,
4338                                        bool upper) {
4339   eor(v, crc, v, upper ? LSR : LSL, upper ? 32 : 0);
4340   uxtb(tmp, v);
4341   ldrw(crc, Address(table3, tmp, Address::lsl(2)));
4342   ubfx(tmp, v, 8, 8);
4343   ldrw(tmp, Address(table2, tmp, Address::lsl(2)));
4344   eor(crc, crc, tmp);
4345   ubfx(tmp, v, 16, 8);
4346   ldrw(tmp, Address(table1, tmp, Address::lsl(2)));
4347   eor(crc, crc, tmp);
4348   ubfx(tmp, v, 24, 8);
4349   ldrw(tmp, Address(table0, tmp, Address::lsl(2)));
4350   eor(crc, crc, tmp);
4351 }
4352 
4353 void MacroAssembler::kernel_crc32_using_crypto_pmull(Register crc, Register buf,
4354         Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) {
4355   Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit;
4356   assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2);
4357 
4358   subs(tmp0, len, 384);
4359   mvnw(crc, crc);
4360   br(Assembler::GE, CRC_by128_pre);
4361   BIND(CRC_less128);
4362   subs(len, len, 32);
4363   br(Assembler::GE, CRC_by32_loop);
4364   BIND(CRC_less32);
4365   adds(len, len, 32 - 4);
4366   br(Assembler::GE, CRC_by4_loop);
4367   adds(len, len, 4);
4368   br(Assembler::GT, CRC_by1_loop);
4369   b(L_exit);
4370 
4371   BIND(CRC_by32_loop);
4372   ldp(tmp0, tmp1, Address(buf));
4373   crc32x(crc, crc, tmp0);
4374   ldp(tmp2, tmp3, Address(buf, 16));
4375   crc32x(crc, crc, tmp1);
4376   add(buf, buf, 32);
4377   crc32x(crc, crc, tmp2);
4378   subs(len, len, 32);
4379   crc32x(crc, crc, tmp3);
4380   br(Assembler::GE, CRC_by32_loop);
4381   cmn(len, (u1)32);
4382   br(Assembler::NE, CRC_less32);
4383   b(L_exit);
4384 
4385   BIND(CRC_by4_loop);
4386   ldrw(tmp0, Address(post(buf, 4)));
4387   subs(len, len, 4);
4388   crc32w(crc, crc, tmp0);
4389   br(Assembler::GE, CRC_by4_loop);
4390   adds(len, len, 4);
4391   br(Assembler::LE, L_exit);
4392   BIND(CRC_by1_loop);
4393   ldrb(tmp0, Address(post(buf, 1)));
4394   subs(len, len, 1);
4395   crc32b(crc, crc, tmp0);
4396   br(Assembler::GT, CRC_by1_loop);
4397   b(L_exit);
4398 
4399   BIND(CRC_by128_pre);
4400   kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2,
4401     4*256*sizeof(juint) + 8*sizeof(juint));
4402   mov(crc, 0);
4403   crc32x(crc, crc, tmp0);
4404   crc32x(crc, crc, tmp1);
4405 
4406   cbnz(len, CRC_less128);
4407 
4408   BIND(L_exit);
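  // Undo the bit inversion applied on entry: CRC-32 is computed over the
  // one's complement of the CRC register.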
4409 mvnw(crc, crc); 4410 } 4411 4412 void MacroAssembler::kernel_crc32_using_crc32(Register crc, Register buf, 4413 Register len, Register tmp0, Register tmp1, Register tmp2, 4414 Register tmp3) { 4415 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit; 4416 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3); 4417 4418 mvnw(crc, crc); 4419 4420 subs(len, len, 128); 4421 br(Assembler::GE, CRC_by64_pre); 4422 BIND(CRC_less64); 4423 adds(len, len, 128-32); 4424 br(Assembler::GE, CRC_by32_loop); 4425 BIND(CRC_less32); 4426 adds(len, len, 32-4); 4427 br(Assembler::GE, CRC_by4_loop); 4428 adds(len, len, 4); 4429 br(Assembler::GT, CRC_by1_loop); 4430 b(L_exit); 4431 4432 BIND(CRC_by32_loop); 4433 ldp(tmp0, tmp1, Address(post(buf, 16))); 4434 subs(len, len, 32); 4435 crc32x(crc, crc, tmp0); 4436 ldr(tmp2, Address(post(buf, 8))); 4437 crc32x(crc, crc, tmp1); 4438 ldr(tmp3, Address(post(buf, 8))); 4439 crc32x(crc, crc, tmp2); 4440 crc32x(crc, crc, tmp3); 4441 br(Assembler::GE, CRC_by32_loop); 4442 cmn(len, (u1)32); 4443 br(Assembler::NE, CRC_less32); 4444 b(L_exit); 4445 4446 BIND(CRC_by4_loop); 4447 ldrw(tmp0, Address(post(buf, 4))); 4448 subs(len, len, 4); 4449 crc32w(crc, crc, tmp0); 4450 br(Assembler::GE, CRC_by4_loop); 4451 adds(len, len, 4); 4452 br(Assembler::LE, L_exit); 4453 BIND(CRC_by1_loop); 4454 ldrb(tmp0, Address(post(buf, 1))); 4455 subs(len, len, 1); 4456 crc32b(crc, crc, tmp0); 4457 br(Assembler::GT, CRC_by1_loop); 4458 b(L_exit); 4459 4460 BIND(CRC_by64_pre); 4461 sub(buf, buf, 8); 4462 ldp(tmp0, tmp1, Address(buf, 8)); 4463 crc32x(crc, crc, tmp0); 4464 ldr(tmp2, Address(buf, 24)); 4465 crc32x(crc, crc, tmp1); 4466 ldr(tmp3, Address(buf, 32)); 4467 crc32x(crc, crc, tmp2); 4468 ldr(tmp0, Address(buf, 40)); 4469 crc32x(crc, crc, tmp3); 4470 ldr(tmp1, Address(buf, 48)); 4471 crc32x(crc, crc, tmp0); 4472 ldr(tmp2, Address(buf, 56)); 4473 crc32x(crc, crc, tmp1); 4474 ldr(tmp3, Address(pre(buf, 64))); 4475 4476 b(CRC_by64_loop); 4477 4478 align(CodeEntryAlignment); 4479 BIND(CRC_by64_loop); 4480 subs(len, len, 64); 4481 crc32x(crc, crc, tmp2); 4482 ldr(tmp0, Address(buf, 8)); 4483 crc32x(crc, crc, tmp3); 4484 ldr(tmp1, Address(buf, 16)); 4485 crc32x(crc, crc, tmp0); 4486 ldr(tmp2, Address(buf, 24)); 4487 crc32x(crc, crc, tmp1); 4488 ldr(tmp3, Address(buf, 32)); 4489 crc32x(crc, crc, tmp2); 4490 ldr(tmp0, Address(buf, 40)); 4491 crc32x(crc, crc, tmp3); 4492 ldr(tmp1, Address(buf, 48)); 4493 crc32x(crc, crc, tmp0); 4494 ldr(tmp2, Address(buf, 56)); 4495 crc32x(crc, crc, tmp1); 4496 ldr(tmp3, Address(pre(buf, 64))); 4497 br(Assembler::GE, CRC_by64_loop); 4498 4499 // post-loop 4500 crc32x(crc, crc, tmp2); 4501 crc32x(crc, crc, tmp3); 4502 4503 sub(len, len, 64); 4504 add(buf, buf, 8); 4505 cmn(len, (u1)128); 4506 br(Assembler::NE, CRC_less64); 4507 BIND(L_exit); 4508 mvnw(crc, crc); 4509 } 4510 4511 /** 4512 * @param crc register containing existing CRC (32-bit) 4513 * @param buf register pointing to input byte buffer (byte*) 4514 * @param len register containing number of bytes 4515 * @param table register that will contain address of CRC table 4516 * @param tmp scratch register 4517 */ 4518 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, 4519 Register table0, Register table1, Register table2, Register table3, 4520 Register tmp, Register tmp2, Register tmp3) { 4521 Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit; 4522 4523 if (UseCryptoPmullForCRC32) { 4524 
kernel_crc32_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3); 4525 return; 4526 } 4527 4528 if (UseCRC32) { 4529 kernel_crc32_using_crc32(crc, buf, len, table0, table1, table2, table3); 4530 return; 4531 } 4532 4533 mvnw(crc, crc); 4534 4535 { 4536 uint64_t offset; 4537 adrp(table0, ExternalAddress(StubRoutines::crc_table_addr()), offset); 4538 add(table0, table0, offset); 4539 } 4540 add(table1, table0, 1*256*sizeof(juint)); 4541 add(table2, table0, 2*256*sizeof(juint)); 4542 add(table3, table0, 3*256*sizeof(juint)); 4543 4544 { // Neon code start 4545 cmp(len, (u1)64); 4546 br(Assembler::LT, L_by16); 4547 eor(v16, T16B, v16, v16); 4548 4549 Label L_fold; 4550 4551 add(tmp, table0, 4*256*sizeof(juint)); // Point at the Neon constants 4552 4553 ld1(v0, v1, T2D, post(buf, 32)); 4554 ld1r(v4, T2D, post(tmp, 8)); 4555 ld1r(v5, T2D, post(tmp, 8)); 4556 ld1r(v6, T2D, post(tmp, 8)); 4557 ld1r(v7, T2D, post(tmp, 8)); 4558 mov(v16, S, 0, crc); 4559 4560 eor(v0, T16B, v0, v16); 4561 sub(len, len, 64); 4562 4563 BIND(L_fold); 4564 pmull(v22, T8H, v0, v5, T8B); 4565 pmull(v20, T8H, v0, v7, T8B); 4566 pmull(v23, T8H, v0, v4, T8B); 4567 pmull(v21, T8H, v0, v6, T8B); 4568 4569 pmull2(v18, T8H, v0, v5, T16B); 4570 pmull2(v16, T8H, v0, v7, T16B); 4571 pmull2(v19, T8H, v0, v4, T16B); 4572 pmull2(v17, T8H, v0, v6, T16B); 4573 4574 uzp1(v24, T8H, v20, v22); 4575 uzp2(v25, T8H, v20, v22); 4576 eor(v20, T16B, v24, v25); 4577 4578 uzp1(v26, T8H, v16, v18); 4579 uzp2(v27, T8H, v16, v18); 4580 eor(v16, T16B, v26, v27); 4581 4582 ushll2(v22, T4S, v20, T8H, 8); 4583 ushll(v20, T4S, v20, T4H, 8); 4584 4585 ushll2(v18, T4S, v16, T8H, 8); 4586 ushll(v16, T4S, v16, T4H, 8); 4587 4588 eor(v22, T16B, v23, v22); 4589 eor(v18, T16B, v19, v18); 4590 eor(v20, T16B, v21, v20); 4591 eor(v16, T16B, v17, v16); 4592 4593 uzp1(v17, T2D, v16, v20); 4594 uzp2(v21, T2D, v16, v20); 4595 eor(v17, T16B, v17, v21); 4596 4597 ushll2(v20, T2D, v17, T4S, 16); 4598 ushll(v16, T2D, v17, T2S, 16); 4599 4600 eor(v20, T16B, v20, v22); 4601 eor(v16, T16B, v16, v18); 4602 4603 uzp1(v17, T2D, v20, v16); 4604 uzp2(v21, T2D, v20, v16); 4605 eor(v28, T16B, v17, v21); 4606 4607 pmull(v22, T8H, v1, v5, T8B); 4608 pmull(v20, T8H, v1, v7, T8B); 4609 pmull(v23, T8H, v1, v4, T8B); 4610 pmull(v21, T8H, v1, v6, T8B); 4611 4612 pmull2(v18, T8H, v1, v5, T16B); 4613 pmull2(v16, T8H, v1, v7, T16B); 4614 pmull2(v19, T8H, v1, v4, T16B); 4615 pmull2(v17, T8H, v1, v6, T16B); 4616 4617 ld1(v0, v1, T2D, post(buf, 32)); 4618 4619 uzp1(v24, T8H, v20, v22); 4620 uzp2(v25, T8H, v20, v22); 4621 eor(v20, T16B, v24, v25); 4622 4623 uzp1(v26, T8H, v16, v18); 4624 uzp2(v27, T8H, v16, v18); 4625 eor(v16, T16B, v26, v27); 4626 4627 ushll2(v22, T4S, v20, T8H, 8); 4628 ushll(v20, T4S, v20, T4H, 8); 4629 4630 ushll2(v18, T4S, v16, T8H, 8); 4631 ushll(v16, T4S, v16, T4H, 8); 4632 4633 eor(v22, T16B, v23, v22); 4634 eor(v18, T16B, v19, v18); 4635 eor(v20, T16B, v21, v20); 4636 eor(v16, T16B, v17, v16); 4637 4638 uzp1(v17, T2D, v16, v20); 4639 uzp2(v21, T2D, v16, v20); 4640 eor(v16, T16B, v17, v21); 4641 4642 ushll2(v20, T2D, v16, T4S, 16); 4643 ushll(v16, T2D, v16, T2S, 16); 4644 4645 eor(v20, T16B, v22, v20); 4646 eor(v16, T16B, v16, v18); 4647 4648 uzp1(v17, T2D, v20, v16); 4649 uzp2(v21, T2D, v20, v16); 4650 eor(v20, T16B, v17, v21); 4651 4652 shl(v16, T2D, v28, 1); 4653 shl(v17, T2D, v20, 1); 4654 4655 eor(v0, T16B, v0, v16); 4656 eor(v1, T16B, v1, v17); 4657 4658 subs(len, len, 32); 4659 br(Assembler::GE, L_fold); 4660 4661 mov(crc, 0); 4662 mov(tmp, v0, D, 0); 
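  // The D-lane moves extract each 64-bit half of v0 and v1 so that the
  // table-driven word updates below can reduce them into the scalar CRC.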
4663 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4664 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4665 mov(tmp, v0, D, 1); 4666 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4667 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4668 mov(tmp, v1, D, 0); 4669 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4670 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4671 mov(tmp, v1, D, 1); 4672 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4673 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4674 4675 add(len, len, 32); 4676 } // Neon code end 4677 4678 BIND(L_by16); 4679 subs(len, len, 16); 4680 br(Assembler::GE, L_by16_loop); 4681 adds(len, len, 16-4); 4682 br(Assembler::GE, L_by4_loop); 4683 adds(len, len, 4); 4684 br(Assembler::GT, L_by1_loop); 4685 b(L_exit); 4686 4687 BIND(L_by4_loop); 4688 ldrw(tmp, Address(post(buf, 4))); 4689 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3); 4690 subs(len, len, 4); 4691 br(Assembler::GE, L_by4_loop); 4692 adds(len, len, 4); 4693 br(Assembler::LE, L_exit); 4694 BIND(L_by1_loop); 4695 subs(len, len, 1); 4696 ldrb(tmp, Address(post(buf, 1))); 4697 update_byte_crc32(crc, tmp, table0); 4698 br(Assembler::GT, L_by1_loop); 4699 b(L_exit); 4700 4701 align(CodeEntryAlignment); 4702 BIND(L_by16_loop); 4703 subs(len, len, 16); 4704 ldp(tmp, tmp3, Address(post(buf, 16))); 4705 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, false); 4706 update_word_crc32(crc, tmp, tmp2, table0, table1, table2, table3, true); 4707 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, false); 4708 update_word_crc32(crc, tmp3, tmp2, table0, table1, table2, table3, true); 4709 br(Assembler::GE, L_by16_loop); 4710 adds(len, len, 16-4); 4711 br(Assembler::GE, L_by4_loop); 4712 adds(len, len, 4); 4713 br(Assembler::GT, L_by1_loop); 4714 BIND(L_exit); 4715 mvnw(crc, crc); 4716 } 4717 4718 void MacroAssembler::kernel_crc32c_using_crypto_pmull(Register crc, Register buf, 4719 Register len, Register tmp0, Register tmp1, Register tmp2, Register tmp3) { 4720 Label CRC_by4_loop, CRC_by1_loop, CRC_less128, CRC_by128_pre, CRC_by32_loop, CRC_less32, L_exit; 4721 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 4722 4723 subs(tmp0, len, 384); 4724 br(Assembler::GE, CRC_by128_pre); 4725 BIND(CRC_less128); 4726 subs(len, len, 32); 4727 br(Assembler::GE, CRC_by32_loop); 4728 BIND(CRC_less32); 4729 adds(len, len, 32 - 4); 4730 br(Assembler::GE, CRC_by4_loop); 4731 adds(len, len, 4); 4732 br(Assembler::GT, CRC_by1_loop); 4733 b(L_exit); 4734 4735 BIND(CRC_by32_loop); 4736 ldp(tmp0, tmp1, Address(buf)); 4737 crc32cx(crc, crc, tmp0); 4738 ldr(tmp2, Address(buf, 16)); 4739 crc32cx(crc, crc, tmp1); 4740 ldr(tmp3, Address(buf, 24)); 4741 crc32cx(crc, crc, tmp2); 4742 add(buf, buf, 32); 4743 subs(len, len, 32); 4744 crc32cx(crc, crc, tmp3); 4745 br(Assembler::GE, CRC_by32_loop); 4746 cmn(len, (u1)32); 4747 br(Assembler::NE, CRC_less32); 4748 b(L_exit); 4749 4750 BIND(CRC_by4_loop); 4751 ldrw(tmp0, Address(post(buf, 4))); 4752 subs(len, len, 4); 4753 crc32cw(crc, crc, tmp0); 4754 br(Assembler::GE, CRC_by4_loop); 4755 adds(len, len, 4); 4756 br(Assembler::LE, L_exit); 4757 BIND(CRC_by1_loop); 4758 ldrb(tmp0, Address(post(buf, 1))); 4759 subs(len, len, 1); 4760 crc32cb(crc, crc, tmp0); 4761 br(Assembler::GT, CRC_by1_loop); 4762 b(L_exit); 4763 
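  // Bulk path: fold 128 bytes per iteration using carry-less multiplies
  // (see kernel_crc32_common_fold_using_crypto_pmull below), then drain
  // any remaining tail through the shorter loops above.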
4764 BIND(CRC_by128_pre); 4765 kernel_crc32_common_fold_using_crypto_pmull(crc, buf, len, tmp0, tmp1, tmp2, 4766 4*256*sizeof(juint) + 8*sizeof(juint) + 0x50); 4767 mov(crc, 0); 4768 crc32cx(crc, crc, tmp0); 4769 crc32cx(crc, crc, tmp1); 4770 4771 cbnz(len, CRC_less128); 4772 4773 BIND(L_exit); 4774 } 4775 4776 void MacroAssembler::kernel_crc32c_using_crc32c(Register crc, Register buf, 4777 Register len, Register tmp0, Register tmp1, Register tmp2, 4778 Register tmp3) { 4779 Label CRC_by64_loop, CRC_by4_loop, CRC_by1_loop, CRC_less64, CRC_by64_pre, CRC_by32_loop, CRC_less32, L_exit; 4780 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2, tmp3); 4781 4782 subs(len, len, 128); 4783 br(Assembler::GE, CRC_by64_pre); 4784 BIND(CRC_less64); 4785 adds(len, len, 128-32); 4786 br(Assembler::GE, CRC_by32_loop); 4787 BIND(CRC_less32); 4788 adds(len, len, 32-4); 4789 br(Assembler::GE, CRC_by4_loop); 4790 adds(len, len, 4); 4791 br(Assembler::GT, CRC_by1_loop); 4792 b(L_exit); 4793 4794 BIND(CRC_by32_loop); 4795 ldp(tmp0, tmp1, Address(post(buf, 16))); 4796 subs(len, len, 32); 4797 crc32cx(crc, crc, tmp0); 4798 ldr(tmp2, Address(post(buf, 8))); 4799 crc32cx(crc, crc, tmp1); 4800 ldr(tmp3, Address(post(buf, 8))); 4801 crc32cx(crc, crc, tmp2); 4802 crc32cx(crc, crc, tmp3); 4803 br(Assembler::GE, CRC_by32_loop); 4804 cmn(len, (u1)32); 4805 br(Assembler::NE, CRC_less32); 4806 b(L_exit); 4807 4808 BIND(CRC_by4_loop); 4809 ldrw(tmp0, Address(post(buf, 4))); 4810 subs(len, len, 4); 4811 crc32cw(crc, crc, tmp0); 4812 br(Assembler::GE, CRC_by4_loop); 4813 adds(len, len, 4); 4814 br(Assembler::LE, L_exit); 4815 BIND(CRC_by1_loop); 4816 ldrb(tmp0, Address(post(buf, 1))); 4817 subs(len, len, 1); 4818 crc32cb(crc, crc, tmp0); 4819 br(Assembler::GT, CRC_by1_loop); 4820 b(L_exit); 4821 4822 BIND(CRC_by64_pre); 4823 sub(buf, buf, 8); 4824 ldp(tmp0, tmp1, Address(buf, 8)); 4825 crc32cx(crc, crc, tmp0); 4826 ldr(tmp2, Address(buf, 24)); 4827 crc32cx(crc, crc, tmp1); 4828 ldr(tmp3, Address(buf, 32)); 4829 crc32cx(crc, crc, tmp2); 4830 ldr(tmp0, Address(buf, 40)); 4831 crc32cx(crc, crc, tmp3); 4832 ldr(tmp1, Address(buf, 48)); 4833 crc32cx(crc, crc, tmp0); 4834 ldr(tmp2, Address(buf, 56)); 4835 crc32cx(crc, crc, tmp1); 4836 ldr(tmp3, Address(pre(buf, 64))); 4837 4838 b(CRC_by64_loop); 4839 4840 align(CodeEntryAlignment); 4841 BIND(CRC_by64_loop); 4842 subs(len, len, 64); 4843 crc32cx(crc, crc, tmp2); 4844 ldr(tmp0, Address(buf, 8)); 4845 crc32cx(crc, crc, tmp3); 4846 ldr(tmp1, Address(buf, 16)); 4847 crc32cx(crc, crc, tmp0); 4848 ldr(tmp2, Address(buf, 24)); 4849 crc32cx(crc, crc, tmp1); 4850 ldr(tmp3, Address(buf, 32)); 4851 crc32cx(crc, crc, tmp2); 4852 ldr(tmp0, Address(buf, 40)); 4853 crc32cx(crc, crc, tmp3); 4854 ldr(tmp1, Address(buf, 48)); 4855 crc32cx(crc, crc, tmp0); 4856 ldr(tmp2, Address(buf, 56)); 4857 crc32cx(crc, crc, tmp1); 4858 ldr(tmp3, Address(pre(buf, 64))); 4859 br(Assembler::GE, CRC_by64_loop); 4860 4861 // post-loop 4862 crc32cx(crc, crc, tmp2); 4863 crc32cx(crc, crc, tmp3); 4864 4865 sub(len, len, 64); 4866 add(buf, buf, 8); 4867 cmn(len, (u1)128); 4868 br(Assembler::NE, CRC_less64); 4869 BIND(L_exit); 4870 } 4871 4872 /** 4873 * @param crc register containing existing CRC (32-bit) 4874 * @param buf register pointing to input byte buffer (byte*) 4875 * @param len register containing number of bytes 4876 * @param table register that will contain address of CRC table 4877 * @param tmp scratch register 4878 */ 4879 void MacroAssembler::kernel_crc32c(Register crc, Register buf, Register len, 
4880 Register table0, Register table1, Register table2, Register table3, 4881 Register tmp, Register tmp2, Register tmp3) { 4882 if (UseCryptoPmullForCRC32) { 4883 kernel_crc32c_using_crypto_pmull(crc, buf, len, table0, table1, table2, table3); 4884 } else { 4885 kernel_crc32c_using_crc32c(crc, buf, len, table0, table1, table2, table3); 4886 } 4887 } 4888 4889 void MacroAssembler::kernel_crc32_common_fold_using_crypto_pmull(Register crc, Register buf, 4890 Register len, Register tmp0, Register tmp1, Register tmp2, size_t table_offset) { 4891 Label CRC_by128_loop; 4892 assert_different_registers(crc, buf, len, tmp0, tmp1, tmp2); 4893 4894 sub(len, len, 256); 4895 Register table = tmp0; 4896 { 4897 uint64_t offset; 4898 adrp(table, ExternalAddress(StubRoutines::crc_table_addr()), offset); 4899 add(table, table, offset); 4900 } 4901 add(table, table, table_offset); 4902 4903 // Registers v0..v7 are used as data registers. 4904 // Registers v16..v31 are used as tmp registers. 4905 sub(buf, buf, 0x10); 4906 ldrq(v0, Address(buf, 0x10)); 4907 ldrq(v1, Address(buf, 0x20)); 4908 ldrq(v2, Address(buf, 0x30)); 4909 ldrq(v3, Address(buf, 0x40)); 4910 ldrq(v4, Address(buf, 0x50)); 4911 ldrq(v5, Address(buf, 0x60)); 4912 ldrq(v6, Address(buf, 0x70)); 4913 ldrq(v7, Address(pre(buf, 0x80))); 4914 4915 movi(v31, T4S, 0); 4916 mov(v31, S, 0, crc); 4917 eor(v0, T16B, v0, v31); 4918 4919 // Register v16 contains constants from the crc table. 4920 ldrq(v16, Address(table)); 4921 b(CRC_by128_loop); 4922 4923 align(OptoLoopAlignment); 4924 BIND(CRC_by128_loop); 4925 pmull (v17, T1Q, v0, v16, T1D); 4926 pmull2(v18, T1Q, v0, v16, T2D); 4927 ldrq(v0, Address(buf, 0x10)); 4928 eor3(v0, T16B, v17, v18, v0); 4929 4930 pmull (v19, T1Q, v1, v16, T1D); 4931 pmull2(v20, T1Q, v1, v16, T2D); 4932 ldrq(v1, Address(buf, 0x20)); 4933 eor3(v1, T16B, v19, v20, v1); 4934 4935 pmull (v21, T1Q, v2, v16, T1D); 4936 pmull2(v22, T1Q, v2, v16, T2D); 4937 ldrq(v2, Address(buf, 0x30)); 4938 eor3(v2, T16B, v21, v22, v2); 4939 4940 pmull (v23, T1Q, v3, v16, T1D); 4941 pmull2(v24, T1Q, v3, v16, T2D); 4942 ldrq(v3, Address(buf, 0x40)); 4943 eor3(v3, T16B, v23, v24, v3); 4944 4945 pmull (v25, T1Q, v4, v16, T1D); 4946 pmull2(v26, T1Q, v4, v16, T2D); 4947 ldrq(v4, Address(buf, 0x50)); 4948 eor3(v4, T16B, v25, v26, v4); 4949 4950 pmull (v27, T1Q, v5, v16, T1D); 4951 pmull2(v28, T1Q, v5, v16, T2D); 4952 ldrq(v5, Address(buf, 0x60)); 4953 eor3(v5, T16B, v27, v28, v5); 4954 4955 pmull (v29, T1Q, v6, v16, T1D); 4956 pmull2(v30, T1Q, v6, v16, T2D); 4957 ldrq(v6, Address(buf, 0x70)); 4958 eor3(v6, T16B, v29, v30, v6); 4959 4960 // Reuse registers v23, v24. 4961 // Using them won't block the first instruction of the next iteration. 4962 pmull (v23, T1Q, v7, v16, T1D); 4963 pmull2(v24, T1Q, v7, v16, T2D); 4964 ldrq(v7, Address(pre(buf, 0x80))); 4965 eor3(v7, T16B, v23, v24, v7); 4966 4967 subs(len, len, 0x80); 4968 br(Assembler::GE, CRC_by128_loop); 4969 4970 // fold into 512 bits 4971 // Use v31 for constants because v16 can be still in use. 
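  // Each pmull/pmull2 pair below multiplies the low and high 64-bit halves
  // of one data register by the folding constant; eor3 then combines both
  // 128-bit products with the register holding data 512 bits further along.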
4972 ldrq(v31, Address(table, 0x10)); 4973 4974 pmull (v17, T1Q, v0, v31, T1D); 4975 pmull2(v18, T1Q, v0, v31, T2D); 4976 eor3(v0, T16B, v17, v18, v4); 4977 4978 pmull (v19, T1Q, v1, v31, T1D); 4979 pmull2(v20, T1Q, v1, v31, T2D); 4980 eor3(v1, T16B, v19, v20, v5); 4981 4982 pmull (v21, T1Q, v2, v31, T1D); 4983 pmull2(v22, T1Q, v2, v31, T2D); 4984 eor3(v2, T16B, v21, v22, v6); 4985 4986 pmull (v23, T1Q, v3, v31, T1D); 4987 pmull2(v24, T1Q, v3, v31, T2D); 4988 eor3(v3, T16B, v23, v24, v7); 4989 4990 // fold into 128 bits 4991 // Use v17 for constants because v31 can be still in use. 4992 ldrq(v17, Address(table, 0x20)); 4993 pmull (v25, T1Q, v0, v17, T1D); 4994 pmull2(v26, T1Q, v0, v17, T2D); 4995 eor3(v3, T16B, v3, v25, v26); 4996 4997 // Use v18 for constants because v17 can be still in use. 4998 ldrq(v18, Address(table, 0x30)); 4999 pmull (v27, T1Q, v1, v18, T1D); 5000 pmull2(v28, T1Q, v1, v18, T2D); 5001 eor3(v3, T16B, v3, v27, v28); 5002 5003 // Use v19 for constants because v18 can be still in use. 5004 ldrq(v19, Address(table, 0x40)); 5005 pmull (v29, T1Q, v2, v19, T1D); 5006 pmull2(v30, T1Q, v2, v19, T2D); 5007 eor3(v0, T16B, v3, v29, v30); 5008 5009 add(len, len, 0x80); 5010 add(buf, buf, 0x10); 5011 5012 mov(tmp0, v0, D, 0); 5013 mov(tmp1, v0, D, 1); 5014 } 5015 5016 void MacroAssembler::addptr(const Address &dst, int32_t src) { 5017 Address adr; 5018 switch(dst.getMode()) { 5019 case Address::base_plus_offset: 5020 // This is the expected mode, although we allow all the other 5021 // forms below. 5022 adr = form_address(rscratch2, dst.base(), dst.offset(), LogBytesPerWord); 5023 break; 5024 default: 5025 lea(rscratch2, dst); 5026 adr = Address(rscratch2); 5027 break; 5028 } 5029 ldr(rscratch1, adr); 5030 add(rscratch1, rscratch1, src); 5031 str(rscratch1, adr); 5032 } 5033 5034 void MacroAssembler::cmpptr(Register src1, Address src2) { 5035 uint64_t offset; 5036 adrp(rscratch1, src2, offset); 5037 ldr(rscratch1, Address(rscratch1, offset)); 5038 cmp(src1, rscratch1); 5039 } 5040 5041 void MacroAssembler::cmpoop(Register obj1, Register obj2) { 5042 cmp(obj1, obj2); 5043 } 5044 5045 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) { 5046 load_method_holder(rresult, rmethod); 5047 ldr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset())); 5048 } 5049 5050 void MacroAssembler::load_method_holder(Register holder, Register method) { 5051 ldr(holder, Address(method, Method::const_offset())); // ConstMethod* 5052 ldr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool* 5053 ldr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass* 5054 } 5055 5056 // Loads the obj's Klass* into dst. 5057 // Preserves all registers (incl src, rscratch1 and rscratch2). 5058 // Input: 5059 // src - the oop we want to load the klass from. 5060 // dst - output narrow klass. 
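// With compact object headers the narrow Klass* occupies the upper bits of
// the object's mark word; the shift by markWord::klass_shift below leaves
// just those bits in dst.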
5061 void MacroAssembler::load_narrow_klass_compact(Register dst, Register src) { 5062 assert(UseCompactObjectHeaders, "expects UseCompactObjectHeaders"); 5063 ldr(dst, Address(src, oopDesc::mark_offset_in_bytes())); 5064 lsr(dst, dst, markWord::klass_shift); 5065 } 5066 5067 void MacroAssembler::load_klass(Register dst, Register src) { 5068 if (UseCompactObjectHeaders) { 5069 load_narrow_klass_compact(dst, src); 5070 decode_klass_not_null(dst); 5071 } else if (UseCompressedClassPointers) { 5072 ldrw(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5073 decode_klass_not_null(dst); 5074 } else { 5075 ldr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 5076 } 5077 } 5078 5079 void MacroAssembler::restore_cpu_control_state_after_jni(Register tmp1, Register tmp2) { 5080 if (RestoreMXCSROnJNICalls) { 5081 Label OK; 5082 get_fpcr(tmp1); 5083 mov(tmp2, tmp1); 5084 // Set FPCR to the state we need. We do want Round to Nearest. We 5085 // don't want non-IEEE rounding modes or floating-point traps. 5086 bfi(tmp1, zr, 22, 4); // Clear DN, FZ, and Rmode 5087 bfi(tmp1, zr, 8, 5); // Clear exception-control bits (8-12) 5088 bfi(tmp1, zr, 0, 2); // Clear AH:FIZ 5089 eor(tmp2, tmp1, tmp2); 5090 cbz(tmp2, OK); // Only reset FPCR if it's wrong 5091 set_fpcr(tmp1); 5092 bind(OK); 5093 } 5094 } 5095 5096 // ((OopHandle)result).resolve(); 5097 void MacroAssembler::resolve_oop_handle(Register result, Register tmp1, Register tmp2) { 5098 // OopHandle::resolve is an indirection. 5099 access_load_at(T_OBJECT, IN_NATIVE, result, Address(result, 0), tmp1, tmp2); 5100 } 5101 5102 // ((WeakHandle)result).resolve(); 5103 void MacroAssembler::resolve_weak_handle(Register result, Register tmp1, Register tmp2) { 5104 assert_different_registers(result, tmp1, tmp2); 5105 Label resolved; 5106 5107 // A null weak handle resolves to null. 5108 cbz(result, resolved); 5109 5110 // Only 64 bit platforms support GCs that require a tmp register 5111 // WeakHandle::resolve is an indirection like jweak. 
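  // Conceptually this is just result = *(oop*)result, but it is routed
  // through the BarrierSetAssembler with ON_PHANTOM_OOP_REF so a concurrent
  // GC observes the access and a cleared referent resolves to null rather
  // than a stale oop.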
  access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF,
                 result, Address(result), tmp1, tmp2);
  bind(resolved);
}

void MacroAssembler::load_mirror(Register dst, Register method, Register tmp1, Register tmp2) {
  const int mirror_offset = in_bytes(Klass::java_mirror_offset());
  ldr(dst, Address(method, Method::const_offset()));
  ldr(dst, Address(dst, ConstMethod::constants_offset()));
  ldr(dst, Address(dst, ConstantPool::pool_holder_offset()));
  ldr(dst, Address(dst, mirror_offset));
  resolve_oop_handle(dst, tmp1, tmp2);
}

void MacroAssembler::cmp_klass(Register obj, Register klass, Register tmp) {
  assert_different_registers(obj, klass, tmp);
  if (UseCompressedClassPointers) {
    if (UseCompactObjectHeaders) {
      load_narrow_klass_compact(tmp, obj);
    } else {
      ldrw(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
    }
    if (CompressedKlassPointers::base() == nullptr) {
      cmp(klass, tmp, LSL, CompressedKlassPointers::shift());
      return;
    } else if (((uint64_t)CompressedKlassPointers::base() & 0xffffffff) == 0
               && CompressedKlassPointers::shift() == 0) {
      // Only the bottom 32 bits matter
      cmpw(klass, tmp);
      return;
    }
    decode_klass_not_null(tmp);
  } else {
    ldr(tmp, Address(obj, oopDesc::klass_offset_in_bytes()));
  }
  cmp(klass, tmp);
}

void MacroAssembler::cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2) {
  if (UseCompactObjectHeaders) {
    load_narrow_klass_compact(tmp1, obj1);
    load_narrow_klass_compact(tmp2, obj2);
    cmpw(tmp1, tmp2);
  } else if (UseCompressedClassPointers) {
    ldrw(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
    ldrw(tmp2, Address(obj2, oopDesc::klass_offset_in_bytes()));
    cmpw(tmp1, tmp2);
  } else {
    ldr(tmp1, Address(obj1, oopDesc::klass_offset_in_bytes()));
    ldr(tmp2, Address(obj2, oopDesc::klass_offset_in_bytes()));
    cmp(tmp1, tmp2);
  }
}

void MacroAssembler::store_klass(Register dst, Register src) {
  // FIXME: Should this be a store release? Concurrent GCs assume the
  // klass length is valid if the klass field is not null.
  assert(!UseCompactObjectHeaders, "not with compact headers");
  if (UseCompressedClassPointers) {
    encode_klass_not_null(src);
    strw(src, Address(dst, oopDesc::klass_offset_in_bytes()));
  } else {
    str(src, Address(dst, oopDesc::klass_offset_in_bytes()));
  }
}

void MacroAssembler::store_klass_gap(Register dst, Register src) {
  assert(!UseCompactObjectHeaders, "not with compact headers");
  if (UseCompressedClassPointers) {
    // Store to the klass gap in the destination
    strw(src, Address(dst, oopDesc::klass_gap_offset_in_bytes()));
  }
}

// Algorithm must match CompressedOops::encode.
5187 void MacroAssembler::encode_heap_oop(Register d, Register s) { 5188 #ifdef ASSERT 5189 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?"); 5190 #endif 5191 verify_oop_msg(s, "broken oop in encode_heap_oop"); 5192 if (CompressedOops::base() == nullptr) { 5193 if (CompressedOops::shift() != 0) { 5194 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5195 lsr(d, s, LogMinObjAlignmentInBytes); 5196 } else { 5197 mov(d, s); 5198 } 5199 } else { 5200 subs(d, s, rheapbase); 5201 csel(d, d, zr, Assembler::HS); 5202 lsr(d, d, LogMinObjAlignmentInBytes); 5203 5204 /* Old algorithm: is this any worse? 5205 Label nonnull; 5206 cbnz(r, nonnull); 5207 sub(r, r, rheapbase); 5208 bind(nonnull); 5209 lsr(r, r, LogMinObjAlignmentInBytes); 5210 */ 5211 } 5212 } 5213 5214 void MacroAssembler::encode_heap_oop_not_null(Register r) { 5215 #ifdef ASSERT 5216 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?"); 5217 if (CheckCompressedOops) { 5218 Label ok; 5219 cbnz(r, ok); 5220 stop("null oop passed to encode_heap_oop_not_null"); 5221 bind(ok); 5222 } 5223 #endif 5224 verify_oop_msg(r, "broken oop in encode_heap_oop_not_null"); 5225 if (CompressedOops::base() != nullptr) { 5226 sub(r, r, rheapbase); 5227 } 5228 if (CompressedOops::shift() != 0) { 5229 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5230 lsr(r, r, LogMinObjAlignmentInBytes); 5231 } 5232 } 5233 5234 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { 5235 #ifdef ASSERT 5236 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?"); 5237 if (CheckCompressedOops) { 5238 Label ok; 5239 cbnz(src, ok); 5240 stop("null oop passed to encode_heap_oop_not_null2"); 5241 bind(ok); 5242 } 5243 #endif 5244 verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2"); 5245 5246 Register data = src; 5247 if (CompressedOops::base() != nullptr) { 5248 sub(dst, src, rheapbase); 5249 data = dst; 5250 } 5251 if (CompressedOops::shift() != 0) { 5252 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5253 lsr(dst, data, LogMinObjAlignmentInBytes); 5254 data = dst; 5255 } 5256 if (data == src) 5257 mov(dst, src); 5258 } 5259 5260 void MacroAssembler::decode_heap_oop(Register d, Register s) { 5261 #ifdef ASSERT 5262 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); 5263 #endif 5264 if (CompressedOops::base() == nullptr) { 5265 if (CompressedOops::shift() != 0) { 5266 lsl(d, s, CompressedOops::shift()); 5267 } else if (d != s) { 5268 mov(d, s); 5269 } 5270 } else { 5271 Label done; 5272 if (d != s) 5273 mov(d, s); 5274 cbz(s, done); 5275 add(d, rheapbase, s, Assembler::LSL, LogMinObjAlignmentInBytes); 5276 bind(done); 5277 } 5278 verify_oop_msg(d, "broken oop in decode_heap_oop"); 5279 } 5280 5281 void MacroAssembler::decode_heap_oop_not_null(Register r) { 5282 assert (UseCompressedOops, "should only be used for compressed headers"); 5283 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5284 // Cannot assert, unverified entry point counts instructions (see .ad file) 5285 // vtableStubs also counts instructions in pd_code_size_limit. 5286 // Also do not verify_oop as this is called by verify_oop. 
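  // As a scalar sketch, the not-null decode below is simply
  //
  //   r = (CompressedOops::base() != nullptr ? rheapbase : 0) + (r << shift)
  //
  // with no null check, which is what makes it cheaper than decode_heap_oop.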
5287 if (CompressedOops::shift() != 0) { 5288 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5289 if (CompressedOops::base() != nullptr) { 5290 add(r, rheapbase, r, Assembler::LSL, LogMinObjAlignmentInBytes); 5291 } else { 5292 add(r, zr, r, Assembler::LSL, LogMinObjAlignmentInBytes); 5293 } 5294 } else { 5295 assert (CompressedOops::base() == nullptr, "sanity"); 5296 } 5297 } 5298 5299 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { 5300 assert (UseCompressedOops, "should only be used for compressed headers"); 5301 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5302 // Cannot assert, unverified entry point counts instructions (see .ad file) 5303 // vtableStubs also counts instructions in pd_code_size_limit. 5304 // Also do not verify_oop as this is called by verify_oop. 5305 if (CompressedOops::shift() != 0) { 5306 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 5307 if (CompressedOops::base() != nullptr) { 5308 add(dst, rheapbase, src, Assembler::LSL, LogMinObjAlignmentInBytes); 5309 } else { 5310 add(dst, zr, src, Assembler::LSL, LogMinObjAlignmentInBytes); 5311 } 5312 } else { 5313 assert (CompressedOops::base() == nullptr, "sanity"); 5314 if (dst != src) { 5315 mov(dst, src); 5316 } 5317 } 5318 } 5319 5320 MacroAssembler::KlassDecodeMode MacroAssembler::_klass_decode_mode(KlassDecodeNone); 5321 5322 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode() { 5323 assert(Metaspace::initialized(), "metaspace not initialized yet"); 5324 assert(_klass_decode_mode != KlassDecodeNone, "should be initialized"); 5325 return _klass_decode_mode; 5326 } 5327 5328 MacroAssembler::KlassDecodeMode MacroAssembler::klass_decode_mode(address base, int shift, const size_t range) { 5329 assert(UseCompressedClassPointers, "not using compressed class pointers"); 5330 5331 // KlassDecodeMode shouldn't be set already. 5332 assert(_klass_decode_mode == KlassDecodeNone, "set once"); 5333 5334 if (base == nullptr) { 5335 return KlassDecodeZero; 5336 } 5337 5338 if (operand_valid_for_logical_immediate( 5339 /*is32*/false, (uint64_t)base)) { 5340 const uint64_t range_mask = right_n_bits(log2i_ceil(range)); 5341 if (((uint64_t)base & range_mask) == 0) { 5342 return KlassDecodeXor; 5343 } 5344 } 5345 5346 const uint64_t shifted_base = 5347 (uint64_t)base >> shift; 5348 if ((shifted_base & 0xffff0000ffffffff) == 0) { 5349 return KlassDecodeMovk; 5350 } 5351 5352 // No valid encoding. 5353 return KlassDecodeNone; 5354 } 5355 5356 // Check if one of the above decoding modes will work for given base, shift and range. 
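// For reference, the decode computed by each mode (base and shift are the
// CompressedKlassPointers values, nk is the narrow klass):
//   KlassDecodeZero: klass = nk << shift
//   KlassDecodeXor:  klass = (nk << shift) ^ base
//   KlassDecodeMovk: klass = (nk | ((base >> shift) & (0xffffULL << 32))) << shift
// where KlassDecodeMovk relies on base >> shift having bits set only in
// [47:32], as checked above.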
5357 bool MacroAssembler::check_klass_decode_mode(address base, int shift, const size_t range) { 5358 return klass_decode_mode(base, shift, range) != KlassDecodeNone; 5359 } 5360 5361 bool MacroAssembler::set_klass_decode_mode(address base, int shift, const size_t range) { 5362 _klass_decode_mode = klass_decode_mode(base, shift, range); 5363 return _klass_decode_mode != KlassDecodeNone; 5364 } 5365 5366 void MacroAssembler::encode_klass_not_null(Register dst, Register src) { 5367 switch (klass_decode_mode()) { 5368 case KlassDecodeZero: 5369 if (CompressedKlassPointers::shift() != 0) { 5370 lsr(dst, src, CompressedKlassPointers::shift()); 5371 } else { 5372 if (dst != src) mov(dst, src); 5373 } 5374 break; 5375 5376 case KlassDecodeXor: 5377 if (CompressedKlassPointers::shift() != 0) { 5378 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 5379 lsr(dst, dst, CompressedKlassPointers::shift()); 5380 } else { 5381 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 5382 } 5383 break; 5384 5385 case KlassDecodeMovk: 5386 if (CompressedKlassPointers::shift() != 0) { 5387 ubfx(dst, src, CompressedKlassPointers::shift(), 32); 5388 } else { 5389 movw(dst, src); 5390 } 5391 break; 5392 5393 case KlassDecodeNone: 5394 ShouldNotReachHere(); 5395 break; 5396 } 5397 } 5398 5399 void MacroAssembler::encode_klass_not_null(Register r) { 5400 encode_klass_not_null(r, r); 5401 } 5402 5403 void MacroAssembler::decode_klass_not_null(Register dst, Register src) { 5404 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 5405 5406 switch (klass_decode_mode()) { 5407 case KlassDecodeZero: 5408 if (CompressedKlassPointers::shift() != 0) { 5409 lsl(dst, src, CompressedKlassPointers::shift()); 5410 } else { 5411 if (dst != src) mov(dst, src); 5412 } 5413 break; 5414 5415 case KlassDecodeXor: 5416 if (CompressedKlassPointers::shift() != 0) { 5417 lsl(dst, src, CompressedKlassPointers::shift()); 5418 eor(dst, dst, (uint64_t)CompressedKlassPointers::base()); 5419 } else { 5420 eor(dst, src, (uint64_t)CompressedKlassPointers::base()); 5421 } 5422 break; 5423 5424 case KlassDecodeMovk: { 5425 const uint64_t shifted_base = 5426 (uint64_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift(); 5427 5428 if (dst != src) movw(dst, src); 5429 movk(dst, shifted_base >> 32, 32); 5430 5431 if (CompressedKlassPointers::shift() != 0) { 5432 lsl(dst, dst, CompressedKlassPointers::shift()); 5433 } 5434 5435 break; 5436 } 5437 5438 case KlassDecodeNone: 5439 ShouldNotReachHere(); 5440 break; 5441 } 5442 } 5443 5444 void MacroAssembler::decode_klass_not_null(Register r) { 5445 decode_klass_not_null(r, r); 5446 } 5447 5448 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { 5449 #ifdef ASSERT 5450 { 5451 ThreadInVMfromUnknown tiv; 5452 assert (UseCompressedOops, "should only be used for compressed oops"); 5453 assert (Universe::heap() != nullptr, "java heap should be initialized"); 5454 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5455 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop"); 5456 } 5457 #endif 5458 int oop_index = oop_recorder()->find_index(obj); 5459 InstructionMark im(this); 5460 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5461 code_section()->relocate(inst_mark(), rspec); 5462 movz(dst, 0xDEAD, 16); 5463 movk(dst, 0xBEEF); 5464 } 5465 5466 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { 5467 assert (UseCompressedClassPointers, "should only be used for compressed 
headers"); 5468 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5469 int index = oop_recorder()->find_index(k); 5470 assert(! Universe::heap()->is_in(k), "should not be an oop"); 5471 5472 InstructionMark im(this); 5473 RelocationHolder rspec = metadata_Relocation::spec(index); 5474 code_section()->relocate(inst_mark(), rspec); 5475 narrowKlass nk = CompressedKlassPointers::encode(k); 5476 movz(dst, (nk >> 16), 16); 5477 movk(dst, nk & 0xffff); 5478 } 5479 5480 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, 5481 Register dst, Address src, 5482 Register tmp1, Register tmp2) { 5483 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5484 decorators = AccessInternal::decorator_fixup(decorators, type); 5485 bool as_raw = (decorators & AS_RAW) != 0; 5486 if (as_raw) { 5487 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, tmp2); 5488 } else { 5489 bs->load_at(this, decorators, type, dst, src, tmp1, tmp2); 5490 } 5491 } 5492 5493 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, 5494 Address dst, Register val, 5495 Register tmp1, Register tmp2, Register tmp3) { 5496 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5497 decorators = AccessInternal::decorator_fixup(decorators, type); 5498 bool as_raw = (decorators & AS_RAW) != 0; 5499 if (as_raw) { 5500 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5501 } else { 5502 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 5503 } 5504 } 5505 5506 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, 5507 Register tmp2, DecoratorSet decorators) { 5508 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, tmp2); 5509 } 5510 5511 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, 5512 Register tmp2, DecoratorSet decorators) { 5513 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, tmp2); 5514 } 5515 5516 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1, 5517 Register tmp2, Register tmp3, DecoratorSet decorators) { 5518 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3); 5519 } 5520 5521 // Used for storing nulls. 5522 void MacroAssembler::store_heap_oop_null(Address dst) { 5523 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg); 5524 } 5525 5526 Address MacroAssembler::allocate_metadata_address(Metadata* obj) { 5527 assert(oop_recorder() != nullptr, "this assembler needs a Recorder"); 5528 int index = oop_recorder()->allocate_metadata_index(obj); 5529 RelocationHolder rspec = metadata_Relocation::spec(index); 5530 return Address((address)obj, rspec); 5531 } 5532 5533 // Move an oop into a register. 
5534 void MacroAssembler::movoop(Register dst, jobject obj) { 5535 int oop_index; 5536 if (obj == nullptr) { 5537 oop_index = oop_recorder()->allocate_oop_index(obj); 5538 } else { 5539 #ifdef ASSERT 5540 { 5541 ThreadInVMfromUnknown tiv; 5542 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "should be real oop"); 5543 } 5544 #endif 5545 oop_index = oop_recorder()->find_index(obj); 5546 } 5547 RelocationHolder rspec = oop_Relocation::spec(oop_index); 5548 5549 if (BarrierSet::barrier_set()->barrier_set_assembler()->supports_instruction_patching()) { 5550 mov(dst, Address((address)obj, rspec)); 5551 } else { 5552 address dummy = address(uintptr_t(pc()) & -wordSize); // A nearby aligned address 5553 ldr_constant(dst, Address(dummy, rspec)); 5554 } 5555 5556 } 5557 5558 // Move a metadata address into a register. 5559 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) { 5560 int oop_index; 5561 if (obj == nullptr) { 5562 oop_index = oop_recorder()->allocate_metadata_index(obj); 5563 } else { 5564 oop_index = oop_recorder()->find_index(obj); 5565 } 5566 RelocationHolder rspec = metadata_Relocation::spec(oop_index); 5567 mov(dst, Address((address)obj, rspec)); 5568 } 5569 5570 Address MacroAssembler::constant_oop_address(jobject obj) { 5571 #ifdef ASSERT 5572 { 5573 ThreadInVMfromUnknown tiv; 5574 assert(oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 5575 assert(Universe::heap()->is_in(JNIHandles::resolve(obj)), "not an oop"); 5576 } 5577 #endif 5578 int oop_index = oop_recorder()->find_index(obj); 5579 return Address((address)obj, oop_Relocation::spec(oop_index)); 5580 } 5581 5582 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes. 5583 void MacroAssembler::tlab_allocate(Register obj, 5584 Register var_size_in_bytes, 5585 int con_size_in_bytes, 5586 Register t1, 5587 Register t2, 5588 Label& slow_case) { 5589 BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler(); 5590 bs->tlab_allocate(this, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case); 5591 } 5592 5593 void MacroAssembler::inc_held_monitor_count(Register tmp) { 5594 Address dst(rthread, JavaThread::held_monitor_count_offset()); 5595 #ifdef ASSERT 5596 ldr(tmp, dst); 5597 increment(tmp); 5598 str(tmp, dst); 5599 Label ok; 5600 tbz(tmp, 63, ok); 5601 STOP("assert(held monitor count underflow)"); 5602 should_not_reach_here(); 5603 bind(ok); 5604 #else 5605 increment(dst); 5606 #endif 5607 } 5608 5609 void MacroAssembler::dec_held_monitor_count(Register tmp) { 5610 Address dst(rthread, JavaThread::held_monitor_count_offset()); 5611 #ifdef ASSERT 5612 ldr(tmp, dst); 5613 decrement(tmp); 5614 str(tmp, dst); 5615 Label ok; 5616 tbz(tmp, 63, ok); 5617 STOP("assert(held monitor count underflow)"); 5618 should_not_reach_here(); 5619 bind(ok); 5620 #else 5621 decrement(dst); 5622 #endif 5623 } 5624 5625 void MacroAssembler::verify_tlab() { 5626 #ifdef ASSERT 5627 if (UseTLAB && VerifyOops) { 5628 Label next, ok; 5629 5630 stp(rscratch2, rscratch1, Address(pre(sp, -16))); 5631 5632 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); 5633 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_start_offset()))); 5634 cmp(rscratch2, rscratch1); 5635 br(Assembler::HS, next); 5636 STOP("assert(top >= start)"); 5637 should_not_reach_here(); 5638 5639 bind(next); 5640 ldr(rscratch2, Address(rthread, in_bytes(JavaThread::tlab_end_offset()))); 5641 ldr(rscratch1, Address(rthread, in_bytes(JavaThread::tlab_top_offset()))); 5642 
    cmp(rscratch2, rscratch1);
    br(Assembler::HS, ok);
    STOP("assert(top <= end)");
    should_not_reach_here();

    bind(ok);
    ldp(rscratch2, rscratch1, Address(post(sp, 16)));
  }
#endif
}

// Writes to successive stack pages until the given offset is reached, to
// check for stack overflow + shadow pages. This clobbers tmp.
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  assert_different_registers(tmp, size, rscratch1);
  mov(tmp, sp);
  // Bang stack for total size given plus shadow page size.
  // Bang one page at a time because a large size can bang beyond the yellow
  // and red zones.
  Label loop;
  mov(rscratch1, (int)os::vm_page_size());
  bind(loop);
  lea(tmp, Address(tmp, -(int)os::vm_page_size()));
  subsw(size, size, rscratch1);
  str(size, Address(tmp));
  br(Assembler::GT, loop);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again. (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.) Skip this address by starting at i=1, and
  // touch a few more pages below. N.B. It is important to touch all
  // the way down to and including i=StackShadowPages.
  for (int i = 0; i < (int)(StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()) - 1; i++) {
    // this could be any sized move but this can be a debugging crumb,
    // so the bigger the better.
    lea(tmp, Address(tmp, -(int)os::vm_page_size()));
    str(size, Address(tmp));
  }
}

// Move the address of the polling page into dest.
void MacroAssembler::get_polling_page(Register dest, relocInfo::relocType rtype) {
  ldr(dest, Address(rthread, JavaThread::polling_page_offset()));
}

// Read the polling page. The address of the polling page must
// already be in r.
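// The ldrw below loads into zr, so its only effect is the memory access
// itself: it faults if the polling page has been protected at a safepoint,
// and the relocation recorded at the instruction lets the signal handler
// identify the faulting pc as a safepoint poll.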
5690 address MacroAssembler::read_polling_page(Register r, relocInfo::relocType rtype) { 5691 address mark; 5692 { 5693 InstructionMark im(this); 5694 code_section()->relocate(inst_mark(), rtype); 5695 ldrw(zr, Address(r, 0)); 5696 mark = inst_mark(); 5697 } 5698 verify_cross_modify_fence_not_required(); 5699 return mark; 5700 } 5701 5702 void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_offset) { 5703 relocInfo::relocType rtype = dest.rspec().reloc()->type(); 5704 uint64_t low_page = (uint64_t)CodeCache::low_bound() >> 12; 5705 uint64_t high_page = (uint64_t)(CodeCache::high_bound()-1) >> 12; 5706 uint64_t dest_page = (uint64_t)dest.target() >> 12; 5707 int64_t offset_low = dest_page - low_page; 5708 int64_t offset_high = dest_page - high_page; 5709 5710 assert(is_valid_AArch64_address(dest.target()), "bad address"); 5711 assert(dest.getMode() == Address::literal, "ADRP must be applied to a literal address"); 5712 5713 InstructionMark im(this); 5714 code_section()->relocate(inst_mark(), dest.rspec()); 5715 // 8143067: Ensure that the adrp can reach the dest from anywhere within 5716 // the code cache so that if it is relocated we know it will still reach 5717 if (offset_high >= -(1<<20) && offset_low < (1<<20)) { 5718 _adrp(reg1, dest.target()); 5719 } else { 5720 uint64_t target = (uint64_t)dest.target(); 5721 uint64_t adrp_target 5722 = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL); 5723 5724 _adrp(reg1, (address)adrp_target); 5725 movk(reg1, target >> 32, 32); 5726 } 5727 byte_offset = (uint64_t)dest.target() & 0xfff; 5728 } 5729 5730 void MacroAssembler::load_byte_map_base(Register reg) { 5731 CardTable::CardValue* byte_map_base = 5732 ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base(); 5733 5734 // Strictly speaking the byte_map_base isn't an address at all, and it might 5735 // even be negative. It is thus materialised as a constant. 
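  // (Card marking indexes the table as, roughly,
  //    byte_map_base[uintptr_t(addr) >> card_shift]
  // so the biased base only has to produce a valid address once the heap
  // address is added in.)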
#if INCLUDE_CDS
  if (AOTCodeCache::is_on_for_write()) {
    // AOT code needs relocation info for the card table base
    lea(reg, ExternalAddress(reinterpret_cast<address>(byte_map_base)));
  } else {
#endif
    mov(reg, (uint64_t)byte_map_base);
#if INCLUDE_CDS
  }
#endif
}

void MacroAssembler::load_aotrc_address(Register reg, address a) {
#if INCLUDE_CDS
  assert(AOTRuntimeConstants::contains(a), "address out of range for data area");
  if (AOTCodeCache::is_on_for_write()) {
    // all aotrc field addresses should be registered in the AOTCodeCache address table
    lea(reg, ExternalAddress(a));
  } else {
    mov(reg, (uint64_t)a);
  }
#else
  ShouldNotReachHere();
#endif
}

void MacroAssembler::build_frame(int framesize) {
  assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
  assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
  protect_return_address();
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    sub(sp, sp, framesize);
    stp(rfp, lr, Address(sp, framesize - 2 * wordSize));
    if (PreserveFramePointer) add(rfp, sp, framesize - 2 * wordSize);
  } else {
    stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
    if (PreserveFramePointer) mov(rfp, sp);
    if (framesize < ((1 << 12) + 2 * wordSize))
      sub(sp, sp, framesize - 2 * wordSize);
    else {
      mov(rscratch1, framesize - 2 * wordSize);
      sub(sp, sp, rscratch1);
    }
  }
  verify_cross_modify_fence_not_required();
}

void MacroAssembler::remove_frame(int framesize) {
  assert(framesize >= 2 * wordSize, "framesize must include space for FP/LR");
  assert(framesize % (2*wordSize) == 0, "must preserve 2*wordSize alignment");
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    ldp(rfp, lr, Address(sp, framesize - 2 * wordSize));
    add(sp, sp, framesize);
  } else {
    if (framesize < ((1 << 12) + 2 * wordSize))
      add(sp, sp, framesize - 2 * wordSize);
    else {
      mov(rscratch1, framesize - 2 * wordSize);
      add(sp, sp, rscratch1);
    }
    ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
  }
  authenticate_return_address();
}


// This method counts leading positive bytes (highest bit not set) in the
// provided byte array.
address MacroAssembler::count_positives(Register ary1, Register len, Register result) {
  // The simple and most common case - a small aligned array that is not at
  // the end of a memory page - is handled here. All other cases are in the stub.
  Label LOOP, END, STUB, STUB_LONG, SET_RESULT, DONE;
  const uint64_t UPPER_BIT_MASK=0x8080808080808080;
  assert_different_registers(ary1, len, result);

  mov(result, len);
  cmpw(len, 0);
  br(LE, DONE);
  cmpw(len, 4 * wordSize);
  br(GE, STUB_LONG); // size >= 32 then go to stub

  int shift = 64 - exact_log2(os::vm_page_size());
  lsl(rscratch1, ary1, shift);
  mov(rscratch2, (size_t)(4 * wordSize) << shift);
  adds(rscratch2, rscratch1, rscratch2);  // At end of page?
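  // The shifts above move the array's in-page offset into the top bits of
  // the registers, so the add sets the carry flag exactly when
  // offset_in_page + 4 * wordSize crosses the page boundary.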
5820 br(CS, STUB); // at the end of page then go to stub 5821 subs(len, len, wordSize); 5822 br(LT, END); 5823 5824 BIND(LOOP); 5825 ldr(rscratch1, Address(post(ary1, wordSize))); 5826 tst(rscratch1, UPPER_BIT_MASK); 5827 br(NE, SET_RESULT); 5828 subs(len, len, wordSize); 5829 br(GE, LOOP); 5830 cmpw(len, -wordSize); 5831 br(EQ, DONE); 5832 5833 BIND(END); 5834 ldr(rscratch1, Address(ary1)); 5835 sub(rscratch2, zr, len, LSL, 3); // LSL 3 is to get bits from bytes 5836 lslv(rscratch1, rscratch1, rscratch2); 5837 tst(rscratch1, UPPER_BIT_MASK); 5838 br(NE, SET_RESULT); 5839 b(DONE); 5840 5841 BIND(STUB); 5842 RuntimeAddress count_pos = RuntimeAddress(StubRoutines::aarch64::count_positives()); 5843 assert(count_pos.target() != nullptr, "count_positives stub has not been generated"); 5844 address tpc1 = trampoline_call(count_pos); 5845 if (tpc1 == nullptr) { 5846 DEBUG_ONLY(reset_labels(STUB_LONG, SET_RESULT, DONE)); 5847 postcond(pc() == badAddress); 5848 return nullptr; 5849 } 5850 b(DONE); 5851 5852 BIND(STUB_LONG); 5853 RuntimeAddress count_pos_long = RuntimeAddress(StubRoutines::aarch64::count_positives_long()); 5854 assert(count_pos_long.target() != nullptr, "count_positives_long stub has not been generated"); 5855 address tpc2 = trampoline_call(count_pos_long); 5856 if (tpc2 == nullptr) { 5857 DEBUG_ONLY(reset_labels(SET_RESULT, DONE)); 5858 postcond(pc() == badAddress); 5859 return nullptr; 5860 } 5861 b(DONE); 5862 5863 BIND(SET_RESULT); 5864 5865 add(len, len, wordSize); 5866 sub(result, result, len); 5867 5868 BIND(DONE); 5869 postcond(pc() != badAddress); 5870 return pc(); 5871 } 5872 5873 // Clobbers: rscratch1, rscratch2, rflags 5874 // May also clobber v0-v7 when (!UseSimpleArrayEquals && UseSIMDForArrayEquals) 5875 address MacroAssembler::arrays_equals(Register a1, Register a2, Register tmp3, 5876 Register tmp4, Register tmp5, Register result, 5877 Register cnt1, int elem_size) { 5878 Label DONE, SAME; 5879 Register tmp1 = rscratch1; 5880 Register tmp2 = rscratch2; 5881 int elem_per_word = wordSize/elem_size; 5882 int log_elem_size = exact_log2(elem_size); 5883 int klass_offset = arrayOopDesc::klass_offset_in_bytes(); 5884 int length_offset = arrayOopDesc::length_offset_in_bytes(); 5885 int base_offset 5886 = arrayOopDesc::base_offset_in_bytes(elem_size == 2 ? T_CHAR : T_BYTE); 5887 // When the length offset is not aligned to 8 bytes, 5888 // then we align it down. This is valid because the new 5889 // offset will always be the klass which is the same 5890 // for type arrays. 5891 int start_offset = align_down(length_offset, BytesPerWord); 5892 int extra_length = base_offset - start_offset; 5893 assert(start_offset == length_offset || start_offset == klass_offset, 5894 "start offset must be 8-byte-aligned or be the klass offset"); 5895 assert(base_offset != start_offset, "must include the length field"); 5896 extra_length = extra_length / elem_size; // We count in elements, not bytes. 5897 int stubBytesThreshold = 3 * 64 + (UseSIMDForArrayEquals ? 0 : 16); 5898 5899 assert(elem_size == 1 || elem_size == 2, "must be char or byte"); 5900 assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2); 5901 5902 #ifndef PRODUCT 5903 { 5904 const char kind = (elem_size == 2) ? 'U' : 'L'; 5905 char comment[64]; 5906 snprintf(comment, sizeof comment, "array_equals%c{", kind); 5907 BLOCK_COMMENT(comment); 5908 } 5909 #endif 5910 5911 // if (a1 == a2) 5912 // return true; 5913 cmpoop(a1, a2); // May have read barriers for a1 and a2. 
5914 br(EQ, SAME); 5915 5916 if (UseSimpleArrayEquals) { 5917 Label NEXT_WORD, SHORT, TAIL03, TAIL01, A_MIGHT_BE_NULL, A_IS_NOT_NULL; 5918 // if (a1 == nullptr || a2 == nullptr) 5919 // return false; 5920 // a1 & a2 == 0 means (some-pointer is null) or 5921 // (very-rare-or-even-probably-impossible-pointer-values) 5922 // so, we can save one branch in most cases 5923 tst(a1, a2); 5924 mov(result, false); 5925 br(EQ, A_MIGHT_BE_NULL); 5926 // if (a1.length != a2.length) 5927 // return false; 5928 bind(A_IS_NOT_NULL); 5929 ldrw(cnt1, Address(a1, length_offset)); 5930 // Increase loop counter by diff between base- and actual start-offset. 5931 addw(cnt1, cnt1, extra_length); 5932 lea(a1, Address(a1, start_offset)); 5933 lea(a2, Address(a2, start_offset)); 5934 // Check for short strings, i.e. smaller than wordSize. 5935 subs(cnt1, cnt1, elem_per_word); 5936 br(Assembler::LT, SHORT); 5937 // Main 8 byte comparison loop. 5938 bind(NEXT_WORD); { 5939 ldr(tmp1, Address(post(a1, wordSize))); 5940 ldr(tmp2, Address(post(a2, wordSize))); 5941 subs(cnt1, cnt1, elem_per_word); 5942 eor(tmp5, tmp1, tmp2); 5943 cbnz(tmp5, DONE); 5944 } br(GT, NEXT_WORD); 5945 // Last longword. In the case where length == 4 we compare the 5946 // same longword twice, but that's still faster than another 5947 // conditional branch. 5948 // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when 5949 // length == 4. 5950 if (log_elem_size > 0) 5951 lsl(cnt1, cnt1, log_elem_size); 5952 ldr(tmp3, Address(a1, cnt1)); 5953 ldr(tmp4, Address(a2, cnt1)); 5954 eor(tmp5, tmp3, tmp4); 5955 cbnz(tmp5, DONE); 5956 b(SAME); 5957 bind(A_MIGHT_BE_NULL); 5958 // in case both a1 and a2 are not-null, proceed with loads 5959 cbz(a1, DONE); 5960 cbz(a2, DONE); 5961 b(A_IS_NOT_NULL); 5962 bind(SHORT); 5963 5964 tbz(cnt1, 2 - log_elem_size, TAIL03); // 0-7 bytes left. 5965 { 5966 ldrw(tmp1, Address(post(a1, 4))); 5967 ldrw(tmp2, Address(post(a2, 4))); 5968 eorw(tmp5, tmp1, tmp2); 5969 cbnzw(tmp5, DONE); 5970 } 5971 bind(TAIL03); 5972 tbz(cnt1, 1 - log_elem_size, TAIL01); // 0-3 bytes left. 5973 { 5974 ldrh(tmp3, Address(post(a1, 2))); 5975 ldrh(tmp4, Address(post(a2, 2))); 5976 eorw(tmp5, tmp3, tmp4); 5977 cbnzw(tmp5, DONE); 5978 } 5979 bind(TAIL01); 5980 if (elem_size == 1) { // Only needed when comparing byte arrays. 5981 tbz(cnt1, 0, SAME); // 0-1 bytes left. 5982 { 5983 ldrb(tmp1, a1); 5984 ldrb(tmp2, a2); 5985 eorw(tmp5, tmp1, tmp2); 5986 cbnzw(tmp5, DONE); 5987 } 5988 } 5989 } else { 5990 Label NEXT_DWORD, SHORT, TAIL, TAIL2, STUB, 5991 CSET_EQ, LAST_CHECK; 5992 mov(result, false); 5993 cbz(a1, DONE); 5994 ldrw(cnt1, Address(a1, length_offset)); 5995 cbz(a2, DONE); 5996 // Increase loop counter by diff between base- and actual start-offset. 
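    // Counting the length word (and possibly the klass word) as extra
    // elements makes the length check implicit: the first 8-byte block
    // compared below contains the length field itself, so arrays of
    // different lengths compare unequal without a separate test.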
5997 addw(cnt1, cnt1, extra_length); 5998 5999 // on most CPUs a2 is still "locked"(surprisingly) in ldrw and it's 6000 // faster to perform another branch before comparing a1 and a2 6001 cmp(cnt1, (u1)elem_per_word); 6002 br(LE, SHORT); // short or same 6003 ldr(tmp3, Address(pre(a1, start_offset))); 6004 subs(zr, cnt1, stubBytesThreshold); 6005 br(GE, STUB); 6006 ldr(tmp4, Address(pre(a2, start_offset))); 6007 sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size); 6008 6009 // Main 16 byte comparison loop with 2 exits 6010 bind(NEXT_DWORD); { 6011 ldr(tmp1, Address(pre(a1, wordSize))); 6012 ldr(tmp2, Address(pre(a2, wordSize))); 6013 subs(cnt1, cnt1, 2 * elem_per_word); 6014 br(LE, TAIL); 6015 eor(tmp4, tmp3, tmp4); 6016 cbnz(tmp4, DONE); 6017 ldr(tmp3, Address(pre(a1, wordSize))); 6018 ldr(tmp4, Address(pre(a2, wordSize))); 6019 cmp(cnt1, (u1)elem_per_word); 6020 br(LE, TAIL2); 6021 cmp(tmp1, tmp2); 6022 } br(EQ, NEXT_DWORD); 6023 b(DONE); 6024 6025 bind(TAIL); 6026 eor(tmp4, tmp3, tmp4); 6027 eor(tmp2, tmp1, tmp2); 6028 lslv(tmp2, tmp2, tmp5); 6029 orr(tmp5, tmp4, tmp2); 6030 cmp(tmp5, zr); 6031 b(CSET_EQ); 6032 6033 bind(TAIL2); 6034 eor(tmp2, tmp1, tmp2); 6035 cbnz(tmp2, DONE); 6036 b(LAST_CHECK); 6037 6038 bind(STUB); 6039 ldr(tmp4, Address(pre(a2, start_offset))); 6040 if (elem_size == 2) { // convert to byte counter 6041 lsl(cnt1, cnt1, 1); 6042 } 6043 eor(tmp5, tmp3, tmp4); 6044 cbnz(tmp5, DONE); 6045 RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_array_equals()); 6046 assert(stub.target() != nullptr, "array_equals_long stub has not been generated"); 6047 address tpc = trampoline_call(stub); 6048 if (tpc == nullptr) { 6049 DEBUG_ONLY(reset_labels(SHORT, LAST_CHECK, CSET_EQ, SAME, DONE)); 6050 postcond(pc() == badAddress); 6051 return nullptr; 6052 } 6053 b(DONE); 6054 6055 // (a1 != null && a2 == null) || (a1 != null && a2 != null && a1 == a2) 6056 // so, if a2 == null => return false(0), else return true, so we can return a2 6057 mov(result, a2); 6058 b(DONE); 6059 bind(SHORT); 6060 sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size); 6061 ldr(tmp3, Address(a1, start_offset)); 6062 ldr(tmp4, Address(a2, start_offset)); 6063 bind(LAST_CHECK); 6064 eor(tmp4, tmp3, tmp4); 6065 lslv(tmp5, tmp4, tmp5); 6066 cmp(tmp5, zr); 6067 bind(CSET_EQ); 6068 cset(result, EQ); 6069 b(DONE); 6070 } 6071 6072 bind(SAME); 6073 mov(result, true); 6074 // That's it. 6075 bind(DONE); 6076 6077 BLOCK_COMMENT("} array_equals"); 6078 postcond(pc() != badAddress); 6079 return pc(); 6080 } 6081 6082 // Compare Strings 6083 6084 // For Strings we're passed the address of the first characters in a1 6085 // and a2 and the length in cnt1. 6086 // There are two implementations. For arrays >= 8 bytes, all 6087 // comparisons (including the final one, which may overlap) are 6088 // performed 8 bytes at a time. For strings < 8 bytes, we compare a 6089 // halfword, then a short, and then a byte. 6090 6091 void MacroAssembler::string_equals(Register a1, Register a2, 6092 Register result, Register cnt1) 6093 { 6094 Label SAME, DONE, SHORT, NEXT_WORD; 6095 Register tmp1 = rscratch1; 6096 Register tmp2 = rscratch2; 6097 Register cnt2 = tmp2; // cnt2 only used in array length compare 6098 6099 assert_different_registers(a1, a2, result, cnt1, rscratch1, rscratch2); 6100 6101 #ifndef PRODUCT 6102 { 6103 char comment[64]; 6104 snprintf(comment, sizeof comment, "{string_equalsL"); 6105 BLOCK_COMMENT(comment); 6106 } 6107 #endif 6108 6109 mov(result, false); 6110 6111 // Check for short strings, i.e. smaller than wordSize. 
  subs(cnt1, cnt1, wordSize);
  br(Assembler::LT, SHORT);
  // Main 8 byte comparison loop.
  bind(NEXT_WORD); {
    ldr(tmp1, Address(post(a1, wordSize)));
    ldr(tmp2, Address(post(a2, wordSize)));
    subs(cnt1, cnt1, wordSize);
    eor(tmp1, tmp1, tmp2);
    cbnz(tmp1, DONE);
  } br(GT, NEXT_WORD);
  // Last longword. In the case where length == 4 we compare the
  // same longword twice, but that's still faster than another
  // conditional branch.
  // cnt1 could be 0, -1, -2, -3, -4 for chars; -4 only happens when
  // length == 4.
  ldr(tmp1, Address(a1, cnt1));
  ldr(tmp2, Address(a2, cnt1));
  eor(tmp2, tmp1, tmp2);
  cbnz(tmp2, DONE);
  b(SAME);

  bind(SHORT);
  Label TAIL03, TAIL01;

  tbz(cnt1, 2, TAIL03); // 0-7 bytes left.
  {
    ldrw(tmp1, Address(post(a1, 4)));
    ldrw(tmp2, Address(post(a2, 4)));
    eorw(tmp1, tmp1, tmp2);
    cbnzw(tmp1, DONE);
  }
  bind(TAIL03);
  tbz(cnt1, 1, TAIL01); // 0-3 bytes left.
  {
    ldrh(tmp1, Address(post(a1, 2)));
    ldrh(tmp2, Address(post(a2, 2)));
    eorw(tmp1, tmp1, tmp2);
    cbnzw(tmp1, DONE);
  }
  bind(TAIL01);
  tbz(cnt1, 0, SAME); // 0-1 bytes left.
  {
    ldrb(tmp1, a1);
    ldrb(tmp2, a2);
    eorw(tmp1, tmp1, tmp2);
    cbnzw(tmp1, DONE);
  }
  // Arrays are equal.
  bind(SAME);
  mov(result, true);

  // That's it.
  bind(DONE);
  BLOCK_COMMENT("} string_equals");
}


// The size of the blocks erased by the zero_blocks stub. We must
// handle anything smaller than this ourselves in zero_words().
const int MacroAssembler::zero_words_block_size = 8;

// zero_words() is used by C2 ClearArray patterns and by
// C1_MacroAssembler. It is as small as possible, handling small word
// counts locally and delegating anything larger to the zero_blocks
// stub. It is expanded many times in compiled code, so it is
// important to keep it short.

// ptr:   Address of a buffer to be zeroed.
// cnt:   Count in HeapWords.
//
// ptr, cnt, rscratch1, and rscratch2 are clobbered.
address MacroAssembler::zero_words(Register ptr, Register cnt)
{
  assert(is_power_of_2(zero_words_block_size), "adjust this");

  BLOCK_COMMENT("zero_words {");
  assert(ptr == r10 && cnt == r11, "mismatch in register usage");
  RuntimeAddress zero_blocks = RuntimeAddress(StubRoutines::aarch64::zero_blocks());
  assert(zero_blocks.target() != nullptr, "zero_blocks stub has not been generated");

  subs(rscratch1, cnt, zero_words_block_size);
  Label around;
  br(LO, around);
  {
    // Make sure this is a C2 compilation. C1 allocates space only for
    // trampoline stubs generated by Call LIR ops, and in any case it
    // makes sense for a C1 compilation task to proceed as quickly as
    // possible.
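    // (trampoline_call may return nullptr when trampoline stub space cannot
    // be allocated, a bailout only a C2 compilation can recover from;
    // far_call materialises the target inline and cannot fail.)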
6202 CompileTask* task; 6203 if (StubRoutines::aarch64::complete() 6204 && Thread::current()->is_Compiler_thread() 6205 && (task = ciEnv::current()->task()) 6206 && is_c2_compile(task->comp_level())) { 6207 address tpc = trampoline_call(zero_blocks); 6208 if (tpc == nullptr) { 6209 DEBUG_ONLY(reset_labels(around)); 6210 return nullptr; 6211 } 6212 } else { 6213 far_call(zero_blocks); 6214 } 6215 } 6216 bind(around); 6217 6218 // We have a few words left to do. zero_blocks has adjusted r10 and r11 6219 // for us. 6220 for (int i = zero_words_block_size >> 1; i > 1; i >>= 1) { 6221 Label l; 6222 tbz(cnt, exact_log2(i), l); 6223 for (int j = 0; j < i; j += 2) { 6224 stp(zr, zr, post(ptr, 2 * BytesPerWord)); 6225 } 6226 bind(l); 6227 } 6228 { 6229 Label l; 6230 tbz(cnt, 0, l); 6231 str(zr, Address(ptr)); 6232 bind(l); 6233 } 6234 6235 BLOCK_COMMENT("} zero_words"); 6236 return pc(); 6237 } 6238 6239 // base: Address of a buffer to be zeroed, 8 bytes aligned. 6240 // cnt: Immediate count in HeapWords. 6241 // 6242 // r10, r11, rscratch1, and rscratch2 are clobbered. 6243 address MacroAssembler::zero_words(Register base, uint64_t cnt) 6244 { 6245 assert(wordSize <= BlockZeroingLowLimit, 6246 "increase BlockZeroingLowLimit"); 6247 address result = nullptr; 6248 if (cnt <= (uint64_t)BlockZeroingLowLimit / BytesPerWord) { 6249 #ifndef PRODUCT 6250 { 6251 char buf[64]; 6252 snprintf(buf, sizeof buf, "zero_words (count = %" PRIu64 ") {", cnt); 6253 BLOCK_COMMENT(buf); 6254 } 6255 #endif 6256 if (cnt >= 16) { 6257 uint64_t loops = cnt/16; 6258 if (loops > 1) { 6259 mov(rscratch2, loops - 1); 6260 } 6261 { 6262 Label loop; 6263 bind(loop); 6264 for (int i = 0; i < 16; i += 2) { 6265 stp(zr, zr, Address(base, i * BytesPerWord)); 6266 } 6267 add(base, base, 16 * BytesPerWord); 6268 if (loops > 1) { 6269 subs(rscratch2, rscratch2, 1); 6270 br(GE, loop); 6271 } 6272 } 6273 } 6274 cnt %= 16; 6275 int i = cnt & 1; // store any odd word to start 6276 if (i) str(zr, Address(base)); 6277 for (; i < (int)cnt; i += 2) { 6278 stp(zr, zr, Address(base, i * wordSize)); 6279 } 6280 BLOCK_COMMENT("} zero_words"); 6281 result = pc(); 6282 } else { 6283 mov(r10, base); mov(r11, cnt); 6284 result = zero_words(r10, r11); 6285 } 6286 return result; 6287 } 6288 6289 // Zero blocks of memory by using DC ZVA. 6290 // 6291 // Aligns the base address first sufficiently for DC ZVA, then uses 6292 // DC ZVA repeatedly for every full block. cnt is the size to be 6293 // zeroed in HeapWords. Returns the count of words left to be zeroed 6294 // in cnt. 6295 // 6296 // NOTE: This is intended to be used in the zero_blocks() stub. If 6297 // you want to use it elsewhere, note that cnt must be >= 2*zva_length. 6298 void MacroAssembler::zero_dcache_blocks(Register base, Register cnt) { 6299 Register tmp = rscratch1; 6300 Register tmp2 = rscratch2; 6301 int zva_length = VM_Version::zva_length(); 6302 Label initial_table_end, loop_zva; 6303 Label fini; 6304 6305 // Base must be 16 byte aligned. If not just return and let caller handle it 6306 tst(base, 0x0f); 6307 br(Assembler::NE, fini); 6308 // Align base with ZVA length. 6309 neg(tmp, base); 6310 andr(tmp, tmp, zva_length - 1); 6311 6312 // tmp: the number of bytes to be filled to align the base with ZVA length. 
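  // tmp is a multiple of 16 here, since base is 16-byte aligned and
  // zva_length is a multiple of 16. Each stp in the table below zeroes 16
  // bytes and occupies 4 bytes of code, so branching tmp/4 bytes back from
  // initial_table_end executes exactly the tmp/16 stores needed to reach
  // ZVA alignment.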
  add(base, base, tmp);
  sub(cnt, cnt, tmp, Assembler::ASR, 3);
  adr(tmp2, initial_table_end);
  sub(tmp2, tmp2, tmp, Assembler::LSR, 2);
  br(tmp2);

  for (int i = -zva_length + 16; i < 0; i += 16)
    stp(zr, zr, Address(base, i));
  bind(initial_table_end);

  sub(cnt, cnt, zva_length >> 3);
  bind(loop_zva);
  dc(Assembler::ZVA, base);
  subs(cnt, cnt, zva_length >> 3);
  add(base, base, zva_length);
  br(Assembler::GE, loop_zva);
  add(cnt, cnt, zva_length >> 3); // count not zeroed by DC ZVA
  bind(fini);
}

// base:   Address of a buffer to be filled, 8 bytes aligned.
// cnt:    Count in 8-byte units.
// value:  Value to be filled with.
// base will point to the end of the buffer after filling.
void MacroAssembler::fill_words(Register base, Register cnt, Register value)
{
//  Algorithm:
//
//    if (cnt == 0) {
//      return;
//    }
//    if ((p & 8) != 0) {
//      *p++ = v;
//    }
//
//    scratch1 = cnt & 14;
//    cnt -= scratch1;
//    p += scratch1;
//    switch (scratch1 / 2) {
//      do {
//        cnt -= 16;
//          p[-16] = v;
//          p[-15] = v;
//        case 7:
//          p[-14] = v;
//          p[-13] = v;
//        case 6:
//          p[-12] = v;
//          p[-11] = v;
//          // ...
//        case 1:
//          p[-2] = v;
//          p[-1] = v;
//        case 0:
//          p += 16;
//      } while (cnt);
//    }
//    if ((cnt & 1) == 1) {
//      *p++ = v;
//    }

  assert_different_registers(base, cnt, value, rscratch1, rscratch2);

  Label fini, skip, entry, loop;
  const int unroll = 8; // Number of stp instructions we'll unroll

  cbz(cnt, fini);
  tbz(base, 3, skip);
  str(value, Address(post(base, 8)));
  sub(cnt, cnt, 1);
  bind(skip);

  andr(rscratch1, cnt, (unroll-1) * 2);
  sub(cnt, cnt, rscratch1);
  add(base, base, rscratch1, Assembler::LSL, 3);
  adr(rscratch2, entry);
  sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 1);
  br(rscratch2);

  bind(loop);
  add(base, base, unroll * 16);
  for (int i = -unroll; i < 0; i++)
    stp(value, value, Address(base, i * 16));
  bind(entry);
  subs(cnt, cnt, unroll * 2);
  br(Assembler::GE, loop);

  tbz(cnt, 0, fini);
  str(value, Address(post(base, 8)));
  bind(fini);
}

// Intrinsic for
//
// - sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray
//     return the number of characters copied.
// - java/lang/StringUTF16.compress
//     return index of non-latin1 character if copy fails, otherwise 'len'.
//
// This version always returns the number of characters copied, and does not
// clobber the 'len' register. A successful copy will complete with the post-
// condition: 'res' == 'len', while an unsuccessful copy will exit with the
// post-condition: 0 <= 'res' < 'len'.
//
// NOTE: Attempts to use 'ld2' (and 'umaxv' in the ISO part) have proven to
//       degrade performance (on Ampere Altra - Neoverse N1) to an
//       unacceptable extent, even though the footprint would be smaller.
//       Using 'umaxv' in the ASCII-case comes with a small penalty but does
//       avoid additional bloat.
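//
// As a scalar sketch, the intrinsic implements:
//
//   int i;
//   for (i = 0; i < len; i++) {
//     jchar c = src[i];
//     if ((c & (ascii ? 0xff80 : 0xff00)) != 0) break; // char does not fit
//     dst[i] = (jbyte)c;
//   }
//   return i; // == len when every character was copied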
6422 // 6423 // Clobbers: src, dst, res, rscratch1, rscratch2, rflags 6424 void MacroAssembler::encode_iso_array(Register src, Register dst, 6425 Register len, Register res, bool ascii, 6426 FloatRegister vtmp0, FloatRegister vtmp1, 6427 FloatRegister vtmp2, FloatRegister vtmp3, 6428 FloatRegister vtmp4, FloatRegister vtmp5) 6429 { 6430 Register cnt = res; 6431 Register max = rscratch1; 6432 Register chk = rscratch2; 6433 6434 prfm(Address(src), PLDL1STRM); 6435 movw(cnt, len); 6436 6437 #define ASCII(insn) do { if (ascii) { insn; } } while (0) 6438 6439 Label LOOP_32, DONE_32, FAIL_32; 6440 6441 BIND(LOOP_32); 6442 { 6443 cmpw(cnt, 32); 6444 br(LT, DONE_32); 6445 ld1(vtmp0, vtmp1, vtmp2, vtmp3, T8H, Address(post(src, 64))); 6446 // Extract lower bytes. 6447 FloatRegister vlo0 = vtmp4; 6448 FloatRegister vlo1 = vtmp5; 6449 uzp1(vlo0, T16B, vtmp0, vtmp1); 6450 uzp1(vlo1, T16B, vtmp2, vtmp3); 6451 // Merge bits... 6452 orr(vtmp0, T16B, vtmp0, vtmp1); 6453 orr(vtmp2, T16B, vtmp2, vtmp3); 6454 // Extract merged upper bytes. 6455 FloatRegister vhix = vtmp0; 6456 uzp2(vhix, T16B, vtmp0, vtmp2); 6457 // ISO-check on hi-parts (all zero). 6458 // ASCII-check on lo-parts (no sign). 6459 FloatRegister vlox = vtmp1; // Merge lower bytes. 6460 ASCII(orr(vlox, T16B, vlo0, vlo1)); 6461 umov(chk, vhix, D, 1); ASCII(cm(LT, vlox, T16B, vlox)); 6462 fmovd(max, vhix); ASCII(umaxv(vlox, T16B, vlox)); 6463 orr(chk, chk, max); ASCII(umov(max, vlox, B, 0)); 6464 ASCII(orr(chk, chk, max)); 6465 cbnz(chk, FAIL_32); 6466 subw(cnt, cnt, 32); 6467 st1(vlo0, vlo1, T16B, Address(post(dst, 32))); 6468 b(LOOP_32); 6469 } 6470 BIND(FAIL_32); 6471 sub(src, src, 64); 6472 BIND(DONE_32); 6473 6474 Label LOOP_8, SKIP_8; 6475 6476 BIND(LOOP_8); 6477 { 6478 cmpw(cnt, 8); 6479 br(LT, SKIP_8); 6480 FloatRegister vhi = vtmp0; 6481 FloatRegister vlo = vtmp1; 6482 ld1(vtmp3, T8H, src); 6483 uzp1(vlo, T16B, vtmp3, vtmp3); 6484 uzp2(vhi, T16B, vtmp3, vtmp3); 6485 // ISO-check on hi-parts (all zero). 6486 // ASCII-check on lo-parts (no sign). 6487 ASCII(cm(LT, vtmp2, T16B, vlo)); 6488 fmovd(chk, vhi); ASCII(umaxv(vtmp2, T16B, vtmp2)); 6489 ASCII(umov(max, vtmp2, B, 0)); 6490 ASCII(orr(chk, chk, max)); 6491 cbnz(chk, SKIP_8); 6492 6493 strd(vlo, Address(post(dst, 8))); 6494 subw(cnt, cnt, 8); 6495 add(src, src, 16); 6496 b(LOOP_8); 6497 } 6498 BIND(SKIP_8); 6499 6500 #undef ASCII 6501 6502 Label LOOP, DONE; 6503 6504 cbz(cnt, DONE); 6505 BIND(LOOP); 6506 { 6507 Register chr = rscratch1; 6508 ldrh(chr, Address(post(src, 2))); 6509 tst(chr, ascii ? 0xff80 : 0xff00); 6510 br(NE, DONE); 6511 strb(chr, Address(post(dst, 1))); 6512 subs(cnt, cnt, 1); 6513 br(GT, LOOP); 6514 } 6515 BIND(DONE); 6516 // Return index where we stopped. 6517 subw(res, len, cnt); 6518 } 6519 6520 // Inflate byte[] array to char[]. 6521 // Clobbers: src, dst, len, rflags, rscratch1, v0-v6 6522 address MacroAssembler::byte_array_inflate(Register src, Register dst, Register len, 6523 FloatRegister vtmp1, FloatRegister vtmp2, 6524 FloatRegister vtmp3, Register tmp4) { 6525 Label big, done, after_init, to_stub; 6526 6527 assert_different_registers(src, dst, len, tmp4, rscratch1); 6528 6529 fmovd(vtmp1, 0.0); 6530 lsrw(tmp4, len, 3); 6531 bind(after_init); 6532 cbnzw(tmp4, big); 6533 // Short string: less than 8 bytes. 6534 { 6535 Label loop, tiny; 6536 6537 cmpw(len, 4); 6538 br(LT, tiny); 6539 // Use SIMD to do 4 bytes. 
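    // zip1 with the zeroed vtmp1 interleaves each data byte with a zero
    // byte, widening four latin-1 bytes into four little-endian jchars.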
    ldrs(vtmp2, post(src, 4));
    zip1(vtmp3, T8B, vtmp2, vtmp1);
    subw(len, len, 4);
    strd(vtmp3, post(dst, 8));

    cbzw(len, done);

    // Do the remaining bytes by steam.
    bind(loop);
    ldrb(tmp4, post(src, 1));
    strh(tmp4, post(dst, 2));
    subw(len, len, 1);

    bind(tiny);
    cbnz(len, loop);

    b(done);
  }

  if (SoftwarePrefetchHintDistance >= 0) {
    bind(to_stub);
    RuntimeAddress stub = RuntimeAddress(StubRoutines::aarch64::large_byte_array_inflate());
    assert(stub.target() != nullptr, "large_byte_array_inflate stub has not been generated");
    address tpc = trampoline_call(stub);
    if (tpc == nullptr) {
      DEBUG_ONLY(reset_labels(big, done));
      postcond(pc() == badAddress);
      return nullptr;
    }
    b(after_init);
  }

  // Unpack the bytes 8 at a time.
  bind(big);
  {
    Label loop, around, loop_last, loop_start;

    if (SoftwarePrefetchHintDistance >= 0) {
      const int large_loop_threshold = (64 + 16)/8;
      ldrd(vtmp2, post(src, 8));
      andw(len, len, 7);
      cmp(tmp4, (u1)large_loop_threshold);
      br(GE, to_stub);
      b(loop_start);

      bind(loop);
      ldrd(vtmp2, post(src, 8));
      bind(loop_start);
      subs(tmp4, tmp4, 1);
      br(EQ, loop_last);
      zip1(vtmp2, T16B, vtmp2, vtmp1);
      ldrd(vtmp3, post(src, 8));
      st1(vtmp2, T8H, post(dst, 16));
      subs(tmp4, tmp4, 1);
      zip1(vtmp3, T16B, vtmp3, vtmp1);
      st1(vtmp3, T8H, post(dst, 16));
      br(NE, loop);
      b(around);
      bind(loop_last);
      zip1(vtmp2, T16B, vtmp2, vtmp1);
      st1(vtmp2, T8H, post(dst, 16));
      bind(around);
      cbz(len, done);
    } else {
      andw(len, len, 7);
      bind(loop);
      ldrd(vtmp2, post(src, 8));
      sub(tmp4, tmp4, 1);
      zip1(vtmp3, T16B, vtmp2, vtmp1);
      st1(vtmp3, T8H, post(dst, 16));
      cbnz(tmp4, loop);
    }
  }

  // Do the tail of up to 8 bytes.
  add(src, src, len);
  ldrd(vtmp3, Address(src, -8));
  add(dst, dst, len, ext::uxtw, 1);
  zip1(vtmp3, T16B, vtmp3, vtmp1);
  strq(vtmp3, Address(dst, -16));

  bind(done);
  postcond(pc() != badAddress);
  return pc();
}

// Compress char[] array to byte[].
// Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len)
// Return the array length if every element in the array can be encoded,
// otherwise, the index of the first non-latin1 (> 0xff) character.
void MacroAssembler::char_array_compress(Register src, Register dst, Register len,
                                         Register res,
                                         FloatRegister tmp0, FloatRegister tmp1,
                                         FloatRegister tmp2, FloatRegister tmp3,
                                         FloatRegister tmp4, FloatRegister tmp5) {
  encode_iso_array(src, dst, len, res, false, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5);
}

// java.lang.Math.round(double a)
// Returns the closest long to the argument, with ties rounding to
// positive infinity. This requires some fiddling for corner
// cases. We take care to avoid double rounding in e.g. (jlong)(a + 0.5).
void MacroAssembler::java_round_double(Register dst, FloatRegister src,
                                       FloatRegister ftmp) {
  Label DONE;
  BLOCK_COMMENT("java_round_double: { ");
  fmovd(rscratch1, src);
  // Use RoundToNearestTiesAway unless src is small and negative.
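  // fcvtas (round-to-nearest, ties away from zero) matches Math.round for
  // all non-negative inputs, and for magnitudes >= 2^52, where a double has
  // no fractional part. Only small negative values differ: ties-away turns
  // -2.5 into -3 while Math.round requires -2. Those are handled below by
  // adding 0.5 and rounding toward negative infinity.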
6648 fcvtasd(dst, src); 6649 // Test if src >= 0 || abs(src) >= 0x1.0p52 6650 eor(rscratch1, rscratch1, UCONST64(1) << 63); // flip sign bit 6651 mov(rscratch2, julong_cast(0x1.0p52)); 6652 cmp(rscratch1, rscratch2); 6653 br(HS, DONE); { 6654 // src < 0 && abs(src) < 0x1.0p52 6655 // src may have a fractional part, so add 0.5 6656 fmovd(ftmp, 0.5); 6657 faddd(ftmp, src, ftmp); 6658 // Convert double to jlong, use RoundTowardsNegative 6659 fcvtmsd(dst, ftmp); 6660 } 6661 bind(DONE); 6662 BLOCK_COMMENT("} java_round_double"); 6663 } 6664 6665 void MacroAssembler::java_round_float(Register dst, FloatRegister src, 6666 FloatRegister ftmp) { 6667 Label DONE; 6668 BLOCK_COMMENT("java_round_float: { "); 6669 fmovs(rscratch1, src); 6670 // Use RoundToNearestTiesAway unless src small and -ve. 6671 fcvtassw(dst, src); 6672 // Test if src >= 0 || abs(src) >= 0x1.0p23 6673 eor(rscratch1, rscratch1, 0x80000000); // flip sign bit 6674 mov(rscratch2, jint_cast(0x1.0p23f)); 6675 cmp(rscratch1, rscratch2); 6676 br(HS, DONE); { 6677 // src < 0 && |src| < 0x1.0p23 6678 // src may have a fractional part, so add 0.5 6679 fmovs(ftmp, 0.5f); 6680 fadds(ftmp, src, ftmp); 6681 // Convert float to jint, use RoundTowardsNegative 6682 fcvtmssw(dst, ftmp); 6683 } 6684 bind(DONE); 6685 BLOCK_COMMENT("} java_round_float"); 6686 } 6687 6688 // get_thread() can be called anywhere inside generated code so we 6689 // need to save whatever non-callee save context might get clobbered 6690 // by the call to JavaThread::aarch64_get_thread_helper() or, indeed, 6691 // the call setup code. 6692 // 6693 // On Linux, aarch64_get_thread_helper() clobbers only r0, r1, and flags. 6694 // On other systems, the helper is a usual C function. 6695 // 6696 void MacroAssembler::get_thread(Register dst) { 6697 RegSet saved_regs = 6698 LINUX_ONLY(RegSet::range(r0, r1) + lr - dst) 6699 NOT_LINUX (RegSet::range(r0, r17) + lr - dst); 6700 6701 protect_return_address(); 6702 push(saved_regs, sp); 6703 6704 mov(lr, CAST_FROM_FN_PTR(address, JavaThread::aarch64_get_thread_helper)); 6705 blr(lr); 6706 if (dst != c_rarg0) { 6707 mov(dst, c_rarg0); 6708 } 6709 6710 pop(saved_regs, sp); 6711 authenticate_return_address(); 6712 } 6713 6714 void MacroAssembler::cache_wb(Address line) { 6715 assert(line.getMode() == Address::base_plus_offset, "mode should be base_plus_offset"); 6716 assert(line.index() == noreg, "index should be noreg"); 6717 assert(line.offset() == 0, "offset should be 0"); 6718 // would like to assert this 6719 // assert(line._ext.shift == 0, "shift should be zero"); 6720 if (VM_Version::supports_dcpop()) { 6721 // writeback using clear virtual address to point of persistence 6722 dc(Assembler::CVAP, line.base()); 6723 } else { 6724 // no need to generate anything as Unsafe.writebackMemory should 6725 // never invoke this stub 6726 } 6727 } 6728 6729 void MacroAssembler::cache_wbsync(bool is_pre) { 6730 // we only need a barrier post sync 6731 if (!is_pre) { 6732 membar(Assembler::AnyAny); 6733 } 6734 } 6735 6736 void MacroAssembler::verify_sve_vector_length(Register tmp) { 6737 if (!UseSVE || VM_Version::get_max_supported_sve_vector_length() == FloatRegister::sve_vl_min) { 6738 return; 6739 } 6740 // Make sure that native code does not change SVE vector length. 
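  // Starting from zero, sve_inc(tmp, B) adds the number of byte elements
  // per SVE vector, i.e. the current vector length in bytes, which is then
  // compared against the length captured at startup.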
  Label verify_ok;
  movw(tmp, zr);
  sve_inc(tmp, B);
  subsw(zr, tmp, VM_Version::get_initial_sve_vector_length());
  br(EQ, verify_ok);
  stop("Error: SVE vector length has changed since jvm startup");
  bind(verify_ok);
}

void MacroAssembler::verify_ptrue() {
  Label verify_ok;
  if (!UseSVE) {
    return;
  }
  sve_cntp(rscratch1, B, ptrue, ptrue); // get true elements count.
  sve_dec(rscratch1, B);
  cbz(rscratch1, verify_ok);
  stop("Error: the preserved predicate register (p7) elements are not all true");
  bind(verify_ok);
}

void MacroAssembler::safepoint_isb() {
  isb();
#ifndef PRODUCT
  if (VerifyCrossModifyFence) {
    // Clear the thread state.
    strb(zr, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset())));
  }
#endif
}

#ifndef PRODUCT
void MacroAssembler::verify_cross_modify_fence_not_required() {
  if (VerifyCrossModifyFence) {
    // Check if thread needs a cross modify fence.
    ldrb(rscratch1, Address(rthread, in_bytes(JavaThread::requires_cross_modify_fence_offset())));
    Label fence_not_required;
    cbz(rscratch1, fence_not_required);
    // If it does then fail.
    lea(rscratch1, RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::verify_cross_modify_fence_failure)));
    mov(c_rarg0, rthread);
    blr(rscratch1);
    bind(fence_not_required);
  }
}
#endif

void MacroAssembler::spin_wait() {
  for (int i = 0; i < VM_Version::spin_wait_desc().inst_count(); ++i) {
    switch (VM_Version::spin_wait_desc().inst()) {
      case SpinWait::NOP:
        nop();
        break;
      case SpinWait::ISB:
        isb();
        break;
      case SpinWait::YIELD:
        yield();
        break;
      default:
        ShouldNotReachHere();
    }
  }
}

// Stack frame creation/removal

void MacroAssembler::enter(bool strip_ret_addr) {
  if (strip_ret_addr) {
    // Addresses can only be signed once. If there are multiple nested frames being created
    // in the same function, then the return address needs stripping first.
    strip_return_address();
  }
  protect_return_address();
  stp(rfp, lr, Address(pre(sp, -2 * wordSize)));
  mov(rfp, sp);
}

void MacroAssembler::leave() {
  mov(sp, rfp);
  ldp(rfp, lr, Address(post(sp, 2 * wordSize)));
  authenticate_return_address();
}

// ROP Protection
// Use the AArch64 PAC feature to add ROP protection for generated code. Use whenever creating/
// destroying stack frames or whenever directly loading/storing the LR to memory.
// If ROP protection is not set then these functions are no-ops.
// For more details on PAC see pauth_aarch64.hpp.

// Sign the LR. Use during construction of a stack frame, before storing the LR to memory.
// Uses value zero as the modifier.
//
void MacroAssembler::protect_return_address() {
  if (VM_Version::use_rop_protection()) {
    check_return_address();
    paciaz();
  }
}

// Sign the return value in the given register. Use before updating the LR in the existing stack
// frame for the current function.
// Uses value zero as the modifier.
//
void MacroAssembler::protect_return_address(Register return_reg) {
  if (VM_Version::use_rop_protection()) {
    check_return_address(return_reg);
    paciza(return_reg);
  }
}
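// Typical pairing (an illustrative sketch, not emitted verbatim anywhere):
// enter() signs the LR before it is stored, and leave() authenticates it
// after it is reloaded, so a signed LR never reaches a ret unauthenticated:
//
//   enter();   // protect_return_address(); stp(rfp, lr, ...); mov(rfp, sp)
//   // ... frame body ...
//   leave();   // mov(sp, rfp); ldp(rfp, lr, ...); authenticate_return_address()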
// Authenticate the LR. Use before function return, after restoring FP and loading LR from memory.
// Uses value zero as the modifier.
//
void MacroAssembler::authenticate_return_address() {
  if (VM_Version::use_rop_protection()) {
    autiaz();
    check_return_address();
  }
}

// Authenticate the return value in the given register. Use before updating the LR in the existing
// stack frame for the current function.
// Uses value zero as the modifier.
//
void MacroAssembler::authenticate_return_address(Register return_reg) {
  if (VM_Version::use_rop_protection()) {
    autiza(return_reg);
    check_return_address(return_reg);
  }
}

// Strip any PAC data from the LR without performing any authentication. Use with caution - only if
// there is no guaranteed way of authenticating the LR.
//
void MacroAssembler::strip_return_address() {
  if (VM_Version::use_rop_protection()) {
    xpaclri();
  }
}

#ifndef PRODUCT
// PAC failures can be difficult to debug. After an authentication failure, a segfault will only
// occur when the pointer is used - i.e. when the program returns to the invalid LR. At that point
// it is difficult to debug back to the callee function.
// This function simply loads from the address in the given register.
// Use directly after authentication to catch authentication failures.
// Also use before signing to check that the pointer is valid and hasn't already been signed.
//
void MacroAssembler::check_return_address(Register return_reg) {
  if (VM_Version::use_rop_protection()) {
    ldr(zr, Address(return_reg));
  }
}
#endif

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no ABI restrictions. Since we must observe ABI restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rfp and lr
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

// On 64-bit we store integer-like items to the stack as 64-bit items
// (AArch64 ABI), even though Java would only store 32 bits for a
// parameter. On 32-bit it would simply be 32 bits, so this routine does
// 32->32 on 32-bit and 32->64 on 64-bit.
void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      ldr(tmp, Address(rfp, reg2offset_in(src.first())));
      str(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg
      ldrsw(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    if (dst.first() != src.first()) {
      sxtw(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}
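// Frame layout assumed by the movers above and below (a sketch): incoming
// stack args are addressed off rfp, past the two saved words, while
// outgoing args are addressed off sp:
//
//   [rfp + 16 ...]  incoming stack args   (reg2offset_in: +4 slots * 4 bytes)
//   [rfp + 8]       saved lr
//   [rfp + 0]       saved rfp
//   [sp  + ...]     outgoing stack args   (reg2offset_out)
//
// which is why reg2offset_in biases by 4 VMReg stack slots (16 bytes).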
// An oop arg. Must pass a handle, not the oop itself.
void MacroAssembler::object_move(
                        OopMap* map,
                        int oop_handle_offset,
                        int framesize_in_slots,
                        VMRegPair src,
                        VMRegPair dst,
                        bool is_receiver,
                        int* receiver_offset) {

  // must pass a handle. First figure out the location we use as a handle

  Register rHandle = dst.first()->is_stack() ? rscratch2 : dst.first()->as_Register();

  // See if oop is null; if it is we need no handle

  if (src.first()->is_stack()) {

    // Oop is already on the stack as an argument
    int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
    map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
    if (is_receiver) {
      *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
    }

    ldr(rscratch1, Address(rfp, reg2offset_in(src.first())));
    lea(rHandle, Address(rfp, reg2offset_in(src.first())));
    // conditionally move a null
    cmp(rscratch1, zr);
    csel(rHandle, zr, rHandle, Assembler::EQ);
  } else {

    // Oop is in a register; we must store it to the space we reserve
    // on the stack for oop_handles and pass a handle if the oop is non-null

    const Register rOop = src.first()->as_Register();
    int oop_slot;
    if (rOop == j_rarg0)
      oop_slot = 0;
    else if (rOop == j_rarg1)
      oop_slot = 1;
    else if (rOop == j_rarg2)
      oop_slot = 2;
    else if (rOop == j_rarg3)
      oop_slot = 3;
    else if (rOop == j_rarg4)
      oop_slot = 4;
    else if (rOop == j_rarg5)
      oop_slot = 5;
    else if (rOop == j_rarg6)
      oop_slot = 6;
    else {
      assert(rOop == j_rarg7, "wrong register");
      oop_slot = 7;
    }

    oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset;
    int offset = oop_slot * VMRegImpl::stack_slot_size;

    map->set_oop(VMRegImpl::stack2reg(oop_slot));
    // Store oop in handle area, may be null
    str(rOop, Address(sp, offset));
    if (is_receiver) {
      *receiver_offset = offset;
    }

    cmp(rOop, zr);
    lea(rHandle, Address(sp, offset));
    // conditionally move a null
    csel(rHandle, zr, rHandle, Assembler::EQ);
  }
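  // In both cases rHandle is now either null or the address of the stack
  // slot holding the oop: a handle is just that indirection, so the GC can
  // update the slot (recorded in the oop map above) without the native
  // callee ever seeing a raw, movable oop.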
  // If the arg is on the stack then place it; otherwise it is already in the correct reg.
  if (dst.first()->is_stack()) {
    str(rHandle, Address(sp, reg2offset_out(dst.first())));
  }
}

// A float arg may have to do float reg to int reg conversion
void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      ldrw(tmp, Address(rfp, reg2offset_in(src.first())));
      strw(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      ldrs(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (src.first() != dst.first()) {
    if (src.is_single_phys_reg() && dst.is_single_phys_reg())
      fmovs(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    else
      strs(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
  }
}

// A long move
void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      ldr(tmp, Address(rfp, reg2offset_in(src.first())));
      str(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      // stack to reg
      ldr(dst.first()->as_Register(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    str(src.first()->as_Register(), Address(sp, reg2offset_out(dst.first())));
  } else {
    if (dst.first() != src.first()) {
      mov(dst.first()->as_Register(), src.first()->as_Register());
    }
  }
}

// A double move
void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      ldr(tmp, Address(rfp, reg2offset_in(src.first())));
      str(tmp, Address(sp, reg2offset_out(dst.first())));
    } else {
      ldrd(dst.first()->as_FloatRegister(), Address(rfp, reg2offset_in(src.first())));
    }
  } else if (src.first() != dst.first()) {
    if (src.is_single_phys_reg() && dst.is_single_phys_reg())
      fmovd(dst.first()->as_FloatRegister(), src.first()->as_FloatRegister());
    else
      strd(src.first()->as_FloatRegister(), Address(sp, reg2offset_out(dst.first())));
  }
}

// Implements lightweight-locking.
//
// - obj: the object to be locked
// - t1, t2, t3: temporary registers, will be destroyed
// - slow: branched to if locking fails, absolute offset may be larger than 32KB (imm14 encoding).
void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register t1, Register t2, Register t3, Label& slow) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
  assert_different_registers(basic_lock, obj, t1, t2, t3, rscratch1);

  Label push;
  const Register top = t1;
  const Register mark = t2;
  const Register t = t3;

  // Preload the markWord. It is important that this is the first
  // instruction emitted as it is part of C1's null check semantics.
  ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));

  if (UseObjectMonitorTable) {
    // Clear cache in case fast locking succeeds.
    str(zr, Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes()))));
  }
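  // The lock-stack is a small per-thread array of oops embedded in
  // JavaThread, addressed via the byte offset held at
  // lock_stack_top_offset(): pushing an entry stores the oop at
  // [rthread + top] and advances top by oopSize, and the stack is full
  // once top reaches LockStack::end_offset().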
  // Check if the lock-stack is full.
  ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
  cmpw(top, (unsigned)LockStack::end_offset());
  br(Assembler::GE, slow);

  // Check for recursion.
  subw(t, top, oopSize);
  ldr(t, Address(rthread, t));
  cmp(obj, t);
  br(Assembler::EQ, push);

  // Check header for monitor (0b10).
  tst(mark, markWord::monitor_value);
  br(Assembler::NE, slow);

  // Try to lock. Transition lock bits 0b01 => 0b00
  assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
  orr(mark, mark, markWord::unlocked_value);
  eor(t, mark, markWord::unlocked_value);
  cmpxchg(/*addr*/ obj, /*expected*/ mark, /*new*/ t, Assembler::xword,
          /*acquire*/ true, /*release*/ false, /*weak*/ false, noreg);
  br(Assembler::NE, slow);

  bind(push);
  // After successful lock, push object on lock-stack.
  str(obj, Address(rthread, top));
  addw(top, top, oopSize);
  strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
}

// Implements lightweight-unlocking.
//
// - obj: the object to be unlocked
// - t1, t2, t3: temporary registers
// - slow: branched to if unlocking fails, absolute offset may be larger than 32KB (imm14 encoding).
void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
  // cmpxchg clobbers rscratch1.
  assert_different_registers(obj, t1, t2, t3, rscratch1);

#ifdef ASSERT
  {
    // Check for lock-stack underflow.
    Label stack_ok;
    ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
    cmpw(t1, (unsigned)LockStack::start_offset());
    br(Assembler::GE, stack_ok);
    STOP("Lock-stack underflow");
    bind(stack_ok);
  }
#endif

  Label unlocked, push_and_slow;
  const Register top = t1;
  const Register mark = t2;
  const Register t = t3;

  // Check if obj is top of lock-stack.
  ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
  subw(top, top, oopSize);
  ldr(t, Address(rthread, top));
  cmp(obj, t);
  br(Assembler::NE, slow);

  // Pop lock-stack.
  DEBUG_ONLY(str(zr, Address(rthread, top));)
  strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));

  // Check if recursive.
  subw(t, top, oopSize);
  ldr(t, Address(rthread, t));
  cmp(obj, t);
  br(Assembler::EQ, unlocked);

  // Not recursive. Check header for monitor (0b10).
  ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
  tbnz(mark, log2i_exact(markWord::monitor_value), push_and_slow);

#ifdef ASSERT
  // Check header not unlocked (0b01).
  Label not_unlocked;
  tbz(mark, log2i_exact(markWord::unlocked_value), not_unlocked);
  stop("lightweight_unlock already unlocked");
  bind(not_unlocked);
#endif

  // Try to unlock. Transition lock bits 0b00 => 0b01
  assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
  orr(t, mark, markWord::unlocked_value);
  cmpxchg(obj, mark, t, Assembler::xword,
          /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg);
  br(Assembler::EQ, unlocked);
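  // If the CAS fails, the mark has changed under us (e.g. the lock was
  // concurrently inflated to a full monitor); re-push the object so the
  // lock-stack is consistent again before handing the unlock to the
  // runtime.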
  bind(push_and_slow);
  // Restore lock-stack and handle the unlock in runtime.
  DEBUG_ONLY(str(obj, Address(rthread, top));)
  addw(top, top, oopSize);
  strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
  b(slow);

  bind(unlocked);
}