1 /* 2 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "asm/assembler.hpp" 27 #include "asm/assembler.inline.hpp" 28 #include "code/compiledIC.hpp" 29 #include "compiler/compiler_globals.hpp" 30 #include "compiler/disassembler.hpp" 31 #include "ci/ciInlineKlass.hpp" 32 #include "crc32c.h" 33 #include "gc/shared/barrierSet.hpp" 34 #include "gc/shared/barrierSetAssembler.hpp" 35 #include "gc/shared/collectedHeap.inline.hpp" 36 #include "gc/shared/tlab_globals.hpp" 37 #include "interpreter/bytecodeHistogram.hpp" 38 #include "interpreter/interpreter.hpp" 39 #include "jvm.h" 40 #include "memory/resourceArea.hpp" 41 #include "memory/universe.hpp" 42 #include "oops/accessDecorators.hpp" 43 #include "oops/compressedKlass.inline.hpp" 44 #include "oops/compressedOops.inline.hpp" 45 #include "oops/klass.inline.hpp" 46 #include "oops/resolvedFieldEntry.hpp" 47 #include "prims/methodHandles.hpp" 48 #include "runtime/continuation.hpp" 49 #include "runtime/interfaceSupport.inline.hpp" 50 #include "runtime/javaThread.hpp" 51 #include "runtime/jniHandles.hpp" 52 #include "runtime/objectMonitor.hpp" 53 #include "runtime/os.hpp" 54 #include "runtime/safepoint.hpp" 55 #include "runtime/safepointMechanism.hpp" 56 #include "runtime/sharedRuntime.hpp" 57 #include "runtime/signature_cc.hpp" 58 #include "runtime/stubRoutines.hpp" 59 #include "utilities/checkedCast.hpp" 60 #include "utilities/macros.hpp" 61 #include "vmreg_x86.inline.hpp" 62 #ifdef COMPILER2 63 #include "opto/output.hpp" 64 #endif 65 66 #ifdef PRODUCT 67 #define BLOCK_COMMENT(str) /* nothing */ 68 #define STOP(error) stop(error) 69 #else 70 #define BLOCK_COMMENT(str) block_comment(str) 71 #define STOP(error) block_comment(error); stop(error) 72 #endif 73 74 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":") 75 76 #ifdef ASSERT 77 bool AbstractAssembler::pd_check_instruction_mark() { return true; } 78 #endif 79 80 static const Assembler::Condition reverse[] = { 81 Assembler::noOverflow /* overflow = 0x0 */ , 82 Assembler::overflow /* noOverflow = 0x1 */ , 83 Assembler::aboveEqual /* carrySet = 0x2, below = 0x2 */ , 84 Assembler::below /* aboveEqual = 0x3, carryClear = 0x3 */ , 85 Assembler::notZero /* zero = 0x4, equal = 0x4 */ , 86 Assembler::zero /* notZero = 0x5, notEqual = 0x5 */ , 87 Assembler::above /* belowEqual = 0x6 */ , 88 Assembler::belowEqual /* above = 0x7 */ , 89 Assembler::positive /* negative = 0x8 */ , 90 Assembler::negative /* positive = 0x9 */ , 91 
Assembler::noParity /* parity = 0xa */ , 92 Assembler::parity /* noParity = 0xb */ , 93 Assembler::greaterEqual /* less = 0xc */ , 94 Assembler::less /* greaterEqual = 0xd */ , 95 Assembler::greater /* lessEqual = 0xe */ , 96 Assembler::lessEqual /* greater = 0xf, */ 97 98 }; 99 100 101 // Implementation of MacroAssembler 102 103 // First all the versions that have distinct versions depending on 32/64 bit 104 // Unless the difference is trivial (1 line or so). 105 106 #ifndef _LP64 107 108 // 32bit versions 109 110 Address MacroAssembler::as_Address(AddressLiteral adr) { 111 return Address(adr.target(), adr.rspec()); 112 } 113 114 Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) { 115 assert(rscratch == noreg, ""); 116 return Address::make_array(adr); 117 } 118 119 void MacroAssembler::call_VM_leaf_base(address entry_point, 120 int number_of_arguments) { 121 call(RuntimeAddress(entry_point)); 122 increment(rsp, number_of_arguments * wordSize); 123 } 124 125 void MacroAssembler::cmpklass(Address src1, Metadata* obj) { 126 cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate()); 127 } 128 129 130 void MacroAssembler::cmpklass(Register src1, Metadata* obj) { 131 cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate()); 132 } 133 134 void MacroAssembler::cmpoop(Address src1, jobject obj) { 135 cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate()); 136 } 137 138 void MacroAssembler::cmpoop(Register src1, jobject obj, Register rscratch) { 139 assert(rscratch == noreg, "redundant"); 140 cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate()); 141 } 142 143 void MacroAssembler::extend_sign(Register hi, Register lo) { 144 // According to Intel Doc. AP-526, "Integer Divide", p.18. 145 if (VM_Version::is_P6() && hi == rdx && lo == rax) { 146 cdql(); 147 } else { 148 movl(hi, lo); 149 sarl(hi, 31); 150 } 151 } 152 153 void MacroAssembler::jC2(Register tmp, Label& L) { 154 // set parity bit if FPU flag C2 is set (via rax) 155 save_rax(tmp); 156 fwait(); fnstsw_ax(); 157 sahf(); 158 restore_rax(tmp); 159 // branch 160 jcc(Assembler::parity, L); 161 } 162 163 void MacroAssembler::jnC2(Register tmp, Label& L) { 164 // set parity bit if FPU flag C2 is set (via rax) 165 save_rax(tmp); 166 fwait(); fnstsw_ax(); 167 sahf(); 168 restore_rax(tmp); 169 // branch 170 jcc(Assembler::noParity, L); 171 } 172 173 // 32bit can do a case table jump in one instruction but we no longer allow the base 174 // to be installed in the Address class 175 void MacroAssembler::jump(ArrayAddress entry, Register rscratch) { 176 assert(rscratch == noreg, "not needed"); 177 jmp(as_Address(entry, noreg)); 178 } 179 180 // Note: y_lo will be destroyed 181 void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) { 182 // Long compare for Java (semantics as described in JVM spec.) 
183 Label high, low, done; 184 185 cmpl(x_hi, y_hi); 186 jcc(Assembler::less, low); 187 jcc(Assembler::greater, high); 188 // x_hi is the return register 189 xorl(x_hi, x_hi); 190 cmpl(x_lo, y_lo); 191 jcc(Assembler::below, low); 192 jcc(Assembler::equal, done); 193 194 bind(high); 195 xorl(x_hi, x_hi); 196 increment(x_hi); 197 jmp(done); 198 199 bind(low); 200 xorl(x_hi, x_hi); 201 decrementl(x_hi); 202 203 bind(done); 204 } 205 206 void MacroAssembler::lea(Register dst, AddressLiteral src) { 207 mov_literal32(dst, (int32_t)src.target(), src.rspec()); 208 } 209 210 void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) { 211 assert(rscratch == noreg, "not needed"); 212 213 // leal(dst, as_Address(adr)); 214 // see note in movl as to why we must use a move 215 mov_literal32(dst, (int32_t)adr.target(), adr.rspec()); 216 } 217 218 void MacroAssembler::leave() { 219 mov(rsp, rbp); 220 pop(rbp); 221 } 222 223 void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) { 224 // Multiplication of two Java long values stored on the stack 225 // as illustrated below. Result is in rdx:rax. 226 // 227 // rsp ---> [ ?? ] \ \ 228 // .... | y_rsp_offset | 229 // [ y_lo ] / (in bytes) | x_rsp_offset 230 // [ y_hi ] | (in bytes) 231 // .... | 232 // [ x_lo ] / 233 // [ x_hi ] 234 // .... 235 // 236 // Basic idea: lo(result) = lo(x_lo * y_lo) 237 // hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi) 238 Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset); 239 Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset); 240 Label quick; 241 // load x_hi, y_hi and check if quick 242 // multiplication is possible 243 movl(rbx, x_hi); 244 movl(rcx, y_hi); 245 movl(rax, rbx); 246 orl(rbx, rcx); // rbx, = 0 <=> x_hi = 0 and y_hi = 0 247 jcc(Assembler::zero, quick); // if rbx, = 0 do quick multiply 248 // do full multiplication 249 // 1st step 250 mull(y_lo); // x_hi * y_lo 251 movl(rbx, rax); // save lo(x_hi * y_lo) in rbx, 252 // 2nd step 253 movl(rax, x_lo); 254 mull(rcx); // x_lo * y_hi 255 addl(rbx, rax); // add lo(x_lo * y_hi) to rbx, 256 // 3rd step 257 bind(quick); // note: rbx, = 0 if quick multiply! 258 movl(rax, x_lo); 259 mull(y_lo); // x_lo * y_lo 260 addl(rdx, rbx); // correct hi(x_lo * y_lo) 261 } 262 263 void MacroAssembler::lneg(Register hi, Register lo) { 264 negl(lo); 265 adcl(hi, 0); 266 negl(hi); 267 } 268 269 void MacroAssembler::lshl(Register hi, Register lo) { 270 // Java shift left long support (semantics as described in JVM spec., p.305) 271 // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n)) 272 // shift value is in rcx ! 273 assert(hi != rcx, "must not use rcx"); 274 assert(lo != rcx, "must not use rcx"); 275 const Register s = rcx; // shift count 276 const int n = BitsPerWord; 277 Label L; 278 andl(s, 0x3f); // s := s & 0x3f (s < 0x40) 279 cmpl(s, n); // if (s < n) 280 jcc(Assembler::less, L); // else (s >= n) 281 movl(hi, lo); // x := x << n 282 xorl(lo, lo); 283 // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n! 
284 bind(L); // s (mod n) < n 285 shldl(hi, lo); // x := x << s 286 shll(lo); 287 } 288 289 290 void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) { 291 // Java shift right long support (semantics as described in JVM spec., p.306 & p.310) 292 // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n)) 293 assert(hi != rcx, "must not use rcx"); 294 assert(lo != rcx, "must not use rcx"); 295 const Register s = rcx; // shift count 296 const int n = BitsPerWord; 297 Label L; 298 andl(s, 0x3f); // s := s & 0x3f (s < 0x40) 299 cmpl(s, n); // if (s < n) 300 jcc(Assembler::less, L); // else (s >= n) 301 movl(lo, hi); // x := x >> n 302 if (sign_extension) sarl(hi, 31); 303 else xorl(hi, hi); 304 // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n! 305 bind(L); // s (mod n) < n 306 shrdl(lo, hi); // x := x >> s 307 if (sign_extension) sarl(hi); 308 else shrl(hi); 309 } 310 311 void MacroAssembler::movoop(Register dst, jobject obj) { 312 mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate()); 313 } 314 315 void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) { 316 assert(rscratch == noreg, "redundant"); 317 mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate()); 318 } 319 320 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) { 321 mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate()); 322 } 323 324 void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) { 325 assert(rscratch == noreg, "redundant"); 326 mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate()); 327 } 328 329 void MacroAssembler::movptr(Register dst, AddressLiteral src) { 330 if (src.is_lval()) { 331 mov_literal32(dst, (intptr_t)src.target(), src.rspec()); 332 } else { 333 movl(dst, as_Address(src)); 334 } 335 } 336 337 void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) { 338 assert(rscratch == noreg, "redundant"); 339 movl(as_Address(dst, noreg), src); 340 } 341 342 void MacroAssembler::movptr(Register dst, ArrayAddress src) { 343 movl(dst, as_Address(src, noreg)); 344 } 345 346 void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) { 347 assert(rscratch == noreg, "redundant"); 348 movl(dst, src); 349 } 350 351 void MacroAssembler::pushoop(jobject obj, Register rscratch) { 352 assert(rscratch == noreg, "redundant"); 353 push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate()); 354 } 355 356 void MacroAssembler::pushklass(Metadata* obj, Register rscratch) { 357 assert(rscratch == noreg, "redundant"); 358 push_literal32((int32_t)obj, metadata_Relocation::spec_for_immediate()); 359 } 360 361 void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) { 362 assert(rscratch == noreg, "redundant"); 363 if (src.is_lval()) { 364 push_literal32((int32_t)src.target(), src.rspec()); 365 } else { 366 pushl(as_Address(src)); 367 } 368 } 369 370 static void pass_arg0(MacroAssembler* masm, Register arg) { 371 masm->push(arg); 372 } 373 374 static void pass_arg1(MacroAssembler* masm, Register arg) { 375 masm->push(arg); 376 } 377 378 static void pass_arg2(MacroAssembler* masm, Register arg) { 379 masm->push(arg); 380 } 381 382 static void pass_arg3(MacroAssembler* masm, Register arg) { 383 masm->push(arg); 384 } 385 386 #ifndef PRODUCT 387 extern "C" void findpc(intptr_t x); 388 #endif 389 390 void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, 
                              int eip, char* msg) {
  // In order to get locks to work, we need to fake an in_VM state
  JavaThread* thread = JavaThread::current();
  JavaThreadState saved_state = thread->thread_state();
  thread->set_thread_state(_thread_in_vm);
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip);
      BREAKPOINT;
    }
  }
  fatal("DEBUG MESSAGE: %s", msg);
}

void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip) {
  ttyLocker ttyl;
  DebuggingContext debugging{};
  tty->print_cr("eip = 0x%08x", eip);
#ifndef PRODUCT
  if ((WizardMode || Verbose) && PrintMiscellaneous) {
    tty->cr();
    findpc(eip);
    tty->cr();
  }
#endif
#define PRINT_REG(rax) \
  { tty->print("%s = ", #rax); os::print_location(tty, rax); }
  PRINT_REG(rax);
  PRINT_REG(rbx);
  PRINT_REG(rcx);
  PRINT_REG(rdx);
  PRINT_REG(rdi);
  PRINT_REG(rsi);
  PRINT_REG(rbp);
  PRINT_REG(rsp);
#undef PRINT_REG
  // Print some words near top of stack.
  int* dump_sp = (int*) rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 16; row++) {
    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    for (int col = 0; col < 8; col++) {
      tty->print(" 0x%08x", *dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)eip-64, (address)eip);
  tty->print_cr("--------");
  Disassembler::decode((address)eip, (address)eip+32);
}

void MacroAssembler::stop(const char* msg) {
  // push address of message
  ExternalAddress message((address)msg);
  pushptr(message.addr(), noreg);
  { Label L; call(L, relocInfo::none); bind(L); } // push eip
  pusha();                                        // push registers
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
  hlt();
}

void MacroAssembler::warn(const char* msg) {
  push_CPU_state();

  // push address of message
  ExternalAddress message((address)msg);
  pushptr(message.addr(), noreg);

  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
  addl(rsp, wordSize); // discard argument
  pop_CPU_state();
}

void MacroAssembler::print_state() {
  { Label L; call(L, relocInfo::none); bind(L); } // push eip
  pusha();                                        // push registers

  push_CPU_state();
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::print_state32)));
  pop_CPU_state();

  popa();
  addl(rsp, wordSize);
}

#else // _LP64

// 64 bit versions

Address MacroAssembler::as_Address(AddressLiteral adr) {
  // amd64 always does this as a pc-rel
  // we can be absolute or disp based on the instruction type
  // jmp/call are displacements others are absolute
  assert(!adr.is_lval(), "must be rval");
  assert(reachable(adr), "must be");
  return Address(checked_cast<int32_t>(adr.target() - pc()), adr.target(), adr.reloc());

}

Address MacroAssembler::as_Address(ArrayAddress adr, Register rscratch) {
  AddressLiteral base = adr.base();
  lea(rscratch, base);
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(rscratch, index._index, index._scale, index._disp);
  return array;
}

void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
  Label L, E;

#ifdef _WIN64
  // Windows always allocates space for its register args
  assert(num_args <= 4, "only register arguments supported");
  subq(rsp, frame::arg_reg_save_area_bytes);
#endif

  // Align stack if necessary
  testl(rsp, 15);
  jcc(Assembler::zero, L);

  subq(rsp, 8);
  call(RuntimeAddress(entry_point));
  addq(rsp, 8);
  jmp(E);

  bind(L);
  call(RuntimeAddress(entry_point));

  bind(E);

#ifdef _WIN64
  // restore stack pointer
  addq(rsp, frame::arg_reg_save_area_bytes);
#endif

}

void MacroAssembler::cmp64(Register src1, AddressLiteral src2, Register rscratch) {
  assert(!src2.is_lval(), "should use cmpptr");
  assert(rscratch != noreg || always_reachable(src2), "missing");

  if (reachable(src2)) {
    cmpq(src1, as_Address(src2));
  } else {
    lea(rscratch, src2);
    Assembler::cmpq(src1, Address(rscratch, 0));
  }
}

int MacroAssembler::corrected_idivq(Register reg) {
  // Full implementation of Java ldiv and lrem; checks for special
  // case as described in JVM spec., p.243 & p.271. The function
  // returns the (pc) offset of the idivq instruction - may be needed
  // for implicit exceptions.
558 // 559 // normal case special case 560 // 561 // input : rax: dividend min_long 562 // reg: divisor (may not be eax/edx) -1 563 // 564 // output: rax: quotient (= rax idiv reg) min_long 565 // rdx: remainder (= rax irem reg) 0 566 assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register"); 567 static const int64_t min_long = 0x8000000000000000; 568 Label normal_case, special_case; 569 570 // check for special case 571 cmp64(rax, ExternalAddress((address) &min_long), rdx /*rscratch*/); 572 jcc(Assembler::notEqual, normal_case); 573 xorl(rdx, rdx); // prepare rdx for possible special case (where 574 // remainder = 0) 575 cmpq(reg, -1); 576 jcc(Assembler::equal, special_case); 577 578 // handle normal case 579 bind(normal_case); 580 cdqq(); 581 int idivq_offset = offset(); 582 idivq(reg); 583 584 // normal and special case exit 585 bind(special_case); 586 587 return idivq_offset; 588 } 589 590 void MacroAssembler::decrementq(Register reg, int value) { 591 if (value == min_jint) { subq(reg, value); return; } 592 if (value < 0) { incrementq(reg, -value); return; } 593 if (value == 0) { ; return; } 594 if (value == 1 && UseIncDec) { decq(reg) ; return; } 595 /* else */ { subq(reg, value) ; return; } 596 } 597 598 void MacroAssembler::decrementq(Address dst, int value) { 599 if (value == min_jint) { subq(dst, value); return; } 600 if (value < 0) { incrementq(dst, -value); return; } 601 if (value == 0) { ; return; } 602 if (value == 1 && UseIncDec) { decq(dst) ; return; } 603 /* else */ { subq(dst, value) ; return; } 604 } 605 606 void MacroAssembler::incrementq(AddressLiteral dst, Register rscratch) { 607 assert(rscratch != noreg || always_reachable(dst), "missing"); 608 609 if (reachable(dst)) { 610 incrementq(as_Address(dst)); 611 } else { 612 lea(rscratch, dst); 613 incrementq(Address(rscratch, 0)); 614 } 615 } 616 617 void MacroAssembler::incrementq(Register reg, int value) { 618 if (value == min_jint) { addq(reg, value); return; } 619 if (value < 0) { decrementq(reg, -value); return; } 620 if (value == 0) { ; return; } 621 if (value == 1 && UseIncDec) { incq(reg) ; return; } 622 /* else */ { addq(reg, value) ; return; } 623 } 624 625 void MacroAssembler::incrementq(Address dst, int value) { 626 if (value == min_jint) { addq(dst, value); return; } 627 if (value < 0) { decrementq(dst, -value); return; } 628 if (value == 0) { ; return; } 629 if (value == 1 && UseIncDec) { incq(dst) ; return; } 630 /* else */ { addq(dst, value) ; return; } 631 } 632 633 // 32bit can do a case table jump in one instruction but we no longer allow the base 634 // to be installed in the Address class 635 void MacroAssembler::jump(ArrayAddress entry, Register rscratch) { 636 lea(rscratch, entry.base()); 637 Address dispatch = entry.index(); 638 assert(dispatch._base == noreg, "must be"); 639 dispatch._base = rscratch; 640 jmp(dispatch); 641 } 642 643 void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) { 644 ShouldNotReachHere(); // 64bit doesn't use two regs 645 cmpq(x_lo, y_lo); 646 } 647 648 void MacroAssembler::lea(Register dst, AddressLiteral src) { 649 mov_literal64(dst, (intptr_t)src.target(), src.rspec()); 650 } 651 652 void MacroAssembler::lea(Address dst, AddressLiteral adr, Register rscratch) { 653 lea(rscratch, adr); 654 movptr(dst, rscratch); 655 } 656 657 void MacroAssembler::leave() { 658 // %%% is this really better? Why not on 32bit too? 
659 emit_int8((unsigned char)0xC9); // LEAVE 660 } 661 662 void MacroAssembler::lneg(Register hi, Register lo) { 663 ShouldNotReachHere(); // 64bit doesn't use two regs 664 negq(lo); 665 } 666 667 void MacroAssembler::movoop(Register dst, jobject obj) { 668 mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate()); 669 } 670 671 void MacroAssembler::movoop(Address dst, jobject obj, Register rscratch) { 672 mov_literal64(rscratch, (intptr_t)obj, oop_Relocation::spec_for_immediate()); 673 movq(dst, rscratch); 674 } 675 676 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) { 677 mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate()); 678 } 679 680 void MacroAssembler::mov_metadata(Address dst, Metadata* obj, Register rscratch) { 681 mov_literal64(rscratch, (intptr_t)obj, metadata_Relocation::spec_for_immediate()); 682 movq(dst, rscratch); 683 } 684 685 void MacroAssembler::movptr(Register dst, AddressLiteral src) { 686 if (src.is_lval()) { 687 mov_literal64(dst, (intptr_t)src.target(), src.rspec()); 688 } else { 689 if (reachable(src)) { 690 movq(dst, as_Address(src)); 691 } else { 692 lea(dst, src); 693 movq(dst, Address(dst, 0)); 694 } 695 } 696 } 697 698 void MacroAssembler::movptr(ArrayAddress dst, Register src, Register rscratch) { 699 movq(as_Address(dst, rscratch), src); 700 } 701 702 void MacroAssembler::movptr(Register dst, ArrayAddress src) { 703 movq(dst, as_Address(src, dst /*rscratch*/)); 704 } 705 706 // src should NEVER be a real pointer. Use AddressLiteral for true pointers 707 void MacroAssembler::movptr(Address dst, intptr_t src, Register rscratch) { 708 if (is_simm32(src)) { 709 movptr(dst, checked_cast<int32_t>(src)); 710 } else { 711 mov64(rscratch, src); 712 movq(dst, rscratch); 713 } 714 } 715 716 void MacroAssembler::pushoop(jobject obj, Register rscratch) { 717 movoop(rscratch, obj); 718 push(rscratch); 719 } 720 721 void MacroAssembler::pushklass(Metadata* obj, Register rscratch) { 722 mov_metadata(rscratch, obj); 723 push(rscratch); 724 } 725 726 void MacroAssembler::pushptr(AddressLiteral src, Register rscratch) { 727 lea(rscratch, src); 728 if (src.is_lval()) { 729 push(rscratch); 730 } else { 731 pushq(Address(rscratch, 0)); 732 } 733 } 734 735 void MacroAssembler::reset_last_Java_frame(bool clear_fp) { 736 reset_last_Java_frame(r15_thread, clear_fp); 737 } 738 739 void MacroAssembler::set_last_Java_frame(Register last_java_sp, 740 Register last_java_fp, 741 address last_java_pc, 742 Register rscratch) { 743 set_last_Java_frame(r15_thread, last_java_sp, last_java_fp, last_java_pc, rscratch); 744 } 745 746 static void pass_arg0(MacroAssembler* masm, Register arg) { 747 if (c_rarg0 != arg ) { 748 masm->mov(c_rarg0, arg); 749 } 750 } 751 752 static void pass_arg1(MacroAssembler* masm, Register arg) { 753 if (c_rarg1 != arg ) { 754 masm->mov(c_rarg1, arg); 755 } 756 } 757 758 static void pass_arg2(MacroAssembler* masm, Register arg) { 759 if (c_rarg2 != arg ) { 760 masm->mov(c_rarg2, arg); 761 } 762 } 763 764 static void pass_arg3(MacroAssembler* masm, Register arg) { 765 if (c_rarg3 != arg ) { 766 masm->mov(c_rarg3, arg); 767 } 768 } 769 770 void MacroAssembler::stop(const char* msg) { 771 if (ShowMessageBoxOnError) { 772 address rip = pc(); 773 pusha(); // get regs on stack 774 lea(c_rarg1, InternalAddress(rip)); 775 movq(c_rarg2, rsp); // pass pointer to regs array 776 } 777 lea(c_rarg0, ExternalAddress((address) msg)); 778 andq(rsp, -16); // align stack as required by ABI 779 
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
  hlt();
}

void MacroAssembler::warn(const char* msg) {
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);   // align stack as required by push_CPU_state and call
  push_CPU_state(); // keeps alignment at 16 bytes

  lea(c_rarg0, ExternalAddress((address) msg));
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));

  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
}

void MacroAssembler::print_state() {
  address rip = pc();
  pusha();          // get regs on stack
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);   // align stack as required by push_CPU_state and call
  push_CPU_state(); // keeps alignment at 16 bytes

  lea(c_rarg0, InternalAddress(rip));
  lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
  call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);

  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
  popa();
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
  // In order to get locks to work, we need to fake an in_VM state
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
#ifndef PRODUCT
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
#endif
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // XXX correct this offset for amd64
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      print_state64(pc, regs);
      BREAKPOINT;
    }
  }
  fatal("DEBUG MESSAGE: %s", msg);
}

void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
  ttyLocker ttyl;
  DebuggingContext debugging{};
  tty->print_cr("rip = 0x%016lx", (intptr_t)pc);
#ifndef PRODUCT
  tty->cr();
  findpc(pc);
  tty->cr();
#endif
#define PRINT_REG(rax, value) \
  { tty->print("%s = ", #rax); os::print_location(tty, value); }
  PRINT_REG(rax, regs[15]);
  PRINT_REG(rbx, regs[12]);
  PRINT_REG(rcx, regs[14]);
  PRINT_REG(rdx, regs[13]);
  PRINT_REG(rdi, regs[8]);
  PRINT_REG(rsi, regs[9]);
  PRINT_REG(rbp, regs[10]);
  // rsp is actually not stored by pusha(), compute the old rsp from regs (rsp after pusha): regs + 16 = old rsp
  PRINT_REG(rsp, (intptr_t)(&regs[16]));
  PRINT_REG(r8 , regs[7]);
  PRINT_REG(r9 , regs[6]);
  PRINT_REG(r10, regs[5]);
  PRINT_REG(r11, regs[4]);
  PRINT_REG(r12, regs[3]);
  PRINT_REG(r13, regs[2]);
  PRINT_REG(r14, regs[1]);
  PRINT_REG(r15, regs[0]);
#undef PRINT_REG
  // Print some words near the top of the stack.
  int64_t* rsp = &regs[16];
  int64_t* dump_sp = rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 25; row++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    for (int col = 0; col < 4; col++) {
      tty->print(" 0x%016lx", (intptr_t)*dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)pc-64, (address)pc);
  tty->print_cr("--------");
  Disassembler::decode((address)pc, (address)pc+32);
}

// The java_calling_convention describes stack locations as ideal slots on
// a frame with no abi restrictions. Since we must observe abi restrictions
// (like the placement of the register window) the slots must be biased by
// the following value.
static int reg2offset_in(VMReg r) {
  // Account for saved rbp and return address
  // This should really be in_preserve_stack_slots
  return (r->reg2stack() + 4) * VMRegImpl::stack_slot_size;
}

static int reg2offset_out(VMReg r) {
  return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
}

// A long move
void MacroAssembler::long_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {

  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      if (dst.first() != src.first()) {
        mov(dst.first()->as_Register(), src.first()->as_Register());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair: (%s, %s), (%s, %s)",
             src.first()->name(), src.second()->name(), dst.first()->name(), dst.second()->name());
      movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  }
}

// A double move
void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {

  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.

  if (src.is_single_phys_reg() ) {
    if (dst.is_single_phys_reg()) {
      // In theory these overlap but the ordering is such that this is likely a nop
      if ( src.first() != dst.first()) {
        movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
      }
    } else {
      assert(dst.is_single_reg(), "not a stack pair");
      movdbl(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
    }
  } else if (dst.is_single_phys_reg()) {
    assert(src.is_single_reg(), "not a stack pair");
    movdbl(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
  } else {
    assert(src.is_single_reg() && dst.is_single_reg(), "not stack pairs");
    movq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
  }
}


// A float arg may have to do float reg int reg conversion
void MacroAssembler::float_move(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  assert(!src.second()->is_valid() && !dst.second()->is_valid(), "bad float_move");

  // The calling convention assures us that each VMRegPair is either
  // all really one physical register or adjacent stack slots.

  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      movl(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
      movptr(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
    } else {
      // stack to reg
      assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
      movflt(dst.first()->as_XMMRegister(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    assert(src.first()->is_XMMRegister(), "only expect xmm registers as parameters");
    movflt(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_XMMRegister());
  } else {
    // reg to reg
    // In theory these overlap but the ordering is such that this is likely a nop
    if ( src.first() != dst.first()) {
      movdbl(dst.first()->as_XMMRegister(), src.first()->as_XMMRegister());
    }
  }
}

// On 64 bit we will store integer like items to the stack as
// 64 bits items (x86_32/64 abi) even though java would only store
// 32bits for a parameter. On 32bit it will simply be 32 bits
// So this routine will do 32->32 on 32bit and 32->64 on 64bit
void MacroAssembler::move32_64(VMRegPair src, VMRegPair dst, Register tmp, int in_stk_bias, int out_stk_bias) {
  if (src.first()->is_stack()) {
    if (dst.first()->is_stack()) {
      // stack to stack
      movslq(tmp, Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
      movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), tmp);
    } else {
      // stack to reg
      movslq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()) + in_stk_bias));
    }
  } else if (dst.first()->is_stack()) {
    // reg to stack
    // Do we really have to sign extend???
    // __ movslq(src.first()->as_Register(), src.first()->as_Register());
    movq(Address(rsp, reg2offset_out(dst.first()) + out_stk_bias), src.first()->as_Register());
  } else {
    // Do we really have to sign extend???
1008 // __ movslq(dst.first()->as_Register(), src.first()->as_Register()); 1009 if (dst.first() != src.first()) { 1010 movq(dst.first()->as_Register(), src.first()->as_Register()); 1011 } 1012 } 1013 } 1014 1015 void MacroAssembler::move_ptr(VMRegPair src, VMRegPair dst) { 1016 if (src.first()->is_stack()) { 1017 if (dst.first()->is_stack()) { 1018 // stack to stack 1019 movq(rax, Address(rbp, reg2offset_in(src.first()))); 1020 movq(Address(rsp, reg2offset_out(dst.first())), rax); 1021 } else { 1022 // stack to reg 1023 movq(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first()))); 1024 } 1025 } else if (dst.first()->is_stack()) { 1026 // reg to stack 1027 movq(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register()); 1028 } else { 1029 if (dst.first() != src.first()) { 1030 movq(dst.first()->as_Register(), src.first()->as_Register()); 1031 } 1032 } 1033 } 1034 1035 // An oop arg. Must pass a handle not the oop itself 1036 void MacroAssembler::object_move(OopMap* map, 1037 int oop_handle_offset, 1038 int framesize_in_slots, 1039 VMRegPair src, 1040 VMRegPair dst, 1041 bool is_receiver, 1042 int* receiver_offset) { 1043 1044 // must pass a handle. First figure out the location we use as a handle 1045 1046 Register rHandle = dst.first()->is_stack() ? rax : dst.first()->as_Register(); 1047 1048 // See if oop is null if it is we need no handle 1049 1050 if (src.first()->is_stack()) { 1051 1052 // Oop is already on the stack as an argument 1053 int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots(); 1054 map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots)); 1055 if (is_receiver) { 1056 *receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size; 1057 } 1058 1059 cmpptr(Address(rbp, reg2offset_in(src.first())), NULL_WORD); 1060 lea(rHandle, Address(rbp, reg2offset_in(src.first()))); 1061 // conditionally move a null 1062 cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first()))); 1063 } else { 1064 1065 // Oop is in a register we must store it to the space we reserve 1066 // on the stack for oop_handles and pass a handle if oop is non-null 1067 1068 const Register rOop = src.first()->as_Register(); 1069 int oop_slot; 1070 if (rOop == j_rarg0) 1071 oop_slot = 0; 1072 else if (rOop == j_rarg1) 1073 oop_slot = 1; 1074 else if (rOop == j_rarg2) 1075 oop_slot = 2; 1076 else if (rOop == j_rarg3) 1077 oop_slot = 3; 1078 else if (rOop == j_rarg4) 1079 oop_slot = 4; 1080 else { 1081 assert(rOop == j_rarg5, "wrong register"); 1082 oop_slot = 5; 1083 } 1084 1085 oop_slot = oop_slot * VMRegImpl::slots_per_word + oop_handle_offset; 1086 int offset = oop_slot*VMRegImpl::stack_slot_size; 1087 1088 map->set_oop(VMRegImpl::stack2reg(oop_slot)); 1089 // Store oop in handle area, may be null 1090 movptr(Address(rsp, offset), rOop); 1091 if (is_receiver) { 1092 *receiver_offset = offset; 1093 } 1094 1095 cmpptr(rOop, NULL_WORD); 1096 lea(rHandle, Address(rsp, offset)); 1097 // conditionally move a null from the handle area where it was just stored 1098 cmovptr(Assembler::equal, rHandle, Address(rsp, offset)); 1099 } 1100 1101 // If arg is on the stack then place it otherwise it is already in correct reg. 
1102 if (dst.first()->is_stack()) { 1103 movptr(Address(rsp, reg2offset_out(dst.first())), rHandle); 1104 } 1105 } 1106 1107 #endif // _LP64 1108 1109 // Now versions that are common to 32/64 bit 1110 1111 void MacroAssembler::addptr(Register dst, int32_t imm32) { 1112 LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32)); 1113 } 1114 1115 void MacroAssembler::addptr(Register dst, Register src) { 1116 LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); 1117 } 1118 1119 void MacroAssembler::addptr(Address dst, Register src) { 1120 LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); 1121 } 1122 1123 void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 1124 assert(rscratch != noreg || always_reachable(src), "missing"); 1125 1126 if (reachable(src)) { 1127 Assembler::addsd(dst, as_Address(src)); 1128 } else { 1129 lea(rscratch, src); 1130 Assembler::addsd(dst, Address(rscratch, 0)); 1131 } 1132 } 1133 1134 void MacroAssembler::addss(XMMRegister dst, AddressLiteral src, Register rscratch) { 1135 assert(rscratch != noreg || always_reachable(src), "missing"); 1136 1137 if (reachable(src)) { 1138 addss(dst, as_Address(src)); 1139 } else { 1140 lea(rscratch, src); 1141 addss(dst, Address(rscratch, 0)); 1142 } 1143 } 1144 1145 void MacroAssembler::addpd(XMMRegister dst, AddressLiteral src, Register rscratch) { 1146 assert(rscratch != noreg || always_reachable(src), "missing"); 1147 1148 if (reachable(src)) { 1149 Assembler::addpd(dst, as_Address(src)); 1150 } else { 1151 lea(rscratch, src); 1152 Assembler::addpd(dst, Address(rscratch, 0)); 1153 } 1154 } 1155 1156 // See 8273459. Function for ensuring 64-byte alignment, intended for stubs only. 1157 // Stub code is generated once and never copied. 1158 // NMethods can't use this because they get copied and we can't force alignment > 32 bytes. 1159 void MacroAssembler::align64() { 1160 align(64, (uint)(uintptr_t)pc()); 1161 } 1162 1163 void MacroAssembler::align32() { 1164 align(32, (uint)(uintptr_t)pc()); 1165 } 1166 1167 void MacroAssembler::align(uint modulus) { 1168 // 8273459: Ensure alignment is possible with current segment alignment 1169 assert(modulus <= (uintx)CodeEntryAlignment, "Alignment must be <= CodeEntryAlignment"); 1170 align(modulus, offset()); 1171 } 1172 1173 void MacroAssembler::align(uint modulus, uint target) { 1174 if (target % modulus != 0) { 1175 nop(modulus - (target % modulus)); 1176 } 1177 } 1178 1179 void MacroAssembler::push_f(XMMRegister r) { 1180 subptr(rsp, wordSize); 1181 movflt(Address(rsp, 0), r); 1182 } 1183 1184 void MacroAssembler::pop_f(XMMRegister r) { 1185 movflt(r, Address(rsp, 0)); 1186 addptr(rsp, wordSize); 1187 } 1188 1189 void MacroAssembler::push_d(XMMRegister r) { 1190 subptr(rsp, 2 * wordSize); 1191 movdbl(Address(rsp, 0), r); 1192 } 1193 1194 void MacroAssembler::pop_d(XMMRegister r) { 1195 movdbl(r, Address(rsp, 0)); 1196 addptr(rsp, 2 * Interpreter::stackElementSize); 1197 } 1198 1199 void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src, Register rscratch) { 1200 // Used in sign-masking with aligned address. 
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::andpd(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::andpd(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::andps(XMMRegister dst, AddressLiteral src, Register rscratch) {
  // Used in sign-masking with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    Assembler::andps(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    Assembler::andps(dst, Address(rscratch, 0));
  }
}

void MacroAssembler::andptr(Register dst, int32_t imm32) {
  LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
}

#ifdef _LP64
void MacroAssembler::andq(Register dst, AddressLiteral src, Register rscratch) {
  assert(rscratch != noreg || always_reachable(src), "missing");

  if (reachable(src)) {
    andq(dst, as_Address(src));
  } else {
    lea(rscratch, src);
    andq(dst, Address(rscratch, 0));
  }
}
#endif

void MacroAssembler::atomic_incl(Address counter_addr) {
  lock();
  incrementl(counter_addr);
}

void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(counter_addr), "missing");

  if (reachable(counter_addr)) {
    atomic_incl(as_Address(counter_addr));
  } else {
    lea(rscratch, counter_addr);
    atomic_incl(Address(rscratch, 0));
  }
}

#ifdef _LP64
void MacroAssembler::atomic_incq(Address counter_addr) {
  lock();
  incrementq(counter_addr);
}

void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register rscratch) {
  assert(rscratch != noreg || always_reachable(counter_addr), "missing");

  if (reachable(counter_addr)) {
    atomic_incq(as_Address(counter_addr));
  } else {
    lea(rscratch, counter_addr);
    atomic_incq(Address(rscratch, 0));
  }
}
#endif

// Writes to stack successive pages until offset reached to check for
// stack overflow + shadow pages. This clobbers tmp.
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  movptr(tmp, rsp);
  // Bang stack for total size given plus shadow page size.
  // Bang one page at a time because large size can bang beyond yellow and
  // red zones.
  Label loop;
  bind(loop);
  movl(Address(tmp, (-(int)os::vm_page_size())), size );
  subptr(tmp, (int)os::vm_page_size());
  subl(size, (int)os::vm_page_size());
  jcc(Assembler::greater, loop);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again. (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.) Skip this address by starting at i=1, and
  // touch a few more pages below. N.B. It is important to touch all
  // the way down including all pages in the shadow zone.
  for (int i = 1; i < ((int)StackOverflow::stack_shadow_zone_size() / (int)os::vm_page_size()); i++) {
    // this could be any sized move but this can be a debugging crumb
    // so the bigger the better.
1299 movptr(Address(tmp, (-i*(int)os::vm_page_size())), size ); 1300 } 1301 } 1302 1303 void MacroAssembler::reserved_stack_check() { 1304 // testing if reserved zone needs to be enabled 1305 Label no_reserved_zone_enabling; 1306 Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread); 1307 NOT_LP64(get_thread(rsi);) 1308 1309 cmpptr(rsp, Address(thread, JavaThread::reserved_stack_activation_offset())); 1310 jcc(Assembler::below, no_reserved_zone_enabling); 1311 1312 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), thread); 1313 jump(RuntimeAddress(SharedRuntime::throw_delayed_StackOverflowError_entry())); 1314 should_not_reach_here(); 1315 1316 bind(no_reserved_zone_enabling); 1317 } 1318 1319 void MacroAssembler::c2bool(Register x) { 1320 // implements x == 0 ? 0 : 1 1321 // note: must only look at least-significant byte of x 1322 // since C-style booleans are stored in one byte 1323 // only! (was bug) 1324 andl(x, 0xFF); 1325 setb(Assembler::notZero, x); 1326 } 1327 1328 // Wouldn't need if AddressLiteral version had new name 1329 void MacroAssembler::call(Label& L, relocInfo::relocType rtype) { 1330 Assembler::call(L, rtype); 1331 } 1332 1333 void MacroAssembler::call(Register entry) { 1334 Assembler::call(entry); 1335 } 1336 1337 void MacroAssembler::call(AddressLiteral entry, Register rscratch) { 1338 assert(rscratch != noreg || always_reachable(entry), "missing"); 1339 1340 if (reachable(entry)) { 1341 Assembler::call_literal(entry.target(), entry.rspec()); 1342 } else { 1343 lea(rscratch, entry); 1344 Assembler::call(rscratch); 1345 } 1346 } 1347 1348 void MacroAssembler::ic_call(address entry, jint method_index) { 1349 RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index); 1350 #ifdef _LP64 1351 // Needs full 64-bit immediate for later patching. 1352 mov64(rax, (int64_t)Universe::non_oop_word()); 1353 #else 1354 movptr(rax, (intptr_t)Universe::non_oop_word()); 1355 #endif 1356 call(AddressLiteral(entry, rh)); 1357 } 1358 1359 int MacroAssembler::ic_check_size() { 1360 return LP64_ONLY(14) NOT_LP64(12); 1361 } 1362 1363 int MacroAssembler::ic_check(int end_alignment) { 1364 Register receiver = LP64_ONLY(j_rarg0) NOT_LP64(rcx); 1365 Register data = rax; 1366 Register temp = LP64_ONLY(rscratch1) NOT_LP64(rbx); 1367 1368 // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed 1369 // before the inline cache check, so we don't have to execute any nop instructions when dispatching 1370 // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align 1371 // before the inline cache check here, and not after 1372 align(end_alignment, offset() + ic_check_size()); 1373 1374 int uep_offset = offset(); 1375 1376 if (UseCompressedClassPointers) { 1377 movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes())); 1378 cmpl(temp, Address(data, CompiledICData::speculated_klass_offset())); 1379 } else { 1380 movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes())); 1381 cmpptr(temp, Address(data, CompiledICData::speculated_klass_offset())); 1382 } 1383 1384 // if inline cache check fails, then jump to runtime routine 1385 jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub())); 1386 assert((offset() % end_alignment) == 0, "Misaligned verified entry point"); 1387 1388 return uep_offset; 1389 } 1390 1391 void MacroAssembler::emit_static_call_stub() { 1392 // Static stub relocation also tags the Method* in the code-stream. 
1393 mov_metadata(rbx, (Metadata*) nullptr); // Method is zapped till fixup time. 1394 // This is recognized as unresolved by relocs/nativeinst/ic code. 1395 jump(RuntimeAddress(pc())); 1396 } 1397 1398 // Implementation of call_VM versions 1399 1400 void MacroAssembler::call_VM(Register oop_result, 1401 address entry_point, 1402 bool check_exceptions) { 1403 Label C, E; 1404 call(C, relocInfo::none); 1405 jmp(E); 1406 1407 bind(C); 1408 call_VM_helper(oop_result, entry_point, 0, check_exceptions); 1409 ret(0); 1410 1411 bind(E); 1412 } 1413 1414 void MacroAssembler::call_VM(Register oop_result, 1415 address entry_point, 1416 Register arg_1, 1417 bool check_exceptions) { 1418 Label C, E; 1419 call(C, relocInfo::none); 1420 jmp(E); 1421 1422 bind(C); 1423 pass_arg1(this, arg_1); 1424 call_VM_helper(oop_result, entry_point, 1, check_exceptions); 1425 ret(0); 1426 1427 bind(E); 1428 } 1429 1430 void MacroAssembler::call_VM(Register oop_result, 1431 address entry_point, 1432 Register arg_1, 1433 Register arg_2, 1434 bool check_exceptions) { 1435 Label C, E; 1436 call(C, relocInfo::none); 1437 jmp(E); 1438 1439 bind(C); 1440 1441 LP64_ONLY(assert_different_registers(arg_1, c_rarg2)); 1442 1443 pass_arg2(this, arg_2); 1444 pass_arg1(this, arg_1); 1445 call_VM_helper(oop_result, entry_point, 2, check_exceptions); 1446 ret(0); 1447 1448 bind(E); 1449 } 1450 1451 void MacroAssembler::call_VM(Register oop_result, 1452 address entry_point, 1453 Register arg_1, 1454 Register arg_2, 1455 Register arg_3, 1456 bool check_exceptions) { 1457 Label C, E; 1458 call(C, relocInfo::none); 1459 jmp(E); 1460 1461 bind(C); 1462 1463 LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3)); 1464 LP64_ONLY(assert_different_registers(arg_2, c_rarg3)); 1465 pass_arg3(this, arg_3); 1466 pass_arg2(this, arg_2); 1467 pass_arg1(this, arg_1); 1468 call_VM_helper(oop_result, entry_point, 3, check_exceptions); 1469 ret(0); 1470 1471 bind(E); 1472 } 1473 1474 void MacroAssembler::call_VM(Register oop_result, 1475 Register last_java_sp, 1476 address entry_point, 1477 int number_of_arguments, 1478 bool check_exceptions) { 1479 Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg); 1480 call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions); 1481 } 1482 1483 void MacroAssembler::call_VM(Register oop_result, 1484 Register last_java_sp, 1485 address entry_point, 1486 Register arg_1, 1487 bool check_exceptions) { 1488 pass_arg1(this, arg_1); 1489 call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions); 1490 } 1491 1492 void MacroAssembler::call_VM(Register oop_result, 1493 Register last_java_sp, 1494 address entry_point, 1495 Register arg_1, 1496 Register arg_2, 1497 bool check_exceptions) { 1498 1499 LP64_ONLY(assert_different_registers(arg_1, c_rarg2)); 1500 pass_arg2(this, arg_2); 1501 pass_arg1(this, arg_1); 1502 call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions); 1503 } 1504 1505 void MacroAssembler::call_VM(Register oop_result, 1506 Register last_java_sp, 1507 address entry_point, 1508 Register arg_1, 1509 Register arg_2, 1510 Register arg_3, 1511 bool check_exceptions) { 1512 LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3)); 1513 LP64_ONLY(assert_different_registers(arg_2, c_rarg3)); 1514 pass_arg3(this, arg_3); 1515 pass_arg2(this, arg_2); 1516 pass_arg1(this, arg_1); 1517 call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions); 1518 } 1519 1520 void MacroAssembler::super_call_VM(Register oop_result, 1521 Register 
last_java_sp, 1522 address entry_point, 1523 int number_of_arguments, 1524 bool check_exceptions) { 1525 Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg); 1526 MacroAssembler::call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions); 1527 } 1528 1529 void MacroAssembler::super_call_VM(Register oop_result, 1530 Register last_java_sp, 1531 address entry_point, 1532 Register arg_1, 1533 bool check_exceptions) { 1534 pass_arg1(this, arg_1); 1535 super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions); 1536 } 1537 1538 void MacroAssembler::super_call_VM(Register oop_result, 1539 Register last_java_sp, 1540 address entry_point, 1541 Register arg_1, 1542 Register arg_2, 1543 bool check_exceptions) { 1544 1545 LP64_ONLY(assert_different_registers(arg_1, c_rarg2)); 1546 pass_arg2(this, arg_2); 1547 pass_arg1(this, arg_1); 1548 super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions); 1549 } 1550 1551 void MacroAssembler::super_call_VM(Register oop_result, 1552 Register last_java_sp, 1553 address entry_point, 1554 Register arg_1, 1555 Register arg_2, 1556 Register arg_3, 1557 bool check_exceptions) { 1558 LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3)); 1559 LP64_ONLY(assert_different_registers(arg_2, c_rarg3)); 1560 pass_arg3(this, arg_3); 1561 pass_arg2(this, arg_2); 1562 pass_arg1(this, arg_1); 1563 super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions); 1564 } 1565 1566 void MacroAssembler::call_VM_base(Register oop_result, 1567 Register java_thread, 1568 Register last_java_sp, 1569 address entry_point, 1570 int number_of_arguments, 1571 bool check_exceptions) { 1572 // determine java_thread register 1573 if (!java_thread->is_valid()) { 1574 #ifdef _LP64 1575 java_thread = r15_thread; 1576 #else 1577 java_thread = rdi; 1578 get_thread(java_thread); 1579 #endif // LP64 1580 } 1581 // determine last_java_sp register 1582 if (!last_java_sp->is_valid()) { 1583 last_java_sp = rsp; 1584 } 1585 // debugging support 1586 assert(number_of_arguments >= 0 , "cannot have negative number of arguments"); 1587 LP64_ONLY(assert(java_thread == r15_thread, "unexpected register")); 1588 #ifdef ASSERT 1589 // TraceBytecodes does not use r12 but saves it over the call, so don't verify 1590 // r12 is the heapbase. 1591 LP64_ONLY(if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");) 1592 #endif // ASSERT 1593 1594 assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result"); 1595 assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp"); 1596 1597 // push java thread (becomes first argument of C function) 1598 1599 NOT_LP64(push(java_thread); number_of_arguments++); 1600 LP64_ONLY(mov(c_rarg0, r15_thread)); 1601 1602 // set last Java frame before call 1603 assert(last_java_sp != rbp, "can't use ebp/rbp"); 1604 1605 // Only interpreter should have to set fp 1606 set_last_Java_frame(java_thread, last_java_sp, rbp, nullptr, rscratch1); 1607 1608 // do the call, remove parameters 1609 MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments); 1610 1611 // restore the thread (cannot use the pushed argument since arguments 1612 // may be overwritten by C code generated by an optimizing compiler); 1613 // however can use the register value directly if it is callee saved. 
  if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
    // rdi & rsi (also r15) are callee saved -> nothing to do
#ifdef ASSERT
    guarantee(java_thread != rax, "change this code");
    push(rax);
    { Label L;
      get_thread(rax);
      cmpptr(java_thread, rax);
      jcc(Assembler::equal, L);
      STOP("MacroAssembler::call_VM_base: rdi not callee saved?");
      bind(L);
    }
    pop(rax);
#endif
  } else {
    get_thread(java_thread);
  }
  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(java_thread, true);

  // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    cmpptr(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD);
#ifndef _LP64
    jump_cc(Assembler::notEqual,
            RuntimeAddress(StubRoutines::forward_exception_entry()));
#else
    // This used to conditionally jump to forward_exception however it is
    // possible if we relocate that the branch will not reach. So we must jump
    // around so we can always reach

    Label ok;
    jcc(Assembler::equal, ok);
    jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    bind(ok);
#endif // LP64
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result, java_thread);
  }
}

void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {

  // Calculating the value for last_Java_sp is somewhat subtle. call_VM does an
  // intermediate call which places a return address on the stack just under the
  // stack pointer as the user finished with it. This allows us to retrieve
  // last_Java_pc from last_Java_sp[-1].
  // On 32bit we then have to push additional args on the stack to accomplish
  // the actual requested call. On 64bit call_VM can only use register args,
  // so the only extra space is the return address that call_VM created.
  // This hopefully explains the calculations here.

#ifdef _LP64
  // We've pushed one address, correct last_Java_sp
  lea(rax, Address(rsp, wordSize));
#else
  lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
#endif // LP64

  call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);

}

// Use this method when MacroAssembler version of call_VM_leaf_base() should be called from Interpreter.
void MacroAssembler::call_VM_leaf0(address entry_point) {
  MacroAssembler::call_VM_leaf_base(entry_point, 0);
}

void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
  call_VM_leaf_base(entry_point, number_of_arguments);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 1);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
  LP64_ONLY(assert_different_registers(arg_0, c_rarg1));
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 2);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2));
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 3);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
  LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 4);
}

void MacroAssembler::super_call_VM_leaf(address entry_point) {
  MacroAssembler::call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 1);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
  LP64_ONLY(assert_different_registers(arg_0, c_rarg1));
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 2);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2));
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 3);
}

void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
  LP64_ONLY(assert_different_registers(arg_0, c_rarg1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_1, c_rarg2, c_rarg3));
  LP64_ONLY(assert_different_registers(arg_2, c_rarg3));
  pass_arg3(this, arg_3);
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  MacroAssembler::call_VM_leaf_base(entry_point, 4);
}

void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
  movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
  movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
  verify_oop_msg(oop_result, "broken oop in call_VM_base");
}

void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
  movptr(metadata_result,
Address(java_thread, JavaThread::vm_result_2_offset())); 1772 movptr(Address(java_thread, JavaThread::vm_result_2_offset()), NULL_WORD); 1773 } 1774 1775 void MacroAssembler::check_and_handle_earlyret(Register java_thread) { 1776 } 1777 1778 void MacroAssembler::check_and_handle_popframe(Register java_thread) { 1779 } 1780 1781 void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm, Register rscratch) { 1782 assert(rscratch != noreg || always_reachable(src1), "missing"); 1783 1784 if (reachable(src1)) { 1785 cmpl(as_Address(src1), imm); 1786 } else { 1787 lea(rscratch, src1); 1788 cmpl(Address(rscratch, 0), imm); 1789 } 1790 } 1791 1792 void MacroAssembler::cmp32(Register src1, AddressLiteral src2, Register rscratch) { 1793 assert(!src2.is_lval(), "use cmpptr"); 1794 assert(rscratch != noreg || always_reachable(src2), "missing"); 1795 1796 if (reachable(src2)) { 1797 cmpl(src1, as_Address(src2)); 1798 } else { 1799 lea(rscratch, src2); 1800 cmpl(src1, Address(rscratch, 0)); 1801 } 1802 } 1803 1804 void MacroAssembler::cmp32(Register src1, int32_t imm) { 1805 Assembler::cmpl(src1, imm); 1806 } 1807 1808 void MacroAssembler::cmp32(Register src1, Address src2) { 1809 Assembler::cmpl(src1, src2); 1810 } 1811 1812 void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) { 1813 ucomisd(opr1, opr2); 1814 1815 Label L; 1816 if (unordered_is_less) { 1817 movl(dst, -1); 1818 jcc(Assembler::parity, L); 1819 jcc(Assembler::below , L); 1820 movl(dst, 0); 1821 jcc(Assembler::equal , L); 1822 increment(dst); 1823 } else { // unordered is greater 1824 movl(dst, 1); 1825 jcc(Assembler::parity, L); 1826 jcc(Assembler::above , L); 1827 movl(dst, 0); 1828 jcc(Assembler::equal , L); 1829 decrementl(dst); 1830 } 1831 bind(L); 1832 } 1833 1834 void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) { 1835 ucomiss(opr1, opr2); 1836 1837 Label L; 1838 if (unordered_is_less) { 1839 movl(dst, -1); 1840 jcc(Assembler::parity, L); 1841 jcc(Assembler::below , L); 1842 movl(dst, 0); 1843 jcc(Assembler::equal , L); 1844 increment(dst); 1845 } else { // unordered is greater 1846 movl(dst, 1); 1847 jcc(Assembler::parity, L); 1848 jcc(Assembler::above , L); 1849 movl(dst, 0); 1850 jcc(Assembler::equal , L); 1851 decrementl(dst); 1852 } 1853 bind(L); 1854 } 1855 1856 1857 void MacroAssembler::cmp8(AddressLiteral src1, int imm, Register rscratch) { 1858 assert(rscratch != noreg || always_reachable(src1), "missing"); 1859 1860 if (reachable(src1)) { 1861 cmpb(as_Address(src1), imm); 1862 } else { 1863 lea(rscratch, src1); 1864 cmpb(Address(rscratch, 0), imm); 1865 } 1866 } 1867 1868 void MacroAssembler::cmpptr(Register src1, AddressLiteral src2, Register rscratch) { 1869 #ifdef _LP64 1870 assert(rscratch != noreg || always_reachable(src2), "missing"); 1871 1872 if (src2.is_lval()) { 1873 movptr(rscratch, src2); 1874 Assembler::cmpq(src1, rscratch); 1875 } else if (reachable(src2)) { 1876 cmpq(src1, as_Address(src2)); 1877 } else { 1878 lea(rscratch, src2); 1879 Assembler::cmpq(src1, Address(rscratch, 0)); 1880 } 1881 #else 1882 assert(rscratch == noreg, "not needed"); 1883 if (src2.is_lval()) { 1884 cmp_literal32(src1, (int32_t)src2.target(), src2.rspec()); 1885 } else { 1886 cmpl(src1, as_Address(src2)); 1887 } 1888 #endif // _LP64 1889 } 1890 1891 void MacroAssembler::cmpptr(Address src1, AddressLiteral src2, Register rscratch) { 1892 assert(src2.is_lval(), "not a mem-mem compare"); 1893 #ifdef _LP64 1894 // moves src2's 
literal address 1895 movptr(rscratch, src2); 1896 Assembler::cmpq(src1, rscratch); 1897 #else 1898 assert(rscratch == noreg, "not needed"); 1899 cmp_literal32(src1, (int32_t)src2.target(), src2.rspec()); 1900 #endif // _LP64 1901 } 1902 1903 void MacroAssembler::cmpoop(Register src1, Register src2) { 1904 cmpptr(src1, src2); 1905 } 1906 1907 void MacroAssembler::cmpoop(Register src1, Address src2) { 1908 cmpptr(src1, src2); 1909 } 1910 1911 #ifdef _LP64 1912 void MacroAssembler::cmpoop(Register src1, jobject src2, Register rscratch) { 1913 movoop(rscratch, src2); 1914 cmpptr(src1, rscratch); 1915 } 1916 #endif 1917 1918 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr, Register rscratch) { 1919 assert(rscratch != noreg || always_reachable(adr), "missing"); 1920 1921 if (reachable(adr)) { 1922 lock(); 1923 cmpxchgptr(reg, as_Address(adr)); 1924 } else { 1925 lea(rscratch, adr); 1926 lock(); 1927 cmpxchgptr(reg, Address(rscratch, 0)); 1928 } 1929 } 1930 1931 void MacroAssembler::cmpxchgptr(Register reg, Address adr) { 1932 LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr)); 1933 } 1934 1935 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src, Register rscratch) { 1936 assert(rscratch != noreg || always_reachable(src), "missing"); 1937 1938 if (reachable(src)) { 1939 Assembler::comisd(dst, as_Address(src)); 1940 } else { 1941 lea(rscratch, src); 1942 Assembler::comisd(dst, Address(rscratch, 0)); 1943 } 1944 } 1945 1946 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src, Register rscratch) { 1947 assert(rscratch != noreg || always_reachable(src), "missing"); 1948 1949 if (reachable(src)) { 1950 Assembler::comiss(dst, as_Address(src)); 1951 } else { 1952 lea(rscratch, src); 1953 Assembler::comiss(dst, Address(rscratch, 0)); 1954 } 1955 } 1956 1957 1958 void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr, Register rscratch) { 1959 assert(rscratch != noreg || always_reachable(counter_addr), "missing"); 1960 1961 Condition negated_cond = negate_condition(cond); 1962 Label L; 1963 jcc(negated_cond, L); 1964 pushf(); // Preserve flags 1965 atomic_incl(counter_addr, rscratch); 1966 popf(); 1967 bind(L); 1968 } 1969 1970 int MacroAssembler::corrected_idivl(Register reg) { 1971 // Full implementation of Java idiv and irem; checks for 1972 // special case as described in JVM spec., p.243 & p.271. 1973 // The function returns the (pc) offset of the idivl 1974 // instruction - may be needed for implicit exceptions. 
1975 // 1976 // normal case special case 1977 // 1978 // input : rax,: dividend min_int 1979 // reg: divisor (may not be rax,/rdx) -1 1980 // 1981 // output: rax,: quotient (= rax, idiv reg) min_int 1982 // rdx: remainder (= rax, irem reg) 0 1983 assert(reg != rax && reg != rdx, "reg cannot be rax, or rdx register"); 1984 const int min_int = 0x80000000; 1985 Label normal_case, special_case; 1986 1987 // check for special case 1988 cmpl(rax, min_int); 1989 jcc(Assembler::notEqual, normal_case); 1990 xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0) 1991 cmpl(reg, -1); 1992 jcc(Assembler::equal, special_case); 1993 1994 // handle normal case 1995 bind(normal_case); 1996 cdql(); 1997 int idivl_offset = offset(); 1998 idivl(reg); 1999 2000 // normal and special case exit 2001 bind(special_case); 2002 2003 return idivl_offset; 2004 } 2005 2006 2007 2008 void MacroAssembler::decrementl(Register reg, int value) { 2009 if (value == min_jint) {subl(reg, value) ; return; } 2010 if (value < 0) { incrementl(reg, -value); return; } 2011 if (value == 0) { ; return; } 2012 if (value == 1 && UseIncDec) { decl(reg) ; return; } 2013 /* else */ { subl(reg, value) ; return; } 2014 } 2015 2016 void MacroAssembler::decrementl(Address dst, int value) { 2017 if (value == min_jint) {subl(dst, value) ; return; } 2018 if (value < 0) { incrementl(dst, -value); return; } 2019 if (value == 0) { ; return; } 2020 if (value == 1 && UseIncDec) { decl(dst) ; return; } 2021 /* else */ { subl(dst, value) ; return; } 2022 } 2023 2024 void MacroAssembler::division_with_shift (Register reg, int shift_value) { 2025 assert(shift_value > 0, "illegal shift value"); 2026 Label _is_positive; 2027 testl (reg, reg); 2028 jcc (Assembler::positive, _is_positive); 2029 int offset = (1 << shift_value) - 1 ; 2030 2031 if (offset == 1) { 2032 incrementl(reg); 2033 } else { 2034 addl(reg, offset); 2035 } 2036 2037 bind (_is_positive); 2038 sarl(reg, shift_value); 2039 } 2040 2041 void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2042 assert(rscratch != noreg || always_reachable(src), "missing"); 2043 2044 if (reachable(src)) { 2045 Assembler::divsd(dst, as_Address(src)); 2046 } else { 2047 lea(rscratch, src); 2048 Assembler::divsd(dst, Address(rscratch, 0)); 2049 } 2050 } 2051 2052 void MacroAssembler::divss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2053 assert(rscratch != noreg || always_reachable(src), "missing"); 2054 2055 if (reachable(src)) { 2056 Assembler::divss(dst, as_Address(src)); 2057 } else { 2058 lea(rscratch, src); 2059 Assembler::divss(dst, Address(rscratch, 0)); 2060 } 2061 } 2062 2063 void MacroAssembler::enter() { 2064 push(rbp); 2065 mov(rbp, rsp); 2066 } 2067 2068 void MacroAssembler::post_call_nop() { 2069 if (!Continuations::enabled()) { 2070 return; 2071 } 2072 InstructionMark im(this); 2073 relocate(post_call_nop_Relocation::spec()); 2074 InlineSkippedInstructionsCounter skipCounter(this); 2075 emit_int8((uint8_t)0x0f); 2076 emit_int8((uint8_t)0x1f); 2077 emit_int8((uint8_t)0x84); 2078 emit_int8((uint8_t)0x00); 2079 emit_int32(0x00); 2080 } 2081 2082 // A 5 byte nop that is safe for patching (see patch_verified_entry) 2083 void MacroAssembler::fat_nop() { 2084 if (UseAddressNop) { 2085 addr_nop_5(); 2086 } else { 2087 emit_int8((uint8_t)0x26); // es: 2088 emit_int8((uint8_t)0x2e); // cs: 2089 emit_int8((uint8_t)0x64); // fs: 2090 emit_int8((uint8_t)0x65); // gs: 2091 emit_int8((uint8_t)0x90); 2092 } 2093 } 2094 2095 #ifndef _LP64 2096 void 
MacroAssembler::fcmp(Register tmp) { 2097 fcmp(tmp, 1, true, true); 2098 } 2099 2100 void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) { 2101 assert(!pop_right || pop_left, "usage error"); 2102 if (VM_Version::supports_cmov()) { 2103 assert(tmp == noreg, "unneeded temp"); 2104 if (pop_left) { 2105 fucomip(index); 2106 } else { 2107 fucomi(index); 2108 } 2109 if (pop_right) { 2110 fpop(); 2111 } 2112 } else { 2113 assert(tmp != noreg, "need temp"); 2114 if (pop_left) { 2115 if (pop_right) { 2116 fcompp(); 2117 } else { 2118 fcomp(index); 2119 } 2120 } else { 2121 fcom(index); 2122 } 2123 // convert FPU condition into eflags condition via rax, 2124 save_rax(tmp); 2125 fwait(); fnstsw_ax(); 2126 sahf(); 2127 restore_rax(tmp); 2128 } 2129 // condition codes set as follows: 2130 // 2131 // CF (corresponds to C0) if x < y 2132 // PF (corresponds to C2) if unordered 2133 // ZF (corresponds to C3) if x = y 2134 } 2135 2136 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) { 2137 fcmp2int(dst, unordered_is_less, 1, true, true); 2138 } 2139 2140 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) { 2141 fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right); 2142 Label L; 2143 if (unordered_is_less) { 2144 movl(dst, -1); 2145 jcc(Assembler::parity, L); 2146 jcc(Assembler::below , L); 2147 movl(dst, 0); 2148 jcc(Assembler::equal , L); 2149 increment(dst); 2150 } else { // unordered is greater 2151 movl(dst, 1); 2152 jcc(Assembler::parity, L); 2153 jcc(Assembler::above , L); 2154 movl(dst, 0); 2155 jcc(Assembler::equal , L); 2156 decrementl(dst); 2157 } 2158 bind(L); 2159 } 2160 2161 void MacroAssembler::fld_d(AddressLiteral src) { 2162 fld_d(as_Address(src)); 2163 } 2164 2165 void MacroAssembler::fld_s(AddressLiteral src) { 2166 fld_s(as_Address(src)); 2167 } 2168 2169 void MacroAssembler::fldcw(AddressLiteral src) { 2170 fldcw(as_Address(src)); 2171 } 2172 2173 void MacroAssembler::fpop() { 2174 ffree(); 2175 fincstp(); 2176 } 2177 2178 void MacroAssembler::fremr(Register tmp) { 2179 save_rax(tmp); 2180 { Label L; 2181 bind(L); 2182 fprem(); 2183 fwait(); fnstsw_ax(); 2184 sahf(); 2185 jcc(Assembler::parity, L); 2186 } 2187 restore_rax(tmp); 2188 // Result is in ST0. 
2189 // Note: fxch & fpop to get rid of ST1 2190 // (otherwise FPU stack could overflow eventually) 2191 fxch(1); 2192 fpop(); 2193 } 2194 2195 void MacroAssembler::empty_FPU_stack() { 2196 if (VM_Version::supports_mmx()) { 2197 emms(); 2198 } else { 2199 for (int i = 8; i-- > 0; ) ffree(i); 2200 } 2201 } 2202 #endif // !LP64 2203 2204 void MacroAssembler::mulpd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2205 assert(rscratch != noreg || always_reachable(src), "missing"); 2206 if (reachable(src)) { 2207 Assembler::mulpd(dst, as_Address(src)); 2208 } else { 2209 lea(rscratch, src); 2210 Assembler::mulpd(dst, Address(rscratch, 0)); 2211 } 2212 } 2213 2214 void MacroAssembler::load_float(Address src) { 2215 #ifdef _LP64 2216 movflt(xmm0, src); 2217 #else 2218 if (UseSSE >= 1) { 2219 movflt(xmm0, src); 2220 } else { 2221 fld_s(src); 2222 } 2223 #endif // LP64 2224 } 2225 2226 void MacroAssembler::store_float(Address dst) { 2227 #ifdef _LP64 2228 movflt(dst, xmm0); 2229 #else 2230 if (UseSSE >= 1) { 2231 movflt(dst, xmm0); 2232 } else { 2233 fstp_s(dst); 2234 } 2235 #endif // LP64 2236 } 2237 2238 void MacroAssembler::load_double(Address src) { 2239 #ifdef _LP64 2240 movdbl(xmm0, src); 2241 #else 2242 if (UseSSE >= 2) { 2243 movdbl(xmm0, src); 2244 } else { 2245 fld_d(src); 2246 } 2247 #endif // LP64 2248 } 2249 2250 void MacroAssembler::store_double(Address dst) { 2251 #ifdef _LP64 2252 movdbl(dst, xmm0); 2253 #else 2254 if (UseSSE >= 2) { 2255 movdbl(dst, xmm0); 2256 } else { 2257 fstp_d(dst); 2258 } 2259 #endif // LP64 2260 } 2261 2262 // dst = c = a * b + c 2263 void MacroAssembler::fmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) { 2264 Assembler::vfmadd231sd(c, a, b); 2265 if (dst != c) { 2266 movdbl(dst, c); 2267 } 2268 } 2269 2270 // dst = c = a * b + c 2271 void MacroAssembler::fmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c) { 2272 Assembler::vfmadd231ss(c, a, b); 2273 if (dst != c) { 2274 movflt(dst, c); 2275 } 2276 } 2277 2278 // dst = c = a * b + c 2279 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) { 2280 Assembler::vfmadd231pd(c, a, b, vector_len); 2281 if (dst != c) { 2282 vmovdqu(dst, c); 2283 } 2284 } 2285 2286 // dst = c = a * b + c 2287 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, XMMRegister b, XMMRegister c, int vector_len) { 2288 Assembler::vfmadd231ps(c, a, b, vector_len); 2289 if (dst != c) { 2290 vmovdqu(dst, c); 2291 } 2292 } 2293 2294 // dst = c = a * b + c 2295 void MacroAssembler::vfmad(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) { 2296 Assembler::vfmadd231pd(c, a, b, vector_len); 2297 if (dst != c) { 2298 vmovdqu(dst, c); 2299 } 2300 } 2301 2302 // dst = c = a * b + c 2303 void MacroAssembler::vfmaf(XMMRegister dst, XMMRegister a, Address b, XMMRegister c, int vector_len) { 2304 Assembler::vfmadd231ps(c, a, b, vector_len); 2305 if (dst != c) { 2306 vmovdqu(dst, c); 2307 } 2308 } 2309 2310 void MacroAssembler::incrementl(AddressLiteral dst, Register rscratch) { 2311 assert(rscratch != noreg || always_reachable(dst), "missing"); 2312 2313 if (reachable(dst)) { 2314 incrementl(as_Address(dst)); 2315 } else { 2316 lea(rscratch, dst); 2317 incrementl(Address(rscratch, 0)); 2318 } 2319 } 2320 2321 void MacroAssembler::incrementl(ArrayAddress dst, Register rscratch) { 2322 incrementl(as_Address(dst, rscratch)); 2323 } 2324 2325 void MacroAssembler::incrementl(Register reg, int value) { 2326 if (value == min_jint) 
{addl(reg, value) ; return; } 2327 if (value < 0) { decrementl(reg, -value); return; } 2328 if (value == 0) { ; return; } 2329 if (value == 1 && UseIncDec) { incl(reg) ; return; } 2330 /* else */ { addl(reg, value) ; return; } 2331 } 2332 2333 void MacroAssembler::incrementl(Address dst, int value) { 2334 if (value == min_jint) {addl(dst, value) ; return; } 2335 if (value < 0) { decrementl(dst, -value); return; } 2336 if (value == 0) { ; return; } 2337 if (value == 1 && UseIncDec) { incl(dst) ; return; } 2338 /* else */ { addl(dst, value) ; return; } 2339 } 2340 2341 void MacroAssembler::jump(AddressLiteral dst, Register rscratch) { 2342 assert(rscratch != noreg || always_reachable(dst), "missing"); 2343 assert(!dst.rspec().reloc()->is_data(), "should not use ExternalAddress for jump"); 2344 if (reachable(dst)) { 2345 jmp_literal(dst.target(), dst.rspec()); 2346 } else { 2347 lea(rscratch, dst); 2348 jmp(rscratch); 2349 } 2350 } 2351 2352 void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst, Register rscratch) { 2353 assert(rscratch != noreg || always_reachable(dst), "missing"); 2354 assert(!dst.rspec().reloc()->is_data(), "should not use ExternalAddress for jump_cc"); 2355 if (reachable(dst)) { 2356 InstructionMark im(this); 2357 relocate(dst.reloc()); 2358 const int short_size = 2; 2359 const int long_size = 6; 2360 int offs = (intptr_t)dst.target() - ((intptr_t)pc()); 2361 if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) { 2362 // 0111 tttn #8-bit disp 2363 emit_int8(0x70 | cc); 2364 emit_int8((offs - short_size) & 0xFF); 2365 } else { 2366 // 0000 1111 1000 tttn #32-bit disp 2367 emit_int8(0x0F); 2368 emit_int8((unsigned char)(0x80 | cc)); 2369 emit_int32(offs - long_size); 2370 } 2371 } else { 2372 #ifdef ASSERT 2373 warning("reversing conditional branch"); 2374 #endif /* ASSERT */ 2375 Label skip; 2376 jccb(reverse[cc], skip); 2377 lea(rscratch, dst); 2378 Assembler::jmp(rscratch); 2379 bind(skip); 2380 } 2381 } 2382 2383 void MacroAssembler::ldmxcsr(AddressLiteral src, Register rscratch) { 2384 assert(rscratch != noreg || always_reachable(src), "missing"); 2385 2386 if (reachable(src)) { 2387 Assembler::ldmxcsr(as_Address(src)); 2388 } else { 2389 lea(rscratch, src); 2390 Assembler::ldmxcsr(Address(rscratch, 0)); 2391 } 2392 } 2393 2394 int MacroAssembler::load_signed_byte(Register dst, Address src) { 2395 int off; 2396 if (LP64_ONLY(true ||) VM_Version::is_P6()) { 2397 off = offset(); 2398 movsbl(dst, src); // movsxb 2399 } else { 2400 off = load_unsigned_byte(dst, src); 2401 shll(dst, 24); 2402 sarl(dst, 24); 2403 } 2404 return off; 2405 } 2406 2407 // Note: load_signed_short used to be called load_signed_word. 2408 // Although the 'w' in x86 opcodes refers to the term "word" in the assembler 2409 // manual, which means 16 bits, that usage is found nowhere in HotSpot code. 2410 // The term "word" in HotSpot means a 32- or 64-bit machine word. 2411 int MacroAssembler::load_signed_short(Register dst, Address src) { 2412 int off; 2413 if (LP64_ONLY(true ||) VM_Version::is_P6()) { 2414 // This is dubious to me since it seems safe to do a signed 16 => 64 bit 2415 // version but this is what 64bit has always done. This seems to imply 2416 // that users are only using 32bits worth. 
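    // (movswl sign-extends 16 -> 32 bits; on 64-bit the write to the 32-bit
    // destination register zero-clears bits 63:32, so a full 16 -> 64 bit sign
    // extension would require movswq instead.)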
2417 off = offset(); 2418 movswl(dst, src); // movsxw 2419 } else { 2420 off = load_unsigned_short(dst, src); 2421 shll(dst, 16); 2422 sarl(dst, 16); 2423 } 2424 return off; 2425 } 2426 2427 int MacroAssembler::load_unsigned_byte(Register dst, Address src) { 2428 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16, 2429 // and "3.9 Partial Register Penalties", p. 22). 2430 int off; 2431 if (LP64_ONLY(true || ) VM_Version::is_P6() || src.uses(dst)) { 2432 off = offset(); 2433 movzbl(dst, src); // movzxb 2434 } else { 2435 xorl(dst, dst); 2436 off = offset(); 2437 movb(dst, src); 2438 } 2439 return off; 2440 } 2441 2442 // Note: load_unsigned_short used to be called load_unsigned_word. 2443 int MacroAssembler::load_unsigned_short(Register dst, Address src) { 2444 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16, 2445 // and "3.9 Partial Register Penalties", p. 22). 2446 int off; 2447 if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) { 2448 off = offset(); 2449 movzwl(dst, src); // movzxw 2450 } else { 2451 xorl(dst, dst); 2452 off = offset(); 2453 movw(dst, src); 2454 } 2455 return off; 2456 } 2457 2458 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) { 2459 switch (size_in_bytes) { 2460 #ifndef _LP64 2461 case 8: 2462 assert(dst2 != noreg, "second dest register required"); 2463 movl(dst, src); 2464 movl(dst2, src.plus_disp(BytesPerInt)); 2465 break; 2466 #else 2467 case 8: movq(dst, src); break; 2468 #endif 2469 case 4: movl(dst, src); break; 2470 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break; 2471 case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break; 2472 default: ShouldNotReachHere(); 2473 } 2474 } 2475 2476 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) { 2477 switch (size_in_bytes) { 2478 #ifndef _LP64 2479 case 8: 2480 assert(src2 != noreg, "second source register required"); 2481 movl(dst, src); 2482 movl(dst.plus_disp(BytesPerInt), src2); 2483 break; 2484 #else 2485 case 8: movq(dst, src); break; 2486 #endif 2487 case 4: movl(dst, src); break; 2488 case 2: movw(dst, src); break; 2489 case 1: movb(dst, src); break; 2490 default: ShouldNotReachHere(); 2491 } 2492 } 2493 2494 void MacroAssembler::mov32(AddressLiteral dst, Register src, Register rscratch) { 2495 assert(rscratch != noreg || always_reachable(dst), "missing"); 2496 2497 if (reachable(dst)) { 2498 movl(as_Address(dst), src); 2499 } else { 2500 lea(rscratch, dst); 2501 movl(Address(rscratch, 0), src); 2502 } 2503 } 2504 2505 void MacroAssembler::mov32(Register dst, AddressLiteral src) { 2506 if (reachable(src)) { 2507 movl(dst, as_Address(src)); 2508 } else { 2509 lea(dst, src); 2510 movl(dst, Address(dst, 0)); 2511 } 2512 } 2513 2514 // C++ bool manipulation 2515 2516 void MacroAssembler::movbool(Register dst, Address src) { 2517 if(sizeof(bool) == 1) 2518 movb(dst, src); 2519 else if(sizeof(bool) == 2) 2520 movw(dst, src); 2521 else if(sizeof(bool) == 4) 2522 movl(dst, src); 2523 else 2524 // unsupported 2525 ShouldNotReachHere(); 2526 } 2527 2528 void MacroAssembler::movbool(Address dst, bool boolconst) { 2529 if(sizeof(bool) == 1) 2530 movb(dst, (int) boolconst); 2531 else if(sizeof(bool) == 2) 2532 movw(dst, (int) boolconst); 2533 else if(sizeof(bool) == 4) 2534 movl(dst, (int) boolconst); 2535 else 2536 // unsupported 2537 ShouldNotReachHere(); 2538 } 2539 2540 void 
MacroAssembler::movbool(Address dst, Register src) { 2541 if(sizeof(bool) == 1) 2542 movb(dst, src); 2543 else if(sizeof(bool) == 2) 2544 movw(dst, src); 2545 else if(sizeof(bool) == 4) 2546 movl(dst, src); 2547 else 2548 // unsupported 2549 ShouldNotReachHere(); 2550 } 2551 2552 void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src, Register rscratch) { 2553 assert(rscratch != noreg || always_reachable(src), "missing"); 2554 2555 if (reachable(src)) { 2556 movdl(dst, as_Address(src)); 2557 } else { 2558 lea(rscratch, src); 2559 movdl(dst, Address(rscratch, 0)); 2560 } 2561 } 2562 2563 void MacroAssembler::movq(XMMRegister dst, AddressLiteral src, Register rscratch) { 2564 assert(rscratch != noreg || always_reachable(src), "missing"); 2565 2566 if (reachable(src)) { 2567 movq(dst, as_Address(src)); 2568 } else { 2569 lea(rscratch, src); 2570 movq(dst, Address(rscratch, 0)); 2571 } 2572 } 2573 2574 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src, Register rscratch) { 2575 assert(rscratch != noreg || always_reachable(src), "missing"); 2576 2577 if (reachable(src)) { 2578 if (UseXmmLoadAndClearUpper) { 2579 movsd (dst, as_Address(src)); 2580 } else { 2581 movlpd(dst, as_Address(src)); 2582 } 2583 } else { 2584 lea(rscratch, src); 2585 if (UseXmmLoadAndClearUpper) { 2586 movsd (dst, Address(rscratch, 0)); 2587 } else { 2588 movlpd(dst, Address(rscratch, 0)); 2589 } 2590 } 2591 } 2592 2593 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src, Register rscratch) { 2594 assert(rscratch != noreg || always_reachable(src), "missing"); 2595 2596 if (reachable(src)) { 2597 movss(dst, as_Address(src)); 2598 } else { 2599 lea(rscratch, src); 2600 movss(dst, Address(rscratch, 0)); 2601 } 2602 } 2603 2604 void MacroAssembler::movptr(Register dst, Register src) { 2605 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); 2606 } 2607 2608 void MacroAssembler::movptr(Register dst, Address src) { 2609 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); 2610 } 2611 2612 // src should NEVER be a real pointer. 
Use AddressLiteral for true pointers 2613 void MacroAssembler::movptr(Register dst, intptr_t src) { 2614 #ifdef _LP64 2615 if (is_uimm32(src)) { 2616 movl(dst, checked_cast<uint32_t>(src)); 2617 } else if (is_simm32(src)) { 2618 movq(dst, checked_cast<int32_t>(src)); 2619 } else { 2620 mov64(dst, src); 2621 } 2622 #else 2623 movl(dst, src); 2624 #endif 2625 } 2626 2627 void MacroAssembler::movptr(Address dst, Register src) { 2628 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src)); 2629 } 2630 2631 void MacroAssembler::movptr(Address dst, int32_t src) { 2632 LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); 2633 } 2634 2635 void MacroAssembler::movdqu(Address dst, XMMRegister src) { 2636 assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2637 Assembler::movdqu(dst, src); 2638 } 2639 2640 void MacroAssembler::movdqu(XMMRegister dst, Address src) { 2641 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2642 Assembler::movdqu(dst, src); 2643 } 2644 2645 void MacroAssembler::movdqu(XMMRegister dst, XMMRegister src) { 2646 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2647 Assembler::movdqu(dst, src); 2648 } 2649 2650 void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src, Register rscratch) { 2651 assert(rscratch != noreg || always_reachable(src), "missing"); 2652 2653 if (reachable(src)) { 2654 movdqu(dst, as_Address(src)); 2655 } else { 2656 lea(rscratch, src); 2657 movdqu(dst, Address(rscratch, 0)); 2658 } 2659 } 2660 2661 void MacroAssembler::vmovdqu(Address dst, XMMRegister src) { 2662 assert(((src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2663 Assembler::vmovdqu(dst, src); 2664 } 2665 2666 void MacroAssembler::vmovdqu(XMMRegister dst, Address src) { 2667 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2668 Assembler::vmovdqu(dst, src); 2669 } 2670 2671 void MacroAssembler::vmovdqu(XMMRegister dst, XMMRegister src) { 2672 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 2673 Assembler::vmovdqu(dst, src); 2674 } 2675 2676 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, Register rscratch) { 2677 assert(rscratch != noreg || always_reachable(src), "missing"); 2678 2679 if (reachable(src)) { 2680 vmovdqu(dst, as_Address(src)); 2681 } 2682 else { 2683 lea(rscratch, src); 2684 vmovdqu(dst, Address(rscratch, 0)); 2685 } 2686 } 2687 2688 void MacroAssembler::vmovdqu(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2689 assert(rscratch != noreg || always_reachable(src), "missing"); 2690 2691 if (vector_len == AVX_512bit) { 2692 evmovdquq(dst, src, AVX_512bit, rscratch); 2693 } else if (vector_len == AVX_256bit) { 2694 vmovdqu(dst, src, rscratch); 2695 } else { 2696 movdqu(dst, src, rscratch); 2697 } 2698 } 2699 2700 void MacroAssembler::kmov(KRegister dst, Address src) { 2701 if (VM_Version::supports_avx512bw()) { 2702 kmovql(dst, src); 2703 } else { 2704 assert(VM_Version::supports_evex(), ""); 2705 kmovwl(dst, src); 2706 } 2707 } 2708 2709 void MacroAssembler::kmov(Address dst, KRegister src) { 2710 if (VM_Version::supports_avx512bw()) { 2711 kmovql(dst, src); 2712 } else { 2713 assert(VM_Version::supports_evex(), ""); 2714 kmovwl(dst, src); 2715 } 2716 } 2717 2718 void MacroAssembler::kmov(KRegister dst, 
KRegister src) { 2719 if (VM_Version::supports_avx512bw()) { 2720 kmovql(dst, src); 2721 } else { 2722 assert(VM_Version::supports_evex(), ""); 2723 kmovwl(dst, src); 2724 } 2725 } 2726 2727 void MacroAssembler::kmov(Register dst, KRegister src) { 2728 if (VM_Version::supports_avx512bw()) { 2729 kmovql(dst, src); 2730 } else { 2731 assert(VM_Version::supports_evex(), ""); 2732 kmovwl(dst, src); 2733 } 2734 } 2735 2736 void MacroAssembler::kmov(KRegister dst, Register src) { 2737 if (VM_Version::supports_avx512bw()) { 2738 kmovql(dst, src); 2739 } else { 2740 assert(VM_Version::supports_evex(), ""); 2741 kmovwl(dst, src); 2742 } 2743 } 2744 2745 void MacroAssembler::kmovql(KRegister dst, AddressLiteral src, Register rscratch) { 2746 assert(rscratch != noreg || always_reachable(src), "missing"); 2747 2748 if (reachable(src)) { 2749 kmovql(dst, as_Address(src)); 2750 } else { 2751 lea(rscratch, src); 2752 kmovql(dst, Address(rscratch, 0)); 2753 } 2754 } 2755 2756 void MacroAssembler::kmovwl(KRegister dst, AddressLiteral src, Register rscratch) { 2757 assert(rscratch != noreg || always_reachable(src), "missing"); 2758 2759 if (reachable(src)) { 2760 kmovwl(dst, as_Address(src)); 2761 } else { 2762 lea(rscratch, src); 2763 kmovwl(dst, Address(rscratch, 0)); 2764 } 2765 } 2766 2767 void MacroAssembler::evmovdqub(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, 2768 int vector_len, Register rscratch) { 2769 assert(rscratch != noreg || always_reachable(src), "missing"); 2770 2771 if (reachable(src)) { 2772 Assembler::evmovdqub(dst, mask, as_Address(src), merge, vector_len); 2773 } else { 2774 lea(rscratch, src); 2775 Assembler::evmovdqub(dst, mask, Address(rscratch, 0), merge, vector_len); 2776 } 2777 } 2778 2779 void MacroAssembler::evmovdquw(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, 2780 int vector_len, Register rscratch) { 2781 assert(rscratch != noreg || always_reachable(src), "missing"); 2782 2783 if (reachable(src)) { 2784 Assembler::evmovdquw(dst, mask, as_Address(src), merge, vector_len); 2785 } else { 2786 lea(rscratch, src); 2787 Assembler::evmovdquw(dst, mask, Address(rscratch, 0), merge, vector_len); 2788 } 2789 } 2790 2791 void MacroAssembler::evmovdqul(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 2792 assert(rscratch != noreg || always_reachable(src), "missing"); 2793 2794 if (reachable(src)) { 2795 Assembler::evmovdqul(dst, mask, as_Address(src), merge, vector_len); 2796 } else { 2797 lea(rscratch, src); 2798 Assembler::evmovdqul(dst, mask, Address(rscratch, 0), merge, vector_len); 2799 } 2800 } 2801 2802 void MacroAssembler::evmovdquq(XMMRegister dst, KRegister mask, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 2803 assert(rscratch != noreg || always_reachable(src), "missing"); 2804 2805 if (reachable(src)) { 2806 Assembler::evmovdquq(dst, mask, as_Address(src), merge, vector_len); 2807 } else { 2808 lea(rscratch, src); 2809 Assembler::evmovdquq(dst, mask, Address(rscratch, 0), merge, vector_len); 2810 } 2811 } 2812 2813 void MacroAssembler::evmovdquq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2814 assert(rscratch != noreg || always_reachable(src), "missing"); 2815 2816 if (reachable(src)) { 2817 Assembler::evmovdquq(dst, as_Address(src), vector_len); 2818 } else { 2819 lea(rscratch, src); 2820 Assembler::evmovdquq(dst, Address(rscratch, 0), vector_len); 2821 } 2822 } 2823 2824 void MacroAssembler::movdqa(XMMRegister dst, 
AddressLiteral src, Register rscratch) { 2825 assert(rscratch != noreg || always_reachable(src), "missing"); 2826 2827 if (reachable(src)) { 2828 Assembler::movdqa(dst, as_Address(src)); 2829 } else { 2830 lea(rscratch, src); 2831 Assembler::movdqa(dst, Address(rscratch, 0)); 2832 } 2833 } 2834 2835 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2836 assert(rscratch != noreg || always_reachable(src), "missing"); 2837 2838 if (reachable(src)) { 2839 Assembler::movsd(dst, as_Address(src)); 2840 } else { 2841 lea(rscratch, src); 2842 Assembler::movsd(dst, Address(rscratch, 0)); 2843 } 2844 } 2845 2846 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2847 assert(rscratch != noreg || always_reachable(src), "missing"); 2848 2849 if (reachable(src)) { 2850 Assembler::movss(dst, as_Address(src)); 2851 } else { 2852 lea(rscratch, src); 2853 Assembler::movss(dst, Address(rscratch, 0)); 2854 } 2855 } 2856 2857 void MacroAssembler::movddup(XMMRegister dst, AddressLiteral src, Register rscratch) { 2858 assert(rscratch != noreg || always_reachable(src), "missing"); 2859 2860 if (reachable(src)) { 2861 Assembler::movddup(dst, as_Address(src)); 2862 } else { 2863 lea(rscratch, src); 2864 Assembler::movddup(dst, Address(rscratch, 0)); 2865 } 2866 } 2867 2868 void MacroAssembler::vmovddup(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 2869 assert(rscratch != noreg || always_reachable(src), "missing"); 2870 2871 if (reachable(src)) { 2872 Assembler::vmovddup(dst, as_Address(src), vector_len); 2873 } else { 2874 lea(rscratch, src); 2875 Assembler::vmovddup(dst, Address(rscratch, 0), vector_len); 2876 } 2877 } 2878 2879 void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 2880 assert(rscratch != noreg || always_reachable(src), "missing"); 2881 2882 if (reachable(src)) { 2883 Assembler::mulsd(dst, as_Address(src)); 2884 } else { 2885 lea(rscratch, src); 2886 Assembler::mulsd(dst, Address(rscratch, 0)); 2887 } 2888 } 2889 2890 void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src, Register rscratch) { 2891 assert(rscratch != noreg || always_reachable(src), "missing"); 2892 2893 if (reachable(src)) { 2894 Assembler::mulss(dst, as_Address(src)); 2895 } else { 2896 lea(rscratch, src); 2897 Assembler::mulss(dst, Address(rscratch, 0)); 2898 } 2899 } 2900 2901 void MacroAssembler::null_check(Register reg, int offset) { 2902 if (needs_explicit_null_check(offset)) { 2903 // provoke OS null exception if reg is null by 2904 // accessing M[reg] w/o changing any (non-CC) registers 2905 // NOTE: cmpl is plenty here to provoke a segv 2906 cmpptr(rax, Address(reg, 0)); 2907 // Note: should probably use testl(rax, Address(reg, 0)); 2908 // may be shorter code (however, this version of 2909 // testl needs to be implemented first) 2910 } else { 2911 // nothing to do, (later) access of M[reg + offset] 2912 // will provoke OS null exception if reg is null 2913 } 2914 } 2915 2916 void MacroAssembler::test_markword_is_inline_type(Register markword, Label& is_inline_type) { 2917 andptr(markword, markWord::inline_type_mask_in_place); 2918 cmpptr(markword, markWord::inline_type_pattern); 2919 jcc(Assembler::equal, is_inline_type); 2920 } 2921 2922 void MacroAssembler::test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type) { 2923 movl(temp_reg, Address(klass, Klass::access_flags_offset())); 2924 testl(temp_reg, JVM_ACC_IDENTITY); 2925 jcc(Assembler::zero, 
is_inline_type); 2926 } 2927 2928 void MacroAssembler::test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type) { 2929 testptr(object, object); 2930 jcc(Assembler::zero, not_inline_type); 2931 const int is_inline_type_mask = markWord::inline_type_pattern; 2932 movptr(tmp, Address(object, oopDesc::mark_offset_in_bytes())); 2933 andptr(tmp, is_inline_type_mask); 2934 cmpptr(tmp, is_inline_type_mask); 2935 jcc(Assembler::notEqual, not_inline_type); 2936 } 2937 2938 void MacroAssembler::test_klass_is_empty_inline_type(Register klass, Register temp_reg, Label& is_empty_inline_type) { 2939 #ifdef ASSERT 2940 { 2941 Label done_check; 2942 test_klass_is_inline_type(klass, temp_reg, done_check); 2943 stop("test_klass_is_empty_inline_type with non inline type klass"); 2944 bind(done_check); 2945 } 2946 #endif 2947 movl(temp_reg, Address(klass, InstanceKlass::misc_flags_offset())); 2948 testl(temp_reg, InstanceKlassFlags::is_empty_inline_type_value()); 2949 jcc(Assembler::notZero, is_empty_inline_type); 2950 } 2951 2952 void MacroAssembler::test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free_inline_type) { 2953 movl(temp_reg, flags); 2954 testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift); 2955 jcc(Assembler::notEqual, is_null_free_inline_type); 2956 } 2957 2958 void MacroAssembler::test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free_inline_type) { 2959 movl(temp_reg, flags); 2960 testl(temp_reg, 1 << ResolvedFieldEntry::is_null_free_inline_type_shift); 2961 jcc(Assembler::equal, not_null_free_inline_type); 2962 } 2963 2964 void MacroAssembler::test_field_is_flat(Register flags, Register temp_reg, Label& is_flat) { 2965 movl(temp_reg, flags); 2966 testl(temp_reg, 1 << ResolvedFieldEntry::is_flat_shift); 2967 jcc(Assembler::notEqual, is_flat); 2968 } 2969 2970 void MacroAssembler::test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker) { 2971 movl(temp_reg, flags); 2972 testl(temp_reg, 1 << ResolvedFieldEntry::has_null_marker_shift); 2973 jcc(Assembler::notEqual, has_null_marker); 2974 } 2975 2976 void MacroAssembler::test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label) { 2977 Label test_mark_word; 2978 // load mark word 2979 movptr(temp_reg, Address(oop, oopDesc::mark_offset_in_bytes())); 2980 // check displaced 2981 testl(temp_reg, markWord::unlocked_value); 2982 jccb(Assembler::notZero, test_mark_word); 2983 // slow path use klass prototype 2984 push(rscratch1); 2985 load_prototype_header(temp_reg, oop, rscratch1); 2986 pop(rscratch1); 2987 2988 bind(test_mark_word); 2989 testl(temp_reg, test_bit); 2990 jcc((jmp_set) ? 
Assembler::notZero : Assembler::zero, jmp_label); 2991 } 2992 2993 void MacroAssembler::test_flat_array_oop(Register oop, Register temp_reg, 2994 Label& is_flat_array) { 2995 #ifdef _LP64 2996 test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, true, is_flat_array); 2997 #else 2998 load_klass(temp_reg, oop, noreg); 2999 movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset())); 3000 test_flat_array_layout(temp_reg, is_flat_array); 3001 #endif 3002 } 3003 3004 void MacroAssembler::test_non_flat_array_oop(Register oop, Register temp_reg, 3005 Label& is_non_flat_array) { 3006 #ifdef _LP64 3007 test_oop_prototype_bit(oop, temp_reg, markWord::flat_array_bit_in_place, false, is_non_flat_array); 3008 #else 3009 load_klass(temp_reg, oop, noreg); 3010 movl(temp_reg, Address(temp_reg, Klass::layout_helper_offset())); 3011 test_non_flat_array_layout(temp_reg, is_non_flat_array); 3012 #endif 3013 } 3014 3015 void MacroAssembler::test_null_free_array_oop(Register oop, Register temp_reg, Label&is_null_free_array) { 3016 #ifdef _LP64 3017 test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, true, is_null_free_array); 3018 #else 3019 Unimplemented(); 3020 #endif 3021 } 3022 3023 void MacroAssembler::test_non_null_free_array_oop(Register oop, Register temp_reg, Label&is_non_null_free_array) { 3024 #ifdef _LP64 3025 test_oop_prototype_bit(oop, temp_reg, markWord::null_free_array_bit_in_place, false, is_non_null_free_array); 3026 #else 3027 Unimplemented(); 3028 #endif 3029 } 3030 3031 void MacroAssembler::test_flat_array_layout(Register lh, Label& is_flat_array) { 3032 testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace); 3033 jcc(Assembler::notZero, is_flat_array); 3034 } 3035 3036 void MacroAssembler::test_non_flat_array_layout(Register lh, Label& is_non_flat_array) { 3037 testl(lh, Klass::_lh_array_tag_flat_value_bit_inplace); 3038 jcc(Assembler::zero, is_non_flat_array); 3039 } 3040 3041 void MacroAssembler::os_breakpoint() { 3042 // instead of directly emitting a breakpoint, call os:breakpoint for better debugability 3043 // (e.g., MSVC can't call ps() otherwise) 3044 call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint))); 3045 } 3046 3047 void MacroAssembler::unimplemented(const char* what) { 3048 const char* buf = nullptr; 3049 { 3050 ResourceMark rm; 3051 stringStream ss; 3052 ss.print("unimplemented: %s", what); 3053 buf = code_string(ss.as_string()); 3054 } 3055 stop(buf); 3056 } 3057 3058 #ifdef _LP64 3059 #define XSTATE_BV 0x200 3060 #endif 3061 3062 void MacroAssembler::pop_CPU_state() { 3063 pop_FPU_state(); 3064 pop_IU_state(); 3065 } 3066 3067 void MacroAssembler::pop_FPU_state() { 3068 #ifndef _LP64 3069 frstor(Address(rsp, 0)); 3070 #else 3071 fxrstor(Address(rsp, 0)); 3072 #endif 3073 addptr(rsp, FPUStateSizeInWords * wordSize); 3074 } 3075 3076 void MacroAssembler::pop_IU_state() { 3077 popa(); 3078 LP64_ONLY(addq(rsp, 8)); 3079 popf(); 3080 } 3081 3082 // Save Integer and Float state 3083 // Warning: Stack must be 16 byte aligned (64bit) 3084 void MacroAssembler::push_CPU_state() { 3085 push_IU_state(); 3086 push_FPU_state(); 3087 } 3088 3089 void MacroAssembler::push_FPU_state() { 3090 subptr(rsp, FPUStateSizeInWords * wordSize); 3091 #ifndef _LP64 3092 fnsave(Address(rsp, 0)); 3093 fwait(); 3094 #else 3095 fxsave(Address(rsp, 0)); 3096 #endif // LP64 3097 } 3098 3099 void MacroAssembler::push_IU_state() { 3100 // Push flags first because pusha kills them 3101 pushf(); 3102 // Make sure rsp stays 16-byte aligned 3103 
LP64_ONLY(subq(rsp, 8)); 3104 pusha(); 3105 } 3106 3107 void MacroAssembler::push_cont_fastpath() { 3108 if (!Continuations::enabled()) return; 3109 3110 #ifndef _LP64 3111 Register rthread = rax; 3112 Register rrealsp = rbx; 3113 push(rthread); 3114 push(rrealsp); 3115 3116 get_thread(rthread); 3117 3118 // The code below wants the original RSP. 3119 // Move it back after the pushes above. 3120 movptr(rrealsp, rsp); 3121 addptr(rrealsp, 2*wordSize); 3122 #else 3123 Register rthread = r15_thread; 3124 Register rrealsp = rsp; 3125 #endif 3126 3127 Label done; 3128 cmpptr(rrealsp, Address(rthread, JavaThread::cont_fastpath_offset())); 3129 jccb(Assembler::belowEqual, done); 3130 movptr(Address(rthread, JavaThread::cont_fastpath_offset()), rrealsp); 3131 bind(done); 3132 3133 #ifndef _LP64 3134 pop(rrealsp); 3135 pop(rthread); 3136 #endif 3137 } 3138 3139 void MacroAssembler::pop_cont_fastpath() { 3140 if (!Continuations::enabled()) return; 3141 3142 #ifndef _LP64 3143 Register rthread = rax; 3144 Register rrealsp = rbx; 3145 push(rthread); 3146 push(rrealsp); 3147 3148 get_thread(rthread); 3149 3150 // The code below wants the original RSP. 3151 // Move it back after the pushes above. 3152 movptr(rrealsp, rsp); 3153 addptr(rrealsp, 2*wordSize); 3154 #else 3155 Register rthread = r15_thread; 3156 Register rrealsp = rsp; 3157 #endif 3158 3159 Label done; 3160 cmpptr(rrealsp, Address(rthread, JavaThread::cont_fastpath_offset())); 3161 jccb(Assembler::below, done); 3162 movptr(Address(rthread, JavaThread::cont_fastpath_offset()), 0); 3163 bind(done); 3164 3165 #ifndef _LP64 3166 pop(rrealsp); 3167 pop(rthread); 3168 #endif 3169 } 3170 3171 void MacroAssembler::inc_held_monitor_count() { 3172 #ifndef _LP64 3173 Register thread = rax; 3174 push(thread); 3175 get_thread(thread); 3176 incrementl(Address(thread, JavaThread::held_monitor_count_offset())); 3177 pop(thread); 3178 #else // LP64 3179 incrementq(Address(r15_thread, JavaThread::held_monitor_count_offset())); 3180 #endif 3181 } 3182 3183 void MacroAssembler::dec_held_monitor_count() { 3184 #ifndef _LP64 3185 Register thread = rax; 3186 push(thread); 3187 get_thread(thread); 3188 decrementl(Address(thread, JavaThread::held_monitor_count_offset())); 3189 pop(thread); 3190 #else // LP64 3191 decrementq(Address(r15_thread, JavaThread::held_monitor_count_offset())); 3192 #endif 3193 } 3194 3195 #ifdef ASSERT 3196 void MacroAssembler::stop_if_in_cont(Register cont, const char* name) { 3197 #ifdef _LP64 3198 Label no_cont; 3199 movptr(cont, Address(r15_thread, JavaThread::cont_entry_offset())); 3200 testl(cont, cont); 3201 jcc(Assembler::zero, no_cont); 3202 stop(name); 3203 bind(no_cont); 3204 #else 3205 Unimplemented(); 3206 #endif 3207 } 3208 #endif 3209 3210 void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp) { // determine java_thread register 3211 if (!java_thread->is_valid()) { 3212 java_thread = rdi; 3213 get_thread(java_thread); 3214 } 3215 // we must set sp to zero to clear frame 3216 movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD); 3217 // must clear fp, so that compiled frames are not confused; it is 3218 // possible that we need it only for debugging 3219 if (clear_fp) { 3220 movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD); 3221 } 3222 // Always clear the pc because it could have been set by make_walkable() 3223 movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD); 3224 vzeroupper(); 3225 } 3226 3227 void 
MacroAssembler::restore_rax(Register tmp) { 3228 if (tmp == noreg) pop(rax); 3229 else if (tmp != rax) mov(rax, tmp); 3230 } 3231 3232 void MacroAssembler::round_to(Register reg, int modulus) { 3233 addptr(reg, modulus - 1); 3234 andptr(reg, -modulus); 3235 } 3236 3237 void MacroAssembler::save_rax(Register tmp) { 3238 if (tmp == noreg) push(rax); 3239 else if (tmp != rax) mov(tmp, rax); 3240 } 3241 3242 void MacroAssembler::safepoint_poll(Label& slow_path, Register thread_reg, bool at_return, bool in_nmethod) { 3243 if (at_return) { 3244 // Note that when in_nmethod is set, the stack pointer is incremented before the poll. Therefore, 3245 // we may safely use rsp instead to perform the stack watermark check. 3246 cmpptr(in_nmethod ? rsp : rbp, Address(thread_reg, JavaThread::polling_word_offset())); 3247 jcc(Assembler::above, slow_path); 3248 return; 3249 } 3250 testb(Address(thread_reg, JavaThread::polling_word_offset()), SafepointMechanism::poll_bit()); 3251 jcc(Assembler::notZero, slow_path); // handshake bit set implies poll 3252 } 3253 3254 // Calls to C land 3255 // 3256 // When entering C land, the rbp, & rsp of the last Java frame have to be recorded 3257 // in the (thread-local) JavaThread object. When leaving C land, the last Java fp 3258 // has to be reset to 0. This is required to allow proper stack traversal. 3259 void MacroAssembler::set_last_Java_frame(Register java_thread, 3260 Register last_java_sp, 3261 Register last_java_fp, 3262 address last_java_pc, 3263 Register rscratch) { 3264 vzeroupper(); 3265 // determine java_thread register 3266 if (!java_thread->is_valid()) { 3267 java_thread = rdi; 3268 get_thread(java_thread); 3269 } 3270 // determine last_java_sp register 3271 if (!last_java_sp->is_valid()) { 3272 last_java_sp = rsp; 3273 } 3274 // last_java_fp is optional 3275 if (last_java_fp->is_valid()) { 3276 movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp); 3277 } 3278 // last_java_pc is optional 3279 if (last_java_pc != nullptr) { 3280 Address java_pc(java_thread, 3281 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()); 3282 lea(java_pc, InternalAddress(last_java_pc), rscratch); 3283 } 3284 movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp); 3285 } 3286 3287 void MacroAssembler::shlptr(Register dst, int imm8) { 3288 LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8)); 3289 } 3290 3291 void MacroAssembler::shrptr(Register dst, int imm8) { 3292 LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8)); 3293 } 3294 3295 void MacroAssembler::sign_extend_byte(Register reg) { 3296 if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) { 3297 movsbl(reg, reg); // movsxb 3298 } else { 3299 shll(reg, 24); 3300 sarl(reg, 24); 3301 } 3302 } 3303 3304 void MacroAssembler::sign_extend_short(Register reg) { 3305 if (LP64_ONLY(true ||) VM_Version::is_P6()) { 3306 movswl(reg, reg); // movsxw 3307 } else { 3308 shll(reg, 16); 3309 sarl(reg, 16); 3310 } 3311 } 3312 3313 void MacroAssembler::testl(Address dst, int32_t imm32) { 3314 if (imm32 >= 0 && is8bit(imm32)) { 3315 testb(dst, imm32); 3316 } else { 3317 Assembler::testl(dst, imm32); 3318 } 3319 } 3320 3321 void MacroAssembler::testl(Register dst, int32_t imm32) { 3322 if (imm32 >= 0 && is8bit(imm32) && dst->has_byte_register()) { 3323 testb(dst, imm32); 3324 } else { 3325 Assembler::testl(dst, imm32); 3326 } 3327 } 3328 3329 void MacroAssembler::testl(Register dst, AddressLiteral src) { 3330 assert(always_reachable(src), "Address 
should be reachable"); 3331 testl(dst, as_Address(src)); 3332 } 3333 3334 #ifdef _LP64 3335 3336 void MacroAssembler::testq(Address dst, int32_t imm32) { 3337 if (imm32 >= 0) { 3338 testl(dst, imm32); 3339 } else { 3340 Assembler::testq(dst, imm32); 3341 } 3342 } 3343 3344 void MacroAssembler::testq(Register dst, int32_t imm32) { 3345 if (imm32 >= 0) { 3346 testl(dst, imm32); 3347 } else { 3348 Assembler::testq(dst, imm32); 3349 } 3350 } 3351 3352 #endif 3353 3354 void MacroAssembler::pcmpeqb(XMMRegister dst, XMMRegister src) { 3355 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3356 Assembler::pcmpeqb(dst, src); 3357 } 3358 3359 void MacroAssembler::pcmpeqw(XMMRegister dst, XMMRegister src) { 3360 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3361 Assembler::pcmpeqw(dst, src); 3362 } 3363 3364 void MacroAssembler::pcmpestri(XMMRegister dst, Address src, int imm8) { 3365 assert((dst->encoding() < 16),"XMM register should be 0-15"); 3366 Assembler::pcmpestri(dst, src, imm8); 3367 } 3368 3369 void MacroAssembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) { 3370 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15"); 3371 Assembler::pcmpestri(dst, src, imm8); 3372 } 3373 3374 void MacroAssembler::pmovzxbw(XMMRegister dst, XMMRegister src) { 3375 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3376 Assembler::pmovzxbw(dst, src); 3377 } 3378 3379 void MacroAssembler::pmovzxbw(XMMRegister dst, Address src) { 3380 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3381 Assembler::pmovzxbw(dst, src); 3382 } 3383 3384 void MacroAssembler::pmovmskb(Register dst, XMMRegister src) { 3385 assert((src->encoding() < 16),"XMM register should be 0-15"); 3386 Assembler::pmovmskb(dst, src); 3387 } 3388 3389 void MacroAssembler::ptest(XMMRegister dst, XMMRegister src) { 3390 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15"); 3391 Assembler::ptest(dst, src); 3392 } 3393 3394 void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src, Register rscratch) { 3395 assert(rscratch != noreg || always_reachable(src), "missing"); 3396 3397 if (reachable(src)) { 3398 Assembler::sqrtss(dst, as_Address(src)); 3399 } else { 3400 lea(rscratch, src); 3401 Assembler::sqrtss(dst, Address(rscratch, 0)); 3402 } 3403 } 3404 3405 void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src, Register rscratch) { 3406 assert(rscratch != noreg || always_reachable(src), "missing"); 3407 3408 if (reachable(src)) { 3409 Assembler::subsd(dst, as_Address(src)); 3410 } else { 3411 lea(rscratch, src); 3412 Assembler::subsd(dst, Address(rscratch, 0)); 3413 } 3414 } 3415 3416 void MacroAssembler::roundsd(XMMRegister dst, AddressLiteral src, int32_t rmode, Register rscratch) { 3417 assert(rscratch != noreg || always_reachable(src), "missing"); 3418 3419 if (reachable(src)) { 3420 Assembler::roundsd(dst, as_Address(src), rmode); 3421 } else { 3422 lea(rscratch, src); 3423 Assembler::roundsd(dst, Address(rscratch, 0), rmode); 3424 } 3425 } 3426 3427 void MacroAssembler::subss(XMMRegister dst, AddressLiteral src, Register rscratch) { 3428 assert(rscratch != noreg || always_reachable(src), "missing"); 3429 3430 if (reachable(src)) { 3431 Assembler::subss(dst, as_Address(src)); 3432 } 
else { 3433 lea(rscratch, src); 3434 Assembler::subss(dst, Address(rscratch, 0)); 3435 } 3436 } 3437 3438 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src, Register rscratch) { 3439 assert(rscratch != noreg || always_reachable(src), "missing"); 3440 3441 if (reachable(src)) { 3442 Assembler::ucomisd(dst, as_Address(src)); 3443 } else { 3444 lea(rscratch, src); 3445 Assembler::ucomisd(dst, Address(rscratch, 0)); 3446 } 3447 } 3448 3449 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src, Register rscratch) { 3450 assert(rscratch != noreg || always_reachable(src), "missing"); 3451 3452 if (reachable(src)) { 3453 Assembler::ucomiss(dst, as_Address(src)); 3454 } else { 3455 lea(rscratch, src); 3456 Assembler::ucomiss(dst, Address(rscratch, 0)); 3457 } 3458 } 3459 3460 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src, Register rscratch) { 3461 assert(rscratch != noreg || always_reachable(src), "missing"); 3462 3463 // Used in sign-bit flipping with aligned address. 3464 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes"); 3465 if (reachable(src)) { 3466 Assembler::xorpd(dst, as_Address(src)); 3467 } else { 3468 lea(rscratch, src); 3469 Assembler::xorpd(dst, Address(rscratch, 0)); 3470 } 3471 } 3472 3473 void MacroAssembler::xorpd(XMMRegister dst, XMMRegister src) { 3474 if (UseAVX > 2 && !VM_Version::supports_avx512dq() && (dst->encoding() == src->encoding())) { 3475 Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit); 3476 } 3477 else { 3478 Assembler::xorpd(dst, src); 3479 } 3480 } 3481 3482 void MacroAssembler::xorps(XMMRegister dst, XMMRegister src) { 3483 if (UseAVX > 2 && !VM_Version::supports_avx512dq() && (dst->encoding() == src->encoding())) { 3484 Assembler::vpxor(dst, dst, src, Assembler::AVX_512bit); 3485 } else { 3486 Assembler::xorps(dst, src); 3487 } 3488 } 3489 3490 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src, Register rscratch) { 3491 assert(rscratch != noreg || always_reachable(src), "missing"); 3492 3493 // Used in sign-bit flipping with aligned address. 3494 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes"); 3495 if (reachable(src)) { 3496 Assembler::xorps(dst, as_Address(src)); 3497 } else { 3498 lea(rscratch, src); 3499 Assembler::xorps(dst, Address(rscratch, 0)); 3500 } 3501 } 3502 3503 void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src, Register rscratch) { 3504 assert(rscratch != noreg || always_reachable(src), "missing"); 3505 3506 // Used in sign-bit flipping with aligned address. 
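  // (That wording is shared with the xorps/xorpd helpers above; for pshufb the
  // relevant point is just the alignment check below, since the legacy non-AVX
  // encoding of pshufb takes an m128 operand that faults on an unaligned address.)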
3507 bool aligned_adr = (((intptr_t)src.target() & 15) == 0); 3508 assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes"); 3509 if (reachable(src)) { 3510 Assembler::pshufb(dst, as_Address(src)); 3511 } else { 3512 lea(rscratch, src); 3513 Assembler::pshufb(dst, Address(rscratch, 0)); 3514 } 3515 } 3516 3517 // AVX 3-operands instructions 3518 3519 void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3520 assert(rscratch != noreg || always_reachable(src), "missing"); 3521 3522 if (reachable(src)) { 3523 vaddsd(dst, nds, as_Address(src)); 3524 } else { 3525 lea(rscratch, src); 3526 vaddsd(dst, nds, Address(rscratch, 0)); 3527 } 3528 } 3529 3530 void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3531 assert(rscratch != noreg || always_reachable(src), "missing"); 3532 3533 if (reachable(src)) { 3534 vaddss(dst, nds, as_Address(src)); 3535 } else { 3536 lea(rscratch, src); 3537 vaddss(dst, nds, Address(rscratch, 0)); 3538 } 3539 } 3540 3541 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3542 assert(UseAVX > 0, "requires some form of AVX"); 3543 assert(rscratch != noreg || always_reachable(src), "missing"); 3544 3545 if (reachable(src)) { 3546 Assembler::vpaddb(dst, nds, as_Address(src), vector_len); 3547 } else { 3548 lea(rscratch, src); 3549 Assembler::vpaddb(dst, nds, Address(rscratch, 0), vector_len); 3550 } 3551 } 3552 3553 void MacroAssembler::vpaddd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3554 assert(UseAVX > 0, "requires some form of AVX"); 3555 assert(rscratch != noreg || always_reachable(src), "missing"); 3556 3557 if (reachable(src)) { 3558 Assembler::vpaddd(dst, nds, as_Address(src), vector_len); 3559 } else { 3560 lea(rscratch, src); 3561 Assembler::vpaddd(dst, nds, Address(rscratch, 0), vector_len); 3562 } 3563 } 3564 3565 void MacroAssembler::vabsss(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) { 3566 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 3567 assert(rscratch != noreg || always_reachable(negate_field), "missing"); 3568 3569 vandps(dst, nds, negate_field, vector_len, rscratch); 3570 } 3571 3572 void MacroAssembler::vabssd(XMMRegister dst, XMMRegister nds, XMMRegister src, AddressLiteral negate_field, int vector_len, Register rscratch) { 3573 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 3574 assert(rscratch != noreg || always_reachable(negate_field), "missing"); 3575 3576 vandpd(dst, nds, negate_field, vector_len, rscratch); 3577 } 3578 3579 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3580 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3581 Assembler::vpaddb(dst, nds, src, vector_len); 3582 } 3583 3584 void MacroAssembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3585 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3586 Assembler::vpaddb(dst, nds, src, vector_len); 3587 } 3588 3589 void 
MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3590 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3591 Assembler::vpaddw(dst, nds, src, vector_len); 3592 } 3593 3594 void MacroAssembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3595 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3596 Assembler::vpaddw(dst, nds, src, vector_len); 3597 } 3598 3599 void MacroAssembler::vpand(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3600 assert(rscratch != noreg || always_reachable(src), "missing"); 3601 3602 if (reachable(src)) { 3603 Assembler::vpand(dst, nds, as_Address(src), vector_len); 3604 } else { 3605 lea(rscratch, src); 3606 Assembler::vpand(dst, nds, Address(rscratch, 0), vector_len); 3607 } 3608 } 3609 3610 void MacroAssembler::vpbroadcastd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 3611 assert(rscratch != noreg || always_reachable(src), "missing"); 3612 3613 if (reachable(src)) { 3614 Assembler::vpbroadcastd(dst, as_Address(src), vector_len); 3615 } else { 3616 lea(rscratch, src); 3617 Assembler::vpbroadcastd(dst, Address(rscratch, 0), vector_len); 3618 } 3619 } 3620 3621 void MacroAssembler::vbroadcasti128(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 3622 assert(rscratch != noreg || always_reachable(src), "missing"); 3623 3624 if (reachable(src)) { 3625 Assembler::vbroadcasti128(dst, as_Address(src), vector_len); 3626 } else { 3627 lea(rscratch, src); 3628 Assembler::vbroadcasti128(dst, Address(rscratch, 0), vector_len); 3629 } 3630 } 3631 3632 void MacroAssembler::vpbroadcastq(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 3633 assert(rscratch != noreg || always_reachable(src), "missing"); 3634 3635 if (reachable(src)) { 3636 Assembler::vpbroadcastq(dst, as_Address(src), vector_len); 3637 } else { 3638 lea(rscratch, src); 3639 Assembler::vpbroadcastq(dst, Address(rscratch, 0), vector_len); 3640 } 3641 } 3642 3643 void MacroAssembler::vbroadcastsd(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 3644 assert(rscratch != noreg || always_reachable(src), "missing"); 3645 3646 if (reachable(src)) { 3647 Assembler::vbroadcastsd(dst, as_Address(src), vector_len); 3648 } else { 3649 lea(rscratch, src); 3650 Assembler::vbroadcastsd(dst, Address(rscratch, 0), vector_len); 3651 } 3652 } 3653 3654 void MacroAssembler::vbroadcastss(XMMRegister dst, AddressLiteral src, int vector_len, Register rscratch) { 3655 assert(rscratch != noreg || always_reachable(src), "missing"); 3656 3657 if (reachable(src)) { 3658 Assembler::vbroadcastss(dst, as_Address(src), vector_len); 3659 } else { 3660 lea(rscratch, src); 3661 Assembler::vbroadcastss(dst, Address(rscratch, 0), vector_len); 3662 } 3663 } 3664 3665 // Vector float blend 3666 // vblendvps(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg) 3667 void MacroAssembler::vblendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) { 3668 // WARN: Allow dst == (src1|src2), mask == scratch 3669 bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1; 3670 bool scratch_available = scratch != xnoreg 
&& scratch != src1 && scratch != src2 && scratch != dst; 3671 bool dst_available = dst != mask && (dst != src1 || dst != src2); 3672 if (blend_emulation && scratch_available && dst_available) { 3673 if (compute_mask) { 3674 vpsrad(scratch, mask, 32, vector_len); 3675 mask = scratch; 3676 } 3677 if (dst == src1) { 3678 vpandn(dst, mask, src1, vector_len); // if mask == 0, src1 3679 vpand (scratch, mask, src2, vector_len); // if mask == 1, src2 3680 } else { 3681 vpand (dst, mask, src2, vector_len); // if mask == 1, src2 3682 vpandn(scratch, mask, src1, vector_len); // if mask == 0, src1 3683 } 3684 vpor(dst, dst, scratch, vector_len); 3685 } else { 3686 Assembler::vblendvps(dst, src1, src2, mask, vector_len); 3687 } 3688 } 3689 3690 // vblendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister mask, int vector_len, bool compute_mask = true, XMMRegister scratch = xnoreg) 3691 void MacroAssembler::vblendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister mask, int vector_len, bool compute_mask, XMMRegister scratch) { 3692 // WARN: Allow dst == (src1|src2), mask == scratch 3693 bool blend_emulation = EnableX86ECoreOpts && UseAVX > 1; 3694 bool scratch_available = scratch != xnoreg && scratch != src1 && scratch != src2 && scratch != dst && (!compute_mask || scratch != mask); 3695 bool dst_available = dst != mask && (dst != src1 || dst != src2); 3696 if (blend_emulation && scratch_available && dst_available) { 3697 if (compute_mask) { 3698 vpxor(scratch, scratch, scratch, vector_len); 3699 vpcmpgtq(scratch, scratch, mask, vector_len); 3700 mask = scratch; 3701 } 3702 if (dst == src1) { 3703 vpandn(dst, mask, src1, vector_len); // if mask == 0, src 3704 vpand (scratch, mask, src2, vector_len); // if mask == 1, src2 3705 } else { 3706 vpand (dst, mask, src2, vector_len); // if mask == 1, src2 3707 vpandn(scratch, mask, src1, vector_len); // if mask == 0, src 3708 } 3709 vpor(dst, dst, scratch, vector_len); 3710 } else { 3711 Assembler::vblendvpd(dst, src1, src2, mask, vector_len); 3712 } 3713 } 3714 3715 void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3716 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3717 Assembler::vpcmpeqb(dst, nds, src, vector_len); 3718 } 3719 3720 void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) { 3721 assert(((dst->encoding() < 16 && src1->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3722 Assembler::vpcmpeqb(dst, src1, src2, vector_len); 3723 } 3724 3725 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3726 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3727 Assembler::vpcmpeqw(dst, nds, src, vector_len); 3728 } 3729 3730 void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3731 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3732 Assembler::vpcmpeqw(dst, nds, src, vector_len); 3733 } 3734 3735 void MacroAssembler::evpcmpeqd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3736 assert(rscratch != noreg || always_reachable(src), "missing"); 3737 3738 if (reachable(src)) { 3739 
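      // reachable(src) means the literal can be addressed rip-relative from the
      // code cache, so it is usable as a direct memory operand; otherwise its
      // address is first materialized into rscratch with lea (see the else arm).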
Assembler::evpcmpeqd(kdst, mask, nds, as_Address(src), vector_len); 3740 } else { 3741 lea(rscratch, src); 3742 Assembler::evpcmpeqd(kdst, mask, nds, Address(rscratch, 0), vector_len); 3743 } 3744 } 3745 3746 void MacroAssembler::evpcmpd(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 3747 int comparison, bool is_signed, int vector_len, Register rscratch) { 3748 assert(rscratch != noreg || always_reachable(src), "missing"); 3749 3750 if (reachable(src)) { 3751 Assembler::evpcmpd(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 3752 } else { 3753 lea(rscratch, src); 3754 Assembler::evpcmpd(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 3755 } 3756 } 3757 3758 void MacroAssembler::evpcmpq(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 3759 int comparison, bool is_signed, int vector_len, Register rscratch) { 3760 assert(rscratch != noreg || always_reachable(src), "missing"); 3761 3762 if (reachable(src)) { 3763 Assembler::evpcmpq(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 3764 } else { 3765 lea(rscratch, src); 3766 Assembler::evpcmpq(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 3767 } 3768 } 3769 3770 void MacroAssembler::evpcmpb(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 3771 int comparison, bool is_signed, int vector_len, Register rscratch) { 3772 assert(rscratch != noreg || always_reachable(src), "missing"); 3773 3774 if (reachable(src)) { 3775 Assembler::evpcmpb(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 3776 } else { 3777 lea(rscratch, src); 3778 Assembler::evpcmpb(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 3779 } 3780 } 3781 3782 void MacroAssembler::evpcmpw(KRegister kdst, KRegister mask, XMMRegister nds, AddressLiteral src, 3783 int comparison, bool is_signed, int vector_len, Register rscratch) { 3784 assert(rscratch != noreg || always_reachable(src), "missing"); 3785 3786 if (reachable(src)) { 3787 Assembler::evpcmpw(kdst, mask, nds, as_Address(src), comparison, is_signed, vector_len); 3788 } else { 3789 lea(rscratch, src); 3790 Assembler::evpcmpw(kdst, mask, nds, Address(rscratch, 0), comparison, is_signed, vector_len); 3791 } 3792 } 3793 3794 void MacroAssembler::vpcmpCC(XMMRegister dst, XMMRegister nds, XMMRegister src, int cond_encoding, Width width, int vector_len) { 3795 if (width == Assembler::Q) { 3796 Assembler::vpcmpCCq(dst, nds, src, cond_encoding, vector_len); 3797 } else { 3798 Assembler::vpcmpCCbwd(dst, nds, src, cond_encoding, vector_len); 3799 } 3800 } 3801 3802 void MacroAssembler::vpcmpCCW(XMMRegister dst, XMMRegister nds, XMMRegister src, XMMRegister xtmp, ComparisonPredicate cond, Width width, int vector_len) { 3803 int eq_cond_enc = 0x29; 3804 int gt_cond_enc = 0x37; 3805 if (width != Assembler::Q) { 3806 eq_cond_enc = 0x74 + width; 3807 gt_cond_enc = 0x64 + width; 3808 } 3809 switch (cond) { 3810 case eq: 3811 vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len); 3812 break; 3813 case neq: 3814 vpcmpCC(dst, nds, src, eq_cond_enc, width, vector_len); 3815 vallones(xtmp, vector_len); 3816 vpxor(dst, xtmp, dst, vector_len); 3817 break; 3818 case le: 3819 vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len); 3820 vallones(xtmp, vector_len); 3821 vpxor(dst, xtmp, dst, vector_len); 3822 break; 3823 case nlt: 3824 vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len); 3825 vallones(xtmp, vector_len); 3826 vpxor(dst, xtmp, dst, 
vector_len); 3827 break; 3828 case lt: 3829 vpcmpCC(dst, src, nds, gt_cond_enc, width, vector_len); 3830 break; 3831 case nle: 3832 vpcmpCC(dst, nds, src, gt_cond_enc, width, vector_len); 3833 break; 3834 default: 3835 assert(false, "Should not reach here"); 3836 } 3837 } 3838 3839 void MacroAssembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) { 3840 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3841 Assembler::vpmovzxbw(dst, src, vector_len); 3842 } 3843 3844 void MacroAssembler::vpmovmskb(Register dst, XMMRegister src, int vector_len) { 3845 assert((src->encoding() < 16),"XMM register should be 0-15"); 3846 Assembler::vpmovmskb(dst, src, vector_len); 3847 } 3848 3849 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3850 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3851 Assembler::vpmullw(dst, nds, src, vector_len); 3852 } 3853 3854 void MacroAssembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3855 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3856 Assembler::vpmullw(dst, nds, src, vector_len); 3857 } 3858 3859 void MacroAssembler::vpmulld(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3860 assert((UseAVX > 0), "AVX support is needed"); 3861 assert(rscratch != noreg || always_reachable(src), "missing"); 3862 3863 if (reachable(src)) { 3864 Assembler::vpmulld(dst, nds, as_Address(src), vector_len); 3865 } else { 3866 lea(rscratch, src); 3867 Assembler::vpmulld(dst, nds, Address(rscratch, 0), vector_len); 3868 } 3869 } 3870 3871 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3872 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3873 Assembler::vpsubb(dst, nds, src, vector_len); 3874 } 3875 3876 void MacroAssembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3877 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3878 Assembler::vpsubb(dst, nds, src, vector_len); 3879 } 3880 3881 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) { 3882 assert(((dst->encoding() < 16 && src->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3883 Assembler::vpsubw(dst, nds, src, vector_len); 3884 } 3885 3886 void MacroAssembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) { 3887 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3888 Assembler::vpsubw(dst, nds, src, vector_len); 3889 } 3890 3891 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3892 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3893 Assembler::vpsraw(dst, nds, shift, vector_len); 3894 } 3895 3896 void MacroAssembler::vpsraw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3897 assert(((dst->encoding() < 16 && nds->encoding() < 16) || 
VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3898 Assembler::vpsraw(dst, nds, shift, vector_len); 3899 } 3900 3901 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3902 assert(UseAVX > 2,""); 3903 if (!VM_Version::supports_avx512vl() && vector_len < 2) { 3904 vector_len = 2; 3905 } 3906 Assembler::evpsraq(dst, nds, shift, vector_len); 3907 } 3908 3909 void MacroAssembler::evpsraq(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3910 assert(UseAVX > 2,""); 3911 if (!VM_Version::supports_avx512vl() && vector_len < 2) { 3912 vector_len = 2; 3913 } 3914 Assembler::evpsraq(dst, nds, shift, vector_len); 3915 } 3916 3917 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3918 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3919 Assembler::vpsrlw(dst, nds, shift, vector_len); 3920 } 3921 3922 void MacroAssembler::vpsrlw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3923 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3924 Assembler::vpsrlw(dst, nds, shift, vector_len); 3925 } 3926 3927 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, XMMRegister shift, int vector_len) { 3928 assert(((dst->encoding() < 16 && shift->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3929 Assembler::vpsllw(dst, nds, shift, vector_len); 3930 } 3931 3932 void MacroAssembler::vpsllw(XMMRegister dst, XMMRegister nds, int shift, int vector_len) { 3933 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3934 Assembler::vpsllw(dst, nds, shift, vector_len); 3935 } 3936 3937 void MacroAssembler::vptest(XMMRegister dst, XMMRegister src) { 3938 assert((dst->encoding() < 16 && src->encoding() < 16),"XMM register should be 0-15"); 3939 Assembler::vptest(dst, src); 3940 } 3941 3942 void MacroAssembler::punpcklbw(XMMRegister dst, XMMRegister src) { 3943 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3944 Assembler::punpcklbw(dst, src); 3945 } 3946 3947 void MacroAssembler::pshufd(XMMRegister dst, Address src, int mode) { 3948 assert(((dst->encoding() < 16) || VM_Version::supports_avx512vl()),"XMM register should be 0-15"); 3949 Assembler::pshufd(dst, src, mode); 3950 } 3951 3952 void MacroAssembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) { 3953 assert(((dst->encoding() < 16 && src->encoding() < 16) || VM_Version::supports_avx512vlbw()),"XMM register should be 0-15"); 3954 Assembler::pshuflw(dst, src, mode); 3955 } 3956 3957 void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3958 assert(rscratch != noreg || always_reachable(src), "missing"); 3959 3960 if (reachable(src)) { 3961 vandpd(dst, nds, as_Address(src), vector_len); 3962 } else { 3963 lea(rscratch, src); 3964 vandpd(dst, nds, Address(rscratch, 0), vector_len); 3965 } 3966 } 3967 3968 void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 3969 assert(rscratch != noreg || always_reachable(src), "missing"); 3970 3971 if (reachable(src)) { 3972 vandps(dst, nds, as_Address(src), vector_len); 
3973 } else { 3974 lea(rscratch, src); 3975 vandps(dst, nds, Address(rscratch, 0), vector_len); 3976 } 3977 } 3978 3979 void MacroAssembler::evpord(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, 3980 bool merge, int vector_len, Register rscratch) { 3981 assert(rscratch != noreg || always_reachable(src), "missing"); 3982 3983 if (reachable(src)) { 3984 Assembler::evpord(dst, mask, nds, as_Address(src), merge, vector_len); 3985 } else { 3986 lea(rscratch, src); 3987 Assembler::evpord(dst, mask, nds, Address(rscratch, 0), merge, vector_len); 3988 } 3989 } 3990 3991 void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 3992 assert(rscratch != noreg || always_reachable(src), "missing"); 3993 3994 if (reachable(src)) { 3995 vdivsd(dst, nds, as_Address(src)); 3996 } else { 3997 lea(rscratch, src); 3998 vdivsd(dst, nds, Address(rscratch, 0)); 3999 } 4000 } 4001 4002 void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 4003 assert(rscratch != noreg || always_reachable(src), "missing"); 4004 4005 if (reachable(src)) { 4006 vdivss(dst, nds, as_Address(src)); 4007 } else { 4008 lea(rscratch, src); 4009 vdivss(dst, nds, Address(rscratch, 0)); 4010 } 4011 } 4012 4013 void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 4014 assert(rscratch != noreg || always_reachable(src), "missing"); 4015 4016 if (reachable(src)) { 4017 vmulsd(dst, nds, as_Address(src)); 4018 } else { 4019 lea(rscratch, src); 4020 vmulsd(dst, nds, Address(rscratch, 0)); 4021 } 4022 } 4023 4024 void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 4025 assert(rscratch != noreg || always_reachable(src), "missing"); 4026 4027 if (reachable(src)) { 4028 vmulss(dst, nds, as_Address(src)); 4029 } else { 4030 lea(rscratch, src); 4031 vmulss(dst, nds, Address(rscratch, 0)); 4032 } 4033 } 4034 4035 void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 4036 assert(rscratch != noreg || always_reachable(src), "missing"); 4037 4038 if (reachable(src)) { 4039 vsubsd(dst, nds, as_Address(src)); 4040 } else { 4041 lea(rscratch, src); 4042 vsubsd(dst, nds, Address(rscratch, 0)); 4043 } 4044 } 4045 4046 void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 4047 assert(rscratch != noreg || always_reachable(src), "missing"); 4048 4049 if (reachable(src)) { 4050 vsubss(dst, nds, as_Address(src)); 4051 } else { 4052 lea(rscratch, src); 4053 vsubss(dst, nds, Address(rscratch, 0)); 4054 } 4055 } 4056 4057 void MacroAssembler::vnegatess(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 4058 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 4059 assert(rscratch != noreg || always_reachable(src), "missing"); 4060 4061 vxorps(dst, nds, src, Assembler::AVX_128bit, rscratch); 4062 } 4063 4064 void MacroAssembler::vnegatesd(XMMRegister dst, XMMRegister nds, AddressLiteral src, Register rscratch) { 4065 assert(((dst->encoding() < 16 && nds->encoding() < 16) || VM_Version::supports_avx512vldq()),"XMM register should be 0-15"); 4066 assert(rscratch != noreg || always_reachable(src), "missing"); 4067 4068 vxorpd(dst, nds, src, Assembler::AVX_128bit, rscratch); 4069 } 4070 4071 void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, 
int vector_len, Register rscratch) { 4072 assert(rscratch != noreg || always_reachable(src), "missing"); 4073 4074 if (reachable(src)) { 4075 vxorpd(dst, nds, as_Address(src), vector_len); 4076 } else { 4077 lea(rscratch, src); 4078 vxorpd(dst, nds, Address(rscratch, 0), vector_len); 4079 } 4080 } 4081 4082 void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 4083 assert(rscratch != noreg || always_reachable(src), "missing"); 4084 4085 if (reachable(src)) { 4086 vxorps(dst, nds, as_Address(src), vector_len); 4087 } else { 4088 lea(rscratch, src); 4089 vxorps(dst, nds, Address(rscratch, 0), vector_len); 4090 } 4091 } 4092 4093 void MacroAssembler::vpxor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 4094 assert(rscratch != noreg || always_reachable(src), "missing"); 4095 4096 if (UseAVX > 1 || (vector_len < 1)) { 4097 if (reachable(src)) { 4098 Assembler::vpxor(dst, nds, as_Address(src), vector_len); 4099 } else { 4100 lea(rscratch, src); 4101 Assembler::vpxor(dst, nds, Address(rscratch, 0), vector_len); 4102 } 4103 } else { 4104 MacroAssembler::vxorpd(dst, nds, src, vector_len, rscratch); 4105 } 4106 } 4107 4108 void MacroAssembler::vpermd(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 4109 assert(rscratch != noreg || always_reachable(src), "missing"); 4110 4111 if (reachable(src)) { 4112 Assembler::vpermd(dst, nds, as_Address(src), vector_len); 4113 } else { 4114 lea(rscratch, src); 4115 Assembler::vpermd(dst, nds, Address(rscratch, 0), vector_len); 4116 } 4117 } 4118 4119 void MacroAssembler::clear_jobject_tag(Register possibly_non_local) { 4120 const int32_t inverted_mask = ~static_cast<int32_t>(JNIHandles::tag_mask); 4121 STATIC_ASSERT(inverted_mask == -4); // otherwise check this code 4122 // The inverted mask is sign-extended 4123 andptr(possibly_non_local, inverted_mask); 4124 } 4125 4126 void MacroAssembler::resolve_jobject(Register value, 4127 Register thread, 4128 Register tmp) { 4129 assert_different_registers(value, thread, tmp); 4130 Label done, tagged, weak_tagged; 4131 testptr(value, value); 4132 jcc(Assembler::zero, done); // Use null as-is. 4133 testptr(value, JNIHandles::tag_mask); // Test for tag. 4134 jcc(Assembler::notZero, tagged); 4135 4136 // Resolve local handle 4137 access_load_at(T_OBJECT, IN_NATIVE | AS_RAW, value, Address(value, 0), tmp, thread); 4138 verify_oop(value); 4139 jmp(done); 4140 4141 bind(tagged); 4142 testptr(value, JNIHandles::TypeTag::weak_global); // Test for weak tag. 4143 jcc(Assembler::notZero, weak_tagged); 4144 4145 // Resolve global handle 4146 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp, thread); 4147 verify_oop(value); 4148 jmp(done); 4149 4150 bind(weak_tagged); 4151 // Resolve jweak. 4152 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 4153 value, Address(value, -JNIHandles::TypeTag::weak_global), tmp, thread); 4154 verify_oop(value); 4155 4156 bind(done); 4157 } 4158 4159 void MacroAssembler::resolve_global_jobject(Register value, 4160 Register thread, 4161 Register tmp) { 4162 assert_different_registers(value, thread, tmp); 4163 Label done; 4164 4165 testptr(value, value); 4166 jcc(Assembler::zero, done); // Use null as-is. 4167 4168 #ifdef ASSERT 4169 { 4170 Label valid_global_tag; 4171 testptr(value, JNIHandles::TypeTag::global); // Test for global tag. 
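    // If the global-tag bit is set we branch to valid_global_tag; otherwise we
    // fall through to the stop() below, catching callers that hand this routine
    // a handle that is not a global JNI handle.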
4172 jcc(Assembler::notZero, valid_global_tag); 4173 stop("non global jobject using resolve_global_jobject"); 4174 bind(valid_global_tag); 4175 } 4176 #endif 4177 4178 // Resolve global handle 4179 access_load_at(T_OBJECT, IN_NATIVE, value, Address(value, -JNIHandles::TypeTag::global), tmp, thread); 4180 verify_oop(value); 4181 4182 bind(done); 4183 } 4184 4185 void MacroAssembler::subptr(Register dst, int32_t imm32) { 4186 LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32)); 4187 } 4188 4189 // Force generation of a 4 byte immediate value even if it fits into 8bit 4190 void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) { 4191 LP64_ONLY(subq_imm32(dst, imm32)) NOT_LP64(subl_imm32(dst, imm32)); 4192 } 4193 4194 void MacroAssembler::subptr(Register dst, Register src) { 4195 LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); 4196 } 4197 4198 // C++ bool manipulation 4199 void MacroAssembler::testbool(Register dst) { 4200 if(sizeof(bool) == 1) 4201 testb(dst, 0xff); 4202 else if(sizeof(bool) == 2) { 4203 // testw implementation needed for two byte bools 4204 ShouldNotReachHere(); 4205 } else if(sizeof(bool) == 4) 4206 testl(dst, dst); 4207 else 4208 // unsupported 4209 ShouldNotReachHere(); 4210 } 4211 4212 void MacroAssembler::testptr(Register dst, Register src) { 4213 LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src)); 4214 } 4215 4216 // Object / value buffer allocation... 4217 // 4218 // Kills klass and rsi on LP64 4219 void MacroAssembler::allocate_instance(Register klass, Register new_obj, 4220 Register t1, Register t2, 4221 bool clear_fields, Label& alloc_failed) 4222 { 4223 Label done, initialize_header, initialize_object, slow_case, slow_case_no_pop; 4224 Register layout_size = t1; 4225 assert(new_obj == rax, "needs to be rax"); 4226 assert_different_registers(klass, new_obj, t1, t2); 4227 4228 // get instance_size in InstanceKlass (scaled to a count of bytes) 4229 movl(layout_size, Address(klass, Klass::layout_helper_offset())); 4230 // test to see if it is malformed in some way 4231 testl(layout_size, Klass::_lh_instance_slow_path_bit); 4232 jcc(Assembler::notZero, slow_case_no_pop); 4233 4234 // Allocate the instance: 4235 // If TLAB is enabled: 4236 // Try to allocate in the TLAB. 4237 // If fails, go to the slow path. 4238 // Else If inline contiguous allocations are enabled: 4239 // Try to allocate in eden. 4240 // If fails due to heap end, go to slow path. 4241 // 4242 // If TLAB is enabled OR inline contiguous is enabled: 4243 // Initialize the allocation. 4244 // Exit. 4245 // 4246 // Go to slow path. 4247 4248 push(klass); 4249 const Register thread = LP64_ONLY(r15_thread) NOT_LP64(klass); 4250 #ifndef _LP64 4251 if (UseTLAB) { 4252 get_thread(thread); 4253 } 4254 #endif // _LP64 4255 4256 if (UseTLAB) { 4257 tlab_allocate(thread, new_obj, layout_size, 0, klass, t2, slow_case); 4258 if (ZeroTLAB || (!clear_fields)) { 4259 // the fields have been already cleared 4260 jmp(initialize_header); 4261 } else { 4262 // initialize both the header and fields 4263 jmp(initialize_object); 4264 } 4265 } else { 4266 jmp(slow_case); 4267 } 4268 4269 // If UseTLAB is true, the object is created above and there is an initialize need. 4270 // Otherwise, skip and go to the slow path. 4271 if (UseTLAB) { 4272 if (clear_fields) { 4273 // The object is initialized before the header. If the object size is 4274 // zero, go directly to the header initialization. 
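      // In outline, the clearing below does, on 64-bit (sketch; names illustrative):
      //   size_in_longs = (layout_size_in_bytes - sizeof(oopDesc)) >> LogBytesPerLong;
      //   if (size_in_longs == 0) goto initialize_header;
      //   for (i = size_in_longs; i != 0; i--)
      //     ((jlong*)new_obj)[sizeof(oopDesc)/8 + i - 1] = 0;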
4275 bind(initialize_object); 4276 decrement(layout_size, sizeof(oopDesc)); 4277 jcc(Assembler::zero, initialize_header); 4278 4279 // Initialize topmost object field, divide size by 8, check if odd and 4280 // test if zero. 4281 Register zero = klass; 4282 xorl(zero, zero); // use zero reg to clear memory (shorter code) 4283 shrl(layout_size, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd 4284 4285 #ifdef ASSERT 4286 // make sure instance_size was multiple of 8 4287 Label L; 4288 // Ignore partial flag stall after shrl() since it is debug VM 4289 jcc(Assembler::carryClear, L); 4290 stop("object size is not multiple of 2 - adjust this code"); 4291 bind(L); 4292 // must be > 0, no extra check needed here 4293 #endif 4294 4295 // initialize remaining object fields: instance_size was a multiple of 8 4296 { 4297 Label loop; 4298 bind(loop); 4299 movptr(Address(new_obj, layout_size, Address::times_8, sizeof(oopDesc) - 1*oopSize), zero); 4300 NOT_LP64(movptr(Address(new_obj, layout_size, Address::times_8, sizeof(oopDesc) - 2*oopSize), zero)); 4301 decrement(layout_size); 4302 jcc(Assembler::notZero, loop); 4303 } 4304 } // clear_fields 4305 4306 // initialize object header only. 4307 bind(initialize_header); 4308 pop(klass); 4309 Register mark_word = t2; 4310 movptr(mark_word, Address(klass, Klass::prototype_header_offset())); 4311 movptr(Address(new_obj, oopDesc::mark_offset_in_bytes ()), mark_word); 4312 #ifdef _LP64 4313 xorl(rsi, rsi); // use zero reg to clear memory (shorter code) 4314 store_klass_gap(new_obj, rsi); // zero klass gap for compressed oops 4315 #endif 4316 movptr(t2, klass); // preserve klass 4317 store_klass(new_obj, t2, rscratch1); // src klass reg is potentially compressed 4318 4319 jmp(done); 4320 } 4321 4322 bind(slow_case); 4323 pop(klass); 4324 bind(slow_case_no_pop); 4325 jmp(alloc_failed); 4326 4327 bind(done); 4328 } 4329 4330 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes. 
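// A typical call site looks roughly like this (sketch; register choices are
// illustrative only):
//   tlab_allocate(r15_thread, rax /*obj*/, rbx /*var_size_in_bytes*/,
//                 0 /*con_size_in_bytes*/, rcx /*t1*/, rdx /*t2*/, slow_case);
// Execution falls through with obj pointing at the new storage, or jumps to
// slow_case when the current TLAB cannot satisfy the request.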
4331 void MacroAssembler::tlab_allocate(Register thread, Register obj, 4332 Register var_size_in_bytes, 4333 int con_size_in_bytes, 4334 Register t1, 4335 Register t2, 4336 Label& slow_case) { 4337 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 4338 bs->tlab_allocate(this, thread, obj, var_size_in_bytes, con_size_in_bytes, t1, t2, slow_case); 4339 } 4340 4341 RegSet MacroAssembler::call_clobbered_gp_registers() { 4342 RegSet regs; 4343 #ifdef _LP64 4344 regs += RegSet::of(rax, rcx, rdx); 4345 #ifndef WINDOWS 4346 regs += RegSet::of(rsi, rdi); 4347 #endif 4348 regs += RegSet::range(r8, r11); 4349 #else 4350 regs += RegSet::of(rax, rcx, rdx); 4351 #endif 4352 #ifdef _LP64 4353 if (UseAPX) { 4354 regs += RegSet::range(r16, as_Register(Register::number_of_registers - 1)); 4355 } 4356 #endif 4357 return regs; 4358 } 4359 4360 XMMRegSet MacroAssembler::call_clobbered_xmm_registers() { 4361 int num_xmm_registers = XMMRegister::available_xmm_registers(); 4362 #if defined(WINDOWS) && defined(_LP64) 4363 XMMRegSet result = XMMRegSet::range(xmm0, xmm5); 4364 if (num_xmm_registers > 16) { 4365 result += XMMRegSet::range(xmm16, as_XMMRegister(num_xmm_registers - 1)); 4366 } 4367 return result; 4368 #else 4369 return XMMRegSet::range(xmm0, as_XMMRegister(num_xmm_registers - 1)); 4370 #endif 4371 } 4372 4373 static int FPUSaveAreaSize = align_up(108, StackAlignmentInBytes); // 108 bytes needed for FPU state by fsave/frstor 4374 4375 #ifndef _LP64 4376 static bool use_x87_registers() { return UseSSE < 2; } 4377 #endif 4378 static bool use_xmm_registers() { return UseSSE >= 1; } 4379 4380 // C1 only ever uses the first double/float of the XMM register. 4381 static int xmm_save_size() { return UseSSE >= 2 ? sizeof(double) : sizeof(float); } 4382 4383 static void save_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) { 4384 if (UseSSE == 1) { 4385 masm->movflt(Address(rsp, offset), reg); 4386 } else { 4387 masm->movdbl(Address(rsp, offset), reg); 4388 } 4389 } 4390 4391 static void restore_xmm_register(MacroAssembler* masm, int offset, XMMRegister reg) { 4392 if (UseSSE == 1) { 4393 masm->movflt(reg, Address(rsp, offset)); 4394 } else { 4395 masm->movdbl(reg, Address(rsp, offset)); 4396 } 4397 } 4398 4399 static int register_section_sizes(RegSet gp_registers, XMMRegSet xmm_registers, 4400 bool save_fpu, int& gp_area_size, 4401 int& fp_area_size, int& xmm_area_size) { 4402 4403 gp_area_size = align_up(gp_registers.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size, 4404 StackAlignmentInBytes); 4405 #ifdef _LP64 4406 fp_area_size = 0; 4407 #else 4408 fp_area_size = (save_fpu && use_x87_registers()) ? FPUSaveAreaSize : 0; 4409 #endif 4410 xmm_area_size = (save_fpu && use_xmm_registers()) ? 
xmm_registers.size() * xmm_save_size() : 0; 4411 4412 return gp_area_size + fp_area_size + xmm_area_size; 4413 } 4414 4415 void MacroAssembler::push_call_clobbered_registers_except(RegSet exclude, bool save_fpu) { 4416 block_comment("push_call_clobbered_registers start"); 4417 // Regular registers 4418 RegSet gp_registers_to_push = call_clobbered_gp_registers() - exclude; 4419 4420 int gp_area_size; 4421 int fp_area_size; 4422 int xmm_area_size; 4423 int total_save_size = register_section_sizes(gp_registers_to_push, call_clobbered_xmm_registers(), save_fpu, 4424 gp_area_size, fp_area_size, xmm_area_size); 4425 subptr(rsp, total_save_size); 4426 4427 push_set(gp_registers_to_push, 0); 4428 4429 #ifndef _LP64 4430 if (save_fpu && use_x87_registers()) { 4431 fnsave(Address(rsp, gp_area_size)); 4432 fwait(); 4433 } 4434 #endif 4435 if (save_fpu && use_xmm_registers()) { 4436 push_set(call_clobbered_xmm_registers(), gp_area_size + fp_area_size); 4437 } 4438 4439 block_comment("push_call_clobbered_registers end"); 4440 } 4441 4442 void MacroAssembler::pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu) { 4443 block_comment("pop_call_clobbered_registers start"); 4444 4445 RegSet gp_registers_to_pop = call_clobbered_gp_registers() - exclude; 4446 4447 int gp_area_size; 4448 int fp_area_size; 4449 int xmm_area_size; 4450 int total_save_size = register_section_sizes(gp_registers_to_pop, call_clobbered_xmm_registers(), restore_fpu, 4451 gp_area_size, fp_area_size, xmm_area_size); 4452 4453 if (restore_fpu && use_xmm_registers()) { 4454 pop_set(call_clobbered_xmm_registers(), gp_area_size + fp_area_size); 4455 } 4456 #ifndef _LP64 4457 if (restore_fpu && use_x87_registers()) { 4458 frstor(Address(rsp, gp_area_size)); 4459 } 4460 #endif 4461 4462 pop_set(gp_registers_to_pop, 0); 4463 4464 addptr(rsp, total_save_size); 4465 4466 vzeroupper(); 4467 4468 block_comment("pop_call_clobbered_registers end"); 4469 } 4470 4471 void MacroAssembler::push_set(XMMRegSet set, int offset) { 4472 assert(is_aligned(set.size() * xmm_save_size(), StackAlignmentInBytes), "must be"); 4473 int spill_offset = offset; 4474 4475 for (RegSetIterator<XMMRegister> it = set.begin(); *it != xnoreg; ++it) { 4476 save_xmm_register(this, spill_offset, *it); 4477 spill_offset += xmm_save_size(); 4478 } 4479 } 4480 4481 void MacroAssembler::pop_set(XMMRegSet set, int offset) { 4482 int restore_size = set.size() * xmm_save_size(); 4483 assert(is_aligned(restore_size, StackAlignmentInBytes), "must be"); 4484 4485 int restore_offset = offset + restore_size - xmm_save_size(); 4486 4487 for (ReverseRegSetIterator<XMMRegister> it = set.rbegin(); *it != xnoreg; ++it) { 4488 restore_xmm_register(this, restore_offset, *it); 4489 restore_offset -= xmm_save_size(); 4490 } 4491 } 4492 4493 void MacroAssembler::push_set(RegSet set, int offset) { 4494 int spill_offset; 4495 if (offset == -1) { 4496 int register_push_size = set.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size; 4497 int aligned_size = align_up(register_push_size, StackAlignmentInBytes); 4498 subptr(rsp, aligned_size); 4499 spill_offset = 0; 4500 } else { 4501 spill_offset = offset; 4502 } 4503 4504 for (RegSetIterator<Register> it = set.begin(); *it != noreg; ++it) { 4505 movptr(Address(rsp, spill_offset), *it); 4506 spill_offset += Register::max_slots_per_register * VMRegImpl::stack_slot_size; 4507 } 4508 } 4509 4510 void MacroAssembler::pop_set(RegSet set, int offset) { 4511 4512 int gp_reg_size = Register::max_slots_per_register * 
VMRegImpl::stack_slot_size; 4513 int restore_size = set.size() * gp_reg_size; 4514 int aligned_size = align_up(restore_size, StackAlignmentInBytes); 4515 4516 int restore_offset; 4517 if (offset == -1) { 4518 restore_offset = restore_size - gp_reg_size; 4519 } else { 4520 restore_offset = offset + restore_size - gp_reg_size; 4521 } 4522 for (ReverseRegSetIterator<Register> it = set.rbegin(); *it != noreg; ++it) { 4523 movptr(*it, Address(rsp, restore_offset)); 4524 restore_offset -= gp_reg_size; 4525 } 4526 4527 if (offset == -1) { 4528 addptr(rsp, aligned_size); 4529 } 4530 } 4531 4532 // Preserves the contents of address, destroys the contents length_in_bytes and temp. 4533 void MacroAssembler::zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp) { 4534 assert(address != length_in_bytes && address != temp && temp != length_in_bytes, "registers must be different"); 4535 assert((offset_in_bytes & (BytesPerWord - 1)) == 0, "offset must be a multiple of BytesPerWord"); 4536 Label done; 4537 4538 testptr(length_in_bytes, length_in_bytes); 4539 jcc(Assembler::zero, done); 4540 4541 // initialize topmost word, divide index by 2, check if odd and test if zero 4542 // note: for the remaining code to work, index must be a multiple of BytesPerWord 4543 #ifdef ASSERT 4544 { 4545 Label L; 4546 testptr(length_in_bytes, BytesPerWord - 1); 4547 jcc(Assembler::zero, L); 4548 stop("length must be a multiple of BytesPerWord"); 4549 bind(L); 4550 } 4551 #endif 4552 Register index = length_in_bytes; 4553 xorptr(temp, temp); // use _zero reg to clear memory (shorter code) 4554 if (UseIncDec) { 4555 shrptr(index, 3); // divide by 8/16 and set carry flag if bit 2 was set 4556 } else { 4557 shrptr(index, 2); // use 2 instructions to avoid partial flag stall 4558 shrptr(index, 1); 4559 } 4560 #ifndef _LP64 4561 // index could have not been a multiple of 8 (i.e., bit 2 was set) 4562 { 4563 Label even; 4564 // note: if index was a multiple of 8, then it cannot 4565 // be 0 now otherwise it must have been 0 before 4566 // => if it is even, we don't need to check for 0 again 4567 jcc(Assembler::carryClear, even); 4568 // clear topmost word (no jump would be needed if conditional assignment worked here) 4569 movptr(Address(address, index, Address::times_8, offset_in_bytes - 0*BytesPerWord), temp); 4570 // index could be 0 now, must check again 4571 jcc(Assembler::zero, done); 4572 bind(even); 4573 } 4574 #endif // !_LP64 4575 // initialize remaining object fields: index is a multiple of 2 now 4576 { 4577 Label loop; 4578 bind(loop); 4579 movptr(Address(address, index, Address::times_8, offset_in_bytes - 1*BytesPerWord), temp); 4580 NOT_LP64(movptr(Address(address, index, Address::times_8, offset_in_bytes - 2*BytesPerWord), temp);) 4581 decrement(index); 4582 jcc(Assembler::notZero, loop); 4583 } 4584 4585 bind(done); 4586 } 4587 4588 void MacroAssembler::get_inline_type_field_klass(Register holder_klass, Register index, Register inline_klass) { 4589 inline_layout_info(holder_klass, index, inline_klass); 4590 movptr(inline_klass, Address(inline_klass, InlineLayoutInfo::klass_offset())); 4591 } 4592 4593 void MacroAssembler::inline_layout_info(Register holder_klass, Register index, Register layout_info) { 4594 movptr(layout_info, Address(holder_klass, InstanceKlass::inline_layout_info_array_offset())); 4595 #ifdef ASSERT 4596 { 4597 Label done; 4598 cmpptr(layout_info, 0); 4599 jcc(Assembler::notEqual, done); 4600 stop("inline_layout_info_array is null"); 4601 bind(done); 4602 } 
4603 #endif 4604 4605 InlineLayoutInfo array[2]; 4606 int size = (char*)&array[1] - (char*)&array[0]; // computing size of array elements 4607 if (is_power_of_2(size)) { 4608 shll(index, log2i_exact(size)); // Scale index by power of 2 4609 } else { 4610 imull(index, index, size); // Scale the index to be the entry index * array_element_size 4611 } 4612 lea(layout_info, Address(layout_info, index, Address::times_1, Array<InlineLayoutInfo>::base_offset_in_bytes())); 4613 } 4614 4615 void MacroAssembler::get_default_value_oop(Register inline_klass, Register temp_reg, Register obj) { 4616 #ifdef ASSERT 4617 { 4618 Label done_check; 4619 test_klass_is_inline_type(inline_klass, temp_reg, done_check); 4620 stop("get_default_value_oop from non inline type klass"); 4621 bind(done_check); 4622 } 4623 #endif 4624 Register offset = temp_reg; 4625 // Getting the offset of the pre-allocated default value 4626 movptr(offset, Address(inline_klass, in_bytes(InstanceKlass::adr_inlineklass_fixed_block_offset()))); 4627 movl(offset, Address(offset, in_bytes(InlineKlass::default_value_offset_offset()))); 4628 4629 // Getting the mirror 4630 movptr(obj, Address(inline_klass, in_bytes(Klass::java_mirror_offset()))); 4631 resolve_oop_handle(obj, inline_klass); 4632 4633 // Getting the pre-allocated default value from the mirror 4634 Address field(obj, offset, Address::times_1); 4635 load_heap_oop(obj, field); 4636 } 4637 4638 void MacroAssembler::get_empty_inline_type_oop(Register inline_klass, Register temp_reg, Register obj) { 4639 #ifdef ASSERT 4640 { 4641 Label done_check; 4642 test_klass_is_empty_inline_type(inline_klass, temp_reg, done_check); 4643 stop("get_empty_value from non-empty inline klass"); 4644 bind(done_check); 4645 } 4646 #endif 4647 get_default_value_oop(inline_klass, temp_reg, obj); 4648 } 4649 4650 4651 // Look up the method for a megamorphic invokeinterface call. 4652 // The target method is determined by <intf_klass, itable_index>. 4653 // The receiver klass is in recv_klass. 4654 // On success, the result will be in method_result, and execution falls through. 4655 // On failure, execution transfers to the given label. 4656 void MacroAssembler::lookup_interface_method(Register recv_klass, 4657 Register intf_klass, 4658 RegisterOrConstant itable_index, 4659 Register method_result, 4660 Register scan_temp, 4661 Label& L_no_such_interface, 4662 bool return_method) { 4663 assert_different_registers(recv_klass, intf_klass, scan_temp); 4664 assert_different_registers(method_result, intf_klass, scan_temp); 4665 assert(recv_klass != method_result || !return_method, 4666 "recv_klass can be destroyed when method isn't needed"); 4667 4668 assert(itable_index.is_constant() || itable_index.as_register() == method_result, 4669 "caller must use same register for non-constant itable index as for method"); 4670 4671 // Compute start of first itableOffsetEntry (which is at the end of the vtable) 4672 int vtable_base = in_bytes(Klass::vtable_start_offset()); 4673 int itentry_off = in_bytes(itableMethodEntry::method_offset()); 4674 int scan_step = itableOffsetEntry::size() * wordSize; 4675 int vte_size = vtableEntry::size_in_bytes(); 4676 Address::ScaleFactor times_vte_scale = Address::times_ptr; 4677 assert(vte_size == wordSize, "else adjust times_vte_scale"); 4678 4679 movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset())); 4680 4681 // Could store the aligned, prescaled offset in the klass. 
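  // After the lea below, roughly:
  //   scan_temp = recv_klass + vtable_start_offset + vtable_length * wordSize
  // i.e. the address of the first itableOffsetEntry.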
4682 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base)); 4683 4684 if (return_method) { 4685 // Adjust recv_klass by scaled itable_index, so we can free itable_index. 4686 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); 4687 lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off)); 4688 } 4689 4690 // for (scan = klass->itable(); scan->interface() != nullptr; scan += scan_step) { 4691 // if (scan->interface() == intf) { 4692 // result = (klass + scan->offset() + itable_index); 4693 // } 4694 // } 4695 Label search, found_method; 4696 4697 for (int peel = 1; peel >= 0; peel--) { 4698 movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset())); 4699 cmpptr(intf_klass, method_result); 4700 4701 if (peel) { 4702 jccb(Assembler::equal, found_method); 4703 } else { 4704 jccb(Assembler::notEqual, search); 4705 // (invert the test to fall through to found_method...) 4706 } 4707 4708 if (!peel) break; 4709 4710 bind(search); 4711 4712 // Check that the previous entry is non-null. A null entry means that 4713 // the receiver class doesn't implement the interface, and wasn't the 4714 // same as when the caller was compiled. 4715 testptr(method_result, method_result); 4716 jcc(Assembler::zero, L_no_such_interface); 4717 addptr(scan_temp, scan_step); 4718 } 4719 4720 bind(found_method); 4721 4722 if (return_method) { 4723 // Got a hit. 4724 movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset())); 4725 movptr(method_result, Address(recv_klass, scan_temp, Address::times_1)); 4726 } 4727 } 4728 4729 // Look up the method for a megamorphic invokeinterface call in a single pass over itable: 4730 // - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData 4731 // - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index 4732 // The target method is determined by <holder_klass, itable_index>. 4733 // The receiver klass is in recv_klass. 4734 // On success, the result will be in method_result, and execution falls through. 4735 // On failure, execution transfers to the given label. 4736 void MacroAssembler::lookup_interface_method_stub(Register recv_klass, 4737 Register holder_klass, 4738 Register resolved_klass, 4739 Register method_result, 4740 Register scan_temp, 4741 Register temp_reg2, 4742 Register receiver, 4743 int itable_index, 4744 Label& L_no_such_interface) { 4745 assert_different_registers(recv_klass, method_result, holder_klass, resolved_klass, scan_temp, temp_reg2, receiver); 4746 Register temp_itbl_klass = method_result; 4747 Register temp_reg = (temp_reg2 == noreg ? 
recv_klass : temp_reg2); // reuse recv_klass register on 32-bit x86 impl 4748 4749 int vtable_base = in_bytes(Klass::vtable_start_offset()); 4750 int itentry_off = in_bytes(itableMethodEntry::method_offset()); 4751 int scan_step = itableOffsetEntry::size() * wordSize; 4752 int vte_size = vtableEntry::size_in_bytes(); 4753 int ioffset = in_bytes(itableOffsetEntry::interface_offset()); 4754 int ooffset = in_bytes(itableOffsetEntry::offset_offset()); 4755 Address::ScaleFactor times_vte_scale = Address::times_ptr; 4756 assert(vte_size == wordSize, "adjust times_vte_scale"); 4757 4758 Label L_loop_scan_resolved_entry, L_resolved_found, L_holder_found; 4759 4760 // temp_itbl_klass = recv_klass.itable[0] 4761 // scan_temp = &recv_klass.itable[0] + step 4762 movl(scan_temp, Address(recv_klass, Klass::vtable_length_offset())); 4763 movptr(temp_itbl_klass, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset)); 4764 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base + ioffset + scan_step)); 4765 xorptr(temp_reg, temp_reg); 4766 4767 // Initial checks: 4768 // - if (holder_klass != resolved_klass), go to "scan for resolved" 4769 // - if (itable[0] == 0), no such interface 4770 // - if (itable[0] == holder_klass), shortcut to "holder found" 4771 cmpptr(holder_klass, resolved_klass); 4772 jccb(Assembler::notEqual, L_loop_scan_resolved_entry); 4773 testptr(temp_itbl_klass, temp_itbl_klass); 4774 jccb(Assembler::zero, L_no_such_interface); 4775 cmpptr(holder_klass, temp_itbl_klass); 4776 jccb(Assembler::equal, L_holder_found); 4777 4778 // Loop: Look for holder_klass record in itable 4779 // do { 4780 // tmp = itable[index]; 4781 // index += step; 4782 // if (tmp == holder_klass) { 4783 // goto L_holder_found; // Found! 4784 // } 4785 // } while (tmp != 0); 4786 // goto L_no_such_interface // Not found. 4787 Label L_scan_holder; 4788 bind(L_scan_holder); 4789 movptr(temp_itbl_klass, Address(scan_temp, 0)); 4790 addptr(scan_temp, scan_step); 4791 cmpptr(holder_klass, temp_itbl_klass); 4792 jccb(Assembler::equal, L_holder_found); 4793 testptr(temp_itbl_klass, temp_itbl_klass); 4794 jccb(Assembler::notZero, L_scan_holder); 4795 4796 jmpb(L_no_such_interface); 4797 4798 // Loop: Look for resolved_class record in itable 4799 // do { 4800 // tmp = itable[index]; 4801 // index += step; 4802 // if (tmp == holder_klass) { 4803 // // Also check if we have met a holder klass 4804 // holder_tmp = itable[index-step-ioffset]; 4805 // } 4806 // if (tmp == resolved_klass) { 4807 // goto L_resolved_found; // Found! 4808 // } 4809 // } while (tmp != 0); 4810 // goto L_no_such_interface // Not found. 4811 // 4812 Label L_loop_scan_resolved; 4813 bind(L_loop_scan_resolved); 4814 movptr(temp_itbl_klass, Address(scan_temp, 0)); 4815 addptr(scan_temp, scan_step); 4816 bind(L_loop_scan_resolved_entry); 4817 cmpptr(holder_klass, temp_itbl_klass); 4818 cmovl(Assembler::equal, temp_reg, Address(scan_temp, ooffset - ioffset - scan_step)); 4819 cmpptr(resolved_klass, temp_itbl_klass); 4820 jccb(Assembler::equal, L_resolved_found); 4821 testptr(temp_itbl_klass, temp_itbl_klass); 4822 jccb(Assembler::notZero, L_loop_scan_resolved); 4823 4824 jmpb(L_no_such_interface); 4825 4826 Label L_ready; 4827 4828 // See if we already have a holder klass. If not, go and scan for it. 
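  // temp_reg was zeroed up front and is only written by the cmovl in the
  // resolved-scan loop, so a nonzero value here means the holder's itable
  // offset has already been captured and the holder scan can be skipped.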
4829 bind(L_resolved_found); 4830 testptr(temp_reg, temp_reg); 4831 jccb(Assembler::zero, L_scan_holder); 4832 jmpb(L_ready); 4833 4834 bind(L_holder_found); 4835 movl(temp_reg, Address(scan_temp, ooffset - ioffset - scan_step)); 4836 4837 // Finally, temp_reg contains holder_klass vtable offset 4838 bind(L_ready); 4839 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below"); 4840 if (temp_reg2 == noreg) { // recv_klass register is clobbered for 32-bit x86 impl 4841 load_klass(scan_temp, receiver, noreg); 4842 movptr(method_result, Address(scan_temp, temp_reg, Address::times_1, itable_index * wordSize + itentry_off)); 4843 } else { 4844 movptr(method_result, Address(recv_klass, temp_reg, Address::times_1, itable_index * wordSize + itentry_off)); 4845 } 4846 } 4847 4848 4849 // virtual method calling 4850 void MacroAssembler::lookup_virtual_method(Register recv_klass, 4851 RegisterOrConstant vtable_index, 4852 Register method_result) { 4853 const ByteSize base = Klass::vtable_start_offset(); 4854 assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below"); 4855 Address vtable_entry_addr(recv_klass, 4856 vtable_index, Address::times_ptr, 4857 base + vtableEntry::method_offset()); 4858 movptr(method_result, vtable_entry_addr); 4859 } 4860 4861 4862 void MacroAssembler::check_klass_subtype(Register sub_klass, 4863 Register super_klass, 4864 Register temp_reg, 4865 Label& L_success) { 4866 Label L_failure; 4867 check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, nullptr); 4868 check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, nullptr); 4869 bind(L_failure); 4870 } 4871 4872 4873 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass, 4874 Register super_klass, 4875 Register temp_reg, 4876 Label* L_success, 4877 Label* L_failure, 4878 Label* L_slow_path, 4879 RegisterOrConstant super_check_offset) { 4880 assert_different_registers(sub_klass, super_klass, temp_reg); 4881 bool must_load_sco = (super_check_offset.constant_or_zero() == -1); 4882 if (super_check_offset.is_register()) { 4883 assert_different_registers(sub_klass, super_klass, 4884 super_check_offset.as_register()); 4885 } else if (must_load_sco) { 4886 assert(temp_reg != noreg, "supply either a temp or a register offset"); 4887 } 4888 4889 Label L_fallthrough; 4890 int label_nulls = 0; 4891 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 4892 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 4893 if (L_slow_path == nullptr) { L_slow_path = &L_fallthrough; label_nulls++; } 4894 assert(label_nulls <= 1, "at most one null in the batch"); 4895 4896 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 4897 int sco_offset = in_bytes(Klass::super_check_offset_offset()); 4898 Address super_check_offset_addr(super_klass, sco_offset); 4899 4900 // Hacked jcc, which "knows" that L_fallthrough, at least, is in 4901 // range of a jccb. If this routine grows larger, reconsider at 4902 // least some of these. 4903 #define local_jcc(assembler_cond, label) \ 4904 if (&(label) == &L_fallthrough) jccb(assembler_cond, label); \ 4905 else jcc( assembler_cond, label) /*omit semi*/ 4906 4907 // Hacked jmp, which may only be used just before L_fallthrough. 
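// For instance, local_jcc(Assembler::equal, *L_success) emits the short-form
// jccb when L_success aliases the local L_fallthrough label (guaranteed to be
// in jccb range) and a full jcc otherwise; final_jmp simply drops the jump
// when its target is the fall-through label.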
4908 #define final_jmp(label) \ 4909 if (&(label) == &L_fallthrough) { /*do nothing*/ } \ 4910 else jmp(label) /*omit semi*/ 4911 4912 // If the pointers are equal, we are done (e.g., String[] elements). 4913 // This self-check enables sharing of secondary supertype arrays among 4914 // non-primary types such as array-of-interface. Otherwise, each such 4915 // type would need its own customized SSA. 4916 // We move this check to the front of the fast path because many 4917 // type checks are in fact trivially successful in this manner, 4918 // so we get a nicely predicted branch right at the start of the check. 4919 cmpptr(sub_klass, super_klass); 4920 local_jcc(Assembler::equal, *L_success); 4921 4922 // Check the supertype display: 4923 if (must_load_sco) { 4924 // Positive movl does right thing on LP64. 4925 movl(temp_reg, super_check_offset_addr); 4926 super_check_offset = RegisterOrConstant(temp_reg); 4927 } 4928 Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0); 4929 cmpptr(super_klass, super_check_addr); // load displayed supertype 4930 4931 // This check has worked decisively for primary supers. 4932 // Secondary supers are sought in the super_cache ('super_cache_addr'). 4933 // (Secondary supers are interfaces and very deeply nested subtypes.) 4934 // This works in the same check above because of a tricky aliasing 4935 // between the super_cache and the primary super display elements. 4936 // (The 'super_check_addr' can address either, as the case requires.) 4937 // Note that the cache is updated below if it does not help us find 4938 // what we need immediately. 4939 // So if it was a primary super, we can just fail immediately. 4940 // Otherwise, it's the slow path for us (no success at this point). 4941 4942 if (super_check_offset.is_register()) { 4943 local_jcc(Assembler::equal, *L_success); 4944 cmpl(super_check_offset.as_register(), sc_offset); 4945 if (L_failure == &L_fallthrough) { 4946 local_jcc(Assembler::equal, *L_slow_path); 4947 } else { 4948 local_jcc(Assembler::notEqual, *L_failure); 4949 final_jmp(*L_slow_path); 4950 } 4951 } else if (super_check_offset.as_constant() == sc_offset) { 4952 // Need a slow path; fast failure is impossible. 4953 if (L_slow_path == &L_fallthrough) { 4954 local_jcc(Assembler::equal, *L_success); 4955 } else { 4956 local_jcc(Assembler::notEqual, *L_slow_path); 4957 final_jmp(*L_success); 4958 } 4959 } else { 4960 // No slow path; it's a fast decision. 
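    // Here super_check_offset is a compile-time constant other than sc_offset,
    // so the single displayed-supertype compare above settles the question
    // outright: equal means success, not-equal means failure, and no
    // secondary-supers scan can be needed.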
4961 if (L_failure == &L_fallthrough) { 4962 local_jcc(Assembler::equal, *L_success); 4963 } else { 4964 local_jcc(Assembler::notEqual, *L_failure); 4965 final_jmp(*L_success); 4966 } 4967 } 4968 4969 bind(L_fallthrough); 4970 4971 #undef local_jcc 4972 #undef final_jmp 4973 } 4974 4975 4976 void MacroAssembler::check_klass_subtype_slow_path_linear(Register sub_klass, 4977 Register super_klass, 4978 Register temp_reg, 4979 Register temp2_reg, 4980 Label* L_success, 4981 Label* L_failure, 4982 bool set_cond_codes) { 4983 assert_different_registers(sub_klass, super_klass, temp_reg); 4984 if (temp2_reg != noreg) 4985 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg); 4986 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg) 4987 4988 Label L_fallthrough; 4989 int label_nulls = 0; 4990 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 4991 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 4992 assert(label_nulls <= 1, "at most one null in the batch"); 4993 4994 // a couple of useful fields in sub_klass: 4995 int ss_offset = in_bytes(Klass::secondary_supers_offset()); 4996 int sc_offset = in_bytes(Klass::secondary_super_cache_offset()); 4997 Address secondary_supers_addr(sub_klass, ss_offset); 4998 Address super_cache_addr( sub_klass, sc_offset); 4999 5000 // Do a linear scan of the secondary super-klass chain. 5001 // This code is rarely used, so simplicity is a virtue here. 5002 // The repne_scan instruction uses fixed registers, which we must spill. 5003 // Don't worry too much about pre-existing connections with the input regs. 5004 5005 assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super) 5006 assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter) 5007 5008 // Get super_klass value into rax (even if it was in rdi or rcx). 5009 bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false; 5010 if (super_klass != rax) { 5011 if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; } 5012 mov(rax, super_klass); 5013 } 5014 if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; } 5015 if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; } 5016 5017 #ifndef PRODUCT 5018 uint* pst_counter = &SharedRuntime::_partial_subtype_ctr; 5019 ExternalAddress pst_counter_addr((address) pst_counter); 5020 NOT_LP64( incrementl(pst_counter_addr) ); 5021 LP64_ONLY( lea(rcx, pst_counter_addr) ); 5022 LP64_ONLY( incrementl(Address(rcx, 0)) ); 5023 #endif //PRODUCT 5024 5025 // We will consult the secondary-super array. 5026 movptr(rdi, secondary_supers_addr); 5027 // Load the array length. (Positive movl does right thing on LP64.) 5028 movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes())); 5029 // Skip to start of data. 5030 addptr(rdi, Array<Klass*>::base_offset_in_bytes()); 5031 5032 // Scan RCX words at [RDI] for an occurrence of RAX. 5033 // Set NZ/Z based on last compare. 5034 // Z flag value will not be set by 'repne' if RCX == 0 since 'repne' does 5035 // not change flags (only scas instruction which is repeated sets flags). 5036 // Set Z = 0 (not equal) before 'repne' to indicate that class was not found. 5037 5038 testptr(rax,rax); // Set Z = 0 5039 repne_scan(); 5040 5041 // Unspill the temp. registers: 5042 if (pushed_rdi) pop(rdi); 5043 if (pushed_rcx) pop(rcx); 5044 if (pushed_rax) pop(rax); 5045 5046 if (set_cond_codes) { 5047 // Special hack for the AD files: rdi is guaranteed non-zero. 
5048 assert(!pushed_rdi, "rdi must be left non-null"); 5049 // Also, the condition codes are properly set Z/NZ on succeed/failure. 5050 } 5051 5052 if (L_failure == &L_fallthrough) 5053 jccb(Assembler::notEqual, *L_failure); 5054 else jcc(Assembler::notEqual, *L_failure); 5055 5056 // Success. Cache the super we found and proceed in triumph. 5057 movptr(super_cache_addr, super_klass); 5058 5059 if (L_success != &L_fallthrough) { 5060 jmp(*L_success); 5061 } 5062 5063 #undef IS_A_TEMP 5064 5065 bind(L_fallthrough); 5066 } 5067 5068 #ifndef _LP64 5069 5070 // 32-bit x86 only: always use the linear search. 5071 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 5072 Register super_klass, 5073 Register temp_reg, 5074 Register temp2_reg, 5075 Label* L_success, 5076 Label* L_failure, 5077 bool set_cond_codes) { 5078 check_klass_subtype_slow_path_linear 5079 (sub_klass, super_klass, temp_reg, temp2_reg, L_success, L_failure, set_cond_codes); 5080 } 5081 5082 #else // _LP64 5083 5084 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 5085 Register super_klass, 5086 Register temp_reg, 5087 Register temp2_reg, 5088 Label* L_success, 5089 Label* L_failure, 5090 bool set_cond_codes) { 5091 assert(set_cond_codes == false, "must be false on 64-bit x86"); 5092 check_klass_subtype_slow_path 5093 (sub_klass, super_klass, temp_reg, temp2_reg, noreg, noreg, 5094 L_success, L_failure); 5095 } 5096 5097 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass, 5098 Register super_klass, 5099 Register temp_reg, 5100 Register temp2_reg, 5101 Register temp3_reg, 5102 Register temp4_reg, 5103 Label* L_success, 5104 Label* L_failure) { 5105 if (UseSecondarySupersTable) { 5106 check_klass_subtype_slow_path_table 5107 (sub_klass, super_klass, temp_reg, temp2_reg, temp3_reg, temp4_reg, 5108 L_success, L_failure); 5109 } else { 5110 check_klass_subtype_slow_path_linear 5111 (sub_klass, super_klass, temp_reg, temp2_reg, L_success, L_failure, /*set_cond_codes*/false); 5112 } 5113 } 5114 5115 Register MacroAssembler::allocate_if_noreg(Register r, 5116 RegSetIterator<Register> &available_regs, 5117 RegSet ®s_to_push) { 5118 if (!r->is_valid()) { 5119 r = *available_regs++; 5120 regs_to_push += r; 5121 } 5122 return r; 5123 } 5124 5125 void MacroAssembler::check_klass_subtype_slow_path_table(Register sub_klass, 5126 Register super_klass, 5127 Register temp_reg, 5128 Register temp2_reg, 5129 Register temp3_reg, 5130 Register result_reg, 5131 Label* L_success, 5132 Label* L_failure) { 5133 // NB! Callers may assume that, when temp2_reg is a valid register, 5134 // this code sets it to a nonzero value. 
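  // (Concretely: once the incoming value in temp2_reg has been consumed by
  // the table lookup below, it is reloaded with 1 whenever it was a valid
  // register on entry, so callers observe a nonzero value on exit.)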
5135 bool temp2_reg_was_valid = temp2_reg->is_valid(); 5136 5137 RegSet temps = RegSet::of(temp_reg, temp2_reg, temp3_reg); 5138 5139 Label L_fallthrough; 5140 int label_nulls = 0; 5141 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 5142 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 5143 assert(label_nulls <= 1, "at most one null in the batch"); 5144 5145 BLOCK_COMMENT("check_klass_subtype_slow_path_table"); 5146 5147 RegSetIterator<Register> available_regs 5148 = (RegSet::of(rax, rcx, rdx, r8) + r9 + r10 + r11 + r12 - temps - sub_klass - super_klass).begin(); 5149 5150 RegSet pushed_regs; 5151 5152 temp_reg = allocate_if_noreg(temp_reg, available_regs, pushed_regs); 5153 temp2_reg = allocate_if_noreg(temp2_reg, available_regs, pushed_regs); 5154 temp3_reg = allocate_if_noreg(temp3_reg, available_regs, pushed_regs); 5155 result_reg = allocate_if_noreg(result_reg, available_regs, pushed_regs); 5156 Register temp4_reg = allocate_if_noreg(noreg, available_regs, pushed_regs); 5157 5158 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg, temp3_reg, result_reg); 5159 5160 { 5161 5162 int register_push_size = pushed_regs.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size; 5163 int aligned_size = align_up(register_push_size, StackAlignmentInBytes); 5164 subptr(rsp, aligned_size); 5165 push_set(pushed_regs, 0); 5166 5167 lookup_secondary_supers_table_var(sub_klass, 5168 super_klass, 5169 temp_reg, temp2_reg, temp3_reg, temp4_reg, result_reg); 5170 cmpq(result_reg, 0); 5171 5172 // Unspill the temp. registers: 5173 pop_set(pushed_regs, 0); 5174 // Increment SP but do not clobber flags. 5175 lea(rsp, Address(rsp, aligned_size)); 5176 } 5177 5178 if (temp2_reg_was_valid) { 5179 movq(temp2_reg, 1); 5180 } 5181 5182 jcc(Assembler::notEqual, *L_failure); 5183 5184 if (L_success != &L_fallthrough) { 5185 jmp(*L_success); 5186 } 5187 5188 bind(L_fallthrough); 5189 } 5190 5191 // population_count variant for running without the POPCNT 5192 // instruction, which was introduced with SSE4.2 in 2008. 5193 void MacroAssembler::population_count(Register dst, Register src, 5194 Register scratch1, Register scratch2) { 5195 assert_different_registers(src, scratch1, scratch2); 5196 if (UsePopCountInstruction) { 5197 Assembler::popcntq(dst, src); 5198 } else { 5199 assert_different_registers(src, scratch1, scratch2); 5200 assert_different_registers(dst, scratch1, scratch2); 5201 Label loop, done; 5202 5203 mov(scratch1, src); 5204 // dst = 0; 5205 // while(scratch1 != 0) { 5206 // dst++; 5207 // scratch1 &= (scratch1 - 1); 5208 // } 5209 xorl(dst, dst); 5210 testq(scratch1, scratch1); 5211 jccb(Assembler::equal, done); 5212 { 5213 bind(loop); 5214 incq(dst); 5215 movq(scratch2, scratch1); 5216 decq(scratch2); 5217 andq(scratch1, scratch2); 5218 jccb(Assembler::notEqual, loop); 5219 } 5220 bind(done); 5221 } 5222 } 5223 5224 // Ensure that the inline code and the stub are using the same registers. 
5225 #define LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS \ 5226 do { \ 5227 assert(r_super_klass == rax, "mismatch"); \ 5228 assert(r_array_base == rbx, "mismatch"); \ 5229 assert(r_array_length == rcx, "mismatch"); \ 5230 assert(r_array_index == rdx, "mismatch"); \ 5231 assert(r_sub_klass == rsi || r_sub_klass == noreg, "mismatch"); \ 5232 assert(r_bitmap == r11 || r_bitmap == noreg, "mismatch"); \ 5233 assert(result == rdi || result == noreg, "mismatch"); \ 5234 } while(0) 5235 5236 // Versions of salq and rorq that don't need count to be in rcx 5237 5238 void MacroAssembler::salq(Register dest, Register count) { 5239 if (count == rcx) { 5240 Assembler::salq(dest); 5241 } else { 5242 assert_different_registers(rcx, dest); 5243 xchgq(rcx, count); 5244 Assembler::salq(dest); 5245 xchgq(rcx, count); 5246 } 5247 } 5248 5249 void MacroAssembler::rorq(Register dest, Register count) { 5250 if (count == rcx) { 5251 Assembler::rorq(dest); 5252 } else { 5253 assert_different_registers(rcx, dest); 5254 xchgq(rcx, count); 5255 Assembler::rorq(dest); 5256 xchgq(rcx, count); 5257 } 5258 } 5259 5260 // Return true: we succeeded in generating this code 5261 // 5262 // At runtime, return 0 in result if r_super_klass is a superclass of 5263 // r_sub_klass, otherwise return nonzero. Use this if you know the 5264 // super_klass_slot of the class you're looking for. This is always 5265 // the case for instanceof and checkcast. 5266 void MacroAssembler::lookup_secondary_supers_table_const(Register r_sub_klass, 5267 Register r_super_klass, 5268 Register temp1, 5269 Register temp2, 5270 Register temp3, 5271 Register temp4, 5272 Register result, 5273 u1 super_klass_slot) { 5274 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, temp3, temp4, result); 5275 5276 Label L_fallthrough, L_success, L_failure; 5277 5278 BLOCK_COMMENT("lookup_secondary_supers_table {"); 5279 5280 const Register 5281 r_array_index = temp1, 5282 r_array_length = temp2, 5283 r_array_base = temp3, 5284 r_bitmap = temp4; 5285 5286 LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS; 5287 5288 xorq(result, result); // = 0 5289 5290 movq(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset())); 5291 movq(r_array_index, r_bitmap); 5292 5293 // First check the bitmap to see if super_klass might be present. If 5294 // the bit is zero, we are certain that super_klass is not one of 5295 // the secondary supers. 5296 u1 bit = super_klass_slot; 5297 { 5298 // NB: If the count in a x86 shift instruction is 0, the flags are 5299 // not affected, so we do a testq instead. 5300 int shift_count = Klass::SECONDARY_SUPERS_TABLE_MASK - bit; 5301 if (shift_count != 0) { 5302 salq(r_array_index, shift_count); 5303 } else { 5304 testq(r_array_index, r_array_index); 5305 } 5306 } 5307 // We test the MSB of r_array_index, i.e. its sign bit 5308 jcc(Assembler::positive, L_failure); 5309 5310 // Get the first array index that can contain super_klass into r_array_index. 5311 if (bit != 0) { 5312 population_count(r_array_index, r_array_index, temp2, temp3); 5313 } else { 5314 movl(r_array_index, 1); 5315 } 5316 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word. 5317 5318 // We will consult the secondary-super array. 5319 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 5320 5321 // We're asserting that the first word in an Array<Klass*> is the 5322 // length, and the second word is the first word of the data. 
If 5323 // that ever changes, r_array_base will have to be adjusted here. 5324 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code"); 5325 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code"); 5326 5327 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8)); 5328 jccb(Assembler::equal, L_success); 5329 5330 // Is there another entry to check? Consult the bitmap. 5331 btq(r_bitmap, (bit + 1) & Klass::SECONDARY_SUPERS_TABLE_MASK); 5332 jccb(Assembler::carryClear, L_failure); 5333 5334 // Linear probe. Rotate the bitmap so that the next bit to test is 5335 // in Bit 1. 5336 if (bit != 0) { 5337 rorq(r_bitmap, bit); 5338 } 5339 5340 // Calls into the stub generated by lookup_secondary_supers_table_slow_path. 5341 // Arguments: r_super_klass, r_array_base, r_array_index, r_bitmap. 5342 // Kills: r_array_length. 5343 // Returns: result. 5344 call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_slow_path_stub())); 5345 // Result (0/1) is in rdi 5346 jmpb(L_fallthrough); 5347 5348 bind(L_failure); 5349 incq(result); // 0 => 1 5350 5351 bind(L_success); 5352 // result = 0; 5353 5354 bind(L_fallthrough); 5355 BLOCK_COMMENT("} lookup_secondary_supers_table"); 5356 5357 if (VerifySecondarySupers) { 5358 verify_secondary_supers_table(r_sub_klass, r_super_klass, result, 5359 temp1, temp2, temp3); 5360 } 5361 } 5362 5363 // At runtime, return 0 in result if r_super_klass is a superclass of 5364 // r_sub_klass, otherwise return nonzero. Use this version of 5365 // lookup_secondary_supers_table() if you don't know ahead of time 5366 // which superclass will be searched for. Used by interpreter and 5367 // runtime stubs. It is larger and has somewhat greater latency than 5368 // the version above, which takes a constant super_klass_slot. 5369 void MacroAssembler::lookup_secondary_supers_table_var(Register r_sub_klass, 5370 Register r_super_klass, 5371 Register temp1, 5372 Register temp2, 5373 Register temp3, 5374 Register temp4, 5375 Register result) { 5376 assert_different_registers(r_sub_klass, r_super_klass, temp1, temp2, temp3, temp4, result); 5377 assert_different_registers(r_sub_klass, r_super_klass, rcx); 5378 RegSet temps = RegSet::of(temp1, temp2, temp3, temp4); 5379 5380 Label L_fallthrough, L_success, L_failure; 5381 5382 BLOCK_COMMENT("lookup_secondary_supers_table {"); 5383 5384 RegSetIterator<Register> available_regs = (temps - rcx).begin(); 5385 5386 // FIXME. Once we are sure that all paths reaching this point really 5387 // do pass rcx as one of our temps we can get rid of the following 5388 // workaround. 5389 assert(temps.contains(rcx), "fix this code"); 5390 5391 // We prefer to have our shift count in rcx. If rcx is one of our 5392 // temps, use it for slot. If not, pick any of our temps. 5393 Register slot; 5394 if (!temps.contains(rcx)) { 5395 slot = *available_regs++; 5396 } else { 5397 slot = rcx; 5398 } 5399 5400 const Register r_array_index = *available_regs++; 5401 const Register r_bitmap = *available_regs++; 5402 5403 // The logic above guarantees this property, but we state it here. 5404 assert_different_registers(r_array_index, r_bitmap, rcx); 5405 5406 movq(r_bitmap, Address(r_sub_klass, Klass::secondary_supers_bitmap_offset())); 5407 movq(r_array_index, r_bitmap); 5408 5409 // First check the bitmap to see if super_klass might be present. If 5410 // the bit is zero, we are certain that super_klass is not one of 5411 // the secondary supers. 
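  // The next few instructions move the bit for the hash slot into the sign
  // position: since slot is in [0, 63], slot ^ 63 == 63 - slot, so shifting
  // the bitmap left by (63 - slot) places bit 'slot' at bit 63. For example,
  // if slot == 5 the shift count is 58, and the jcc(positive, ...) below
  // fails the lookup when bit 5 of the bitmap is clear.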
5412 movb(slot, Address(r_super_klass, Klass::hash_slot_offset())); 5413 xorl(slot, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1)); // slot ^ 63 === 63 - slot (mod 64) 5414 salq(r_array_index, slot); 5415 5416 testq(r_array_index, r_array_index); 5417 // We test the MSB of r_array_index, i.e. its sign bit 5418 jcc(Assembler::positive, L_failure); 5419 5420 const Register r_array_base = *available_regs++; 5421 5422 // Get the first array index that can contain super_klass into r_array_index. 5423 population_count(r_array_index, r_array_index, /*temp2*/r_array_base, /*temp3*/slot); 5424 5425 // NB! r_array_index is off by 1. It is compensated by keeping r_array_base off by 1 word. 5426 5427 // We will consult the secondary-super array. 5428 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 5429 5430 // We're asserting that the first word in an Array<Klass*> is the 5431 // length, and the second word is the first word of the data. If 5432 // that ever changes, r_array_base will have to be adjusted here. 5433 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, "Adjust this code"); 5434 assert(Array<Klass*>::length_offset_in_bytes() == 0, "Adjust this code"); 5435 5436 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8)); 5437 jccb(Assembler::equal, L_success); 5438 5439 // Restore slot to its true value 5440 xorl(slot, (u1)(Klass::SECONDARY_SUPERS_TABLE_SIZE - 1)); // slot ^ 63 === 63 - slot (mod 64) 5441 5442 // Linear probe. Rotate the bitmap so that the next bit to test is 5443 // in Bit 1. 5444 rorq(r_bitmap, slot); 5445 5446 // Is there another entry to check? Consult the bitmap. 5447 btq(r_bitmap, 1); 5448 jccb(Assembler::carryClear, L_failure); 5449 5450 // Calls into the stub generated by lookup_secondary_supers_table_slow_path. 5451 // Arguments: r_super_klass, r_array_base, r_array_index, r_bitmap. 5452 // Kills: r_array_length. 5453 // Returns: result. 5454 lookup_secondary_supers_table_slow_path(r_super_klass, 5455 r_array_base, 5456 r_array_index, 5457 r_bitmap, 5458 /*temp1*/result, 5459 /*temp2*/slot, 5460 &L_success, 5461 nullptr); 5462 5463 bind(L_failure); 5464 movq(result, 1); 5465 jmpb(L_fallthrough); 5466 5467 bind(L_success); 5468 xorq(result, result); // = 0 5469 5470 bind(L_fallthrough); 5471 BLOCK_COMMENT("} lookup_secondary_supers_table"); 5472 5473 if (VerifySecondarySupers) { 5474 verify_secondary_supers_table(r_sub_klass, r_super_klass, result, 5475 temp1, temp2, temp3); 5476 } 5477 } 5478 5479 void MacroAssembler::repne_scanq(Register addr, Register value, Register count, Register limit, 5480 Label* L_success, Label* L_failure) { 5481 Label L_loop, L_fallthrough; 5482 { 5483 int label_nulls = 0; 5484 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 5485 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 5486 assert(label_nulls <= 1, "at most one null in the batch"); 5487 } 5488 bind(L_loop); 5489 cmpq(value, Address(addr, count, Address::times_8)); 5490 jcc(Assembler::equal, *L_success); 5491 addl(count, 1); 5492 cmpl(count, limit); 5493 jcc(Assembler::less, L_loop); 5494 5495 if (&L_fallthrough != L_failure) { 5496 jmp(*L_failure); 5497 } 5498 bind(L_fallthrough); 5499 } 5500 5501 // Called by code generated by check_klass_subtype_slow_path 5502 // above. This is called when there is a collision in the hashed 5503 // lookup in the secondary supers array. 
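// On entry, r_array_base still points at the length word; the base
// adjustment below effectively advances r_array_index to the next entry
// to probe. The caller has rotated the bitmap so that this entry's bit is
// bit 1, with bit 2 serving as the look-ahead check in the probe loop.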
5504 void MacroAssembler::lookup_secondary_supers_table_slow_path(Register r_super_klass, 5505 Register r_array_base, 5506 Register r_array_index, 5507 Register r_bitmap, 5508 Register temp1, 5509 Register temp2, 5510 Label* L_success, 5511 Label* L_failure) { 5512 assert_different_registers(r_super_klass, r_array_base, r_array_index, r_bitmap, temp1, temp2); 5513 5514 const Register 5515 r_array_length = temp1, 5516 r_sub_klass = noreg, 5517 result = noreg; 5518 5519 Label L_fallthrough; 5520 int label_nulls = 0; 5521 if (L_success == nullptr) { L_success = &L_fallthrough; label_nulls++; } 5522 if (L_failure == nullptr) { L_failure = &L_fallthrough; label_nulls++; } 5523 assert(label_nulls <= 1, "at most one null in the batch"); 5524 5525 // Load the array length. 5526 movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes())); 5527 // And adjust the array base to point to the data. 5528 // NB! Effectively increments current slot index by 1. 5529 assert(Array<Klass*>::base_offset_in_bytes() == wordSize, ""); 5530 addptr(r_array_base, Array<Klass*>::base_offset_in_bytes()); 5531 5532 // Linear probe 5533 Label L_huge; 5534 5535 // The bitmap is full to bursting. 5536 // Implicit invariant: BITMAP_FULL implies (length > 0) 5537 cmpl(r_array_length, (int32_t)Klass::SECONDARY_SUPERS_TABLE_SIZE - 2); 5538 jcc(Assembler::greater, L_huge); 5539 5540 // NB! Our caller has checked bits 0 and 1 in the bitmap. The 5541 // current slot (at secondary_supers[r_array_index]) has not yet 5542 // been inspected, and r_array_index may be out of bounds if we 5543 // wrapped around the end of the array. 5544 5545 { // This is conventional linear probing, but instead of terminating 5546 // when a null entry is found in the table, we maintain a bitmap 5547 // in which a 0 indicates missing entries. 5548 // The check above guarantees there are 0s in the bitmap, so the loop 5549 // eventually terminates. 5550 5551 xorl(temp2, temp2); // = 0; 5552 5553 Label L_again; 5554 bind(L_again); 5555 5556 // Check for array wraparound. 5557 cmpl(r_array_index, r_array_length); 5558 cmovl(Assembler::greaterEqual, r_array_index, temp2); 5559 5560 cmpq(r_super_klass, Address(r_array_base, r_array_index, Address::times_8)); 5561 jcc(Assembler::equal, *L_success); 5562 5563 // If the next bit in bitmap is zero, we're done. 5564 btq(r_bitmap, 2); // look-ahead check (Bit 2); Bits 0 and 1 are tested by now 5565 jcc(Assembler::carryClear, *L_failure); 5566 5567 rorq(r_bitmap, 1); // Bits 1/2 => 0/1 5568 addl(r_array_index, 1); 5569 5570 jmp(L_again); 5571 } 5572 5573 { // Degenerate case: more than 64 secondary supers. 5574 // FIXME: We could do something smarter here, maybe a vectorized 5575 // comparison or a binary search, but is that worth any added 5576 // complexity? 5577 bind(L_huge); 5578 xorl(r_array_index, r_array_index); // = 0 5579 repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length, 5580 L_success, 5581 (&L_fallthrough != L_failure ? L_failure : nullptr)); 5582 5583 bind(L_fallthrough); 5584 } 5585 } 5586 5587 struct VerifyHelperArguments { 5588 Klass* _super; 5589 Klass* _sub; 5590 intptr_t _linear_result; 5591 intptr_t _table_result; 5592 }; 5593 5594 static void verify_secondary_supers_table_helper(const char* msg, VerifyHelperArguments* args) { 5595 Klass::on_secondary_supers_verification_failure(args->_super, 5596 args->_sub, 5597 args->_linear_result, 5598 args->_table_result, 5599 msg); 5600 } 5601 5602 // Make sure that the hashed lookup and a linear scan agree. 
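// 'result' holds the outcome of the hashed lookup (0 means subtype found);
// the answer is recomputed below with a linear repne_scanq, and any
// disagreement is reported via verify_secondary_supers_table_helper.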
5603 void MacroAssembler::verify_secondary_supers_table(Register r_sub_klass, 5604 Register r_super_klass, 5605 Register result, 5606 Register temp1, 5607 Register temp2, 5608 Register temp3) { 5609 const Register 5610 r_array_index = temp1, 5611 r_array_length = temp2, 5612 r_array_base = temp3, 5613 r_bitmap = noreg; 5614 5615 BLOCK_COMMENT("verify_secondary_supers_table {"); 5616 5617 Label L_success, L_failure, L_check, L_done; 5618 5619 movptr(r_array_base, Address(r_sub_klass, in_bytes(Klass::secondary_supers_offset()))); 5620 movl(r_array_length, Address(r_array_base, Array<Klass*>::length_offset_in_bytes())); 5621 // And adjust the array base to point to the data. 5622 addptr(r_array_base, Array<Klass*>::base_offset_in_bytes()); 5623 5624 testl(r_array_length, r_array_length); // array_length == 0? 5625 jcc(Assembler::zero, L_failure); 5626 5627 movl(r_array_index, 0); 5628 repne_scanq(r_array_base, r_super_klass, r_array_index, r_array_length, &L_success); 5629 // fall through to L_failure 5630 5631 const Register linear_result = r_array_index; // reuse temp1 5632 5633 bind(L_failure); // not present 5634 movl(linear_result, 1); 5635 jmp(L_check); 5636 5637 bind(L_success); // present 5638 movl(linear_result, 0); 5639 5640 bind(L_check); 5641 cmpl(linear_result, result); 5642 jcc(Assembler::equal, L_done); 5643 5644 { // To avoid calling convention issues, build a record on the stack 5645 // and pass the pointer to that instead. 5646 push(result); 5647 push(linear_result); 5648 push(r_sub_klass); 5649 push(r_super_klass); 5650 movptr(c_rarg1, rsp); 5651 movptr(c_rarg0, (uintptr_t) "mismatch"); 5652 call(RuntimeAddress(CAST_FROM_FN_PTR(address, verify_secondary_supers_table_helper))); 5653 should_not_reach_here(); 5654 } 5655 bind(L_done); 5656 5657 BLOCK_COMMENT("} verify_secondary_supers_table"); 5658 } 5659 5660 #undef LOOKUP_SECONDARY_SUPERS_TABLE_REGISTERS 5661 5662 #endif // LP64 5663 5664 void MacroAssembler::clinit_barrier(Register klass, Register thread, Label* L_fast_path, Label* L_slow_path) { 5665 assert(L_fast_path != nullptr || L_slow_path != nullptr, "at least one is required"); 5666 5667 Label L_fallthrough; 5668 if (L_fast_path == nullptr) { 5669 L_fast_path = &L_fallthrough; 5670 } else if (L_slow_path == nullptr) { 5671 L_slow_path = &L_fallthrough; 5672 } 5673 5674 // Fast path check: class is fully initialized. 5675 // init_state needs acquire, but x86 is TSO, and so we are already good. 
5676 cmpb(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized); 5677 jcc(Assembler::equal, *L_fast_path); 5678 5679 // Fast path check: current thread is initializer thread 5680 cmpptr(thread, Address(klass, InstanceKlass::init_thread_offset())); 5681 if (L_slow_path == &L_fallthrough) { 5682 jcc(Assembler::equal, *L_fast_path); 5683 bind(*L_slow_path); 5684 } else if (L_fast_path == &L_fallthrough) { 5685 jcc(Assembler::notEqual, *L_slow_path); 5686 bind(*L_fast_path); 5687 } else { 5688 Unimplemented(); 5689 } 5690 } 5691 5692 void MacroAssembler::cmov32(Condition cc, Register dst, Address src) { 5693 if (VM_Version::supports_cmov()) { 5694 cmovl(cc, dst, src); 5695 } else { 5696 Label L; 5697 jccb(negate_condition(cc), L); 5698 movl(dst, src); 5699 bind(L); 5700 } 5701 } 5702 5703 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) { 5704 if (VM_Version::supports_cmov()) { 5705 cmovl(cc, dst, src); 5706 } else { 5707 Label L; 5708 jccb(negate_condition(cc), L); 5709 movl(dst, src); 5710 bind(L); 5711 } 5712 } 5713 5714 void MacroAssembler::_verify_oop(Register reg, const char* s, const char* file, int line) { 5715 if (!VerifyOops || VerifyAdapterSharing) { 5716 // Below address of the code string confuses VerifyAdapterSharing 5717 // because it may differ between otherwise equivalent adapters. 5718 return; 5719 } 5720 5721 BLOCK_COMMENT("verify_oop {"); 5722 #ifdef _LP64 5723 push(rscratch1); 5724 #endif 5725 push(rax); // save rax 5726 push(reg); // pass register argument 5727 5728 // Pass register number to verify_oop_subroutine 5729 const char* b = nullptr; 5730 { 5731 ResourceMark rm; 5732 stringStream ss; 5733 ss.print("verify_oop: %s: %s (%s:%d)", reg->name(), s, file, line); 5734 b = code_string(ss.as_string()); 5735 } 5736 AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate()); 5737 pushptr(buffer.addr(), rscratch1); 5738 5739 // call indirectly to solve generation ordering problem 5740 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 5741 call(rax); 5742 // Caller pops the arguments (oop, message) and restores rax, r10 5743 BLOCK_COMMENT("} verify_oop"); 5744 } 5745 5746 void MacroAssembler::vallones(XMMRegister dst, int vector_len) { 5747 if (UseAVX > 2 && (vector_len == Assembler::AVX_512bit || VM_Version::supports_avx512vl())) { 5748 // Only pcmpeq has dependency breaking treatment (i.e the execution can begin without 5749 // waiting for the previous result on dst), not vpcmpeqd, so just use vpternlog 5750 vpternlogd(dst, 0xFF, dst, dst, vector_len); 5751 } else if (VM_Version::supports_avx()) { 5752 vpcmpeqd(dst, dst, dst, vector_len); 5753 } else { 5754 assert(VM_Version::supports_sse2(), ""); 5755 pcmpeqd(dst, dst); 5756 } 5757 } 5758 5759 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot, 5760 int extra_slot_offset) { 5761 // cf. TemplateTable::prepare_invoke(), if (load_receiver). 
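  // The returned Address is rsp-relative: the requested expression-stack
  // slot, scaled by the interpreter stack element size, plus one word for
  // the return PC that sits between rsp and the outgoing arguments.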
5762 int stackElementSize = Interpreter::stackElementSize; 5763 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0); 5764 #ifdef ASSERT 5765 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1); 5766 assert(offset1 - offset == stackElementSize, "correct arithmetic"); 5767 #endif 5768 Register scale_reg = noreg; 5769 Address::ScaleFactor scale_factor = Address::no_scale; 5770 if (arg_slot.is_constant()) { 5771 offset += arg_slot.as_constant() * stackElementSize; 5772 } else { 5773 scale_reg = arg_slot.as_register(); 5774 scale_factor = Address::times(stackElementSize); 5775 } 5776 offset += wordSize; // return PC is on stack 5777 return Address(rsp, scale_reg, scale_factor, offset); 5778 } 5779 5780 void MacroAssembler::_verify_oop_addr(Address addr, const char* s, const char* file, int line) { 5781 if (!VerifyOops || VerifyAdapterSharing) { 5782 // Below address of the code string confuses VerifyAdapterSharing 5783 // because it may differ between otherwise equivalent adapters. 5784 return; 5785 } 5786 5787 #ifdef _LP64 5788 push(rscratch1); 5789 #endif 5790 push(rax); // save rax, 5791 // addr may contain rsp so we will have to adjust it based on the push 5792 // we just did (and on 64 bit we do two pushes) 5793 // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which 5794 // stores rax into addr which is backwards of what was intended. 5795 if (addr.uses(rsp)) { 5796 lea(rax, addr); 5797 pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord)); 5798 } else { 5799 pushptr(addr); 5800 } 5801 5802 // Pass register number to verify_oop_subroutine 5803 const char* b = nullptr; 5804 { 5805 ResourceMark rm; 5806 stringStream ss; 5807 ss.print("verify_oop_addr: %s (%s:%d)", s, file, line); 5808 b = code_string(ss.as_string()); 5809 } 5810 AddressLiteral buffer((address) b, external_word_Relocation::spec_for_immediate()); 5811 pushptr(buffer.addr(), rscratch1); 5812 5813 // call indirectly to solve generation ordering problem 5814 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address())); 5815 call(rax); 5816 // Caller pops the arguments (addr, message) and restores rax, r10. 
5817 } 5818 5819 void MacroAssembler::verify_tlab() { 5820 #ifdef ASSERT 5821 if (UseTLAB && VerifyOops) { 5822 Label next, ok; 5823 Register t1 = rsi; 5824 Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread); 5825 5826 push(t1); 5827 NOT_LP64(push(thread_reg)); 5828 NOT_LP64(get_thread(thread_reg)); 5829 5830 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); 5831 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset()))); 5832 jcc(Assembler::aboveEqual, next); 5833 STOP("assert(top >= start)"); 5834 should_not_reach_here(); 5835 5836 bind(next); 5837 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset()))); 5838 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset()))); 5839 jcc(Assembler::aboveEqual, ok); 5840 STOP("assert(top <= end)"); 5841 should_not_reach_here(); 5842 5843 bind(ok); 5844 NOT_LP64(pop(thread_reg)); 5845 pop(t1); 5846 } 5847 #endif 5848 } 5849 5850 class ControlWord { 5851 public: 5852 int32_t _value; 5853 5854 int rounding_control() const { return (_value >> 10) & 3 ; } 5855 int precision_control() const { return (_value >> 8) & 3 ; } 5856 bool precision() const { return ((_value >> 5) & 1) != 0; } 5857 bool underflow() const { return ((_value >> 4) & 1) != 0; } 5858 bool overflow() const { return ((_value >> 3) & 1) != 0; } 5859 bool zero_divide() const { return ((_value >> 2) & 1) != 0; } 5860 bool denormalized() const { return ((_value >> 1) & 1) != 0; } 5861 bool invalid() const { return ((_value >> 0) & 1) != 0; } 5862 5863 void print() const { 5864 // rounding control 5865 const char* rc; 5866 switch (rounding_control()) { 5867 case 0: rc = "round near"; break; 5868 case 1: rc = "round down"; break; 5869 case 2: rc = "round up "; break; 5870 case 3: rc = "chop "; break; 5871 default: 5872 rc = nullptr; // silence compiler warnings 5873 fatal("Unknown rounding control: %d", rounding_control()); 5874 }; 5875 // precision control 5876 const char* pc; 5877 switch (precision_control()) { 5878 case 0: pc = "24 bits "; break; 5879 case 1: pc = "reserved"; break; 5880 case 2: pc = "53 bits "; break; 5881 case 3: pc = "64 bits "; break; 5882 default: 5883 pc = nullptr; // silence compiler warnings 5884 fatal("Unknown precision control: %d", precision_control()); 5885 }; 5886 // flags 5887 char f[9]; 5888 f[0] = ' '; 5889 f[1] = ' '; 5890 f[2] = (precision ()) ? 'P' : 'p'; 5891 f[3] = (underflow ()) ? 'U' : 'u'; 5892 f[4] = (overflow ()) ? 'O' : 'o'; 5893 f[5] = (zero_divide ()) ? 'Z' : 'z'; 5894 f[6] = (denormalized()) ? 'D' : 'd'; 5895 f[7] = (invalid ()) ? 
'I' : 'i'; 5896 f[8] = '\x0'; 5897 // output 5898 printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc); 5899 } 5900 5901 }; 5902 5903 class StatusWord { 5904 public: 5905 int32_t _value; 5906 5907 bool busy() const { return ((_value >> 15) & 1) != 0; } 5908 bool C3() const { return ((_value >> 14) & 1) != 0; } 5909 bool C2() const { return ((_value >> 10) & 1) != 0; } 5910 bool C1() const { return ((_value >> 9) & 1) != 0; } 5911 bool C0() const { return ((_value >> 8) & 1) != 0; } 5912 int top() const { return (_value >> 11) & 7 ; } 5913 bool error_status() const { return ((_value >> 7) & 1) != 0; } 5914 bool stack_fault() const { return ((_value >> 6) & 1) != 0; } 5915 bool precision() const { return ((_value >> 5) & 1) != 0; } 5916 bool underflow() const { return ((_value >> 4) & 1) != 0; } 5917 bool overflow() const { return ((_value >> 3) & 1) != 0; } 5918 bool zero_divide() const { return ((_value >> 2) & 1) != 0; } 5919 bool denormalized() const { return ((_value >> 1) & 1) != 0; } 5920 bool invalid() const { return ((_value >> 0) & 1) != 0; } 5921 5922 void print() const { 5923 // condition codes 5924 char c[5]; 5925 c[0] = (C3()) ? '3' : '-'; 5926 c[1] = (C2()) ? '2' : '-'; 5927 c[2] = (C1()) ? '1' : '-'; 5928 c[3] = (C0()) ? '0' : '-'; 5929 c[4] = '\x0'; 5930 // flags 5931 char f[9]; 5932 f[0] = (error_status()) ? 'E' : '-'; 5933 f[1] = (stack_fault ()) ? 'S' : '-'; 5934 f[2] = (precision ()) ? 'P' : '-'; 5935 f[3] = (underflow ()) ? 'U' : '-'; 5936 f[4] = (overflow ()) ? 'O' : '-'; 5937 f[5] = (zero_divide ()) ? 'Z' : '-'; 5938 f[6] = (denormalized()) ? 'D' : '-'; 5939 f[7] = (invalid ()) ? 'I' : '-'; 5940 f[8] = '\x0'; 5941 // output 5942 printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top()); 5943 } 5944 5945 }; 5946 5947 class TagWord { 5948 public: 5949 int32_t _value; 5950 5951 int tag_at(int i) const { return (_value >> (i*2)) & 3; } 5952 5953 void print() const { 5954 printf("%04x", _value & 0xFFFF); 5955 } 5956 5957 }; 5958 5959 class FPU_Register { 5960 public: 5961 int32_t _m0; 5962 int32_t _m1; 5963 int16_t _ex; 5964 5965 bool is_indefinite() const { 5966 return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0; 5967 } 5968 5969 void print() const { 5970 char sign = (_ex < 0) ? '-' : '+'; 5971 const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " "; 5972 printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind); 5973 }; 5974 5975 }; 5976 5977 class FPU_State { 5978 public: 5979 enum { 5980 register_size = 10, 5981 number_of_registers = 8, 5982 register_mask = 7 5983 }; 5984 5985 ControlWord _control_word; 5986 StatusWord _status_word; 5987 TagWord _tag_word; 5988 int32_t _error_offset; 5989 int32_t _error_selector; 5990 int32_t _data_offset; 5991 int32_t _data_selector; 5992 int8_t _register[register_size * number_of_registers]; 5993 5994 int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); } 5995 FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; } 5996 5997 const char* tag_as_string(int tag) const { 5998 switch (tag) { 5999 case 0: return "valid"; 6000 case 1: return "zero"; 6001 case 2: return "special"; 6002 case 3: return "empty"; 6003 } 6004 ShouldNotReachHere(); 6005 return nullptr; 6006 } 6007 6008 void print() const { 6009 // print computation registers 6010 { int t = _status_word.top(); 6011 for (int i = 0; i < number_of_registers; i++) { 6012 int j = (i - t) & register_mask; 6013 printf("%c r%d = ST%d = ", (j == 0 ? 
'*' : ' '), i, j); 6014 st(j)->print(); 6015 printf(" %s\n", tag_as_string(_tag_word.tag_at(i))); 6016 } 6017 } 6018 printf("\n"); 6019 // print control registers 6020 printf("ctrl = "); _control_word.print(); printf("\n"); 6021 printf("stat = "); _status_word .print(); printf("\n"); 6022 printf("tags = "); _tag_word .print(); printf("\n"); 6023 } 6024 6025 }; 6026 6027 class Flag_Register { 6028 public: 6029 int32_t _value; 6030 6031 bool overflow() const { return ((_value >> 11) & 1) != 0; } 6032 bool direction() const { return ((_value >> 10) & 1) != 0; } 6033 bool sign() const { return ((_value >> 7) & 1) != 0; } 6034 bool zero() const { return ((_value >> 6) & 1) != 0; } 6035 bool auxiliary_carry() const { return ((_value >> 4) & 1) != 0; } 6036 bool parity() const { return ((_value >> 2) & 1) != 0; } 6037 bool carry() const { return ((_value >> 0) & 1) != 0; } 6038 6039 void print() const { 6040 // flags 6041 char f[8]; 6042 f[0] = (overflow ()) ? 'O' : '-'; 6043 f[1] = (direction ()) ? 'D' : '-'; 6044 f[2] = (sign ()) ? 'S' : '-'; 6045 f[3] = (zero ()) ? 'Z' : '-'; 6046 f[4] = (auxiliary_carry()) ? 'A' : '-'; 6047 f[5] = (parity ()) ? 'P' : '-'; 6048 f[6] = (carry ()) ? 'C' : '-'; 6049 f[7] = '\x0'; 6050 // output 6051 printf("%08x flags = %s", _value, f); 6052 } 6053 6054 }; 6055 6056 class IU_Register { 6057 public: 6058 int32_t _value; 6059 6060 void print() const { 6061 printf("%08x %11d", _value, _value); 6062 } 6063 6064 }; 6065 6066 class IU_State { 6067 public: 6068 Flag_Register _eflags; 6069 IU_Register _rdi; 6070 IU_Register _rsi; 6071 IU_Register _rbp; 6072 IU_Register _rsp; 6073 IU_Register _rbx; 6074 IU_Register _rdx; 6075 IU_Register _rcx; 6076 IU_Register _rax; 6077 6078 void print() const { 6079 // computation registers 6080 printf("rax, = "); _rax.print(); printf("\n"); 6081 printf("rbx, = "); _rbx.print(); printf("\n"); 6082 printf("rcx = "); _rcx.print(); printf("\n"); 6083 printf("rdx = "); _rdx.print(); printf("\n"); 6084 printf("rdi = "); _rdi.print(); printf("\n"); 6085 printf("rsi = "); _rsi.print(); printf("\n"); 6086 printf("rbp, = "); _rbp.print(); printf("\n"); 6087 printf("rsp = "); _rsp.print(); printf("\n"); 6088 printf("\n"); 6089 // control registers 6090 printf("flgs = "); _eflags.print(); printf("\n"); 6091 } 6092 }; 6093 6094 6095 class CPU_State { 6096 public: 6097 FPU_State _fpu_state; 6098 IU_State _iu_state; 6099 6100 void print() const { 6101 printf("--------------------------------------------------\n"); 6102 _iu_state .print(); 6103 printf("\n"); 6104 _fpu_state.print(); 6105 printf("--------------------------------------------------\n"); 6106 } 6107 6108 }; 6109 6110 6111 static void _print_CPU_state(CPU_State* state) { 6112 state->print(); 6113 }; 6114 6115 6116 void MacroAssembler::print_CPU_state() { 6117 push_CPU_state(); 6118 push(rsp); // pass CPU state 6119 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state))); 6120 addptr(rsp, wordSize); // discard argument 6121 pop_CPU_state(); 6122 } 6123 6124 6125 #ifndef _LP64 6126 static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) { 6127 static int counter = 0; 6128 FPU_State* fs = &state->_fpu_state; 6129 counter++; 6130 // For leaf calls, only verify that the top few elements remain empty. 6131 // We only need 1 empty at the top for C2 code. 
6132 if( stack_depth < 0 ) { 6133 if( fs->tag_for_st(7) != 3 ) { 6134 printf("FPR7 not empty\n"); 6135 state->print(); 6136 assert(false, "error"); 6137 return false; 6138 } 6139 return true; // All other stack states do not matter 6140 } 6141 6142 assert((fs->_control_word._value & 0xffff) == StubRoutines::x86::fpu_cntrl_wrd_std(), 6143 "bad FPU control word"); 6144 6145 // compute stack depth 6146 int i = 0; 6147 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) < 3) i++; 6148 int d = i; 6149 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++; 6150 // verify findings 6151 if (i != FPU_State::number_of_registers) { 6152 // stack not contiguous 6153 printf("%s: stack not contiguous at ST%d\n", s, i); 6154 state->print(); 6155 assert(false, "error"); 6156 return false; 6157 } 6158 // check if computed stack depth corresponds to expected stack depth 6159 if (stack_depth < 0) { 6160 // expected stack depth is -stack_depth or less 6161 if (d > -stack_depth) { 6162 // too many elements on the stack 6163 printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d); 6164 state->print(); 6165 assert(false, "error"); 6166 return false; 6167 } 6168 } else { 6169 // expected stack depth is stack_depth 6170 if (d != stack_depth) { 6171 // wrong stack depth 6172 printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d); 6173 state->print(); 6174 assert(false, "error"); 6175 return false; 6176 } 6177 } 6178 // everything is cool 6179 return true; 6180 } 6181 6182 void MacroAssembler::verify_FPU(int stack_depth, const char* s) { 6183 if (!VerifyFPU) return; 6184 push_CPU_state(); 6185 push(rsp); // pass CPU state 6186 ExternalAddress msg((address) s); 6187 // pass message string s 6188 pushptr(msg.addr(), noreg); 6189 push(stack_depth); // pass stack depth 6190 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU))); 6191 addptr(rsp, 3 * wordSize); // discard arguments 6192 // check for error 6193 { Label L; 6194 testl(rax, rax); 6195 jcc(Assembler::notZero, L); 6196 int3(); // break if error condition 6197 bind(L); 6198 } 6199 pop_CPU_state(); 6200 } 6201 #endif // _LP64 6202 6203 void MacroAssembler::restore_cpu_control_state_after_jni(Register rscratch) { 6204 // Either restore the MXCSR register after returning from the JNI Call 6205 // or verify that it wasn't changed (with -Xcheck:jni flag). 6206 if (VM_Version::supports_sse()) { 6207 if (RestoreMXCSROnJNICalls) { 6208 ldmxcsr(ExternalAddress(StubRoutines::x86::addr_mxcsr_std()), rscratch); 6209 } else if (CheckJNICalls) { 6210 call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry())); 6211 } 6212 } 6213 // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty. 6214 vzeroupper(); 6215 6216 #ifndef _LP64 6217 // Either restore the x87 floating pointer control word after returning 6218 // from the JNI call or verify that it wasn't changed. 6219 if (CheckJNICalls) { 6220 call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry())); 6221 } 6222 #endif // _LP64 6223 } 6224 6225 // ((OopHandle)result).resolve(); 6226 void MacroAssembler::resolve_oop_handle(Register result, Register tmp) { 6227 assert_different_registers(result, tmp); 6228 6229 // Only 64 bit platforms support GCs that require a tmp register 6230 // Only IN_HEAP loads require a thread_tmp register 6231 // OopHandle::resolve is an indirection like jobject. 
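  // That is: load the oop stored at *result with an IN_NATIVE (off-heap)
  // access, overwriting the handle in 'result' with the resolved oop.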
6232 access_load_at(T_OBJECT, IN_NATIVE, 6233 result, Address(result, 0), tmp, /*tmp_thread*/noreg); 6234 } 6235 6236 // ((WeakHandle)result).resolve(); 6237 void MacroAssembler::resolve_weak_handle(Register rresult, Register rtmp) { 6238 assert_different_registers(rresult, rtmp); 6239 Label resolved; 6240 6241 // A null weak handle resolves to null. 6242 cmpptr(rresult, 0); 6243 jcc(Assembler::equal, resolved); 6244 6245 // Only 64 bit platforms support GCs that require a tmp register 6246 // Only IN_HEAP loads require a thread_tmp register 6247 // WeakHandle::resolve is an indirection like jweak. 6248 access_load_at(T_OBJECT, IN_NATIVE | ON_PHANTOM_OOP_REF, 6249 rresult, Address(rresult, 0), rtmp, /*tmp_thread*/noreg); 6250 bind(resolved); 6251 } 6252 6253 void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp) { 6254 // get mirror 6255 const int mirror_offset = in_bytes(Klass::java_mirror_offset()); 6256 load_method_holder(mirror, method); 6257 movptr(mirror, Address(mirror, mirror_offset)); 6258 resolve_oop_handle(mirror, tmp); 6259 } 6260 6261 void MacroAssembler::load_method_holder_cld(Register rresult, Register rmethod) { 6262 load_method_holder(rresult, rmethod); 6263 movptr(rresult, Address(rresult, InstanceKlass::class_loader_data_offset())); 6264 } 6265 6266 void MacroAssembler::load_method_holder(Register holder, Register method) { 6267 movptr(holder, Address(method, Method::const_offset())); // ConstMethod* 6268 movptr(holder, Address(holder, ConstMethod::constants_offset())); // ConstantPool* 6269 movptr(holder, Address(holder, ConstantPool::pool_holder_offset())); // InstanceKlass* 6270 } 6271 6272 void MacroAssembler::load_metadata(Register dst, Register src) { 6273 if (UseCompressedClassPointers) { 6274 movl(dst, Address(src, oopDesc::klass_offset_in_bytes())); 6275 } else { 6276 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 6277 } 6278 } 6279 6280 void MacroAssembler::load_klass(Register dst, Register src, Register tmp) { 6281 assert_different_registers(src, tmp); 6282 assert_different_registers(dst, tmp); 6283 #ifdef _LP64 6284 if (UseCompressedClassPointers) { 6285 movl(dst, Address(src, oopDesc::klass_offset_in_bytes())); 6286 decode_klass_not_null(dst, tmp); 6287 } else 6288 #endif 6289 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes())); 6290 } 6291 6292 void MacroAssembler::load_prototype_header(Register dst, Register src, Register tmp) { 6293 load_klass(dst, src, tmp); 6294 movptr(dst, Address(dst, Klass::prototype_header_offset())); 6295 } 6296 6297 void MacroAssembler::store_klass(Register dst, Register src, Register tmp) { 6298 assert_different_registers(src, tmp); 6299 assert_different_registers(dst, tmp); 6300 #ifdef _LP64 6301 if (UseCompressedClassPointers) { 6302 encode_klass_not_null(src, tmp); 6303 movl(Address(dst, oopDesc::klass_offset_in_bytes()), src); 6304 } else 6305 #endif 6306 movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src); 6307 } 6308 6309 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src, 6310 Register tmp1, Register thread_tmp) { 6311 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 6312 decorators = AccessInternal::decorator_fixup(decorators, type); 6313 bool as_raw = (decorators & AS_RAW) != 0; 6314 if (as_raw) { 6315 bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, thread_tmp); 6316 } else { 6317 bs->load_at(this, decorators, type, dst, src, tmp1, thread_tmp); 6318 } 6319 } 
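// Like access_load_at above, stores dispatch through the active
// BarrierSetAssembler unless AS_RAW is requested, in which case the
// statically bound BarrierSetAssembler::store_at (no GC-specific barrier)
// is used directly.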
6320 6321 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val, 6322 Register tmp1, Register tmp2, Register tmp3) { 6323 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 6324 decorators = AccessInternal::decorator_fixup(decorators, type); 6325 bool as_raw = (decorators & AS_RAW) != 0; 6326 if (as_raw) { 6327 bs->BarrierSetAssembler::store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 6328 } else { 6329 bs->store_at(this, decorators, type, dst, val, tmp1, tmp2, tmp3); 6330 } 6331 } 6332 6333 void MacroAssembler::access_value_copy(DecoratorSet decorators, Register src, Register dst, 6334 Register inline_klass) { 6335 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 6336 bs->value_copy(this, decorators, src, dst, inline_klass); 6337 } 6338 6339 void MacroAssembler::flat_field_copy(DecoratorSet decorators, Register src, Register dst, 6340 Register inline_layout_info) { 6341 BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler(); 6342 bs->flat_field_copy(this, decorators, src, dst, inline_layout_info); 6343 } 6344 6345 void MacroAssembler::first_field_offset(Register inline_klass, Register offset) { 6346 movptr(offset, Address(inline_klass, InstanceKlass::adr_inlineklass_fixed_block_offset())); 6347 movl(offset, Address(offset, InlineKlass::first_field_offset_offset())); 6348 } 6349 6350 void MacroAssembler::data_for_oop(Register oop, Register data, Register inline_klass) { 6351 // ((address) (void*) o) + vk->first_field_offset(); 6352 Register offset = (data == oop) ? rscratch1 : data; 6353 first_field_offset(inline_klass, offset); 6354 if (data == oop) { 6355 addptr(data, offset); 6356 } else { 6357 lea(data, Address(oop, offset)); 6358 } 6359 } 6360 6361 void MacroAssembler::data_for_value_array_index(Register array, Register array_klass, 6362 Register index, Register data) { 6363 assert(index != rcx, "index needs to shift by rcx"); 6364 assert_different_registers(array, array_klass, index); 6365 assert_different_registers(rcx, array, index); 6366 6367 // array->base() + (index << Klass::layout_helper_log2_element_size(lh)); 6368 movl(rcx, Address(array_klass, Klass::layout_helper_offset())); 6369 6370 // Klass::layout_helper_log2_element_size(lh) 6371 // (lh >> _lh_log2_element_size_shift) & _lh_log2_element_size_mask; 6372 shrl(rcx, Klass::_lh_log2_element_size_shift); 6373 andl(rcx, Klass::_lh_log2_element_size_mask); 6374 shlptr(index); // index << rcx 6375 6376 lea(data, Address(array, index, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_PRIMITIVE_OBJECT))); 6377 } 6378 6379 void MacroAssembler::load_heap_oop(Register dst, Address src, Register tmp1, 6380 Register thread_tmp, DecoratorSet decorators) { 6381 access_load_at(T_OBJECT, IN_HEAP | decorators, dst, src, tmp1, thread_tmp); 6382 } 6383 6384 // Doesn't do verification, generates fixed size code 6385 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src, Register tmp1, 6386 Register thread_tmp, DecoratorSet decorators) { 6387 access_load_at(T_OBJECT, IN_HEAP | IS_NOT_NULL | decorators, dst, src, tmp1, thread_tmp); 6388 } 6389 6390 void MacroAssembler::store_heap_oop(Address dst, Register val, Register tmp1, 6391 Register tmp2, Register tmp3, DecoratorSet decorators) { 6392 access_store_at(T_OBJECT, IN_HEAP | decorators, dst, val, tmp1, tmp2, tmp3); 6393 } 6394 6395 // Used for storing nulls. 
6396 void MacroAssembler::store_heap_oop_null(Address dst) { 6397 access_store_at(T_OBJECT, IN_HEAP, dst, noreg, noreg, noreg, noreg); 6398 } 6399 6400 #ifdef _LP64 6401 void MacroAssembler::store_klass_gap(Register dst, Register src) { 6402 if (UseCompressedClassPointers) { 6403 // Store to klass gap in destination 6404 movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src); 6405 } 6406 } 6407 6408 #ifdef ASSERT 6409 void MacroAssembler::verify_heapbase(const char* msg) { 6410 assert (UseCompressedOops, "should be compressed"); 6411 assert (Universe::heap() != nullptr, "java heap should be initialized"); 6412 if (CheckCompressedOops) { 6413 Label ok; 6414 ExternalAddress src2(CompressedOops::base_addr()); 6415 const bool is_src2_reachable = reachable(src2); 6416 if (!is_src2_reachable) { 6417 push(rscratch1); // cmpptr trashes rscratch1 6418 } 6419 cmpptr(r12_heapbase, src2, rscratch1); 6420 jcc(Assembler::equal, ok); 6421 STOP(msg); 6422 bind(ok); 6423 if (!is_src2_reachable) { 6424 pop(rscratch1); 6425 } 6426 } 6427 } 6428 #endif 6429 6430 // Algorithm must match oop.inline.hpp encode_heap_oop. 6431 void MacroAssembler::encode_heap_oop(Register r) { 6432 #ifdef ASSERT 6433 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?"); 6434 #endif 6435 verify_oop_msg(r, "broken oop in encode_heap_oop"); 6436 if (CompressedOops::base() == nullptr) { 6437 if (CompressedOops::shift() != 0) { 6438 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 6439 shrq(r, LogMinObjAlignmentInBytes); 6440 } 6441 return; 6442 } 6443 testq(r, r); 6444 cmovq(Assembler::equal, r, r12_heapbase); 6445 subq(r, r12_heapbase); 6446 shrq(r, LogMinObjAlignmentInBytes); 6447 } 6448 6449 void MacroAssembler::encode_heap_oop_not_null(Register r) { 6450 #ifdef ASSERT 6451 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?"); 6452 if (CheckCompressedOops) { 6453 Label ok; 6454 testq(r, r); 6455 jcc(Assembler::notEqual, ok); 6456 STOP("null oop passed to encode_heap_oop_not_null"); 6457 bind(ok); 6458 } 6459 #endif 6460 verify_oop_msg(r, "broken oop in encode_heap_oop_not_null"); 6461 if (CompressedOops::base() != nullptr) { 6462 subq(r, r12_heapbase); 6463 } 6464 if (CompressedOops::shift() != 0) { 6465 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 6466 shrq(r, LogMinObjAlignmentInBytes); 6467 } 6468 } 6469 6470 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) { 6471 #ifdef ASSERT 6472 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?"); 6473 if (CheckCompressedOops) { 6474 Label ok; 6475 testq(src, src); 6476 jcc(Assembler::notEqual, ok); 6477 STOP("null oop passed to encode_heap_oop_not_null2"); 6478 bind(ok); 6479 } 6480 #endif 6481 verify_oop_msg(src, "broken oop in encode_heap_oop_not_null2"); 6482 if (dst != src) { 6483 movq(dst, src); 6484 } 6485 if (CompressedOops::base() != nullptr) { 6486 subq(dst, r12_heapbase); 6487 } 6488 if (CompressedOops::shift() != 0) { 6489 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 6490 shrq(dst, LogMinObjAlignmentInBytes); 6491 } 6492 } 6493 6494 void MacroAssembler::decode_heap_oop(Register r) { 6495 #ifdef ASSERT 6496 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?"); 6497 #endif 6498 if (CompressedOops::base() == nullptr) { 6499 if (CompressedOops::shift() != 0) { 6500 assert (LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 
6501 shlq(r, LogMinObjAlignmentInBytes); 6502 } 6503 } else { 6504 Label done; 6505 shlq(r, LogMinObjAlignmentInBytes); 6506 jccb(Assembler::equal, done); 6507 addq(r, r12_heapbase); 6508 bind(done); 6509 } 6510 verify_oop_msg(r, "broken oop in decode_heap_oop"); 6511 } 6512 6513 void MacroAssembler::decode_heap_oop_not_null(Register r) { 6514 // Note: it will change flags 6515 assert (UseCompressedOops, "should only be used for compressed headers"); 6516 assert (Universe::heap() != nullptr, "java heap should be initialized"); 6517 // Cannot assert, unverified entry point counts instructions (see .ad file) 6518 // vtableStubs also counts instructions in pd_code_size_limit. 6519 // Also do not verify_oop as this is called by verify_oop. 6520 if (CompressedOops::shift() != 0) { 6521 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 6522 shlq(r, LogMinObjAlignmentInBytes); 6523 if (CompressedOops::base() != nullptr) { 6524 addq(r, r12_heapbase); 6525 } 6526 } else { 6527 assert (CompressedOops::base() == nullptr, "sanity"); 6528 } 6529 } 6530 6531 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) { 6532 // Note: it will change flags 6533 assert (UseCompressedOops, "should only be used for compressed headers"); 6534 assert (Universe::heap() != nullptr, "java heap should be initialized"); 6535 // Cannot assert, unverified entry point counts instructions (see .ad file) 6536 // vtableStubs also counts instructions in pd_code_size_limit. 6537 // Also do not verify_oop as this is called by verify_oop. 6538 if (CompressedOops::shift() != 0) { 6539 assert(LogMinObjAlignmentInBytes == CompressedOops::shift(), "decode alg wrong"); 6540 if (LogMinObjAlignmentInBytes == Address::times_8) { 6541 leaq(dst, Address(r12_heapbase, src, Address::times_8, 0)); 6542 } else { 6543 if (dst != src) { 6544 movq(dst, src); 6545 } 6546 shlq(dst, LogMinObjAlignmentInBytes); 6547 if (CompressedOops::base() != nullptr) { 6548 addq(dst, r12_heapbase); 6549 } 6550 } 6551 } else { 6552 assert (CompressedOops::base() == nullptr, "sanity"); 6553 if (dst != src) { 6554 movq(dst, src); 6555 } 6556 } 6557 } 6558 6559 void MacroAssembler::encode_klass_not_null(Register r, Register tmp) { 6560 assert_different_registers(r, tmp); 6561 if (CompressedKlassPointers::base() != nullptr) { 6562 mov64(tmp, (int64_t)CompressedKlassPointers::base()); 6563 subq(r, tmp); 6564 } 6565 if (CompressedKlassPointers::shift() != 0) { 6566 assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); 6567 shrq(r, LogKlassAlignmentInBytes); 6568 } 6569 } 6570 6571 void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) { 6572 assert_different_registers(src, dst); 6573 if (CompressedKlassPointers::base() != nullptr) { 6574 mov64(dst, -(int64_t)CompressedKlassPointers::base()); 6575 addq(dst, src); 6576 } else { 6577 movptr(dst, src); 6578 } 6579 if (CompressedKlassPointers::shift() != 0) { 6580 assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); 6581 shrq(dst, LogKlassAlignmentInBytes); 6582 } 6583 } 6584 6585 void MacroAssembler::decode_klass_not_null(Register r, Register tmp) { 6586 assert_different_registers(r, tmp); 6587 // Note: it will change flags 6588 assert(UseCompressedClassPointers, "should only be used for compressed headers"); 6589 // Cannot assert, unverified entry point counts instructions (see .ad file) 6590 // vtableStubs also counts instructions in pd_code_size_limit. 
6591 // Also do not verify_oop as this is called by verify_oop. 6592 if (CompressedKlassPointers::shift() != 0) { 6593 assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); 6594 shlq(r, LogKlassAlignmentInBytes); 6595 } 6596 if (CompressedKlassPointers::base() != nullptr) { 6597 mov64(tmp, (int64_t)CompressedKlassPointers::base()); 6598 addq(r, tmp); 6599 } 6600 } 6601 6602 void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) { 6603 assert_different_registers(src, dst); 6604 // Note: it will change flags 6605 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 6606 // Cannot assert, unverified entry point counts instructions (see .ad file) 6607 // vtableStubs also counts instructions in pd_code_size_limit. 6608 // Also do not verify_oop as this is called by verify_oop. 6609 6610 if (CompressedKlassPointers::base() == nullptr && 6611 CompressedKlassPointers::shift() == 0) { 6612 // The best case scenario is that there is no base or shift. Then it is already 6613 // a pointer that needs nothing but a register rename. 6614 movl(dst, src); 6615 } else { 6616 if (CompressedKlassPointers::base() != nullptr) { 6617 mov64(dst, (int64_t)CompressedKlassPointers::base()); 6618 } else { 6619 xorq(dst, dst); 6620 } 6621 if (CompressedKlassPointers::shift() != 0) { 6622 assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong"); 6623 assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?"); 6624 leaq(dst, Address(dst, src, Address::times_8, 0)); 6625 } else { 6626 addq(dst, src); 6627 } 6628 } 6629 } 6630 6631 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) { 6632 assert (UseCompressedOops, "should only be used for compressed headers"); 6633 assert (Universe::heap() != nullptr, "java heap should be initialized"); 6634 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6635 int oop_index = oop_recorder()->find_index(obj); 6636 RelocationHolder rspec = oop_Relocation::spec(oop_index); 6637 mov_narrow_oop(dst, oop_index, rspec); 6638 } 6639 6640 void MacroAssembler::set_narrow_oop(Address dst, jobject obj) { 6641 assert (UseCompressedOops, "should only be used for compressed headers"); 6642 assert (Universe::heap() != nullptr, "java heap should be initialized"); 6643 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6644 int oop_index = oop_recorder()->find_index(obj); 6645 RelocationHolder rspec = oop_Relocation::spec(oop_index); 6646 mov_narrow_oop(dst, oop_index, rspec); 6647 } 6648 6649 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) { 6650 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 6651 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6652 int klass_index = oop_recorder()->find_index(k); 6653 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 6654 mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 6655 } 6656 6657 void MacroAssembler::set_narrow_klass(Address dst, Klass* k) { 6658 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 6659 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6660 int klass_index = oop_recorder()->find_index(k); 6661 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 6662 mov_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 6663 } 6664 6665 void 
MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) { 6666 assert (UseCompressedOops, "should only be used for compressed headers"); 6667 assert (Universe::heap() != nullptr, "java heap should be initialized"); 6668 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6669 int oop_index = oop_recorder()->find_index(obj); 6670 RelocationHolder rspec = oop_Relocation::spec(oop_index); 6671 Assembler::cmp_narrow_oop(dst, oop_index, rspec); 6672 } 6673 6674 void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) { 6675 assert (UseCompressedOops, "should only be used for compressed headers"); 6676 assert (Universe::heap() != nullptr, "java heap should be initialized"); 6677 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6678 int oop_index = oop_recorder()->find_index(obj); 6679 RelocationHolder rspec = oop_Relocation::spec(oop_index); 6680 Assembler::cmp_narrow_oop(dst, oop_index, rspec); 6681 } 6682 6683 void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) { 6684 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 6685 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6686 int klass_index = oop_recorder()->find_index(k); 6687 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 6688 Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 6689 } 6690 6691 void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) { 6692 assert (UseCompressedClassPointers, "should only be used for compressed headers"); 6693 assert (oop_recorder() != nullptr, "this assembler needs an OopRecorder"); 6694 int klass_index = oop_recorder()->find_index(k); 6695 RelocationHolder rspec = metadata_Relocation::spec(klass_index); 6696 Assembler::cmp_narrow_oop(dst, CompressedKlassPointers::encode(k), rspec); 6697 } 6698 6699 void MacroAssembler::reinit_heapbase() { 6700 if (UseCompressedOops) { 6701 if (Universe::heap() != nullptr) { 6702 if (CompressedOops::base() == nullptr) { 6703 MacroAssembler::xorptr(r12_heapbase, r12_heapbase); 6704 } else { 6705 mov64(r12_heapbase, (int64_t)CompressedOops::base()); 6706 } 6707 } else { 6708 movptr(r12_heapbase, ExternalAddress(CompressedOops::base_addr())); 6709 } 6710 } 6711 } 6712 6713 #endif // _LP64 6714 6715 #if COMPILER2_OR_JVMCI 6716 6717 // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM/ZMM registers 6718 void MacroAssembler::xmm_clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, KRegister mask) { 6719 // cnt - number of qwords (8-byte words). 6720 // base - start address, qword aligned. 
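  // Strategy: broadcast 'val' into xtmp (512-, 256- or 128-bit wide depending on
  // MaxVectorSize), clear 64 bytes per loop iteration, then handle the sub-64-byte
  // tail with a masked store on AVX-512 or with 32-byte/8-byte stores otherwise.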
6721 Label L_zero_64_bytes, L_loop, L_sloop, L_tail, L_end; 6722 bool use64byteVector = (MaxVectorSize == 64) && (VM_Version::avx3_threshold() == 0); 6723 if (use64byteVector) { 6724 evpbroadcastq(xtmp, val, AVX_512bit); 6725 } else if (MaxVectorSize >= 32) { 6726 movdq(xtmp, val); 6727 punpcklqdq(xtmp, xtmp); 6728 vinserti128_high(xtmp, xtmp); 6729 } else { 6730 movdq(xtmp, val); 6731 punpcklqdq(xtmp, xtmp); 6732 } 6733 jmp(L_zero_64_bytes); 6734 6735 BIND(L_loop); 6736 if (MaxVectorSize >= 32) { 6737 fill64(base, 0, xtmp, use64byteVector); 6738 } else { 6739 movdqu(Address(base, 0), xtmp); 6740 movdqu(Address(base, 16), xtmp); 6741 movdqu(Address(base, 32), xtmp); 6742 movdqu(Address(base, 48), xtmp); 6743 } 6744 addptr(base, 64); 6745 6746 BIND(L_zero_64_bytes); 6747 subptr(cnt, 8); 6748 jccb(Assembler::greaterEqual, L_loop); 6749 6750 // Copy trailing 64 bytes 6751 if (use64byteVector) { 6752 addptr(cnt, 8); 6753 jccb(Assembler::equal, L_end); 6754 fill64_masked(3, base, 0, xtmp, mask, cnt, val, true); 6755 jmp(L_end); 6756 } else { 6757 addptr(cnt, 4); 6758 jccb(Assembler::less, L_tail); 6759 if (MaxVectorSize >= 32) { 6760 vmovdqu(Address(base, 0), xtmp); 6761 } else { 6762 movdqu(Address(base, 0), xtmp); 6763 movdqu(Address(base, 16), xtmp); 6764 } 6765 } 6766 addptr(base, 32); 6767 subptr(cnt, 4); 6768 6769 BIND(L_tail); 6770 addptr(cnt, 4); 6771 jccb(Assembler::lessEqual, L_end); 6772 if (UseAVX > 2 && MaxVectorSize >= 32 && VM_Version::supports_avx512vl()) { 6773 fill32_masked(3, base, 0, xtmp, mask, cnt, val); 6774 } else { 6775 decrement(cnt); 6776 6777 BIND(L_sloop); 6778 movq(Address(base, 0), xtmp); 6779 addptr(base, 8); 6780 decrement(cnt); 6781 jccb(Assembler::greaterEqual, L_sloop); 6782 } 6783 BIND(L_end); 6784 } 6785 6786 int MacroAssembler::store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter) { 6787 assert(InlineTypeReturnedAsFields, "Inline types should never be returned as fields"); 6788 // An inline type might be returned. If fields are in registers we 6789 // need to allocate an inline type instance and initialize it with 6790 // the value of the fields. 6791 Label skip; 6792 // We only need a new buffered inline type if a new one is not returned 6793 testptr(rax, 1); 6794 jcc(Assembler::zero, skip); 6795 int call_offset = -1; 6796 6797 #ifdef _LP64 6798 // The following code is similar to allocate_instance but has some slight differences, 6799 // e.g. object size is always not zero, sometimes it's constant; storing klass ptr after 6800 // allocating is not necessary if vk != nullptr, etc. allocate_instance is not aware of these. 6801 Label slow_case; 6802 // 1. Try to allocate a new buffered inline instance either from TLAB or eden space 6803 mov(rscratch1, rax); // save rax for slow_case since *_allocate may corrupt it when allocation failed 6804 if (vk != nullptr) { 6805 // Called from C1, where the return type is statically known. 6806 movptr(rbx, (intptr_t)vk->get_InlineKlass()); 6807 jint lh = vk->layout_helper(); 6808 assert(lh != Klass::_lh_neutral_value, "inline class in return type must have been resolved"); 6809 if (UseTLAB && !Klass::layout_helper_needs_slow_path(lh)) { 6810 tlab_allocate(r15_thread, rax, noreg, lh, r13, r14, slow_case); 6811 } else { 6812 jmp(slow_case); 6813 } 6814 } else { 6815 // Call from interpreter. 
RAX contains ((the InlineKlass* of the return type) | 0x01) 6816 mov(rbx, rax); 6817 andptr(rbx, -2); 6818 if (UseTLAB) { 6819 movl(r14, Address(rbx, Klass::layout_helper_offset())); 6820 testl(r14, Klass::_lh_instance_slow_path_bit); 6821 jcc(Assembler::notZero, slow_case); 6822 tlab_allocate(r15_thread, rax, r14, 0, r13, r14, slow_case); 6823 } else { 6824 jmp(slow_case); 6825 } 6826 } 6827 if (UseTLAB) { 6828 // 2. Initialize buffered inline instance header 6829 Register buffer_obj = rax; 6830 movptr(Address(buffer_obj, oopDesc::mark_offset_in_bytes()), (intptr_t)markWord::inline_type_prototype().value()); 6831 xorl(r13, r13); 6832 store_klass_gap(buffer_obj, r13); 6833 if (vk == nullptr) { 6834 // store_klass corrupts rbx(klass), so save it in r13 for later use (interpreter case only). 6835 mov(r13, rbx); 6836 } 6837 store_klass(buffer_obj, rbx, rscratch1); 6838 // 3. Initialize its fields with an inline class specific handler 6839 if (vk != nullptr) { 6840 call(RuntimeAddress(vk->pack_handler())); // no need for call info as this will not safepoint. 6841 } else { 6842 movptr(rbx, Address(r13, InstanceKlass::adr_inlineklass_fixed_block_offset())); 6843 movptr(rbx, Address(rbx, InlineKlass::pack_handler_offset())); 6844 call(rbx); 6845 } 6846 jmp(skip); 6847 } 6848 bind(slow_case); 6849 // We failed to allocate a new inline type, fall back to a runtime 6850 // call. Some oop field may be live in some registers but we can't 6851 // tell. That runtime call will take care of preserving them 6852 // across a GC if there's one. 6853 mov(rax, rscratch1); 6854 #endif 6855 6856 if (from_interpreter) { 6857 super_call_VM_leaf(StubRoutines::store_inline_type_fields_to_buf()); 6858 } else { 6859 call(RuntimeAddress(StubRoutines::store_inline_type_fields_to_buf())); 6860 call_offset = offset(); 6861 } 6862 6863 bind(skip); 6864 return call_offset; 6865 } 6866 6867 // Move a value between registers/stack slots and update the reg_state 6868 bool MacroAssembler::move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]) { 6869 assert(from->is_valid() && to->is_valid(), "source and destination must be valid"); 6870 if (reg_state[to->value()] == reg_written) { 6871 return true; // Already written 6872 } 6873 if (from != to && bt != T_VOID) { 6874 if (reg_state[to->value()] == reg_readonly) { 6875 return false; // Not yet writable 6876 } 6877 if (from->is_reg()) { 6878 if (to->is_reg()) { 6879 if (from->is_XMMRegister()) { 6880 if (bt == T_DOUBLE) { 6881 movdbl(to->as_XMMRegister(), from->as_XMMRegister()); 6882 } else { 6883 assert(bt == T_FLOAT, "must be float"); 6884 movflt(to->as_XMMRegister(), from->as_XMMRegister()); 6885 } 6886 } else { 6887 movq(to->as_Register(), from->as_Register()); 6888 } 6889 } else { 6890 int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize; 6891 Address to_addr = Address(rsp, st_off); 6892 if (from->is_XMMRegister()) { 6893 if (bt == T_DOUBLE) { 6894 movdbl(to_addr, from->as_XMMRegister()); 6895 } else { 6896 assert(bt == T_FLOAT, "must be float"); 6897 movflt(to_addr, from->as_XMMRegister()); 6898 } 6899 } else { 6900 movq(to_addr, from->as_Register()); 6901 } 6902 } 6903 } else { 6904 Address from_addr = Address(rsp, from->reg2stack() * VMRegImpl::stack_slot_size + wordSize); 6905 if (to->is_reg()) { 6906 if (to->is_XMMRegister()) { 6907 if (bt == T_DOUBLE) { 6908 movdbl(to->as_XMMRegister(), from_addr); 6909 } else { 6910 assert(bt == T_FLOAT, "must be float"); 6911 movflt(to->as_XMMRegister(), from_addr); 6912 } 6913 } else { 6914 
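          // Stack slot to general-purpose register: a single 64-bit load suffices.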
movq(to->as_Register(), from_addr); 6915 } 6916 } else { 6917 int st_off = to->reg2stack() * VMRegImpl::stack_slot_size + wordSize; 6918 movq(r13, from_addr); 6919 movq(Address(rsp, st_off), r13); 6920 } 6921 } 6922 } 6923 // Update register states 6924 reg_state[from->value()] = reg_writable; 6925 reg_state[to->value()] = reg_written; 6926 return true; 6927 } 6928 6929 // Calculate the extra stack space required for packing or unpacking inline 6930 // args and adjust the stack pointer 6931 int MacroAssembler::extend_stack_for_inline_args(int args_on_stack) { 6932 // Two additional slots to account for return address 6933 int sp_inc = (args_on_stack + 2) * VMRegImpl::stack_slot_size; 6934 sp_inc = align_up(sp_inc, StackAlignmentInBytes); 6935 // Save the return address, adjust the stack (make sure it is properly 6936 // 16-byte aligned) and copy the return address to the new top of the stack. 6937 // The stack will be repaired on return (see MacroAssembler::remove_frame). 6938 assert(sp_inc > 0, "sanity"); 6939 pop(r13); 6940 subptr(rsp, sp_inc); 6941 push(r13); 6942 return sp_inc; 6943 } 6944 6945 // Read all fields from an inline type buffer and store the field values in registers/stack slots. 6946 bool MacroAssembler::unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, 6947 VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index, 6948 RegState reg_state[]) { 6949 assert(sig->at(sig_index)._bt == T_VOID, "should be at end delimiter"); 6950 assert(from->is_valid(), "source must be valid"); 6951 bool progress = false; 6952 #ifdef ASSERT 6953 const int start_offset = offset(); 6954 #endif 6955 6956 Label L_null, L_notNull; 6957 // Don't use r14 as tmp because it's used for spilling (see MacroAssembler::spill_reg_for) 6958 Register tmp1 = r10; 6959 Register tmp2 = r13; 6960 Register fromReg = noreg; 6961 ScalarizedInlineArgsStream stream(sig, sig_index, to, to_count, to_index, -1); 6962 bool done = true; 6963 bool mark_done = true; 6964 VMReg toReg; 6965 BasicType bt; 6966 // Check if argument requires a null check 6967 bool null_check = false; 6968 VMReg nullCheckReg; 6969 while (stream.next(nullCheckReg, bt)) { 6970 if (sig->at(stream.sig_index())._offset == -1) { 6971 null_check = true; 6972 break; 6973 } 6974 } 6975 stream.reset(sig_index, to_index); 6976 while (stream.next(toReg, bt)) { 6977 assert(toReg->is_valid(), "destination must be valid"); 6978 int idx = (int)toReg->value(); 6979 if (reg_state[idx] == reg_readonly) { 6980 if (idx != from->value()) { 6981 mark_done = false; 6982 } 6983 done = false; 6984 continue; 6985 } else if (reg_state[idx] == reg_written) { 6986 continue; 6987 } 6988 assert(reg_state[idx] == reg_writable, "must be writable"); 6989 reg_state[idx] = reg_written; 6990 progress = true; 6991 6992 if (fromReg == noreg) { 6993 if (from->is_reg()) { 6994 fromReg = from->as_Register(); 6995 } else { 6996 int st_off = from->reg2stack() * VMRegImpl::stack_slot_size + wordSize; 6997 movq(tmp1, Address(rsp, st_off)); 6998 fromReg = tmp1; 6999 } 7000 if (null_check) { 7001 // Nullable inline type argument, emit null check 7002 testptr(fromReg, fromReg); 7003 jcc(Assembler::zero, L_null); 7004 } 7005 } 7006 int off = sig->at(stream.sig_index())._offset; 7007 if (off == -1) { 7008 assert(null_check, "Missing null check at"); 7009 if (toReg->is_stack()) { 7010 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize; 7011 movq(Address(rsp, st_off), 1); 7012 } else { 7013 movq(toReg->as_Register(), 1); 7014 } 7015 continue; 
7016 } 7017 assert(off > 0, "offset in object should be positive"); 7018 Address fromAddr = Address(fromReg, off); 7019 if (!toReg->is_XMMRegister()) { 7020 Register dst = toReg->is_stack() ? tmp2 : toReg->as_Register(); 7021 if (is_reference_type(bt)) { 7022 load_heap_oop(dst, fromAddr); 7023 } else { 7024 bool is_signed = (bt != T_CHAR) && (bt != T_BOOLEAN); 7025 load_sized_value(dst, fromAddr, type2aelembytes(bt), is_signed); 7026 } 7027 if (toReg->is_stack()) { 7028 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize; 7029 movq(Address(rsp, st_off), dst); 7030 } 7031 } else if (bt == T_DOUBLE) { 7032 movdbl(toReg->as_XMMRegister(), fromAddr); 7033 } else { 7034 assert(bt == T_FLOAT, "must be float"); 7035 movflt(toReg->as_XMMRegister(), fromAddr); 7036 } 7037 } 7038 if (progress && null_check) { 7039 if (done) { 7040 jmp(L_notNull); 7041 bind(L_null); 7042 // Set IsInit field to zero to signal that the argument is null. 7043 // Also set all oop fields to zero to make the GC happy. 7044 stream.reset(sig_index, to_index); 7045 while (stream.next(toReg, bt)) { 7046 if (sig->at(stream.sig_index())._offset == -1 || 7047 bt == T_OBJECT || bt == T_ARRAY) { 7048 if (toReg->is_stack()) { 7049 int st_off = toReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize; 7050 movq(Address(rsp, st_off), 0); 7051 } else { 7052 xorq(toReg->as_Register(), toReg->as_Register()); 7053 } 7054 } 7055 } 7056 bind(L_notNull); 7057 } else { 7058 bind(L_null); 7059 } 7060 } 7061 7062 sig_index = stream.sig_index(); 7063 to_index = stream.regs_index(); 7064 7065 if (mark_done && reg_state[from->value()] != reg_written) { 7066 // This is okay because no one else will write to that slot 7067 reg_state[from->value()] = reg_writable; 7068 } 7069 from_index--; 7070 assert(progress || (start_offset == offset()), "should not emit code"); 7071 return done; 7072 } 7073 7074 bool MacroAssembler::pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index, 7075 VMRegPair* from, int from_count, int& from_index, VMReg to, 7076 RegState reg_state[], Register val_array) { 7077 assert(sig->at(sig_index)._bt == T_METADATA, "should be at delimiter"); 7078 assert(to->is_valid(), "destination must be valid"); 7079 7080 if (reg_state[to->value()] == reg_written) { 7081 skip_unpacked_fields(sig, sig_index, from, from_count, from_index); 7082 return true; // Already written 7083 } 7084 7085 // TODO 8284443 Isn't it an issue if below code uses r14 as tmp when it contains a spilled value? 7086 // Be careful with r14 because it's used for spilling (see MacroAssembler::spill_reg_for). 7087 Register val_obj_tmp = r11; 7088 Register from_reg_tmp = r14; 7089 Register tmp1 = r10; 7090 Register tmp2 = r13; 7091 Register tmp3 = rbx; 7092 Register val_obj = to->is_stack() ? 
val_obj_tmp : to->as_Register(); 7093 7094 assert_different_registers(val_obj_tmp, from_reg_tmp, tmp1, tmp2, tmp3, val_array); 7095 7096 if (reg_state[to->value()] == reg_readonly) { 7097 if (!is_reg_in_unpacked_fields(sig, sig_index, to, from, from_count, from_index)) { 7098 skip_unpacked_fields(sig, sig_index, from, from_count, from_index); 7099 return false; // Not yet writable 7100 } 7101 val_obj = val_obj_tmp; 7102 } 7103 7104 int index = arrayOopDesc::base_offset_in_bytes(T_OBJECT) + vtarg_index * type2aelembytes(T_OBJECT); 7105 load_heap_oop(val_obj, Address(val_array, index)); 7106 7107 ScalarizedInlineArgsStream stream(sig, sig_index, from, from_count, from_index); 7108 VMReg fromReg; 7109 BasicType bt; 7110 Label L_null; 7111 while (stream.next(fromReg, bt)) { 7112 assert(fromReg->is_valid(), "source must be valid"); 7113 reg_state[fromReg->value()] = reg_writable; 7114 7115 int off = sig->at(stream.sig_index())._offset; 7116 if (off == -1) { 7117 // Nullable inline type argument, emit null check 7118 Label L_notNull; 7119 if (fromReg->is_stack()) { 7120 int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize; 7121 testb(Address(rsp, ld_off), 1); 7122 } else { 7123 testb(fromReg->as_Register(), 1); 7124 } 7125 jcc(Assembler::notZero, L_notNull); 7126 movptr(val_obj, 0); 7127 jmp(L_null); 7128 bind(L_notNull); 7129 continue; 7130 } 7131 7132 assert(off > 0, "offset in object should be positive"); 7133 size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize; 7134 7135 Address dst(val_obj, off); 7136 if (!fromReg->is_XMMRegister()) { 7137 Register src; 7138 if (fromReg->is_stack()) { 7139 src = from_reg_tmp; 7140 int ld_off = fromReg->reg2stack() * VMRegImpl::stack_slot_size + wordSize; 7141 load_sized_value(src, Address(rsp, ld_off), size_in_bytes, /* is_signed */ false); 7142 } else { 7143 src = fromReg->as_Register(); 7144 } 7145 assert_different_registers(dst.base(), src, tmp1, tmp2, tmp3, val_array); 7146 if (is_reference_type(bt)) { 7147 store_heap_oop(dst, src, tmp1, tmp2, tmp3, IN_HEAP | ACCESS_WRITE | IS_DEST_UNINITIALIZED); 7148 } else { 7149 store_sized_value(dst, src, size_in_bytes); 7150 } 7151 } else if (bt == T_DOUBLE) { 7152 movdbl(dst, fromReg->as_XMMRegister()); 7153 } else { 7154 assert(bt == T_FLOAT, "must be float"); 7155 movflt(dst, fromReg->as_XMMRegister()); 7156 } 7157 } 7158 bind(L_null); 7159 sig_index = stream.sig_index(); 7160 from_index = stream.regs_index(); 7161 7162 assert(reg_state[to->value()] == reg_writable, "must have already been read"); 7163 bool success = move_helper(val_obj->as_VMReg(), to, T_OBJECT, reg_state); 7164 assert(success, "to register must be writeable"); 7165 return true; 7166 } 7167 7168 VMReg MacroAssembler::spill_reg_for(VMReg reg) { 7169 return reg->is_XMMRegister() ? xmm8->as_VMReg() : r14->as_VMReg(); 7170 } 7171 7172 void MacroAssembler::remove_frame(int initial_framesize, bool needs_stack_repair) { 7173 assert((initial_framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned"); 7174 if (needs_stack_repair) { 7175 movq(rbp, Address(rsp, initial_framesize)); 7176 // The stack increment resides just below the saved rbp 7177 addq(rsp, Address(rsp, initial_framesize - wordSize)); 7178 } else { 7179 if (initial_framesize > 0) { 7180 addq(rsp, initial_framesize); 7181 } 7182 pop(rbp); 7183 } 7184 } 7185 7186 // Clearing constant sized memory using YMM/ZMM registers. 
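// Here cnt is a qword count known at code-emission time: full 64-byte chunks are
// emitted unrolled (or via a small loop for larger counts), and the remaining
// 1..7 qwords are written with masked or partial vector stores.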
7187 void MacroAssembler::clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask) { 7188 assert(UseAVX > 2 && VM_Version::supports_avx512vl(), ""); 7189 bool use64byteVector = (MaxVectorSize > 32) && (VM_Version::avx3_threshold() == 0); 7190 7191 int vector64_count = (cnt & (~0x7)) >> 3; 7192 cnt = cnt & 0x7; 7193 const int fill64_per_loop = 4; 7194 const int max_unrolled_fill64 = 8; 7195 7196 // 64 byte initialization loop. 7197 vpxor(xtmp, xtmp, xtmp, use64byteVector ? AVX_512bit : AVX_256bit); 7198 int start64 = 0; 7199 if (vector64_count > max_unrolled_fill64) { 7200 Label LOOP; 7201 Register index = rtmp; 7202 7203 start64 = vector64_count - (vector64_count % fill64_per_loop); 7204 7205 movl(index, 0); 7206 BIND(LOOP); 7207 for (int i = 0; i < fill64_per_loop; i++) { 7208 fill64(Address(base, index, Address::times_1, i * 64), xtmp, use64byteVector); 7209 } 7210 addl(index, fill64_per_loop * 64); 7211 cmpl(index, start64 * 64); 7212 jccb(Assembler::less, LOOP); 7213 } 7214 for (int i = start64; i < vector64_count; i++) { 7215 fill64(base, i * 64, xtmp, use64byteVector); 7216 } 7217 7218 // Clear remaining 64 byte tail. 7219 int disp = vector64_count * 64; 7220 if (cnt) { 7221 switch (cnt) { 7222 case 1: 7223 movq(Address(base, disp), xtmp); 7224 break; 7225 case 2: 7226 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_128bit); 7227 break; 7228 case 3: 7229 movl(rtmp, 0x7); 7230 kmovwl(mask, rtmp); 7231 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_256bit); 7232 break; 7233 case 4: 7234 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 7235 break; 7236 case 5: 7237 if (use64byteVector) { 7238 movl(rtmp, 0x1F); 7239 kmovwl(mask, rtmp); 7240 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit); 7241 } else { 7242 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 7243 movq(Address(base, disp + 32), xtmp); 7244 } 7245 break; 7246 case 6: 7247 if (use64byteVector) { 7248 movl(rtmp, 0x3F); 7249 kmovwl(mask, rtmp); 7250 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit); 7251 } else { 7252 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 7253 evmovdqu(T_LONG, k0, Address(base, disp + 32), xtmp, false, Assembler::AVX_128bit); 7254 } 7255 break; 7256 case 7: 7257 if (use64byteVector) { 7258 movl(rtmp, 0x7F); 7259 kmovwl(mask, rtmp); 7260 evmovdqu(T_LONG, mask, Address(base, disp), xtmp, true, Assembler::AVX_512bit); 7261 } else { 7262 evmovdqu(T_LONG, k0, Address(base, disp), xtmp, false, Assembler::AVX_256bit); 7263 movl(rtmp, 0x7); 7264 kmovwl(mask, rtmp); 7265 evmovdqu(T_LONG, mask, Address(base, disp + 32), xtmp, true, Assembler::AVX_256bit); 7266 } 7267 break; 7268 default: 7269 fatal("Unexpected length : %d\n",cnt); 7270 break; 7271 } 7272 } 7273 } 7274 7275 void MacroAssembler::clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, 7276 bool is_large, bool word_copy_only, KRegister mask) { 7277 // cnt - number of qwords (8-byte words). 7278 // base - start address, qword aligned. 
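  // val  - value to store in every qword (must be rax; see the rep stos asserts below).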
7279 // is_large - if optimizers know cnt is larger than InitArrayShortSize 7280 assert(base==rdi, "base register must be edi for rep stos"); 7281 assert(val==rax, "val register must be eax for rep stos"); 7282 assert(cnt==rcx, "cnt register must be ecx for rep stos"); 7283 assert(InitArrayShortSize % BytesPerLong == 0, 7284 "InitArrayShortSize should be the multiple of BytesPerLong"); 7285 7286 Label DONE; 7287 7288 if (!is_large) { 7289 Label LOOP, LONG; 7290 cmpptr(cnt, InitArrayShortSize/BytesPerLong); 7291 jccb(Assembler::greater, LONG); 7292 7293 NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM 7294 7295 decrement(cnt); 7296 jccb(Assembler::negative, DONE); // Zero length 7297 7298 // Use individual pointer-sized stores for small counts: 7299 BIND(LOOP); 7300 movptr(Address(base, cnt, Address::times_ptr), val); 7301 decrement(cnt); 7302 jccb(Assembler::greaterEqual, LOOP); 7303 jmpb(DONE); 7304 7305 BIND(LONG); 7306 } 7307 7308 // Use longer rep-prefixed ops for non-small counts: 7309 if (UseFastStosb && !word_copy_only) { 7310 shlptr(cnt, 3); // convert to number of bytes 7311 rep_stosb(); 7312 } else if (UseXMMForObjInit) { 7313 xmm_clear_mem(base, cnt, val, xtmp, mask); 7314 } else { 7315 NOT_LP64(shlptr(cnt, 1);) // convert to number of 32-bit words for 32-bit VM 7316 rep_stos(); 7317 } 7318 7319 BIND(DONE); 7320 } 7321 7322 #endif //COMPILER2_OR_JVMCI 7323 7324 7325 void MacroAssembler::generate_fill(BasicType t, bool aligned, 7326 Register to, Register value, Register count, 7327 Register rtmp, XMMRegister xtmp) { 7328 ShortBranchVerifier sbv(this); 7329 assert_different_registers(to, value, count, rtmp); 7330 Label L_exit; 7331 Label L_fill_2_bytes, L_fill_4_bytes; 7332 7333 #if defined(COMPILER2) && defined(_LP64) 7334 if(MaxVectorSize >=32 && 7335 VM_Version::supports_avx512vlbw() && 7336 VM_Version::supports_bmi2()) { 7337 generate_fill_avx3(t, to, value, count, rtmp, xtmp); 7338 return; 7339 } 7340 #endif 7341 7342 int shift = -1; 7343 switch (t) { 7344 case T_BYTE: 7345 shift = 2; 7346 break; 7347 case T_SHORT: 7348 shift = 1; 7349 break; 7350 case T_INT: 7351 shift = 0; 7352 break; 7353 default: ShouldNotReachHere(); 7354 } 7355 7356 if (t == T_BYTE) { 7357 andl(value, 0xff); 7358 movl(rtmp, value); 7359 shll(rtmp, 8); 7360 orl(value, rtmp); 7361 } 7362 if (t == T_SHORT) { 7363 andl(value, 0xffff); 7364 } 7365 if (t == T_BYTE || t == T_SHORT) { 7366 movl(rtmp, value); 7367 shll(rtmp, 16); 7368 orl(value, rtmp); 7369 } 7370 7371 cmpptr(count, 2<<shift); // Short arrays (< 8 bytes) fill by element 7372 jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp 7373 if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) { 7374 Label L_skip_align2; 7375 // align source address at 4 bytes address boundary 7376 if (t == T_BYTE) { 7377 Label L_skip_align1; 7378 // One byte misalignment happens only for byte arrays 7379 testptr(to, 1); 7380 jccb(Assembler::zero, L_skip_align1); 7381 movb(Address(to, 0), value); 7382 increment(to); 7383 decrement(count); 7384 BIND(L_skip_align1); 7385 } 7386 // Two bytes misalignment happens only for byte and short (char) arrays 7387 testptr(to, 2); 7388 jccb(Assembler::zero, L_skip_align2); 7389 movw(Address(to, 0), value); 7390 addptr(to, 2); 7391 subptr(count, 1<<(shift-1)); 7392 BIND(L_skip_align2); 7393 } 7394 if (UseSSE < 2) { 7395 Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes; 7396 // Fill 32-byte chunks 7397 subptr(count, 8 << shift); 7398 
jcc(Assembler::less, L_check_fill_8_bytes); 7399 align(16); 7400 7401 BIND(L_fill_32_bytes_loop); 7402 7403 for (int i = 0; i < 32; i += 4) { 7404 movl(Address(to, i), value); 7405 } 7406 7407 addptr(to, 32); 7408 subptr(count, 8 << shift); 7409 jcc(Assembler::greaterEqual, L_fill_32_bytes_loop); 7410 BIND(L_check_fill_8_bytes); 7411 addptr(count, 8 << shift); 7412 jccb(Assembler::zero, L_exit); 7413 jmpb(L_fill_8_bytes); 7414 7415 // 7416 // length is too short, just fill qwords 7417 // 7418 BIND(L_fill_8_bytes_loop); 7419 movl(Address(to, 0), value); 7420 movl(Address(to, 4), value); 7421 addptr(to, 8); 7422 BIND(L_fill_8_bytes); 7423 subptr(count, 1 << (shift + 1)); 7424 jcc(Assembler::greaterEqual, L_fill_8_bytes_loop); 7425 // fall through to fill 4 bytes 7426 } else { 7427 Label L_fill_32_bytes; 7428 if (!UseUnalignedLoadStores) { 7429 // align to 8 bytes, we know we are 4 byte aligned to start 7430 testptr(to, 4); 7431 jccb(Assembler::zero, L_fill_32_bytes); 7432 movl(Address(to, 0), value); 7433 addptr(to, 4); 7434 subptr(count, 1<<shift); 7435 } 7436 BIND(L_fill_32_bytes); 7437 { 7438 assert( UseSSE >= 2, "supported cpu only" ); 7439 Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes; 7440 movdl(xtmp, value); 7441 if (UseAVX >= 2 && UseUnalignedLoadStores) { 7442 Label L_check_fill_32_bytes; 7443 if (UseAVX > 2) { 7444 // Fill 64-byte chunks 7445 Label L_fill_64_bytes_loop_avx3, L_check_fill_64_bytes_avx2; 7446 7447 // If number of bytes to fill < VM_Version::avx3_threshold(), perform fill using AVX2 7448 cmpptr(count, VM_Version::avx3_threshold()); 7449 jccb(Assembler::below, L_check_fill_64_bytes_avx2); 7450 7451 vpbroadcastd(xtmp, xtmp, Assembler::AVX_512bit); 7452 7453 subptr(count, 16 << shift); 7454 jccb(Assembler::less, L_check_fill_32_bytes); 7455 align(16); 7456 7457 BIND(L_fill_64_bytes_loop_avx3); 7458 evmovdqul(Address(to, 0), xtmp, Assembler::AVX_512bit); 7459 addptr(to, 64); 7460 subptr(count, 16 << shift); 7461 jcc(Assembler::greaterEqual, L_fill_64_bytes_loop_avx3); 7462 jmpb(L_check_fill_32_bytes); 7463 7464 BIND(L_check_fill_64_bytes_avx2); 7465 } 7466 // Fill 64-byte chunks 7467 Label L_fill_64_bytes_loop; 7468 vpbroadcastd(xtmp, xtmp, Assembler::AVX_256bit); 7469 7470 subptr(count, 16 << shift); 7471 jcc(Assembler::less, L_check_fill_32_bytes); 7472 align(16); 7473 7474 BIND(L_fill_64_bytes_loop); 7475 vmovdqu(Address(to, 0), xtmp); 7476 vmovdqu(Address(to, 32), xtmp); 7477 addptr(to, 64); 7478 subptr(count, 16 << shift); 7479 jcc(Assembler::greaterEqual, L_fill_64_bytes_loop); 7480 7481 BIND(L_check_fill_32_bytes); 7482 addptr(count, 8 << shift); 7483 jccb(Assembler::less, L_check_fill_8_bytes); 7484 vmovdqu(Address(to, 0), xtmp); 7485 addptr(to, 32); 7486 subptr(count, 8 << shift); 7487 7488 BIND(L_check_fill_8_bytes); 7489 // clean upper bits of YMM registers 7490 movdl(xtmp, value); 7491 pshufd(xtmp, xtmp, 0); 7492 } else { 7493 // Fill 32-byte chunks 7494 pshufd(xtmp, xtmp, 0); 7495 7496 subptr(count, 8 << shift); 7497 jcc(Assembler::less, L_check_fill_8_bytes); 7498 align(16); 7499 7500 BIND(L_fill_32_bytes_loop); 7501 7502 if (UseUnalignedLoadStores) { 7503 movdqu(Address(to, 0), xtmp); 7504 movdqu(Address(to, 16), xtmp); 7505 } else { 7506 movq(Address(to, 0), xtmp); 7507 movq(Address(to, 8), xtmp); 7508 movq(Address(to, 16), xtmp); 7509 movq(Address(to, 24), xtmp); 7510 } 7511 7512 addptr(to, 32); 7513 subptr(count, 8 << shift); 7514 jcc(Assembler::greaterEqual, L_fill_32_bytes_loop); 7515 7516 
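        // Fewer than 32 bytes remain: restore the count, then finish with 8-byte
        // (and smaller) stores.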
BIND(L_check_fill_8_bytes); 7517 } 7518 addptr(count, 8 << shift); 7519 jccb(Assembler::zero, L_exit); 7520 jmpb(L_fill_8_bytes); 7521 7522 // 7523 // length is too short, just fill qwords 7524 // 7525 BIND(L_fill_8_bytes_loop); 7526 movq(Address(to, 0), xtmp); 7527 addptr(to, 8); 7528 BIND(L_fill_8_bytes); 7529 subptr(count, 1 << (shift + 1)); 7530 jcc(Assembler::greaterEqual, L_fill_8_bytes_loop); 7531 } 7532 } 7533 // fill trailing 4 bytes 7534 BIND(L_fill_4_bytes); 7535 testl(count, 1<<shift); 7536 jccb(Assembler::zero, L_fill_2_bytes); 7537 movl(Address(to, 0), value); 7538 if (t == T_BYTE || t == T_SHORT) { 7539 Label L_fill_byte; 7540 addptr(to, 4); 7541 BIND(L_fill_2_bytes); 7542 // fill trailing 2 bytes 7543 testl(count, 1<<(shift-1)); 7544 jccb(Assembler::zero, L_fill_byte); 7545 movw(Address(to, 0), value); 7546 if (t == T_BYTE) { 7547 addptr(to, 2); 7548 BIND(L_fill_byte); 7549 // fill trailing byte 7550 testl(count, 1); 7551 jccb(Assembler::zero, L_exit); 7552 movb(Address(to, 0), value); 7553 } else { 7554 BIND(L_fill_byte); 7555 } 7556 } else { 7557 BIND(L_fill_2_bytes); 7558 } 7559 BIND(L_exit); 7560 } 7561 7562 void MacroAssembler::evpbroadcast(BasicType type, XMMRegister dst, Register src, int vector_len) { 7563 switch(type) { 7564 case T_BYTE: 7565 case T_BOOLEAN: 7566 evpbroadcastb(dst, src, vector_len); 7567 break; 7568 case T_SHORT: 7569 case T_CHAR: 7570 evpbroadcastw(dst, src, vector_len); 7571 break; 7572 case T_INT: 7573 case T_FLOAT: 7574 evpbroadcastd(dst, src, vector_len); 7575 break; 7576 case T_LONG: 7577 case T_DOUBLE: 7578 evpbroadcastq(dst, src, vector_len); 7579 break; 7580 default: 7581 fatal("Unhandled type : %s", type2name(type)); 7582 break; 7583 } 7584 } 7585 7586 // encode char[] to byte[] in ISO_8859_1 or ASCII 7587 //@IntrinsicCandidate 7588 //private static int implEncodeISOArray(byte[] sa, int sp, 7589 //byte[] da, int dp, int len) { 7590 // int i = 0; 7591 // for (; i < len; i++) { 7592 // char c = StringUTF16.getChar(sa, sp++); 7593 // if (c > '\u00FF') 7594 // break; 7595 // da[dp++] = (byte)c; 7596 // } 7597 // return i; 7598 //} 7599 // 7600 //@IntrinsicCandidate 7601 //private static int implEncodeAsciiArray(char[] sa, int sp, 7602 // byte[] da, int dp, int len) { 7603 // int i = 0; 7604 // for (; i < len; i++) { 7605 // char c = sa[sp++]; 7606 // if (c >= '\u0080') 7607 // break; 7608 // da[dp++] = (byte)c; 7609 // } 7610 // return i; 7611 //} 7612 void MacroAssembler::encode_iso_array(Register src, Register dst, Register len, 7613 XMMRegister tmp1Reg, XMMRegister tmp2Reg, 7614 XMMRegister tmp3Reg, XMMRegister tmp4Reg, 7615 Register tmp5, Register result, bool ascii) { 7616 7617 // rsi: src 7618 // rdi: dst 7619 // rdx: len 7620 // rcx: tmp5 7621 // rax: result 7622 ShortBranchVerifier sbv(this); 7623 assert_different_registers(src, dst, len, tmp5, result); 7624 Label L_done, L_copy_1_char, L_copy_1_char_exit; 7625 7626 int mask = ascii ? 0xff80ff80 : 0xff00ff00; 7627 int short_mask = ascii ? 
0xff80 : 0xff00; 7628 7629 // set result 7630 xorl(result, result); 7631 // check for zero length 7632 testl(len, len); 7633 jcc(Assembler::zero, L_done); 7634 7635 movl(result, len); 7636 7637 // Setup pointers 7638 lea(src, Address(src, len, Address::times_2)); // char[] 7639 lea(dst, Address(dst, len, Address::times_1)); // byte[] 7640 negptr(len); 7641 7642 if (UseSSE42Intrinsics || UseAVX >= 2) { 7643 Label L_copy_8_chars, L_copy_8_chars_exit; 7644 Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit; 7645 7646 if (UseAVX >= 2) { 7647 Label L_chars_32_check, L_copy_32_chars, L_copy_32_chars_exit; 7648 movl(tmp5, mask); // create mask to test for Unicode or non-ASCII chars in vector 7649 movdl(tmp1Reg, tmp5); 7650 vpbroadcastd(tmp1Reg, tmp1Reg, Assembler::AVX_256bit); 7651 jmp(L_chars_32_check); 7652 7653 bind(L_copy_32_chars); 7654 vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64)); 7655 vmovdqu(tmp4Reg, Address(src, len, Address::times_2, -32)); 7656 vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1); 7657 vptest(tmp2Reg, tmp1Reg); // check for Unicode or non-ASCII chars in vector 7658 jccb(Assembler::notZero, L_copy_32_chars_exit); 7659 vpackuswb(tmp3Reg, tmp3Reg, tmp4Reg, /* vector_len */ 1); 7660 vpermq(tmp4Reg, tmp3Reg, 0xD8, /* vector_len */ 1); 7661 vmovdqu(Address(dst, len, Address::times_1, -32), tmp4Reg); 7662 7663 bind(L_chars_32_check); 7664 addptr(len, 32); 7665 jcc(Assembler::lessEqual, L_copy_32_chars); 7666 7667 bind(L_copy_32_chars_exit); 7668 subptr(len, 16); 7669 jccb(Assembler::greater, L_copy_16_chars_exit); 7670 7671 } else if (UseSSE42Intrinsics) { 7672 movl(tmp5, mask); // create mask to test for Unicode or non-ASCII chars in vector 7673 movdl(tmp1Reg, tmp5); 7674 pshufd(tmp1Reg, tmp1Reg, 0); 7675 jmpb(L_chars_16_check); 7676 } 7677 7678 bind(L_copy_16_chars); 7679 if (UseAVX >= 2) { 7680 vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32)); 7681 vptest(tmp2Reg, tmp1Reg); 7682 jcc(Assembler::notZero, L_copy_16_chars_exit); 7683 vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector_len */ 1); 7684 vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector_len */ 1); 7685 } else { 7686 if (UseAVX > 0) { 7687 movdqu(tmp3Reg, Address(src, len, Address::times_2, -32)); 7688 movdqu(tmp4Reg, Address(src, len, Address::times_2, -16)); 7689 vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector_len */ 0); 7690 } else { 7691 movdqu(tmp3Reg, Address(src, len, Address::times_2, -32)); 7692 por(tmp2Reg, tmp3Reg); 7693 movdqu(tmp4Reg, Address(src, len, Address::times_2, -16)); 7694 por(tmp2Reg, tmp4Reg); 7695 } 7696 ptest(tmp2Reg, tmp1Reg); // check for Unicode or non-ASCII chars in vector 7697 jccb(Assembler::notZero, L_copy_16_chars_exit); 7698 packuswb(tmp3Reg, tmp4Reg); 7699 } 7700 movdqu(Address(dst, len, Address::times_1, -16), tmp3Reg); 7701 7702 bind(L_chars_16_check); 7703 addptr(len, 16); 7704 jcc(Assembler::lessEqual, L_copy_16_chars); 7705 7706 bind(L_copy_16_chars_exit); 7707 if (UseAVX >= 2) { 7708 // clean upper bits of YMM registers 7709 vpxor(tmp2Reg, tmp2Reg); 7710 vpxor(tmp3Reg, tmp3Reg); 7711 vpxor(tmp4Reg, tmp4Reg); 7712 movdl(tmp1Reg, tmp5); 7713 pshufd(tmp1Reg, tmp1Reg, 0); 7714 } 7715 subptr(len, 8); 7716 jccb(Assembler::greater, L_copy_8_chars_exit); 7717 7718 bind(L_copy_8_chars); 7719 movdqu(tmp3Reg, Address(src, len, Address::times_2, -16)); 7720 ptest(tmp3Reg, tmp1Reg); 7721 jccb(Assembler::notZero, L_copy_8_chars_exit); 7722 packuswb(tmp3Reg, tmp1Reg); 7723 movq(Address(dst, len, Address::times_1, -8), tmp3Reg); 7724 addptr(len, 8); 7725 
jccb(Assembler::lessEqual, L_copy_8_chars); 7726 7727 bind(L_copy_8_chars_exit); 7728 subptr(len, 8); 7729 jccb(Assembler::zero, L_done); 7730 } 7731 7732 bind(L_copy_1_char); 7733 load_unsigned_short(tmp5, Address(src, len, Address::times_2, 0)); 7734 testl(tmp5, short_mask); // check if Unicode or non-ASCII char 7735 jccb(Assembler::notZero, L_copy_1_char_exit); 7736 movb(Address(dst, len, Address::times_1, 0), tmp5); 7737 addptr(len, 1); 7738 jccb(Assembler::less, L_copy_1_char); 7739 7740 bind(L_copy_1_char_exit); 7741 addptr(result, len); // len is negative count of not processed elements 7742 7743 bind(L_done); 7744 } 7745 7746 #ifdef _LP64 7747 /** 7748 * Helper for multiply_to_len(). 7749 */ 7750 void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) { 7751 addq(dest_lo, src1); 7752 adcq(dest_hi, 0); 7753 addq(dest_lo, src2); 7754 adcq(dest_hi, 0); 7755 } 7756 7757 /** 7758 * Multiply 64 bit by 64 bit first loop. 7759 */ 7760 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart, 7761 Register y, Register y_idx, Register z, 7762 Register carry, Register product, 7763 Register idx, Register kdx) { 7764 // 7765 // jlong carry, x[], y[], z[]; 7766 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx-, kdx--) { 7767 // huge_128 product = y[idx] * x[xstart] + carry; 7768 // z[kdx] = (jlong)product; 7769 // carry = (jlong)(product >>> 64); 7770 // } 7771 // z[xstart] = carry; 7772 // 7773 7774 Label L_first_loop, L_first_loop_exit; 7775 Label L_one_x, L_one_y, L_multiply; 7776 7777 decrementl(xstart); 7778 jcc(Assembler::negative, L_one_x); 7779 7780 movq(x_xstart, Address(x, xstart, Address::times_4, 0)); 7781 rorq(x_xstart, 32); // convert big-endian to little-endian 7782 7783 bind(L_first_loop); 7784 decrementl(idx); 7785 jcc(Assembler::negative, L_first_loop_exit); 7786 decrementl(idx); 7787 jcc(Assembler::negative, L_one_y); 7788 movq(y_idx, Address(y, idx, Address::times_4, 0)); 7789 rorq(y_idx, 32); // convert big-endian to little-endian 7790 bind(L_multiply); 7791 movq(product, x_xstart); 7792 mulq(y_idx); // product(rax) * y_idx -> rdx:rax 7793 addq(product, carry); 7794 adcq(rdx, 0); 7795 subl(kdx, 2); 7796 movl(Address(z, kdx, Address::times_4, 4), product); 7797 shrq(product, 32); 7798 movl(Address(z, kdx, Address::times_4, 0), product); 7799 movq(carry, rdx); 7800 jmp(L_first_loop); 7801 7802 bind(L_one_y); 7803 movl(y_idx, Address(y, 0)); 7804 jmp(L_multiply); 7805 7806 bind(L_one_x); 7807 movl(x_xstart, Address(x, 0)); 7808 jmp(L_first_loop); 7809 7810 bind(L_first_loop_exit); 7811 } 7812 7813 /** 7814 * Multiply 64 bit by 64 bit and add 128 bit. 
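 * The low 64 bits of the 128-bit result are stored back into z (as two big-endian
 * 32-bit words); the high 64 bits are left in rdx for the caller to use as the carry.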
7815 */ 7816 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, Register z, 7817 Register yz_idx, Register idx, 7818 Register carry, Register product, int offset) { 7819 // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry; 7820 // z[kdx] = (jlong)product; 7821 7822 movq(yz_idx, Address(y, idx, Address::times_4, offset)); 7823 rorq(yz_idx, 32); // convert big-endian to little-endian 7824 movq(product, x_xstart); 7825 mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax) 7826 movq(yz_idx, Address(z, idx, Address::times_4, offset)); 7827 rorq(yz_idx, 32); // convert big-endian to little-endian 7828 7829 add2_with_carry(rdx, product, carry, yz_idx); 7830 7831 movl(Address(z, idx, Address::times_4, offset+4), product); 7832 shrq(product, 32); 7833 movl(Address(z, idx, Address::times_4, offset), product); 7834 7835 } 7836 7837 /** 7838 * Multiply 128 bit by 128 bit. Unrolled inner loop. 7839 */ 7840 void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, Register y, Register z, 7841 Register yz_idx, Register idx, Register jdx, 7842 Register carry, Register product, 7843 Register carry2) { 7844 // jlong carry, x[], y[], z[]; 7845 // int kdx = ystart+1; 7846 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 7847 // huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry; 7848 // z[kdx+idx+1] = (jlong)product; 7849 // jlong carry2 = (jlong)(product >>> 64); 7850 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry2; 7851 // z[kdx+idx] = (jlong)product; 7852 // carry = (jlong)(product >>> 64); 7853 // } 7854 // idx += 2; 7855 // if (idx > 0) { 7856 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry; 7857 // z[kdx+idx] = (jlong)product; 7858 // carry = (jlong)(product >>> 64); 7859 // } 7860 // 7861 7862 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 7863 7864 movl(jdx, idx); 7865 andl(jdx, 0xFFFFFFFC); 7866 shrl(jdx, 2); 7867 7868 bind(L_third_loop); 7869 subl(jdx, 1); 7870 jcc(Assembler::negative, L_third_loop_exit); 7871 subl(idx, 4); 7872 7873 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8); 7874 movq(carry2, rdx); 7875 7876 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0); 7877 movq(carry, rdx); 7878 jmp(L_third_loop); 7879 7880 bind (L_third_loop_exit); 7881 7882 andl (idx, 0x3); 7883 jcc(Assembler::zero, L_post_third_loop_done); 7884 7885 Label L_check_1; 7886 subl(idx, 2); 7887 jcc(Assembler::negative, L_check_1); 7888 7889 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0); 7890 movq(carry, rdx); 7891 7892 bind (L_check_1); 7893 addl (idx, 0x2); 7894 andl (idx, 0x1); 7895 subl(idx, 1); 7896 jcc(Assembler::negative, L_post_third_loop_done); 7897 7898 movl(yz_idx, Address(y, idx, Address::times_4, 0)); 7899 movq(product, x_xstart); 7900 mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax) 7901 movl(yz_idx, Address(z, idx, Address::times_4, 0)); 7902 7903 add2_with_carry(rdx, product, yz_idx, carry); 7904 7905 movl(Address(z, idx, Address::times_4, 0), product); 7906 shrq(product, 32); 7907 7908 shlq(rdx, 32); 7909 orq(product, rdx); 7910 movq(carry, product); 7911 7912 bind(L_post_third_loop_done); 7913 } 7914 7915 /** 7916 * Multiply 128 bit by 128 bit using BMI2. Unrolled inner loop. 
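 * rdx is expected to hold x[xstart] (the implicit mulx operand); when ADX is
 * available, adcx/adox maintain two independent carry chains, otherwise
 * add2_with_carry() is used.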
7917 * 7918 */ 7919 void MacroAssembler::multiply_128_x_128_bmi2_loop(Register y, Register z, 7920 Register carry, Register carry2, 7921 Register idx, Register jdx, 7922 Register yz_idx1, Register yz_idx2, 7923 Register tmp, Register tmp3, Register tmp4) { 7924 assert(UseBMI2Instructions, "should be used only when BMI2 is available"); 7925 7926 // jlong carry, x[], y[], z[]; 7927 // int kdx = ystart+1; 7928 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop 7929 // huge_128 tmp3 = (y[idx+1] * rdx) + z[kdx+idx+1] + carry; 7930 // jlong carry2 = (jlong)(tmp3 >>> 64); 7931 // huge_128 tmp4 = (y[idx] * rdx) + z[kdx+idx] + carry2; 7932 // carry = (jlong)(tmp4 >>> 64); 7933 // z[kdx+idx+1] = (jlong)tmp3; 7934 // z[kdx+idx] = (jlong)tmp4; 7935 // } 7936 // idx += 2; 7937 // if (idx > 0) { 7938 // yz_idx1 = (y[idx] * rdx) + z[kdx+idx] + carry; 7939 // z[kdx+idx] = (jlong)yz_idx1; 7940 // carry = (jlong)(yz_idx1 >>> 64); 7941 // } 7942 // 7943 7944 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done; 7945 7946 movl(jdx, idx); 7947 andl(jdx, 0xFFFFFFFC); 7948 shrl(jdx, 2); 7949 7950 bind(L_third_loop); 7951 subl(jdx, 1); 7952 jcc(Assembler::negative, L_third_loop_exit); 7953 subl(idx, 4); 7954 7955 movq(yz_idx1, Address(y, idx, Address::times_4, 8)); 7956 rorxq(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian 7957 movq(yz_idx2, Address(y, idx, Address::times_4, 0)); 7958 rorxq(yz_idx2, yz_idx2, 32); 7959 7960 mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3 7961 mulxq(carry2, tmp, yz_idx2); // yz_idx2 * rdx -> carry2:tmp 7962 7963 movq(yz_idx1, Address(z, idx, Address::times_4, 8)); 7964 rorxq(yz_idx1, yz_idx1, 32); 7965 movq(yz_idx2, Address(z, idx, Address::times_4, 0)); 7966 rorxq(yz_idx2, yz_idx2, 32); 7967 7968 if (VM_Version::supports_adx()) { 7969 adcxq(tmp3, carry); 7970 adoxq(tmp3, yz_idx1); 7971 7972 adcxq(tmp4, tmp); 7973 adoxq(tmp4, yz_idx2); 7974 7975 movl(carry, 0); // does not affect flags 7976 adcxq(carry2, carry); 7977 adoxq(carry2, carry); 7978 } else { 7979 add2_with_carry(tmp4, tmp3, carry, yz_idx1); 7980 add2_with_carry(carry2, tmp4, tmp, yz_idx2); 7981 } 7982 movq(carry, carry2); 7983 7984 movl(Address(z, idx, Address::times_4, 12), tmp3); 7985 shrq(tmp3, 32); 7986 movl(Address(z, idx, Address::times_4, 8), tmp3); 7987 7988 movl(Address(z, idx, Address::times_4, 4), tmp4); 7989 shrq(tmp4, 32); 7990 movl(Address(z, idx, Address::times_4, 0), tmp4); 7991 7992 jmp(L_third_loop); 7993 7994 bind (L_third_loop_exit); 7995 7996 andl (idx, 0x3); 7997 jcc(Assembler::zero, L_post_third_loop_done); 7998 7999 Label L_check_1; 8000 subl(idx, 2); 8001 jcc(Assembler::negative, L_check_1); 8002 8003 movq(yz_idx1, Address(y, idx, Address::times_4, 0)); 8004 rorxq(yz_idx1, yz_idx1, 32); 8005 mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3 8006 movq(yz_idx2, Address(z, idx, Address::times_4, 0)); 8007 rorxq(yz_idx2, yz_idx2, 32); 8008 8009 add2_with_carry(tmp4, tmp3, carry, yz_idx2); 8010 8011 movl(Address(z, idx, Address::times_4, 4), tmp3); 8012 shrq(tmp3, 32); 8013 movl(Address(z, idx, Address::times_4, 0), tmp3); 8014 movq(carry, tmp4); 8015 8016 bind (L_check_1); 8017 addl (idx, 0x2); 8018 andl (idx, 0x1); 8019 subl(idx, 1); 8020 jcc(Assembler::negative, L_post_third_loop_done); 8021 movl(tmp4, Address(y, idx, Address::times_4, 0)); 8022 mulxq(carry2, tmp3, tmp4); // tmp4 * rdx -> carry2:tmp3 8023 movl(tmp4, Address(z, idx, Address::times_4, 0)); 8024 8025 add2_with_carry(carry2, tmp3, tmp4, carry); 8026 8027 movl(Address(z, idx, 
Address::times_4, 0), tmp3); 8028 shrq(tmp3, 32); 8029 8030 shlq(carry2, 32); 8031 orq(tmp3, carry2); 8032 movq(carry, tmp3); 8033 8034 bind(L_post_third_loop_done); 8035 } 8036 8037 /** 8038 * Code for BigInteger::multiplyToLen() intrinsic. 8039 * 8040 * rdi: x 8041 * rax: xlen 8042 * rsi: y 8043 * rcx: ylen 8044 * r8: z 8045 * r11: tmp0 8046 * r12: tmp1 8047 * r13: tmp2 8048 * r14: tmp3 8049 * r15: tmp4 8050 * rbx: tmp5 8051 * 8052 */ 8053 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register tmp0, 8054 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) { 8055 ShortBranchVerifier sbv(this); 8056 assert_different_registers(x, xlen, y, ylen, z, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, rdx); 8057 8058 push(tmp0); 8059 push(tmp1); 8060 push(tmp2); 8061 push(tmp3); 8062 push(tmp4); 8063 push(tmp5); 8064 8065 push(xlen); 8066 8067 const Register idx = tmp1; 8068 const Register kdx = tmp2; 8069 const Register xstart = tmp3; 8070 8071 const Register y_idx = tmp4; 8072 const Register carry = tmp5; 8073 const Register product = xlen; 8074 const Register x_xstart = tmp0; 8075 8076 // First Loop. 8077 // 8078 // final static long LONG_MASK = 0xffffffffL; 8079 // int xstart = xlen - 1; 8080 // int ystart = ylen - 1; 8081 // long carry = 0; 8082 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx-, kdx--) { 8083 // long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry; 8084 // z[kdx] = (int)product; 8085 // carry = product >>> 32; 8086 // } 8087 // z[xstart] = (int)carry; 8088 // 8089 8090 movl(idx, ylen); // idx = ylen; 8091 lea(kdx, Address(xlen, ylen)); // kdx = xlen+ylen; 8092 xorq(carry, carry); // carry = 0; 8093 8094 Label L_done; 8095 8096 movl(xstart, xlen); 8097 decrementl(xstart); 8098 jcc(Assembler::negative, L_done); 8099 8100 multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx); 8101 8102 Label L_second_loop; 8103 testl(kdx, kdx); 8104 jcc(Assembler::zero, L_second_loop); 8105 8106 Label L_carry; 8107 subl(kdx, 1); 8108 jcc(Assembler::zero, L_carry); 8109 8110 movl(Address(z, kdx, Address::times_4, 0), carry); 8111 shrq(carry, 32); 8112 subl(kdx, 1); 8113 8114 bind(L_carry); 8115 movl(Address(z, kdx, Address::times_4, 0), carry); 8116 8117 // Second and third (nested) loops. 
8118 // 8119 // for (int i = xstart-1; i >= 0; i--) { // Second loop 8120 // carry = 0; 8121 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop 8122 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) + 8123 // (z[k] & LONG_MASK) + carry; 8124 // z[k] = (int)product; 8125 // carry = product >>> 32; 8126 // } 8127 // z[i] = (int)carry; 8128 // } 8129 // 8130 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx 8131 8132 const Register jdx = tmp1; 8133 8134 bind(L_second_loop); 8135 xorl(carry, carry); // carry = 0; 8136 movl(jdx, ylen); // j = ystart+1 8137 8138 subl(xstart, 1); // i = xstart-1; 8139 jcc(Assembler::negative, L_done); 8140 8141 push (z); 8142 8143 Label L_last_x; 8144 lea(z, Address(z, xstart, Address::times_4, 4)); // z = z + k - j 8145 subl(xstart, 1); // i = xstart-1; 8146 jcc(Assembler::negative, L_last_x); 8147 8148 if (UseBMI2Instructions) { 8149 movq(rdx, Address(x, xstart, Address::times_4, 0)); 8150 rorxq(rdx, rdx, 32); // convert big-endian to little-endian 8151 } else { 8152 movq(x_xstart, Address(x, xstart, Address::times_4, 0)); 8153 rorq(x_xstart, 32); // convert big-endian to little-endian 8154 } 8155 8156 Label L_third_loop_prologue; 8157 bind(L_third_loop_prologue); 8158 8159 push (x); 8160 push (xstart); 8161 push (ylen); 8162 8163 8164 if (UseBMI2Instructions) { 8165 multiply_128_x_128_bmi2_loop(y, z, carry, x, jdx, ylen, product, tmp2, x_xstart, tmp3, tmp4); 8166 } else { // !UseBMI2Instructions 8167 multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x); 8168 } 8169 8170 pop(ylen); 8171 pop(xlen); 8172 pop(x); 8173 pop(z); 8174 8175 movl(tmp3, xlen); 8176 addl(tmp3, 1); 8177 movl(Address(z, tmp3, Address::times_4, 0), carry); 8178 subl(tmp3, 1); 8179 jccb(Assembler::negative, L_done); 8180 8181 shrq(carry, 32); 8182 movl(Address(z, tmp3, Address::times_4, 0), carry); 8183 jmp(L_second_loop); 8184 8185 // Next infrequent code is moved outside loops. 8186 bind(L_last_x); 8187 if (UseBMI2Instructions) { 8188 movl(rdx, Address(x, 0)); 8189 } else { 8190 movl(x_xstart, Address(x, 0)); 8191 } 8192 jmp(L_third_loop_prologue); 8193 8194 bind(L_done); 8195 8196 pop(xlen); 8197 8198 pop(tmp5); 8199 pop(tmp4); 8200 pop(tmp3); 8201 pop(tmp2); 8202 pop(tmp1); 8203 pop(tmp0); 8204 } 8205 8206 void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale, 8207 Register result, Register tmp1, Register tmp2, XMMRegister rymm0, XMMRegister rymm1, XMMRegister rymm2){ 8208 assert(UseSSE42Intrinsics, "SSE4.2 must be enabled."); 8209 Label VECTOR16_LOOP, VECTOR8_LOOP, VECTOR4_LOOP; 8210 Label VECTOR8_TAIL, VECTOR4_TAIL; 8211 Label VECTOR32_NOT_EQUAL, VECTOR16_NOT_EQUAL, VECTOR8_NOT_EQUAL, VECTOR4_NOT_EQUAL; 8212 Label SAME_TILL_END, DONE; 8213 Label BYTES_LOOP, BYTES_TAIL, BYTES_NOT_EQUAL; 8214 8215 //scale is in rcx in both Win64 and Unix 8216 ShortBranchVerifier sbv(this); 8217 8218 shlq(length); 8219 xorq(result, result); 8220 8221 if ((AVX3Threshold == 0) && (UseAVX > 2) && 8222 VM_Version::supports_avx512vlbw()) { 8223 Label VECTOR64_LOOP, VECTOR64_NOT_EQUAL, VECTOR32_TAIL; 8224 8225 cmpq(length, 64); 8226 jcc(Assembler::less, VECTOR32_TAIL); 8227 8228 movq(tmp1, length); 8229 andq(tmp1, 0x3F); // tail count 8230 andq(length, ~(0x3F)); //vector count 8231 8232 bind(VECTOR64_LOOP); 8233 // AVX512 code to compare 64 byte vectors. 
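    // evpcmpeqb sets a bit in k7 for every matching byte; kortest then leaves CF
    // clear if any byte differed, which the aboveEqual (carry-clear) branch catches.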
8234 evmovdqub(rymm0, Address(obja, result), Assembler::AVX_512bit); 8235 evpcmpeqb(k7, rymm0, Address(objb, result), Assembler::AVX_512bit); 8236 kortestql(k7, k7); 8237 jcc(Assembler::aboveEqual, VECTOR64_NOT_EQUAL); // mismatch 8238 addq(result, 64); 8239 subq(length, 64); 8240 jccb(Assembler::notZero, VECTOR64_LOOP); 8241 8242 //bind(VECTOR64_TAIL); 8243 testq(tmp1, tmp1); 8244 jcc(Assembler::zero, SAME_TILL_END); 8245 8246 //bind(VECTOR64_TAIL); 8247 // AVX512 code to compare up to 63 byte vectors. 8248 mov64(tmp2, 0xFFFFFFFFFFFFFFFF); 8249 shlxq(tmp2, tmp2, tmp1); 8250 notq(tmp2); 8251 kmovql(k3, tmp2); 8252 8253 evmovdqub(rymm0, k3, Address(obja, result), false, Assembler::AVX_512bit); 8254 evpcmpeqb(k7, k3, rymm0, Address(objb, result), Assembler::AVX_512bit); 8255 8256 ktestql(k7, k3); 8257 jcc(Assembler::below, SAME_TILL_END); // not mismatch 8258 8259 bind(VECTOR64_NOT_EQUAL); 8260 kmovql(tmp1, k7); 8261 notq(tmp1); 8262 tzcntq(tmp1, tmp1); 8263 addq(result, tmp1); 8264 shrq(result); 8265 jmp(DONE); 8266 bind(VECTOR32_TAIL); 8267 } 8268 8269 cmpq(length, 8); 8270 jcc(Assembler::equal, VECTOR8_LOOP); 8271 jcc(Assembler::less, VECTOR4_TAIL); 8272 8273 if (UseAVX >= 2) { 8274 Label VECTOR16_TAIL, VECTOR32_LOOP; 8275 8276 cmpq(length, 16); 8277 jcc(Assembler::equal, VECTOR16_LOOP); 8278 jcc(Assembler::less, VECTOR8_LOOP); 8279 8280 cmpq(length, 32); 8281 jccb(Assembler::less, VECTOR16_TAIL); 8282 8283 subq(length, 32); 8284 bind(VECTOR32_LOOP); 8285 vmovdqu(rymm0, Address(obja, result)); 8286 vmovdqu(rymm1, Address(objb, result)); 8287 vpxor(rymm2, rymm0, rymm1, Assembler::AVX_256bit); 8288 vptest(rymm2, rymm2); 8289 jcc(Assembler::notZero, VECTOR32_NOT_EQUAL);//mismatch found 8290 addq(result, 32); 8291 subq(length, 32); 8292 jcc(Assembler::greaterEqual, VECTOR32_LOOP); 8293 addq(length, 32); 8294 jcc(Assembler::equal, SAME_TILL_END); 8295 //falling through if less than 32 bytes left //close the branch here. 
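    // Fewer than 32 bytes remain: if at least 16 are left, compare one 16-byte
    // chunk here before dropping to the 8-byte and smaller tails.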
8296 8297 bind(VECTOR16_TAIL); 8298 cmpq(length, 16); 8299 jccb(Assembler::less, VECTOR8_TAIL); 8300 bind(VECTOR16_LOOP); 8301 movdqu(rymm0, Address(obja, result)); 8302 movdqu(rymm1, Address(objb, result)); 8303 vpxor(rymm2, rymm0, rymm1, Assembler::AVX_128bit); 8304 ptest(rymm2, rymm2); 8305 jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found 8306 addq(result, 16); 8307 subq(length, 16); 8308 jcc(Assembler::equal, SAME_TILL_END); 8309 //falling through if less than 16 bytes left 8310 } else {//regular intrinsics 8311 8312 cmpq(length, 16); 8313 jccb(Assembler::less, VECTOR8_TAIL); 8314 8315 subq(length, 16); 8316 bind(VECTOR16_LOOP); 8317 movdqu(rymm0, Address(obja, result)); 8318 movdqu(rymm1, Address(objb, result)); 8319 pxor(rymm0, rymm1); 8320 ptest(rymm0, rymm0); 8321 jcc(Assembler::notZero, VECTOR16_NOT_EQUAL);//mismatch found 8322 addq(result, 16); 8323 subq(length, 16); 8324 jccb(Assembler::greaterEqual, VECTOR16_LOOP); 8325 addq(length, 16); 8326 jcc(Assembler::equal, SAME_TILL_END); 8327 //falling through if less than 16 bytes left 8328 } 8329 8330 bind(VECTOR8_TAIL); 8331 cmpq(length, 8); 8332 jccb(Assembler::less, VECTOR4_TAIL); 8333 bind(VECTOR8_LOOP); 8334 movq(tmp1, Address(obja, result)); 8335 movq(tmp2, Address(objb, result)); 8336 xorq(tmp1, tmp2); 8337 testq(tmp1, tmp1); 8338 jcc(Assembler::notZero, VECTOR8_NOT_EQUAL);//mismatch found 8339 addq(result, 8); 8340 subq(length, 8); 8341 jcc(Assembler::equal, SAME_TILL_END); 8342 //falling through if less than 8 bytes left 8343 8344 bind(VECTOR4_TAIL); 8345 cmpq(length, 4); 8346 jccb(Assembler::less, BYTES_TAIL); 8347 bind(VECTOR4_LOOP); 8348 movl(tmp1, Address(obja, result)); 8349 xorl(tmp1, Address(objb, result)); 8350 testl(tmp1, tmp1); 8351 jcc(Assembler::notZero, VECTOR4_NOT_EQUAL);//mismatch found 8352 addq(result, 4); 8353 subq(length, 4); 8354 jcc(Assembler::equal, SAME_TILL_END); 8355 //falling through if less than 4 bytes left 8356 8357 bind(BYTES_TAIL); 8358 bind(BYTES_LOOP); 8359 load_unsigned_byte(tmp1, Address(obja, result)); 8360 load_unsigned_byte(tmp2, Address(objb, result)); 8361 xorl(tmp1, tmp2); 8362 testl(tmp1, tmp1); 8363 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found 8364 decq(length); 8365 jcc(Assembler::zero, SAME_TILL_END); 8366 incq(result); 8367 load_unsigned_byte(tmp1, Address(obja, result)); 8368 load_unsigned_byte(tmp2, Address(objb, result)); 8369 xorl(tmp1, tmp2); 8370 testl(tmp1, tmp1); 8371 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found 8372 decq(length); 8373 jcc(Assembler::zero, SAME_TILL_END); 8374 incq(result); 8375 load_unsigned_byte(tmp1, Address(obja, result)); 8376 load_unsigned_byte(tmp2, Address(objb, result)); 8377 xorl(tmp1, tmp2); 8378 testl(tmp1, tmp1); 8379 jcc(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found 8380 jmp(SAME_TILL_END); 8381 8382 if (UseAVX >= 2) { 8383 bind(VECTOR32_NOT_EQUAL); 8384 vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_256bit); 8385 vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_256bit); 8386 vpxor(rymm0, rymm0, rymm2, Assembler::AVX_256bit); 8387 vpmovmskb(tmp1, rymm0); 8388 bsfq(tmp1, tmp1); 8389 addq(result, tmp1); 8390 shrq(result); 8391 jmp(DONE); 8392 } 8393 8394 bind(VECTOR16_NOT_EQUAL); 8395 if (UseAVX >= 2) { 8396 vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_128bit); 8397 vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_128bit); 8398 pxor(rymm0, rymm2); 8399 } else { 8400 pcmpeqb(rymm2, rymm2); 8401 pxor(rymm0, rymm1); 8402 pcmpeqb(rymm0, rymm1); 8403 pxor(rymm0, rymm2); 8404 } 8405 pmovmskb(tmp1, rymm0); 8406 
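// At this point every byte of rymm0 is 0xFF exactly where obja and objb differ,
// and pmovmskb has collapsed that into one bit per byte in tmp1. bsfq finds the
// lowest set bit (the first differing byte); that byte offset is added to the
// running offset in result and then scaled back to an element index by
// shrq(result) (shift count = log2_array_indxscale in rcx/cl).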
bsfq(tmp1, tmp1); 8407 addq(result, tmp1); 8408 shrq(result); 8409 jmpb(DONE); 8410 8411 bind(VECTOR8_NOT_EQUAL); 8412 bind(VECTOR4_NOT_EQUAL); 8413 bsfq(tmp1, tmp1); 8414 shrq(tmp1, 3); 8415 addq(result, tmp1); 8416 bind(BYTES_NOT_EQUAL); 8417 shrq(result); 8418 jmpb(DONE); 8419 8420 bind(SAME_TILL_END); 8421 mov64(result, -1); 8422 8423 bind(DONE); 8424 } 8425 8426 //Helper functions for square_to_len() 8427 8428 /** 8429 * Store the squares of x[], right shifted one bit (divided by 2) into z[] 8430 * Preserves x and z and modifies rest of the registers. 8431 */ 8432 void MacroAssembler::square_rshift(Register x, Register xlen, Register z, Register tmp1, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 8433 // Perform square and right shift by 1 8434 // Handle odd xlen case first, then for even xlen do the following 8435 // jlong carry = 0; 8436 // for (int j=0, i=0; j < xlen; j+=2, i+=4) { 8437 // huge_128 product = x[j:j+1] * x[j:j+1]; 8438 // z[i:i+1] = (carry << 63) | (jlong)(product >>> 65); 8439 // z[i+2:i+3] = (jlong)(product >>> 1); 8440 // carry = (jlong)product; 8441 // } 8442 8443 xorq(tmp5, tmp5); // carry 8444 xorq(rdxReg, rdxReg); 8445 xorl(tmp1, tmp1); // index for x 8446 xorl(tmp4, tmp4); // index for z 8447 8448 Label L_first_loop, L_first_loop_exit; 8449 8450 testl(xlen, 1); 8451 jccb(Assembler::zero, L_first_loop); //jump if xlen is even 8452 8453 // Square and right shift by 1 the odd element using 32 bit multiply 8454 movl(raxReg, Address(x, tmp1, Address::times_4, 0)); 8455 imulq(raxReg, raxReg); 8456 shrq(raxReg, 1); 8457 adcq(tmp5, 0); 8458 movq(Address(z, tmp4, Address::times_4, 0), raxReg); 8459 incrementl(tmp1); 8460 addl(tmp4, 2); 8461 8462 // Square and right shift by 1 the rest using 64 bit multiply 8463 bind(L_first_loop); 8464 cmpptr(tmp1, xlen); 8465 jccb(Assembler::equal, L_first_loop_exit); 8466 8467 // Square 8468 movq(raxReg, Address(x, tmp1, Address::times_4, 0)); 8469 rorq(raxReg, 32); // convert big-endian to little-endian 8470 mulq(raxReg); // 64-bit multiply rax * rax -> rdx:rax 8471 8472 // Right shift by 1 and save carry 8473 shrq(tmp5, 1); // rdx:rax:tmp5 = (tmp5:rdx:rax) >>> 1 8474 rcrq(rdxReg, 1); 8475 rcrq(raxReg, 1); 8476 adcq(tmp5, 0); 8477 8478 // Store result in z 8479 movq(Address(z, tmp4, Address::times_4, 0), rdxReg); 8480 movq(Address(z, tmp4, Address::times_4, 8), raxReg); 8481 8482 // Update indices for x and z 8483 addl(tmp1, 2); 8484 addl(tmp4, 4); 8485 jmp(L_first_loop); 8486 8487 bind(L_first_loop_exit); 8488 } 8489 8490 8491 /** 8492 * Perform the following multiply add operation using BMI2 instructions 8493 * carry:sum = sum + op1*op2 + carry 8494 * op2 should be in rdx 8495 * op2 is preserved, all other registers are modified 8496 */ 8497 void MacroAssembler::multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, Register tmp2) { 8498 // assert op2 is rdx 8499 mulxq(tmp2, op1, op1); // op1 * op2 -> tmp2:op1 8500 addq(sum, carry); 8501 adcq(tmp2, 0); 8502 addq(sum, op1); 8503 adcq(tmp2, 0); 8504 movq(carry, tmp2); 8505 } 8506 8507 /** 8508 * Perform the following multiply add operation: 8509 * carry:sum = sum + op1*op2 + carry 8510 * Preserves op1, op2 and modifies rest of registers 8511 */ 8512 void MacroAssembler::multiply_add_64(Register sum, Register op1, Register op2, Register carry, Register rdxReg, Register raxReg) { 8513 // rdx:rax = op1 * op2 8514 movq(raxReg, op2); 8515 mulq(op1); 8516 8517 // rdx:rax = sum + carry + rdx:rax 8518 addq(sum, carry); 8519 
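// Conceptually (illustrative C-style sketch, not code from this file):
//   unsigned __int128 t = (unsigned __int128)op1 * op2 + sum + carry;
//   sum   = (uint64_t)t;          // low 64 bits
//   carry = (uint64_t)(t >> 64);  // high 64 bits
// The two adcq instructions below fold the carry-outs of the 64-bit additions
// into rdxReg, so rdxReg ends up holding the high half of that 128-bit sum.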
adcq(rdxReg, 0); 8520 addq(sum, raxReg); 8521 adcq(rdxReg, 0); 8522 8523 // carry:sum = rdx:sum 8524 movq(carry, rdxReg); 8525 } 8526 8527 /** 8528 * Add 64 bit long carry into z[] with carry propagation. 8529 * Preserves z and carry register values and modifies rest of registers. 8530 * 8531 */ 8532 void MacroAssembler::add_one_64(Register z, Register zlen, Register carry, Register tmp1) { 8533 Label L_fourth_loop, L_fourth_loop_exit; 8534 8535 movl(tmp1, 1); 8536 subl(zlen, 2); 8537 addq(Address(z, zlen, Address::times_4, 0), carry); 8538 8539 bind(L_fourth_loop); 8540 jccb(Assembler::carryClear, L_fourth_loop_exit); 8541 subl(zlen, 2); 8542 jccb(Assembler::negative, L_fourth_loop_exit); 8543 addq(Address(z, zlen, Address::times_4, 0), tmp1); 8544 jmp(L_fourth_loop); 8545 bind(L_fourth_loop_exit); 8546 } 8547 8548 /** 8549 * Shift z[] left by 1 bit. 8550 * Preserves x, len, z and zlen registers and modifies rest of the registers. 8551 * 8552 */ 8553 void MacroAssembler::lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4) { 8554 8555 Label L_fifth_loop, L_fifth_loop_exit; 8556 8557 // Fifth loop 8558 // Perform primitiveLeftShift(z, zlen, 1) 8559 8560 const Register prev_carry = tmp1; 8561 const Register new_carry = tmp4; 8562 const Register value = tmp2; 8563 const Register zidx = tmp3; 8564 8565 // int zidx, carry; 8566 // long value; 8567 // carry = 0; 8568 // for (zidx = zlen-2; zidx >=0; zidx -= 2) { 8569 // (carry:value) = (z[i] << 1) | carry ; 8570 // z[i] = value; 8571 // } 8572 8573 movl(zidx, zlen); 8574 xorl(prev_carry, prev_carry); // clear carry flag and prev_carry register 8575 8576 bind(L_fifth_loop); 8577 decl(zidx); // Use decl to preserve carry flag 8578 decl(zidx); 8579 jccb(Assembler::negative, L_fifth_loop_exit); 8580 8581 if (UseBMI2Instructions) { 8582 movq(value, Address(z, zidx, Address::times_4, 0)); 8583 rclq(value, 1); 8584 rorxq(value, value, 32); 8585 movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form 8586 } 8587 else { 8588 // clear new_carry 8589 xorl(new_carry, new_carry); 8590 8591 // Shift z[i] by 1, or in previous carry and save new carry 8592 movq(value, Address(z, zidx, Address::times_4, 0)); 8593 shlq(value, 1); 8594 adcl(new_carry, 0); 8595 8596 orq(value, prev_carry); 8597 rorq(value, 0x20); 8598 movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form 8599 8600 // Set previous carry = new carry 8601 movl(prev_carry, new_carry); 8602 } 8603 jmp(L_fifth_loop); 8604 8605 bind(L_fifth_loop_exit); 8606 } 8607 8608 8609 /** 8610 * Code for BigInteger::squareToLen() intrinsic 8611 * 8612 * rdi: x 8613 * rsi: len 8614 * r8: z 8615 * rcx: zlen 8616 * r12: tmp1 8617 * r13: tmp2 8618 * r14: tmp3 8619 * r15: tmp4 8620 * rbx: tmp5 8621 * 8622 */ 8623 void MacroAssembler::square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 8624 8625 Label L_second_loop, L_second_loop_exit, L_third_loop, L_third_loop_exit, L_last_x, L_multiply; 8626 push(tmp1); 8627 push(tmp2); 8628 push(tmp3); 8629 push(tmp4); 8630 push(tmp5); 8631 8632 // First loop 8633 // Store the squares, right shifted one bit (i.e., divided by 2). 8634 square_rshift(x, len, z, tmp1, tmp3, tmp4, tmp5, rdxReg, raxReg); 8635 8636 // Add in off-diagonal sums. 8637 // 8638 // Second, third (nested) and fourth loops. 
8639 // zlen +=2; 8640 // for (int xidx=len-2,zidx=zlen-4; xidx > 0; xidx-=2,zidx-=4) { 8641 // carry = 0; 8642 // long op2 = x[xidx:xidx+1]; 8643 // for (int j=xidx-2,k=zidx; j >= 0; j-=2) { 8644 // k -= 2; 8645 // long op1 = x[j:j+1]; 8646 // long sum = z[k:k+1]; 8647 // carry:sum = multiply_add_64(sum, op1, op2, carry, tmp_regs); 8648 // z[k:k+1] = sum; 8649 // } 8650 // add_one_64(z, k, carry, tmp_regs); 8651 // } 8652 8653 const Register carry = tmp5; 8654 const Register sum = tmp3; 8655 const Register op1 = tmp4; 8656 Register op2 = tmp2; 8657 8658 push(zlen); 8659 push(len); 8660 addl(zlen,2); 8661 bind(L_second_loop); 8662 xorq(carry, carry); 8663 subl(zlen, 4); 8664 subl(len, 2); 8665 push(zlen); 8666 push(len); 8667 cmpl(len, 0); 8668 jccb(Assembler::lessEqual, L_second_loop_exit); 8669 8670 // Multiply an array by one 64 bit long. 8671 if (UseBMI2Instructions) { 8672 op2 = rdxReg; 8673 movq(op2, Address(x, len, Address::times_4, 0)); 8674 rorxq(op2, op2, 32); 8675 } 8676 else { 8677 movq(op2, Address(x, len, Address::times_4, 0)); 8678 rorq(op2, 32); 8679 } 8680 8681 bind(L_third_loop); 8682 decrementl(len); 8683 jccb(Assembler::negative, L_third_loop_exit); 8684 decrementl(len); 8685 jccb(Assembler::negative, L_last_x); 8686 8687 movq(op1, Address(x, len, Address::times_4, 0)); 8688 rorq(op1, 32); 8689 8690 bind(L_multiply); 8691 subl(zlen, 2); 8692 movq(sum, Address(z, zlen, Address::times_4, 0)); 8693 8694 // Multiply 64 bit by 64 bit and add 64 bits lower half and upper 64 bits as carry. 8695 if (UseBMI2Instructions) { 8696 multiply_add_64_bmi2(sum, op1, op2, carry, tmp2); 8697 } 8698 else { 8699 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 8700 } 8701 8702 movq(Address(z, zlen, Address::times_4, 0), sum); 8703 8704 jmp(L_third_loop); 8705 bind(L_third_loop_exit); 8706 8707 // Fourth loop 8708 // Add 64 bit long carry into z with carry propagation. 8709 // Uses offsetted zlen. 8710 add_one_64(z, zlen, carry, tmp1); 8711 8712 pop(len); 8713 pop(zlen); 8714 jmp(L_second_loop); 8715 8716 // Next infrequent code is moved outside loops. 8717 bind(L_last_x); 8718 movl(op1, Address(x, 0)); 8719 jmp(L_multiply); 8720 8721 bind(L_second_loop_exit); 8722 pop(len); 8723 pop(zlen); 8724 pop(len); 8725 pop(zlen); 8726 8727 // Fifth loop 8728 // Shift z left 1 bit. 8729 lshift_by_1(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4); 8730 8731 // z[zlen-1] |= x[len-1] & 1; 8732 movl(tmp3, Address(x, len, Address::times_4, -4)); 8733 andl(tmp3, 1); 8734 orl(Address(z, zlen, Address::times_4, -4), tmp3); 8735 8736 pop(tmp5); 8737 pop(tmp4); 8738 pop(tmp3); 8739 pop(tmp2); 8740 pop(tmp1); 8741 } 8742 8743 /** 8744 * Helper function for mul_add() 8745 * Multiply the in[] by int k and add to out[] starting at offset offs using 8746 * 128 bit by 32 bit multiply and return the carry in tmp5. 8747 * Only quad int aligned length of in[] is operated on in this function. 8748 * k is in rdxReg for BMI2Instructions, for others it is in tmp2. 8749 * This function preserves out, in and k registers. 8750 * len and offset point to the appropriate index in "in" & "out" correspondingly 8751 * tmp5 has the carry. 8752 * other registers are temporary and are modified. 
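 *
 * Roughly, each iteration of the loop below handles four ints of in[]: it performs
 * two 64-bit multiply-add steps of the form sum = in-word * k + out-word + carry,
 * storing the low 64 bits back to out[] (in big-endian int order) and carrying the
 * high 64 bits forward. The per-int scalar form of this loop is the mulAdd
 * pseudocode shown above mul_add() further down (illustrative description, not
 * code from this file).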
8753 * 8754 */ 8755 void MacroAssembler::mul_add_128_x_32_loop(Register out, Register in, 8756 Register offset, Register len, Register tmp1, Register tmp2, Register tmp3, 8757 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 8758 8759 Label L_first_loop, L_first_loop_exit; 8760 8761 movl(tmp1, len); 8762 shrl(tmp1, 2); 8763 8764 bind(L_first_loop); 8765 subl(tmp1, 1); 8766 jccb(Assembler::negative, L_first_loop_exit); 8767 8768 subl(len, 4); 8769 subl(offset, 4); 8770 8771 Register op2 = tmp2; 8772 const Register sum = tmp3; 8773 const Register op1 = tmp4; 8774 const Register carry = tmp5; 8775 8776 if (UseBMI2Instructions) { 8777 op2 = rdxReg; 8778 } 8779 8780 movq(op1, Address(in, len, Address::times_4, 8)); 8781 rorq(op1, 32); 8782 movq(sum, Address(out, offset, Address::times_4, 8)); 8783 rorq(sum, 32); 8784 if (UseBMI2Instructions) { 8785 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 8786 } 8787 else { 8788 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 8789 } 8790 // Store back in big endian from little endian 8791 rorq(sum, 0x20); 8792 movq(Address(out, offset, Address::times_4, 8), sum); 8793 8794 movq(op1, Address(in, len, Address::times_4, 0)); 8795 rorq(op1, 32); 8796 movq(sum, Address(out, offset, Address::times_4, 0)); 8797 rorq(sum, 32); 8798 if (UseBMI2Instructions) { 8799 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 8800 } 8801 else { 8802 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 8803 } 8804 // Store back in big endian from little endian 8805 rorq(sum, 0x20); 8806 movq(Address(out, offset, Address::times_4, 0), sum); 8807 8808 jmp(L_first_loop); 8809 bind(L_first_loop_exit); 8810 } 8811 8812 /** 8813 * Code for BigInteger::mulAdd() intrinsic 8814 * 8815 * rdi: out 8816 * rsi: in 8817 * r11: offs (out.length - offset) 8818 * rcx: len 8819 * r8: k 8820 * r12: tmp1 8821 * r13: tmp2 8822 * r14: tmp3 8823 * r15: tmp4 8824 * rbx: tmp5 8825 * Multiply the in[] by word k and add to out[], return the carry in rax 8826 */ 8827 void MacroAssembler::mul_add(Register out, Register in, Register offs, 8828 Register len, Register k, Register tmp1, Register tmp2, Register tmp3, 8829 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) { 8830 8831 Label L_carry, L_last_in, L_done; 8832 8833 // carry = 0; 8834 // for (int j=len-1; j >= 0; j--) { 8835 // long product = (in[j] & LONG_MASK) * kLong + 8836 // (out[offs] & LONG_MASK) + carry; 8837 // out[offs--] = (int)product; 8838 // carry = product >>> 32; 8839 // } 8840 // 8841 push(tmp1); 8842 push(tmp2); 8843 push(tmp3); 8844 push(tmp4); 8845 push(tmp5); 8846 8847 Register op2 = tmp2; 8848 const Register sum = tmp3; 8849 const Register op1 = tmp4; 8850 const Register carry = tmp5; 8851 8852 if (UseBMI2Instructions) { 8853 op2 = rdxReg; 8854 movl(op2, k); 8855 } 8856 else { 8857 movl(op2, k); 8858 } 8859 8860 xorq(carry, carry); 8861 8862 //First loop 8863 8864 //Multiply in[] by k in a 4 way unrolled loop using 128 bit by 32 bit multiply 8865 //The carry is in tmp5 8866 mul_add_128_x_32_loop(out, in, offs, len, tmp1, tmp2, tmp3, tmp4, tmp5, rdxReg, raxReg); 8867 8868 //Multiply the trailing in[] entry using 64 bit by 32 bit, if any 8869 decrementl(len); 8870 jccb(Assembler::negative, L_carry); 8871 decrementl(len); 8872 jccb(Assembler::negative, L_last_in); 8873 8874 movq(op1, Address(in, len, Address::times_4, 0)); 8875 rorq(op1, 32); 8876 8877 subl(offs, 2); 8878 movq(sum, Address(out, offs, Address::times_4, 0)); 8879 rorq(sum, 32); 8880 8881 if (UseBMI2Instructions) { 8882 
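// mulxq inside multiply_add_64_bmi2 reads its implicit multiplicand from rdx,
// which is where k was loaded earlier on this path (op2 aliases rdxReg).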
multiply_add_64_bmi2(sum, op1, op2, carry, raxReg); 8883 } 8884 else { 8885 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg); 8886 } 8887 8888 // Store back in big endian from little endian 8889 rorq(sum, 0x20); 8890 movq(Address(out, offs, Address::times_4, 0), sum); 8891 8892 testl(len, len); 8893 jccb(Assembler::zero, L_carry); 8894 8895 //Multiply the last in[] entry, if any 8896 bind(L_last_in); 8897 movl(op1, Address(in, 0)); 8898 movl(sum, Address(out, offs, Address::times_4, -4)); 8899 8900 movl(raxReg, k); 8901 mull(op1); //tmp4 * eax -> edx:eax 8902 addl(sum, carry); 8903 adcl(rdxReg, 0); 8904 addl(sum, raxReg); 8905 adcl(rdxReg, 0); 8906 movl(carry, rdxReg); 8907 8908 movl(Address(out, offs, Address::times_4, -4), sum); 8909 8910 bind(L_carry); 8911 //return tmp5/carry as carry in rax 8912 movl(rax, carry); 8913 8914 bind(L_done); 8915 pop(tmp5); 8916 pop(tmp4); 8917 pop(tmp3); 8918 pop(tmp2); 8919 pop(tmp1); 8920 } 8921 #endif 8922 8923 /** 8924 * Emits code to update CRC-32 with a byte value according to constants in table 8925 * 8926 * @param [in,out]crc Register containing the crc. 8927 * @param [in]val Register containing the byte to fold into the CRC. 8928 * @param [in]table Register containing the table of crc constants. 8929 * 8930 * uint32_t crc; 8931 * val = crc_table[(val ^ crc) & 0xFF]; 8932 * crc = val ^ (crc >> 8); 8933 * 8934 */ 8935 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 8936 xorl(val, crc); 8937 andl(val, 0xFF); 8938 shrl(crc, 8); // unsigned shift 8939 xorl(crc, Address(table, val, Address::times_4, 0)); 8940 } 8941 8942 /** 8943 * Fold 128-bit data chunk 8944 */ 8945 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) { 8946 if (UseAVX > 0) { 8947 vpclmulhdq(xtmp, xK, xcrc); // [123:64] 8948 vpclmulldq(xcrc, xK, xcrc); // [63:0] 8949 vpxor(xcrc, xcrc, Address(buf, offset), 0 /* vector_len */); 8950 pxor(xcrc, xtmp); 8951 } else { 8952 movdqa(xtmp, xcrc); 8953 pclmulhdq(xtmp, xK); // [123:64] 8954 pclmulldq(xcrc, xK); // [63:0] 8955 pxor(xcrc, xtmp); 8956 movdqu(xtmp, Address(buf, offset)); 8957 pxor(xcrc, xtmp); 8958 } 8959 } 8960 8961 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) { 8962 if (UseAVX > 0) { 8963 vpclmulhdq(xtmp, xK, xcrc); 8964 vpclmulldq(xcrc, xK, xcrc); 8965 pxor(xcrc, xbuf); 8966 pxor(xcrc, xtmp); 8967 } else { 8968 movdqa(xtmp, xcrc); 8969 pclmulhdq(xtmp, xK); 8970 pclmulldq(xcrc, xK); 8971 pxor(xcrc, xbuf); 8972 pxor(xcrc, xtmp); 8973 } 8974 } 8975 8976 /** 8977 * 8-bit folds to compute 32-bit CRC 8978 * 8979 * uint64_t xcrc; 8980 * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8); 8981 */ 8982 void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) { 8983 movdl(tmp, xcrc); 8984 andl(tmp, 0xFF); 8985 movdl(xtmp, Address(table, tmp, Address::times_4, 0)); 8986 psrldq(xcrc, 1); // unsigned shift one byte 8987 pxor(xcrc, xtmp); 8988 } 8989 8990 /** 8991 * uint32_t crc; 8992 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8); 8993 */ 8994 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) { 8995 movl(tmp, crc); 8996 andl(tmp, 0xFF); 8997 shrl(crc, 8); 8998 xorl(crc, Address(table, tmp, Address::times_4, 0)); 8999 } 9000 9001 /** 9002 * @param crc register containing existing CRC (32-bit) 9003 * @param buf register pointing to input byte buffer (byte*) 9004 * @param len register containing number of bytes 9005 * 
@param table register that will contain address of CRC table 9006 * @param tmp scratch register 9007 */ 9008 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) { 9009 assert_different_registers(crc, buf, len, table, tmp, rax); 9010 9011 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned; 9012 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop; 9013 9014 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 9015 // context for the registers used, where all instructions below are using 128-bit mode 9016 // On EVEX without VL and BW, these instructions will all be AVX. 9017 lea(table, ExternalAddress(StubRoutines::crc_table_addr())); 9018 notl(crc); // ~crc 9019 cmpl(len, 16); 9020 jcc(Assembler::less, L_tail); 9021 9022 // Align buffer to 16 bytes 9023 movl(tmp, buf); 9024 andl(tmp, 0xF); 9025 jccb(Assembler::zero, L_aligned); 9026 subl(tmp, 16); 9027 addl(len, tmp); 9028 9029 align(4); 9030 BIND(L_align_loop); 9031 movsbl(rax, Address(buf, 0)); // load byte with sign extension 9032 update_byte_crc32(crc, rax, table); 9033 increment(buf); 9034 incrementl(tmp); 9035 jccb(Assembler::less, L_align_loop); 9036 9037 BIND(L_aligned); 9038 movl(tmp, len); // save 9039 shrl(len, 4); 9040 jcc(Assembler::zero, L_tail_restore); 9041 9042 // Fold crc into first bytes of vector 9043 movdqa(xmm1, Address(buf, 0)); 9044 movdl(rax, xmm1); 9045 xorl(crc, rax); 9046 if (VM_Version::supports_sse4_1()) { 9047 pinsrd(xmm1, crc, 0); 9048 } else { 9049 pinsrw(xmm1, crc, 0); 9050 shrl(crc, 16); 9051 pinsrw(xmm1, crc, 1); 9052 } 9053 addptr(buf, 16); 9054 subl(len, 4); // len > 0 9055 jcc(Assembler::less, L_fold_tail); 9056 9057 movdqa(xmm2, Address(buf, 0)); 9058 movdqa(xmm3, Address(buf, 16)); 9059 movdqa(xmm4, Address(buf, 32)); 9060 addptr(buf, 48); 9061 subl(len, 3); 9062 jcc(Assembler::lessEqual, L_fold_512b); 9063 9064 // Fold total 512 bits of polynomial on each iteration, 9065 // 128 bits per each of 4 parallel streams. 9066 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32), rscratch1); 9067 9068 align32(); 9069 BIND(L_fold_512b_loop); 9070 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0); 9071 fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16); 9072 fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32); 9073 fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48); 9074 addptr(buf, 64); 9075 subl(len, 4); 9076 jcc(Assembler::greater, L_fold_512b_loop); 9077 9078 // Fold 512 bits to 128 bits. 9079 BIND(L_fold_512b); 9080 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1); 9081 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2); 9082 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3); 9083 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4); 9084 9085 // Fold the rest of 128 bits data chunks 9086 BIND(L_fold_tail); 9087 addl(len, 3); 9088 jccb(Assembler::lessEqual, L_fold_128b); 9089 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16), rscratch1); 9090 9091 BIND(L_fold_tail_loop); 9092 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0); 9093 addptr(buf, 16); 9094 decrementl(len); 9095 jccb(Assembler::greater, L_fold_tail_loop); 9096 9097 // Fold 128 bits in xmm1 down into 32 bits in crc register. 
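// After the carry-less-multiply reduction below, the remaining bits are folded
// down to the final 32-bit CRC by eight successive byte folds of the form used in
// fold_8bit_crc32 above, i.e. crc = table[crc & 0xFF] ^ (crc >> 8) with an
// unsigned shift: four folds through xmm0, then four more through the
// general-purpose crc register.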
9098 BIND(L_fold_128b); 9099 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()), rscratch1); 9100 if (UseAVX > 0) { 9101 vpclmulqdq(xmm2, xmm0, xmm1, 0x1); 9102 vpand(xmm3, xmm0, xmm2, 0 /* vector_len */); 9103 vpclmulqdq(xmm0, xmm0, xmm3, 0x1); 9104 } else { 9105 movdqa(xmm2, xmm0); 9106 pclmulqdq(xmm2, xmm1, 0x1); 9107 movdqa(xmm3, xmm0); 9108 pand(xmm3, xmm2); 9109 pclmulqdq(xmm0, xmm3, 0x1); 9110 } 9111 psrldq(xmm1, 8); 9112 psrldq(xmm2, 4); 9113 pxor(xmm0, xmm1); 9114 pxor(xmm0, xmm2); 9115 9116 // 8 8-bit folds to compute 32-bit CRC. 9117 for (int j = 0; j < 4; j++) { 9118 fold_8bit_crc32(xmm0, table, xmm1, rax); 9119 } 9120 movdl(crc, xmm0); // move 32 bits to a general-purpose register 9121 for (int j = 0; j < 4; j++) { 9122 fold_8bit_crc32(crc, table, rax); 9123 } 9124 9125 BIND(L_tail_restore); 9126 movl(len, tmp); // restore 9127 BIND(L_tail); 9128 andl(len, 0xf); 9129 jccb(Assembler::zero, L_exit); 9130 9131 // Fold the remaining bytes 9132 align(4); 9133 BIND(L_tail_loop); 9134 movsbl(rax, Address(buf, 0)); // load byte with sign extension 9135 update_byte_crc32(crc, rax, table); 9136 increment(buf); 9137 decrementl(len); 9138 jccb(Assembler::greater, L_tail_loop); 9139 9140 BIND(L_exit); 9141 notl(crc); // ~crc 9142 } 9143 9144 #ifdef _LP64 9145 // Helper function for AVX 512 CRC32 9146 // Fold 512-bit data chunks 9147 void MacroAssembler::fold512bit_crc32_avx512(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, 9148 Register pos, int offset) { 9149 evmovdquq(xmm3, Address(buf, pos, Address::times_1, offset), Assembler::AVX_512bit); 9150 evpclmulqdq(xtmp, xcrc, xK, 0x10, Assembler::AVX_512bit); // [123:64] 9151 evpclmulqdq(xmm2, xcrc, xK, 0x01, Assembler::AVX_512bit); // [63:0] 9152 evpxorq(xcrc, xtmp, xmm2, Assembler::AVX_512bit /* vector_len */); 9153 evpxorq(xcrc, xcrc, xmm3, Assembler::AVX_512bit /* vector_len */); 9154 } 9155 9156 // Helper function for AVX 512 CRC32 9157 // Compute CRC32 for < 256B buffers 9158 void MacroAssembler::kernel_crc32_avx512_256B(Register crc, Register buf, Register len, Register table, Register pos, 9159 Register tmp1, Register tmp2, Label& L_barrett, Label& L_16B_reduction_loop, 9160 Label& L_get_last_two_xmms, Label& L_128_done, Label& L_cleanup) { 9161 9162 Label L_less_than_32, L_exact_16_left, L_less_than_16_left; 9163 Label L_less_than_8_left, L_less_than_4_left, L_less_than_2_left, L_zero_left; 9164 Label L_only_less_than_4, L_only_less_than_3, L_only_less_than_2; 9165 9166 // check if there is enough buffer to be able to fold 16B at a time 9167 cmpl(len, 32); 9168 jcc(Assembler::less, L_less_than_32); 9169 9170 // if there is, load the constants 9171 movdqu(xmm10, Address(table, 1 * 16)); // rk1 and rk2 in xmm10 9172 movdl(xmm0, crc); // get the initial crc value 9173 movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); // load the plaintext 9174 pxor(xmm7, xmm0); 9175 9176 // update the buffer pointer 9177 addl(pos, 16); 9178 // update the counter; subtract 32 instead of 16 to save one instruction in the loop 9179 subl(len, 32); 9180 jmp(L_16B_reduction_loop); 9181 9182 bind(L_less_than_32); 9183 // move the initial crc to the return value; this is necessary for zero-length buffers.
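// Roughly, the code below first returns the initial crc for zero-length input,
// handles 16..31 remaining bytes with a single 16-byte load, and stages anything
// shorter than 16 bytes through a zeroed 16-byte stack slot (copied 8/4/2/1 bytes
// at a time), xoring in the initial crc and shuffling the data into place with
// pshufb before falling into the common 128-bit reduction and Barrett step.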
9184 movl(rax, crc); 9185 testl(len, len); 9186 jcc(Assembler::equal, L_cleanup); 9187 9188 movdl(xmm0, crc); //get the initial crc value 9189 9190 cmpl(len, 16); 9191 jcc(Assembler::equal, L_exact_16_left); 9192 jcc(Assembler::less, L_less_than_16_left); 9193 9194 movdqu(xmm7, Address(buf, pos, Address::times_1, 0 * 16)); //load the plaintext 9195 pxor(xmm7, xmm0); //xor the initial crc value 9196 addl(pos, 16); 9197 subl(len, 16); 9198 movdqu(xmm10, Address(table, 1 * 16)); // rk1 and rk2 in xmm10 9199 jmp(L_get_last_two_xmms); 9200 9201 bind(L_less_than_16_left); 9202 //use stack space to load data less than 16 bytes, zero - out the 16B in memory first. 9203 pxor(xmm1, xmm1); 9204 movptr(tmp1, rsp); 9205 movdqu(Address(tmp1, 0 * 16), xmm1); 9206 9207 cmpl(len, 4); 9208 jcc(Assembler::less, L_only_less_than_4); 9209 9210 //backup the counter value 9211 movl(tmp2, len); 9212 cmpl(len, 8); 9213 jcc(Assembler::less, L_less_than_8_left); 9214 9215 //load 8 Bytes 9216 movq(rax, Address(buf, pos, Address::times_1, 0 * 16)); 9217 movq(Address(tmp1, 0 * 16), rax); 9218 addptr(tmp1, 8); 9219 subl(len, 8); 9220 addl(pos, 8); 9221 9222 bind(L_less_than_8_left); 9223 cmpl(len, 4); 9224 jcc(Assembler::less, L_less_than_4_left); 9225 9226 //load 4 Bytes 9227 movl(rax, Address(buf, pos, Address::times_1, 0)); 9228 movl(Address(tmp1, 0 * 16), rax); 9229 addptr(tmp1, 4); 9230 subl(len, 4); 9231 addl(pos, 4); 9232 9233 bind(L_less_than_4_left); 9234 cmpl(len, 2); 9235 jcc(Assembler::less, L_less_than_2_left); 9236 9237 // load 2 Bytes 9238 movw(rax, Address(buf, pos, Address::times_1, 0)); 9239 movl(Address(tmp1, 0 * 16), rax); 9240 addptr(tmp1, 2); 9241 subl(len, 2); 9242 addl(pos, 2); 9243 9244 bind(L_less_than_2_left); 9245 cmpl(len, 1); 9246 jcc(Assembler::less, L_zero_left); 9247 9248 // load 1 Byte 9249 movb(rax, Address(buf, pos, Address::times_1, 0)); 9250 movb(Address(tmp1, 0 * 16), rax); 9251 9252 bind(L_zero_left); 9253 movdqu(xmm7, Address(rsp, 0)); 9254 pxor(xmm7, xmm0); //xor the initial crc value 9255 9256 lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr())); 9257 movdqu(xmm0, Address(rax, tmp2)); 9258 pshufb(xmm7, xmm0); 9259 jmp(L_128_done); 9260 9261 bind(L_exact_16_left); 9262 movdqu(xmm7, Address(buf, pos, Address::times_1, 0)); 9263 pxor(xmm7, xmm0); //xor the initial crc value 9264 jmp(L_128_done); 9265 9266 bind(L_only_less_than_4); 9267 cmpl(len, 3); 9268 jcc(Assembler::less, L_only_less_than_3); 9269 9270 // load 3 Bytes 9271 movb(rax, Address(buf, pos, Address::times_1, 0)); 9272 movb(Address(tmp1, 0), rax); 9273 9274 movb(rax, Address(buf, pos, Address::times_1, 1)); 9275 movb(Address(tmp1, 1), rax); 9276 9277 movb(rax, Address(buf, pos, Address::times_1, 2)); 9278 movb(Address(tmp1, 2), rax); 9279 9280 movdqu(xmm7, Address(rsp, 0)); 9281 pxor(xmm7, xmm0); //xor the initial crc value 9282 9283 pslldq(xmm7, 0x5); 9284 jmp(L_barrett); 9285 bind(L_only_less_than_3); 9286 cmpl(len, 2); 9287 jcc(Assembler::less, L_only_less_than_2); 9288 9289 // load 2 Bytes 9290 movb(rax, Address(buf, pos, Address::times_1, 0)); 9291 movb(Address(tmp1, 0), rax); 9292 9293 movb(rax, Address(buf, pos, Address::times_1, 1)); 9294 movb(Address(tmp1, 1), rax); 9295 9296 movdqu(xmm7, Address(rsp, 0)); 9297 pxor(xmm7, xmm0); //xor the initial crc value 9298 9299 pslldq(xmm7, 0x6); 9300 jmp(L_barrett); 9301 9302 bind(L_only_less_than_2); 9303 //load 1 Byte 9304 movb(rax, Address(buf, pos, Address::times_1, 0)); 9305 movb(Address(tmp1, 0), rax); 9306 9307 movdqu(xmm7, Address(rsp, 
0)); 9308 pxor(xmm7, xmm0); //xor the initial crc value 9309 9310 pslldq(xmm7, 0x7); 9311 } 9312 9313 /** 9314 * Compute CRC32 using AVX512 instructions 9315 * param crc register containing existing CRC (32-bit) 9316 * param buf register pointing to input byte buffer (byte*) 9317 * param len register containing number of bytes 9318 * param table address of crc or crc32c table 9319 * param tmp1 scratch register 9320 * param tmp2 scratch register 9321 * return rax result register 9322 * 9323 * This routine is identical for crc32c with the exception of the precomputed constant 9324 * table which will be passed as the table argument. The calculation steps are 9325 * the same for both variants. 9326 */ 9327 void MacroAssembler::kernel_crc32_avx512(Register crc, Register buf, Register len, Register table, Register tmp1, Register tmp2) { 9328 assert_different_registers(crc, buf, len, table, tmp1, tmp2, rax, r12); 9329 9330 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned; 9331 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop; 9332 Label L_less_than_256, L_fold_128_B_loop, L_fold_256_B_loop; 9333 Label L_fold_128_B_register, L_final_reduction_for_128, L_16B_reduction_loop; 9334 Label L_128_done, L_get_last_two_xmms, L_barrett, L_cleanup; 9335 9336 const Register pos = r12; 9337 push(r12); 9338 subptr(rsp, 16 * 2 + 8); 9339 9340 // For EVEX with VL and BW, provide a standard mask, VL = 128 will guide the merge 9341 // context for the registers used, where all instructions below are using 128-bit mode 9342 // On EVEX without VL and BW, these instructions will all be AVX. 9343 movl(pos, 0); 9344 9345 // check if smaller than 256B 9346 cmpl(len, 256); 9347 jcc(Assembler::less, L_less_than_256); 9348 9349 // load the initial crc value 9350 movdl(xmm10, crc); 9351 9352 // receive the initial 64B data, xor the initial crc value 9353 evmovdquq(xmm0, Address(buf, pos, Address::times_1, 0 * 64), Assembler::AVX_512bit); 9354 evmovdquq(xmm4, Address(buf, pos, Address::times_1, 1 * 64), Assembler::AVX_512bit); 9355 evpxorq(xmm0, xmm0, xmm10, Assembler::AVX_512bit); 9356 evbroadcasti32x4(xmm10, Address(table, 2 * 16), Assembler::AVX_512bit); //zmm10 has rk3 and rk4 9357 9358 subl(len, 256); 9359 cmpl(len, 256); 9360 jcc(Assembler::less, L_fold_128_B_loop); 9361 9362 evmovdquq(xmm7, Address(buf, pos, Address::times_1, 2 * 64), Assembler::AVX_512bit); 9363 evmovdquq(xmm8, Address(buf, pos, Address::times_1, 3 * 64), Assembler::AVX_512bit); 9364 evbroadcasti32x4(xmm16, Address(table, 0 * 16), Assembler::AVX_512bit); //zmm16 has rk-1 and rk-2 9365 subl(len, 256); 9366 9367 bind(L_fold_256_B_loop); 9368 addl(pos, 256); 9369 fold512bit_crc32_avx512(xmm0, xmm16, xmm1, buf, pos, 0 * 64); 9370 fold512bit_crc32_avx512(xmm4, xmm16, xmm1, buf, pos, 1 * 64); 9371 fold512bit_crc32_avx512(xmm7, xmm16, xmm1, buf, pos, 2 * 64); 9372 fold512bit_crc32_avx512(xmm8, xmm16, xmm1, buf, pos, 3 * 64); 9373 9374 subl(len, 256); 9375 jcc(Assembler::greaterEqual, L_fold_256_B_loop); 9376 9377 // Fold 256 into 128 9378 addl(pos, 256); 9379 evpclmulqdq(xmm1, xmm0, xmm10, 0x01, Assembler::AVX_512bit); 9380 evpclmulqdq(xmm2, xmm0, xmm10, 0x10, Assembler::AVX_512bit); 9381 vpternlogq(xmm7, 0x96, xmm1, xmm2, Assembler::AVX_512bit); // xor ABC 9382 9383 evpclmulqdq(xmm5, xmm4, xmm10, 0x01, Assembler::AVX_512bit); 9384 evpclmulqdq(xmm6, xmm4, xmm10, 0x10, Assembler::AVX_512bit); 9385 vpternlogq(xmm8, 0x96, xmm5, xmm6, Assembler::AVX_512bit); // xor ABC 9386 9387 evmovdquq(xmm0, xmm7, 
Assembler::AVX_512bit); 9388 evmovdquq(xmm4, xmm8, Assembler::AVX_512bit); 9389 9390 addl(len, 128); 9391 jmp(L_fold_128_B_register); 9392 9393 // at this point in the code, there are 128 * x + y (0 <= y < 128) bytes of buffer. The fold_128_B_loop 9394 // loop will fold 128B at a time until we have 128 + y bytes of buffer 9395 9396 // fold 128B at a time. This section of the code folds 8 xmm registers in parallel 9397 bind(L_fold_128_B_loop); 9398 addl(pos, 128); 9399 fold512bit_crc32_avx512(xmm0, xmm10, xmm1, buf, pos, 0 * 64); 9400 fold512bit_crc32_avx512(xmm4, xmm10, xmm1, buf, pos, 1 * 64); 9401 9402 subl(len, 128); 9403 jcc(Assembler::greaterEqual, L_fold_128_B_loop); 9404 9405 addl(pos, 128); 9406 9407 // at this point, the buffer pointer is pointing at the last y bytes of the buffer, where 0 <= y < 128 9408 // the 128B of folded data is in 8 of the xmm registers: xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7 9409 bind(L_fold_128_B_register); 9410 evmovdquq(xmm16, Address(table, 5 * 16), Assembler::AVX_512bit); // multiply by rk9-rk16 9411 evmovdquq(xmm11, Address(table, 9 * 16), Assembler::AVX_512bit); // multiply by rk17-rk20, rk1,rk2, 0,0 9412 evpclmulqdq(xmm1, xmm0, xmm16, 0x01, Assembler::AVX_512bit); 9413 evpclmulqdq(xmm2, xmm0, xmm16, 0x10, Assembler::AVX_512bit); 9414 // save last that has no multiplicand 9415 vextracti64x2(xmm7, xmm4, 3); 9416 9417 evpclmulqdq(xmm5, xmm4, xmm11, 0x01, Assembler::AVX_512bit); 9418 evpclmulqdq(xmm6, xmm4, xmm11, 0x10, Assembler::AVX_512bit); 9419 // Needed later in reduction loop 9420 movdqu(xmm10, Address(table, 1 * 16)); 9421 vpternlogq(xmm1, 0x96, xmm2, xmm5, Assembler::AVX_512bit); // xor ABC 9422 vpternlogq(xmm1, 0x96, xmm6, xmm7, Assembler::AVX_512bit); // xor ABC 9423 9424 // Swap 1,0,3,2 - 01 00 11 10 9425 evshufi64x2(xmm8, xmm1, xmm1, 0x4e, Assembler::AVX_512bit); 9426 evpxorq(xmm8, xmm8, xmm1, Assembler::AVX_256bit); 9427 vextracti128(xmm5, xmm8, 1); 9428 evpxorq(xmm7, xmm5, xmm8, Assembler::AVX_128bit); 9429 9430 // instead of 128, we add 128 - 16 to the loop counter to save one instruction in the loop 9431 // instead of a cmp instruction, we use the negative flag with the jl instruction 9432 addl(len, 128 - 16); 9433 jcc(Assembler::less, L_final_reduction_for_128); 9434 9435 bind(L_16B_reduction_loop); 9436 vpclmulqdq(xmm8, xmm7, xmm10, 0x01); 9437 vpclmulqdq(xmm7, xmm7, xmm10, 0x10); 9438 vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit); 9439 movdqu(xmm0, Address(buf, pos, Address::times_1, 0 * 16)); 9440 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 9441 addl(pos, 16); 9442 subl(len, 16); 9443 jcc(Assembler::greaterEqual, L_16B_reduction_loop); 9444 9445 bind(L_final_reduction_for_128); 9446 addl(len, 16); 9447 jcc(Assembler::equal, L_128_done); 9448 9449 bind(L_get_last_two_xmms); 9450 movdqu(xmm2, xmm7); 9451 addl(pos, len); 9452 movdqu(xmm1, Address(buf, pos, Address::times_1, -16)); 9453 subl(pos, len); 9454 9455 // get rid of the extra data that was loaded before 9456 // load the shift constant 9457 lea(rax, ExternalAddress(StubRoutines::x86::shuf_table_crc32_avx512_addr())); 9458 movdqu(xmm0, Address(rax, len)); 9459 addl(rax, len); 9460 9461 vpshufb(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 9462 // Change mask to 512 9463 vpxor(xmm0, xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 2 * 16), Assembler::AVX_128bit, tmp2); 9464 vpshufb(xmm2, xmm2, xmm0, Assembler::AVX_128bit); 9465 9466 blendvpb(xmm2, xmm2, xmm1, xmm0, Assembler::AVX_128bit); 9467 vpclmulqdq(xmm8, xmm7, xmm10, 0x01); 9468 vpclmulqdq(xmm7,
xmm7, xmm10, 0x10); 9469 vpxor(xmm7, xmm7, xmm8, Assembler::AVX_128bit); 9470 vpxor(xmm7, xmm7, xmm2, Assembler::AVX_128bit); 9471 9472 bind(L_128_done); 9473 // compute crc of a 128-bit value 9474 movdqu(xmm10, Address(table, 3 * 16)); 9475 movdqu(xmm0, xmm7); 9476 9477 // 64b fold 9478 vpclmulqdq(xmm7, xmm7, xmm10, 0x0); 9479 vpsrldq(xmm0, xmm0, 0x8, Assembler::AVX_128bit); 9480 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 9481 9482 // 32b fold 9483 movdqu(xmm0, xmm7); 9484 vpslldq(xmm7, xmm7, 0x4, Assembler::AVX_128bit); 9485 vpclmulqdq(xmm7, xmm7, xmm10, 0x10); 9486 vpxor(xmm7, xmm7, xmm0, Assembler::AVX_128bit); 9487 jmp(L_barrett); 9488 9489 bind(L_less_than_256); 9490 kernel_crc32_avx512_256B(crc, buf, len, table, pos, tmp1, tmp2, L_barrett, L_16B_reduction_loop, L_get_last_two_xmms, L_128_done, L_cleanup); 9491 9492 //barrett reduction 9493 bind(L_barrett); 9494 vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr() + 1 * 16), Assembler::AVX_128bit, tmp2); 9495 movdqu(xmm1, xmm7); 9496 movdqu(xmm2, xmm7); 9497 movdqu(xmm10, Address(table, 4 * 16)); 9498 9499 pclmulqdq(xmm7, xmm10, 0x0); 9500 pxor(xmm7, xmm2); 9501 vpand(xmm7, xmm7, ExternalAddress(StubRoutines::x86::crc_by128_masks_avx512_addr()), Assembler::AVX_128bit, tmp2); 9502 movdqu(xmm2, xmm7); 9503 pclmulqdq(xmm7, xmm10, 0x10); 9504 pxor(xmm7, xmm2); 9505 pxor(xmm7, xmm1); 9506 pextrd(crc, xmm7, 2); 9507 9508 bind(L_cleanup); 9509 addptr(rsp, 16 * 2 + 8); 9510 pop(r12); 9511 } 9512 9513 // S. Gueron / Information Processing Letters 112 (2012) 184 9514 // Algorithm 4: Computing carry-less multiplication using a precomputed lookup table. 9515 // Input: A 32 bit value B = [byte3, byte2, byte1, byte0]. 9516 // Output: the 64-bit carry-less product of B * CONST 9517 void MacroAssembler::crc32c_ipl_alg4(Register in, uint32_t n, 9518 Register tmp1, Register tmp2, Register tmp3) { 9519 lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr())); 9520 if (n > 0) { 9521 addq(tmp3, n * 256 * 8); 9522 } 9523 // Q1 = TABLEExt[n][B & 0xFF]; 9524 movl(tmp1, in); 9525 andl(tmp1, 0x000000FF); 9526 shll(tmp1, 3); 9527 addq(tmp1, tmp3); 9528 movq(tmp1, Address(tmp1, 0)); 9529 9530 // Q2 = TABLEExt[n][B >> 8 & 0xFF]; 9531 movl(tmp2, in); 9532 shrl(tmp2, 8); 9533 andl(tmp2, 0x000000FF); 9534 shll(tmp2, 3); 9535 addq(tmp2, tmp3); 9536 movq(tmp2, Address(tmp2, 0)); 9537 9538 shlq(tmp2, 8); 9539 xorq(tmp1, tmp2); 9540 9541 // Q3 = TABLEExt[n][B >> 16 & 0xFF]; 9542 movl(tmp2, in); 9543 shrl(tmp2, 16); 9544 andl(tmp2, 0x000000FF); 9545 shll(tmp2, 3); 9546 addq(tmp2, tmp3); 9547 movq(tmp2, Address(tmp2, 0)); 9548 9549 shlq(tmp2, 16); 9550 xorq(tmp1, tmp2); 9551 9552 // Q4 = TABLEExt[n][B >> 24 & 0xFF]; 9553 shrl(in, 24); 9554 andl(in, 0x000000FF); 9555 shll(in, 3); 9556 addq(in, tmp3); 9557 movq(in, Address(in, 0)); 9558 9559 shlq(in, 24); 9560 xorq(in, tmp1); 9561 // return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24; 9562 } 9563 9564 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1, 9565 Register in_out, 9566 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported, 9567 XMMRegister w_xtmp2, 9568 Register tmp1, 9569 Register n_tmp2, Register n_tmp3) { 9570 if (is_pclmulqdq_supported) { 9571 movdl(w_xtmp1, in_out); // modified blindly 9572 9573 movl(tmp1, const_or_pre_comp_const_index); 9574 movdl(w_xtmp2, tmp1); 9575 pclmulqdq(w_xtmp1, w_xtmp2, 0); 9576 9577 movdq(in_out, w_xtmp1); 9578 } else { 9579 crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3); 9580 } 9581 } 9582 9583 // 
Recombination Alternative 2: No bit-reflections 9584 // T1 = (CRC_A * U1) << 1 9585 // T2 = (CRC_B * U2) << 1 9586 // C1 = T1 >> 32 9587 // C2 = T2 >> 32 9588 // T1 = T1 & 0xFFFFFFFF 9589 // T2 = T2 & 0xFFFFFFFF 9590 // T1 = CRC32(0, T1) 9591 // T2 = CRC32(0, T2) 9592 // C1 = C1 ^ T1 9593 // C2 = C2 ^ T2 9594 // CRC = C1 ^ C2 ^ CRC_C 9595 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2, 9596 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 9597 Register tmp1, Register tmp2, 9598 Register n_tmp3) { 9599 crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 9600 crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 9601 shlq(in_out, 1); 9602 movl(tmp1, in_out); 9603 shrq(in_out, 32); 9604 xorl(tmp2, tmp2); 9605 crc32(tmp2, tmp1, 4); 9606 xorl(in_out, tmp2); // we don't care about upper 32 bit contents here 9607 shlq(in1, 1); 9608 movl(tmp1, in1); 9609 shrq(in1, 32); 9610 xorl(tmp2, tmp2); 9611 crc32(tmp2, tmp1, 4); 9612 xorl(in1, tmp2); 9613 xorl(in_out, in1); 9614 xorl(in_out, in2); 9615 } 9616 9617 // Set N to predefined value 9618 // Subtract from a length of a buffer 9619 // execute in a loop: 9620 // CRC_A = 0xFFFFFFFF, CRC_B = 0, CRC_C = 0 9621 // for i = 1 to N do 9622 // CRC_A = CRC32(CRC_A, A[i]) 9623 // CRC_B = CRC32(CRC_B, B[i]) 9624 // CRC_C = CRC32(CRC_C, C[i]) 9625 // end for 9626 // Recombine 9627 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 9628 Register in_out1, Register in_out2, Register in_out3, 9629 Register tmp1, Register tmp2, Register tmp3, 9630 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 9631 Register tmp4, Register tmp5, 9632 Register n_tmp6) { 9633 Label L_processPartitions; 9634 Label L_processPartition; 9635 Label L_exit; 9636 9637 bind(L_processPartitions); 9638 cmpl(in_out1, 3 * size); 9639 jcc(Assembler::less, L_exit); 9640 xorl(tmp1, tmp1); 9641 xorl(tmp2, tmp2); 9642 movq(tmp3, in_out2); 9643 addq(tmp3, size); 9644 9645 bind(L_processPartition); 9646 crc32(in_out3, Address(in_out2, 0), 8); 9647 crc32(tmp1, Address(in_out2, size), 8); 9648 crc32(tmp2, Address(in_out2, size * 2), 8); 9649 addq(in_out2, 8); 9650 cmpq(in_out2, tmp3); 9651 jcc(Assembler::less, L_processPartition); 9652 crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2, 9653 w_xtmp1, w_xtmp2, w_xtmp3, 9654 tmp4, tmp5, 9655 n_tmp6); 9656 addq(in_out2, 2 * size); 9657 subl(in_out1, 3 * size); 9658 jmp(L_processPartitions); 9659 9660 bind(L_exit); 9661 } 9662 #else 9663 void MacroAssembler::crc32c_ipl_alg4(Register in_out, uint32_t n, 9664 Register tmp1, Register tmp2, Register tmp3, 9665 XMMRegister xtmp1, XMMRegister xtmp2) { 9666 lea(tmp3, ExternalAddress(StubRoutines::crc32c_table_addr())); 9667 if (n > 0) { 9668 addl(tmp3, n * 256 * 8); 9669 } 9670 // Q1 = TABLEExt[n][B & 0xFF]; 9671 movl(tmp1, in_out); 9672 andl(tmp1, 0x000000FF); 9673 shll(tmp1, 3); 9674 addl(tmp1, tmp3); 9675 movq(xtmp1, Address(tmp1, 0)); 9676 9677 // Q2 = TABLEExt[n][B >> 8 & 0xFF]; 9678 movl(tmp2, in_out); 9679 shrl(tmp2, 8); 9680 andl(tmp2, 0x000000FF); 9681 shll(tmp2, 3); 9682 addl(tmp2, tmp3); 9683 movq(xtmp2, 
Address(tmp2, 0)); 9684 9685 psllq(xtmp2, 8); 9686 pxor(xtmp1, xtmp2); 9687 9688 // Q3 = TABLEExt[n][B >> 16 & 0xFF]; 9689 movl(tmp2, in_out); 9690 shrl(tmp2, 16); 9691 andl(tmp2, 0x000000FF); 9692 shll(tmp2, 3); 9693 addl(tmp2, tmp3); 9694 movq(xtmp2, Address(tmp2, 0)); 9695 9696 psllq(xtmp2, 16); 9697 pxor(xtmp1, xtmp2); 9698 9699 // Q4 = TABLEExt[n][B >> 24 & 0xFF]; 9700 shrl(in_out, 24); 9701 andl(in_out, 0x000000FF); 9702 shll(in_out, 3); 9703 addl(in_out, tmp3); 9704 movq(xtmp2, Address(in_out, 0)); 9705 9706 psllq(xtmp2, 24); 9707 pxor(xtmp1, xtmp2); // Result in CXMM 9708 // return Q1 ^ Q2 << 8 ^ Q3 << 16 ^ Q4 << 24; 9709 } 9710 9711 void MacroAssembler::crc32c_pclmulqdq(XMMRegister w_xtmp1, 9712 Register in_out, 9713 uint32_t const_or_pre_comp_const_index, bool is_pclmulqdq_supported, 9714 XMMRegister w_xtmp2, 9715 Register tmp1, 9716 Register n_tmp2, Register n_tmp3) { 9717 if (is_pclmulqdq_supported) { 9718 movdl(w_xtmp1, in_out); 9719 9720 movl(tmp1, const_or_pre_comp_const_index); 9721 movdl(w_xtmp2, tmp1); 9722 pclmulqdq(w_xtmp1, w_xtmp2, 0); 9723 // Keep result in XMM since GPR is 32 bit in length 9724 } else { 9725 crc32c_ipl_alg4(in_out, const_or_pre_comp_const_index, tmp1, n_tmp2, n_tmp3, w_xtmp1, w_xtmp2); 9726 } 9727 } 9728 9729 void MacroAssembler::crc32c_rec_alt2(uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, Register in_out, Register in1, Register in2, 9730 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 9731 Register tmp1, Register tmp2, 9732 Register n_tmp3) { 9733 crc32c_pclmulqdq(w_xtmp1, in_out, const_or_pre_comp_const_index_u1, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 9734 crc32c_pclmulqdq(w_xtmp2, in1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, w_xtmp3, tmp1, tmp2, n_tmp3); 9735 9736 psllq(w_xtmp1, 1); 9737 movdl(tmp1, w_xtmp1); 9738 psrlq(w_xtmp1, 32); 9739 movdl(in_out, w_xtmp1); 9740 9741 xorl(tmp2, tmp2); 9742 crc32(tmp2, tmp1, 4); 9743 xorl(in_out, tmp2); 9744 9745 psllq(w_xtmp2, 1); 9746 movdl(tmp1, w_xtmp2); 9747 psrlq(w_xtmp2, 32); 9748 movdl(in1, w_xtmp2); 9749 9750 xorl(tmp2, tmp2); 9751 crc32(tmp2, tmp1, 4); 9752 xorl(in1, tmp2); 9753 xorl(in_out, in1); 9754 xorl(in_out, in2); 9755 } 9756 9757 void MacroAssembler::crc32c_proc_chunk(uint32_t size, uint32_t const_or_pre_comp_const_index_u1, uint32_t const_or_pre_comp_const_index_u2, bool is_pclmulqdq_supported, 9758 Register in_out1, Register in_out2, Register in_out3, 9759 Register tmp1, Register tmp2, Register tmp3, 9760 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 9761 Register tmp4, Register tmp5, 9762 Register n_tmp6) { 9763 Label L_processPartitions; 9764 Label L_processPartition; 9765 Label L_exit; 9766 9767 bind(L_processPartitions); 9768 cmpl(in_out1, 3 * size); 9769 jcc(Assembler::less, L_exit); 9770 xorl(tmp1, tmp1); 9771 xorl(tmp2, tmp2); 9772 movl(tmp3, in_out2); 9773 addl(tmp3, size); 9774 9775 bind(L_processPartition); 9776 crc32(in_out3, Address(in_out2, 0), 4); 9777 crc32(tmp1, Address(in_out2, size), 4); 9778 crc32(tmp2, Address(in_out2, size*2), 4); 9779 crc32(in_out3, Address(in_out2, 0+4), 4); 9780 crc32(tmp1, Address(in_out2, size+4), 4); 9781 crc32(tmp2, Address(in_out2, size*2+4), 4); 9782 addl(in_out2, 8); 9783 cmpl(in_out2, tmp3); 9784 jcc(Assembler::less, L_processPartition); 9785 9786 push(tmp3); 9787 push(in_out1); 9788 push(in_out2); 9789 tmp4 = tmp3; 9790 tmp5 = in_out1; 9791 n_tmp6 = in_out2; 9792 9793 
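// On 32-bit there are not enough spare registers for the recombination step, so
// tmp3, in_out1 and in_out2 were pushed above and their registers are reused here
// as tmp4, tmp5 and n_tmp6; the original values are popped back right after the call.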
crc32c_rec_alt2(const_or_pre_comp_const_index_u1, const_or_pre_comp_const_index_u2, is_pclmulqdq_supported, in_out3, tmp1, tmp2, 9794 w_xtmp1, w_xtmp2, w_xtmp3, 9795 tmp4, tmp5, 9796 n_tmp6); 9797 9798 pop(in_out2); 9799 pop(in_out1); 9800 pop(tmp3); 9801 9802 addl(in_out2, 2 * size); 9803 subl(in_out1, 3 * size); 9804 jmp(L_processPartitions); 9805 9806 bind(L_exit); 9807 } 9808 #endif //LP64 9809 9810 #ifdef _LP64 9811 // Algorithm 2: Pipelined usage of the CRC32 instruction. 9812 // Input: A buffer I of L bytes. 9813 // Output: the CRC32C value of the buffer. 9814 // Notations: 9815 // Write L = 24N + r, with N = floor (L/24). 9816 // r = L mod 24 (0 <= r < 24). 9817 // Consider I as the concatenation of A|B|C|R, where A, B, C, each, 9818 // N quadwords, and R consists of r bytes. 9819 // A[j] = I [8j+7:8j], j= 0, 1, ..., N-1 9820 // B[j] = I [N + 8j+7:N + 8j], j= 0, 1, ..., N-1 9821 // C[j] = I [2N + 8j+7:2N + 8j], j= 0, 1, ..., N-1 9822 // if r > 0 R[j] = I [3N +j], j= 0, 1, ...,r-1 9823 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2, 9824 Register tmp1, Register tmp2, Register tmp3, 9825 Register tmp4, Register tmp5, Register tmp6, 9826 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 9827 bool is_pclmulqdq_supported) { 9828 uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS]; 9829 Label L_wordByWord; 9830 Label L_byteByByteProlog; 9831 Label L_byteByByte; 9832 Label L_exit; 9833 9834 if (is_pclmulqdq_supported ) { 9835 const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr; 9836 const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr+1); 9837 9838 const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2); 9839 const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3); 9840 9841 const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4); 9842 const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5); 9843 assert((CRC32C_NUM_PRECOMPUTED_CONSTANTS - 1 ) == 5, "Checking whether you declared all of the constants based on the number of \"chunks\""); 9844 } else { 9845 const_or_pre_comp_const_index[0] = 1; 9846 const_or_pre_comp_const_index[1] = 0; 9847 9848 const_or_pre_comp_const_index[2] = 3; 9849 const_or_pre_comp_const_index[3] = 2; 9850 9851 const_or_pre_comp_const_index[4] = 5; 9852 const_or_pre_comp_const_index[5] = 4; 9853 } 9854 crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported, 9855 in2, in1, in_out, 9856 tmp1, tmp2, tmp3, 9857 w_xtmp1, w_xtmp2, w_xtmp3, 9858 tmp4, tmp5, 9859 tmp6); 9860 crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported, 9861 in2, in1, in_out, 9862 tmp1, tmp2, tmp3, 9863 w_xtmp1, w_xtmp2, w_xtmp3, 9864 tmp4, tmp5, 9865 tmp6); 9866 crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported, 9867 in2, in1, in_out, 9868 tmp1, tmp2, tmp3, 9869 w_xtmp1, w_xtmp2, w_xtmp3, 9870 tmp4, tmp5, 9871 tmp6); 9872 movl(tmp1, in2); 9873 andl(tmp1, 0x00000007); 9874 negl(tmp1); 9875 addl(tmp1, in2); 9876 addq(tmp1, in1); 9877 9878 cmpq(in1, tmp1); 9879 jccb(Assembler::greaterEqual, L_byteByByteProlog); 9880 align(16); 9881 BIND(L_wordByWord); 9882 crc32(in_out, Address(in1, 0), 8); 9883 addq(in1, 8); 9884 cmpq(in1, tmp1); 9885 
jcc(Assembler::less, L_wordByWord); 9886 9887 BIND(L_byteByByteProlog); 9888 andl(in2, 0x00000007); 9889 movl(tmp2, 1); 9890 9891 cmpl(tmp2, in2); 9892 jccb(Assembler::greater, L_exit); 9893 BIND(L_byteByByte); 9894 crc32(in_out, Address(in1, 0), 1); 9895 incq(in1); 9896 incl(tmp2); 9897 cmpl(tmp2, in2); 9898 jcc(Assembler::lessEqual, L_byteByByte); 9899 9900 BIND(L_exit); 9901 } 9902 #else 9903 void MacroAssembler::crc32c_ipl_alg2_alt2(Register in_out, Register in1, Register in2, 9904 Register tmp1, Register tmp2, Register tmp3, 9905 Register tmp4, Register tmp5, Register tmp6, 9906 XMMRegister w_xtmp1, XMMRegister w_xtmp2, XMMRegister w_xtmp3, 9907 bool is_pclmulqdq_supported) { 9908 uint32_t const_or_pre_comp_const_index[CRC32C_NUM_PRECOMPUTED_CONSTANTS]; 9909 Label L_wordByWord; 9910 Label L_byteByByteProlog; 9911 Label L_byteByByte; 9912 Label L_exit; 9913 9914 if (is_pclmulqdq_supported) { 9915 const_or_pre_comp_const_index[1] = *(uint32_t *)StubRoutines::_crc32c_table_addr; 9916 const_or_pre_comp_const_index[0] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 1); 9917 9918 const_or_pre_comp_const_index[3] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 2); 9919 const_or_pre_comp_const_index[2] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 3); 9920 9921 const_or_pre_comp_const_index[5] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 4); 9922 const_or_pre_comp_const_index[4] = *((uint32_t *)StubRoutines::_crc32c_table_addr + 5); 9923 } else { 9924 const_or_pre_comp_const_index[0] = 1; 9925 const_or_pre_comp_const_index[1] = 0; 9926 9927 const_or_pre_comp_const_index[2] = 3; 9928 const_or_pre_comp_const_index[3] = 2; 9929 9930 const_or_pre_comp_const_index[4] = 5; 9931 const_or_pre_comp_const_index[5] = 4; 9932 } 9933 crc32c_proc_chunk(CRC32C_HIGH, const_or_pre_comp_const_index[0], const_or_pre_comp_const_index[1], is_pclmulqdq_supported, 9934 in2, in1, in_out, 9935 tmp1, tmp2, tmp3, 9936 w_xtmp1, w_xtmp2, w_xtmp3, 9937 tmp4, tmp5, 9938 tmp6); 9939 crc32c_proc_chunk(CRC32C_MIDDLE, const_or_pre_comp_const_index[2], const_or_pre_comp_const_index[3], is_pclmulqdq_supported, 9940 in2, in1, in_out, 9941 tmp1, tmp2, tmp3, 9942 w_xtmp1, w_xtmp2, w_xtmp3, 9943 tmp4, tmp5, 9944 tmp6); 9945 crc32c_proc_chunk(CRC32C_LOW, const_or_pre_comp_const_index[4], const_or_pre_comp_const_index[5], is_pclmulqdq_supported, 9946 in2, in1, in_out, 9947 tmp1, tmp2, tmp3, 9948 w_xtmp1, w_xtmp2, w_xtmp3, 9949 tmp4, tmp5, 9950 tmp6); 9951 movl(tmp1, in2); 9952 andl(tmp1, 0x00000007); 9953 negl(tmp1); 9954 addl(tmp1, in2); 9955 addl(tmp1, in1); 9956 9957 BIND(L_wordByWord); 9958 cmpl(in1, tmp1); 9959 jcc(Assembler::greaterEqual, L_byteByByteProlog); 9960 crc32(in_out, Address(in1,0), 4); 9961 addl(in1, 4); 9962 jmp(L_wordByWord); 9963 9964 BIND(L_byteByByteProlog); 9965 andl(in2, 0x00000007); 9966 movl(tmp2, 1); 9967 9968 BIND(L_byteByByte); 9969 cmpl(tmp2, in2); 9970 jccb(Assembler::greater, L_exit); 9971 movb(tmp1, Address(in1, 0)); 9972 crc32(in_out, tmp1, 1); 9973 incl(in1); 9974 incl(tmp2); 9975 jmp(L_byteByByte); 9976 9977 BIND(L_exit); 9978 } 9979 #endif // LP64 9980 #undef BIND 9981 #undef BLOCK_COMMENT 9982 9983 // Compress char[] array to byte[]. 9984 // Intrinsic for java.lang.StringUTF16.compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) 9985 // Return the array length if every element in array can be encoded, 9986 // otherwise, the index of first non-latin1 (> 0xff) character. 
9987 // @IntrinsicCandidate 9988 // public static int compress(char[] src, int srcOff, byte[] dst, int dstOff, int len) { 9989 // for (int i = 0; i < len; i++) { 9990 // char c = src[srcOff]; 9991 // if (c > 0xff) { 9992 // return i; // return index of non-latin1 char 9993 // } 9994 // dst[dstOff] = (byte)c; 9995 // srcOff++; 9996 // dstOff++; 9997 // } 9998 // return len; 9999 // } 10000 void MacroAssembler::char_array_compress(Register src, Register dst, Register len, 10001 XMMRegister tmp1Reg, XMMRegister tmp2Reg, 10002 XMMRegister tmp3Reg, XMMRegister tmp4Reg, 10003 Register tmp5, Register result, KRegister mask1, KRegister mask2) { 10004 Label copy_chars_loop, done, reset_sp, copy_tail; 10005 10006 // rsi: src 10007 // rdi: dst 10008 // rdx: len 10009 // rcx: tmp5 10010 // rax: result 10011 10012 // rsi holds start addr of source char[] to be compressed 10013 // rdi holds start addr of destination byte[] 10014 // rdx holds length 10015 10016 assert(len != result, ""); 10017 10018 // save length for return 10019 movl(result, len); 10020 10021 if ((AVX3Threshold == 0) && (UseAVX > 2) && // AVX512 10022 VM_Version::supports_avx512vlbw() && 10023 VM_Version::supports_bmi2()) { 10024 10025 Label copy_32_loop, copy_loop_tail, below_threshold, reset_for_copy_tail; 10026 10027 // alignment 10028 Label post_alignment; 10029 10030 // if length of the string is less than 32, handle it the old fashioned way 10031 testl(len, -32); 10032 jcc(Assembler::zero, below_threshold); 10033 10034 // First check whether a character is compressible ( <= 0xFF). 10035 // Create mask to test for Unicode chars inside zmm vector 10036 movl(tmp5, 0x00FF); 10037 evpbroadcastw(tmp2Reg, tmp5, Assembler::AVX_512bit); 10038 10039 testl(len, -64); 10040 jccb(Assembler::zero, post_alignment); 10041 10042 movl(tmp5, dst); 10043 andl(tmp5, (32 - 1)); 10044 negl(tmp5); 10045 andl(tmp5, (32 - 1)); 10046 10047 // bail out when there is nothing to be done 10048 testl(tmp5, 0xFFFFFFFF); 10049 jccb(Assembler::zero, post_alignment); 10050 10051 // ~(~0 << len), where len is the # of remaining elements to process 10052 movl(len, 0xFFFFFFFF); 10053 shlxl(len, len, tmp5); 10054 notl(len); 10055 kmovdl(mask2, len); 10056 movl(len, result); 10057 10058 evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit); 10059 evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit); 10060 ktestd(mask1, mask2); 10061 jcc(Assembler::carryClear, copy_tail); 10062 10063 evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit); 10064 10065 addptr(src, tmp5); 10066 addptr(src, tmp5); 10067 addptr(dst, tmp5); 10068 subl(len, tmp5); 10069 10070 bind(post_alignment); 10071 // end of alignment 10072 10073 movl(tmp5, len); 10074 andl(tmp5, (32 - 1)); // tail count (in chars) 10075 andl(len, ~(32 - 1)); // vector count (in chars) 10076 jccb(Assembler::zero, copy_loop_tail); 10077 10078 lea(src, Address(src, len, Address::times_2)); 10079 lea(dst, Address(dst, len, Address::times_1)); 10080 negptr(len); 10081 10082 bind(copy_32_loop); 10083 evmovdquw(tmp1Reg, Address(src, len, Address::times_2), Assembler::AVX_512bit); 10084 evpcmpuw(mask1, tmp1Reg, tmp2Reg, Assembler::le, Assembler::AVX_512bit); 10085 kortestdl(mask1, mask1); 10086 jccb(Assembler::carryClear, reset_for_copy_tail); 10087 10088 // All elements in current processed chunk are valid candidates for 10089 // compression. Write a truncated byte elements to the memory. 
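// evpmovwb stores only the low byte of each 16-bit char; this is lossless here
// because the evpcmpuw/kortestdl check above bailed out to reset_for_copy_tail
// as soon as any char in the chunk was above 0xFF.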
10090 evpmovwb(Address(dst, len, Address::times_1), tmp1Reg, Assembler::AVX_512bit); 10091 addptr(len, 32); 10092 jccb(Assembler::notZero, copy_32_loop); 10093 10094 bind(copy_loop_tail); 10095 // bail out when there is nothing to be done 10096 testl(tmp5, 0xFFFFFFFF); 10097 jcc(Assembler::zero, done); 10098 10099 movl(len, tmp5); 10100 10101 // ~(~0 << len), where len is the # of remaining elements to process 10102 movl(tmp5, 0xFFFFFFFF); 10103 shlxl(tmp5, tmp5, len); 10104 notl(tmp5); 10105 10106 kmovdl(mask2, tmp5); 10107 10108 evmovdquw(tmp1Reg, mask2, Address(src, 0), /*merge*/ false, Assembler::AVX_512bit); 10109 evpcmpw(mask1, mask2, tmp1Reg, tmp2Reg, Assembler::le, /*signed*/ false, Assembler::AVX_512bit); 10110 ktestd(mask1, mask2); 10111 jcc(Assembler::carryClear, copy_tail); 10112 10113 evpmovwb(Address(dst, 0), mask2, tmp1Reg, Assembler::AVX_512bit); 10114 jmp(done); 10115 10116 bind(reset_for_copy_tail); 10117 lea(src, Address(src, tmp5, Address::times_2)); 10118 lea(dst, Address(dst, tmp5, Address::times_1)); 10119 subptr(len, tmp5); 10120 jmp(copy_chars_loop); 10121 10122 bind(below_threshold); 10123 } 10124 10125 if (UseSSE42Intrinsics) { 10126 Label copy_32_loop, copy_16, copy_tail_sse, reset_for_copy_tail; 10127 10128 // vectored compression 10129 testl(len, 0xfffffff8); 10130 jcc(Assembler::zero, copy_tail); 10131 10132 movl(tmp5, 0xff00ff00); // create mask to test for Unicode chars in vectors 10133 movdl(tmp1Reg, tmp5); 10134 pshufd(tmp1Reg, tmp1Reg, 0); // store Unicode mask in tmp1Reg 10135 10136 andl(len, 0xfffffff0); 10137 jccb(Assembler::zero, copy_16); 10138 10139 // compress 16 chars per iter 10140 pxor(tmp4Reg, tmp4Reg); 10141 10142 lea(src, Address(src, len, Address::times_2)); 10143 lea(dst, Address(dst, len, Address::times_1)); 10144 negptr(len); 10145 10146 bind(copy_32_loop); 10147 movdqu(tmp2Reg, Address(src, len, Address::times_2)); // load 1st 8 characters 10148 por(tmp4Reg, tmp2Reg); 10149 movdqu(tmp3Reg, Address(src, len, Address::times_2, 16)); // load next 8 characters 10150 por(tmp4Reg, tmp3Reg); 10151 ptest(tmp4Reg, tmp1Reg); // check for Unicode chars in next vector 10152 jccb(Assembler::notZero, reset_for_copy_tail); 10153 packuswb(tmp2Reg, tmp3Reg); // only ASCII chars; compress each to 1 byte 10154 movdqu(Address(dst, len, Address::times_1), tmp2Reg); 10155 addptr(len, 16); 10156 jccb(Assembler::notZero, copy_32_loop); 10157 10158 // compress next vector of 8 chars (if any) 10159 bind(copy_16); 10160 // len = 0 10161 testl(result, 0x00000008); // check if there's a block of 8 chars to compress 10162 jccb(Assembler::zero, copy_tail_sse); 10163 10164 pxor(tmp3Reg, tmp3Reg); 10165 10166 movdqu(tmp2Reg, Address(src, 0)); 10167 ptest(tmp2Reg, tmp1Reg); // check for Unicode chars in vector 10168 jccb(Assembler::notZero, reset_for_copy_tail); 10169 packuswb(tmp2Reg, tmp3Reg); // only LATIN1 chars; compress each to 1 byte 10170 movq(Address(dst, 0), tmp2Reg); 10171 addptr(src, 16); 10172 addptr(dst, 8); 10173 jmpb(copy_tail_sse); 10174 10175 bind(reset_for_copy_tail); 10176 movl(tmp5, result); 10177 andl(tmp5, 0x0000000f); 10178 lea(src, Address(src, tmp5, Address::times_2)); 10179 lea(dst, Address(dst, tmp5, Address::times_1)); 10180 subptr(len, tmp5); 10181 jmpb(copy_chars_loop); 10182 10183 bind(copy_tail_sse); 10184 movl(len, result); 10185 andl(len, 0x00000007); // tail count (in chars) 10186 } 10187 // compress 1 char per iter 10188 bind(copy_tail); 10189 testl(len, len); 10190 jccb(Assembler::zero, done); 10191 lea(src, Address(src, len, 
Address::times_2)); 10192 lea(dst, Address(dst, len, Address::times_1)); 10193 negptr(len); 10194 10195 bind(copy_chars_loop); 10196 load_unsigned_short(tmp5, Address(src, len, Address::times_2)); 10197 testl(tmp5, 0xff00); // check if Unicode char 10198 jccb(Assembler::notZero, reset_sp); 10199 movb(Address(dst, len, Address::times_1), tmp5); // ASCII char; compress to 1 byte 10200 increment(len); 10201 jccb(Assembler::notZero, copy_chars_loop); 10202 10203 // add len then return (len will be zero if compress succeeded, otherwise negative) 10204 bind(reset_sp); 10205 addl(result, len); 10206 10207 bind(done); 10208 } 10209 10210 // Inflate byte[] array to char[]. 10211 // ..\jdk\src\java.base\share\classes\java\lang\StringLatin1.java 10212 // @IntrinsicCandidate 10213 // private static void inflate(byte[] src, int srcOff, char[] dst, int dstOff, int len) { 10214 // for (int i = 0; i < len; i++) { 10215 // dst[dstOff++] = (char)(src[srcOff++] & 0xff); 10216 // } 10217 // } 10218 void MacroAssembler::byte_array_inflate(Register src, Register dst, Register len, 10219 XMMRegister tmp1, Register tmp2, KRegister mask) { 10220 Label copy_chars_loop, done, below_threshold, avx3_threshold; 10221 // rsi: src 10222 // rdi: dst 10223 // rdx: len 10224 // rcx: tmp2 10225 10226 // rsi holds start addr of source byte[] to be inflated 10227 // rdi holds start addr of destination char[] 10228 // rdx holds length 10229 assert_different_registers(src, dst, len, tmp2); 10230 movl(tmp2, len); 10231 if ((UseAVX > 2) && // AVX512 10232 VM_Version::supports_avx512vlbw() && 10233 VM_Version::supports_bmi2()) { 10234 10235 Label copy_32_loop, copy_tail; 10236 Register tmp3_aliased = len; 10237 10238 // if length of the string is less than 16, handle it in an old fashioned way 10239 testl(len, -16); 10240 jcc(Assembler::zero, below_threshold); 10241 10242 testl(len, -1 * AVX3Threshold); 10243 jcc(Assembler::zero, avx3_threshold); 10244 10245 // In order to use only one arithmetic operation for the main loop we use 10246 // this pre-calculation 10247 andl(tmp2, (32 - 1)); // tail count (in chars), 32 element wide loop 10248 andl(len, -32); // vector count 10249 jccb(Assembler::zero, copy_tail); 10250 10251 lea(src, Address(src, len, Address::times_1)); 10252 lea(dst, Address(dst, len, Address::times_2)); 10253 negptr(len); 10254 10255 10256 // inflate 32 chars per iter 10257 bind(copy_32_loop); 10258 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_512bit); 10259 evmovdquw(Address(dst, len, Address::times_2), tmp1, Assembler::AVX_512bit); 10260 addptr(len, 32); 10261 jcc(Assembler::notZero, copy_32_loop); 10262 10263 bind(copy_tail); 10264 // bail out when there is nothing to be done 10265 testl(tmp2, -1); // we don't destroy the contents of tmp2 here 10266 jcc(Assembler::zero, done); 10267 10268 // ~(~0 << length), where length is the # of remaining elements to process 10269 movl(tmp3_aliased, -1); 10270 shlxl(tmp3_aliased, tmp3_aliased, tmp2); 10271 notl(tmp3_aliased); 10272 kmovdl(mask, tmp3_aliased); 10273 evpmovzxbw(tmp1, mask, Address(src, 0), Assembler::AVX_512bit); 10274 evmovdquw(Address(dst, 0), mask, tmp1, /*merge*/ true, Assembler::AVX_512bit); 10275 10276 jmp(done); 10277 bind(avx3_threshold); 10278 } 10279 if (UseSSE42Intrinsics) { 10280 Label copy_16_loop, copy_8_loop, copy_bytes, copy_new_tail, copy_tail; 10281 10282 if (UseAVX > 1) { 10283 andl(tmp2, (16 - 1)); 10284 andl(len, -16); 10285 jccb(Assembler::zero, copy_new_tail); 10286 } else { 10287 andl(tmp2, 0x00000007); // tail 
count (in chars) 10288 andl(len, 0xfffffff8); // vector count (in chars) 10289 jccb(Assembler::zero, copy_tail); 10290 } 10291 10292 // vectored inflation 10293 lea(src, Address(src, len, Address::times_1)); 10294 lea(dst, Address(dst, len, Address::times_2)); 10295 negptr(len); 10296 10297 if (UseAVX > 1) { 10298 bind(copy_16_loop); 10299 vpmovzxbw(tmp1, Address(src, len, Address::times_1), Assembler::AVX_256bit); 10300 vmovdqu(Address(dst, len, Address::times_2), tmp1); 10301 addptr(len, 16); 10302 jcc(Assembler::notZero, copy_16_loop); 10303 10304 bind(below_threshold); 10305 bind(copy_new_tail); 10306 movl(len, tmp2); 10307 andl(tmp2, 0x00000007); 10308 andl(len, 0xFFFFFFF8); 10309 jccb(Assembler::zero, copy_tail); 10310 10311 pmovzxbw(tmp1, Address(src, 0)); 10312 movdqu(Address(dst, 0), tmp1); 10313 addptr(src, 8); 10314 addptr(dst, 2 * 8); 10315 10316 jmp(copy_tail, true); 10317 } 10318 10319 // inflate 8 chars per iter 10320 bind(copy_8_loop); 10321 pmovzxbw(tmp1, Address(src, len, Address::times_1)); // unpack to 8 words 10322 movdqu(Address(dst, len, Address::times_2), tmp1); 10323 addptr(len, 8); 10324 jcc(Assembler::notZero, copy_8_loop); 10325 10326 bind(copy_tail); 10327 movl(len, tmp2); 10328 10329 cmpl(len, 4); 10330 jccb(Assembler::less, copy_bytes); 10331 10332 movdl(tmp1, Address(src, 0)); // load 4 byte chars 10333 pmovzxbw(tmp1, tmp1); 10334 movq(Address(dst, 0), tmp1); 10335 subptr(len, 4); 10336 addptr(src, 4); 10337 addptr(dst, 8); 10338 10339 bind(copy_bytes); 10340 } else { 10341 bind(below_threshold); 10342 } 10343 10344 testl(len, len); 10345 jccb(Assembler::zero, done); 10346 lea(src, Address(src, len, Address::times_1)); 10347 lea(dst, Address(dst, len, Address::times_2)); 10348 negptr(len); 10349 10350 // inflate 1 char per iter 10351 bind(copy_chars_loop); 10352 load_unsigned_byte(tmp2, Address(src, len, Address::times_1)); // load byte char 10353 movw(Address(dst, len, Address::times_2), tmp2); // inflate byte char to word 10354 increment(len); 10355 jcc(Assembler::notZero, copy_chars_loop); 10356 10357 bind(done); 10358 } 10359 10360 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, XMMRegister src, bool merge, int vector_len) { 10361 switch(type) { 10362 case T_BYTE: 10363 case T_BOOLEAN: 10364 evmovdqub(dst, kmask, src, merge, vector_len); 10365 break; 10366 case T_CHAR: 10367 case T_SHORT: 10368 evmovdquw(dst, kmask, src, merge, vector_len); 10369 break; 10370 case T_INT: 10371 case T_FLOAT: 10372 evmovdqul(dst, kmask, src, merge, vector_len); 10373 break; 10374 case T_LONG: 10375 case T_DOUBLE: 10376 evmovdquq(dst, kmask, src, merge, vector_len); 10377 break; 10378 default: 10379 fatal("Unexpected type argument %s", type2name(type)); 10380 break; 10381 } 10382 } 10383 10384 10385 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, XMMRegister dst, Address src, bool merge, int vector_len) { 10386 switch(type) { 10387 case T_BYTE: 10388 case T_BOOLEAN: 10389 evmovdqub(dst, kmask, src, merge, vector_len); 10390 break; 10391 case T_CHAR: 10392 case T_SHORT: 10393 evmovdquw(dst, kmask, src, merge, vector_len); 10394 break; 10395 case T_INT: 10396 case T_FLOAT: 10397 evmovdqul(dst, kmask, src, merge, vector_len); 10398 break; 10399 case T_LONG: 10400 case T_DOUBLE: 10401 evmovdquq(dst, kmask, src, merge, vector_len); 10402 break; 10403 default: 10404 fatal("Unexpected type argument %s", type2name(type)); 10405 break; 10406 } 10407 } 10408 10409 void MacroAssembler::evmovdqu(BasicType type, KRegister kmask, Address 
dst, XMMRegister src, bool merge, int vector_len) { 10410 switch(type) { 10411 case T_BYTE: 10412 case T_BOOLEAN: 10413 evmovdqub(dst, kmask, src, merge, vector_len); 10414 break; 10415 case T_CHAR: 10416 case T_SHORT: 10417 evmovdquw(dst, kmask, src, merge, vector_len); 10418 break; 10419 case T_INT: 10420 case T_FLOAT: 10421 evmovdqul(dst, kmask, src, merge, vector_len); 10422 break; 10423 case T_LONG: 10424 case T_DOUBLE: 10425 evmovdquq(dst, kmask, src, merge, vector_len); 10426 break; 10427 default: 10428 fatal("Unexpected type argument %s", type2name(type)); 10429 break; 10430 } 10431 } 10432 10433 void MacroAssembler::knot(uint masklen, KRegister dst, KRegister src, KRegister ktmp, Register rtmp) { 10434 switch(masklen) { 10435 case 2: 10436 knotbl(dst, src); 10437 movl(rtmp, 3); 10438 kmovbl(ktmp, rtmp); 10439 kandbl(dst, ktmp, dst); 10440 break; 10441 case 4: 10442 knotbl(dst, src); 10443 movl(rtmp, 15); 10444 kmovbl(ktmp, rtmp); 10445 kandbl(dst, ktmp, dst); 10446 break; 10447 case 8: 10448 knotbl(dst, src); 10449 break; 10450 case 16: 10451 knotwl(dst, src); 10452 break; 10453 case 32: 10454 knotdl(dst, src); 10455 break; 10456 case 64: 10457 knotql(dst, src); 10458 break; 10459 default: 10460 fatal("Unexpected vector length %d", masklen); 10461 break; 10462 } 10463 } 10464 10465 void MacroAssembler::kand(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 10466 switch(type) { 10467 case T_BOOLEAN: 10468 case T_BYTE: 10469 kandbl(dst, src1, src2); 10470 break; 10471 case T_CHAR: 10472 case T_SHORT: 10473 kandwl(dst, src1, src2); 10474 break; 10475 case T_INT: 10476 case T_FLOAT: 10477 kanddl(dst, src1, src2); 10478 break; 10479 case T_LONG: 10480 case T_DOUBLE: 10481 kandql(dst, src1, src2); 10482 break; 10483 default: 10484 fatal("Unexpected type argument %s", type2name(type)); 10485 break; 10486 } 10487 } 10488 10489 void MacroAssembler::kor(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 10490 switch(type) { 10491 case T_BOOLEAN: 10492 case T_BYTE: 10493 korbl(dst, src1, src2); 10494 break; 10495 case T_CHAR: 10496 case T_SHORT: 10497 korwl(dst, src1, src2); 10498 break; 10499 case T_INT: 10500 case T_FLOAT: 10501 kordl(dst, src1, src2); 10502 break; 10503 case T_LONG: 10504 case T_DOUBLE: 10505 korql(dst, src1, src2); 10506 break; 10507 default: 10508 fatal("Unexpected type argument %s", type2name(type)); 10509 break; 10510 } 10511 } 10512 10513 void MacroAssembler::kxor(BasicType type, KRegister dst, KRegister src1, KRegister src2) { 10514 switch(type) { 10515 case T_BOOLEAN: 10516 case T_BYTE: 10517 kxorbl(dst, src1, src2); 10518 break; 10519 case T_CHAR: 10520 case T_SHORT: 10521 kxorwl(dst, src1, src2); 10522 break; 10523 case T_INT: 10524 case T_FLOAT: 10525 kxordl(dst, src1, src2); 10526 break; 10527 case T_LONG: 10528 case T_DOUBLE: 10529 kxorql(dst, src1, src2); 10530 break; 10531 default: 10532 fatal("Unexpected type argument %s", type2name(type)); 10533 break; 10534 } 10535 } 10536 10537 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 10538 switch(type) { 10539 case T_BOOLEAN: 10540 case T_BYTE: 10541 evpermb(dst, mask, nds, src, merge, vector_len); break; 10542 case T_CHAR: 10543 case T_SHORT: 10544 evpermw(dst, mask, nds, src, merge, vector_len); break; 10545 case T_INT: 10546 case T_FLOAT: 10547 evpermd(dst, mask, nds, src, merge, vector_len); break; 10548 case T_LONG: 10549 case T_DOUBLE: 10550 evpermq(dst, mask, nds, src, merge, 
vector_len); break; 10551 default: 10552 fatal("Unexpected type argument %s", type2name(type)); break; 10553 } 10554 } 10555 10556 void MacroAssembler::evperm(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 10557 switch(type) { 10558 case T_BOOLEAN: 10559 case T_BYTE: 10560 evpermb(dst, mask, nds, src, merge, vector_len); break; 10561 case T_CHAR: 10562 case T_SHORT: 10563 evpermw(dst, mask, nds, src, merge, vector_len); break; 10564 case T_INT: 10565 case T_FLOAT: 10566 evpermd(dst, mask, nds, src, merge, vector_len); break; 10567 case T_LONG: 10568 case T_DOUBLE: 10569 evpermq(dst, mask, nds, src, merge, vector_len); break; 10570 default: 10571 fatal("Unexpected type argument %s", type2name(type)); break; 10572 } 10573 } 10574 10575 void MacroAssembler::evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 10576 switch(type) { 10577 case T_BYTE: 10578 evpminub(dst, mask, nds, src, merge, vector_len); break; 10579 case T_SHORT: 10580 evpminuw(dst, mask, nds, src, merge, vector_len); break; 10581 case T_INT: 10582 evpminud(dst, mask, nds, src, merge, vector_len); break; 10583 case T_LONG: 10584 evpminuq(dst, mask, nds, src, merge, vector_len); break; 10585 default: 10586 fatal("Unexpected type argument %s", type2name(type)); break; 10587 } 10588 } 10589 10590 void MacroAssembler::evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 10591 switch(type) { 10592 case T_BYTE: 10593 evpmaxub(dst, mask, nds, src, merge, vector_len); break; 10594 case T_SHORT: 10595 evpmaxuw(dst, mask, nds, src, merge, vector_len); break; 10596 case T_INT: 10597 evpmaxud(dst, mask, nds, src, merge, vector_len); break; 10598 case T_LONG: 10599 evpmaxuq(dst, mask, nds, src, merge, vector_len); break; 10600 default: 10601 fatal("Unexpected type argument %s", type2name(type)); break; 10602 } 10603 } 10604 10605 void MacroAssembler::evpminu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 10606 switch(type) { 10607 case T_BYTE: 10608 evpminub(dst, mask, nds, src, merge, vector_len); break; 10609 case T_SHORT: 10610 evpminuw(dst, mask, nds, src, merge, vector_len); break; 10611 case T_INT: 10612 evpminud(dst, mask, nds, src, merge, vector_len); break; 10613 case T_LONG: 10614 evpminuq(dst, mask, nds, src, merge, vector_len); break; 10615 default: 10616 fatal("Unexpected type argument %s", type2name(type)); break; 10617 } 10618 } 10619 10620 void MacroAssembler::evpmaxu(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 10621 switch(type) { 10622 case T_BYTE: 10623 evpmaxub(dst, mask, nds, src, merge, vector_len); break; 10624 case T_SHORT: 10625 evpmaxuw(dst, mask, nds, src, merge, vector_len); break; 10626 case T_INT: 10627 evpmaxud(dst, mask, nds, src, merge, vector_len); break; 10628 case T_LONG: 10629 evpmaxuq(dst, mask, nds, src, merge, vector_len); break; 10630 default: 10631 fatal("Unexpected type argument %s", type2name(type)); break; 10632 } 10633 } 10634 10635 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 10636 switch(type) { 10637 case T_BYTE: 10638 evpminsb(dst, mask, nds, src, merge, vector_len); break; 10639 case T_SHORT: 10640 evpminsw(dst, mask, nds, src, merge, vector_len); break; 10641 case T_INT: 10642 
evpminsd(dst, mask, nds, src, merge, vector_len); break; 10643 case T_LONG: 10644 evpminsq(dst, mask, nds, src, merge, vector_len); break; 10645 default: 10646 fatal("Unexpected type argument %s", type2name(type)); break; 10647 } 10648 } 10649 10650 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 10651 switch(type) { 10652 case T_BYTE: 10653 evpmaxsb(dst, mask, nds, src, merge, vector_len); break; 10654 case T_SHORT: 10655 evpmaxsw(dst, mask, nds, src, merge, vector_len); break; 10656 case T_INT: 10657 evpmaxsd(dst, mask, nds, src, merge, vector_len); break; 10658 case T_LONG: 10659 evpmaxsq(dst, mask, nds, src, merge, vector_len); break; 10660 default: 10661 fatal("Unexpected type argument %s", type2name(type)); break; 10662 } 10663 } 10664 10665 void MacroAssembler::evpmins(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 10666 switch(type) { 10667 case T_BYTE: 10668 evpminsb(dst, mask, nds, src, merge, vector_len); break; 10669 case T_SHORT: 10670 evpminsw(dst, mask, nds, src, merge, vector_len); break; 10671 case T_INT: 10672 evpminsd(dst, mask, nds, src, merge, vector_len); break; 10673 case T_LONG: 10674 evpminsq(dst, mask, nds, src, merge, vector_len); break; 10675 default: 10676 fatal("Unexpected type argument %s", type2name(type)); break; 10677 } 10678 } 10679 10680 void MacroAssembler::evpmaxs(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 10681 switch(type) { 10682 case T_BYTE: 10683 evpmaxsb(dst, mask, nds, src, merge, vector_len); break; 10684 case T_SHORT: 10685 evpmaxsw(dst, mask, nds, src, merge, vector_len); break; 10686 case T_INT: 10687 evpmaxsd(dst, mask, nds, src, merge, vector_len); break; 10688 case T_LONG: 10689 evpmaxsq(dst, mask, nds, src, merge, vector_len); break; 10690 default: 10691 fatal("Unexpected type argument %s", type2name(type)); break; 10692 } 10693 } 10694 10695 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 10696 switch(type) { 10697 case T_INT: 10698 evpxord(dst, mask, nds, src, merge, vector_len); break; 10699 case T_LONG: 10700 evpxorq(dst, mask, nds, src, merge, vector_len); break; 10701 default: 10702 fatal("Unexpected type argument %s", type2name(type)); break; 10703 } 10704 } 10705 10706 void MacroAssembler::evxor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 10707 switch(type) { 10708 case T_INT: 10709 evpxord(dst, mask, nds, src, merge, vector_len); break; 10710 case T_LONG: 10711 evpxorq(dst, mask, nds, src, merge, vector_len); break; 10712 default: 10713 fatal("Unexpected type argument %s", type2name(type)); break; 10714 } 10715 } 10716 10717 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 10718 switch(type) { 10719 case T_INT: 10720 Assembler::evpord(dst, mask, nds, src, merge, vector_len); break; 10721 case T_LONG: 10722 evporq(dst, mask, nds, src, merge, vector_len); break; 10723 default: 10724 fatal("Unexpected type argument %s", type2name(type)); break; 10725 } 10726 } 10727 10728 void MacroAssembler::evor(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 10729 switch(type) { 10730 case T_INT: 10731 
Assembler::evpord(dst, mask, nds, src, merge, vector_len); break; 10732 case T_LONG: 10733 evporq(dst, mask, nds, src, merge, vector_len); break; 10734 default: 10735 fatal("Unexpected type argument %s", type2name(type)); break; 10736 } 10737 } 10738 10739 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, XMMRegister src, bool merge, int vector_len) { 10740 switch(type) { 10741 case T_INT: 10742 evpandd(dst, mask, nds, src, merge, vector_len); break; 10743 case T_LONG: 10744 evpandq(dst, mask, nds, src, merge, vector_len); break; 10745 default: 10746 fatal("Unexpected type argument %s", type2name(type)); break; 10747 } 10748 } 10749 10750 void MacroAssembler::evand(BasicType type, XMMRegister dst, KRegister mask, XMMRegister nds, Address src, bool merge, int vector_len) { 10751 switch(type) { 10752 case T_INT: 10753 evpandd(dst, mask, nds, src, merge, vector_len); break; 10754 case T_LONG: 10755 evpandq(dst, mask, nds, src, merge, vector_len); break; 10756 default: 10757 fatal("Unexpected type argument %s", type2name(type)); break; 10758 } 10759 } 10760 10761 void MacroAssembler::kortest(uint masklen, KRegister src1, KRegister src2) { 10762 switch(masklen) { 10763 case 8: 10764 kortestbl(src1, src2); 10765 break; 10766 case 16: 10767 kortestwl(src1, src2); 10768 break; 10769 case 32: 10770 kortestdl(src1, src2); 10771 break; 10772 case 64: 10773 kortestql(src1, src2); 10774 break; 10775 default: 10776 fatal("Unexpected mask length %d", masklen); 10777 break; 10778 } 10779 } 10780 10781 10782 void MacroAssembler::ktest(uint masklen, KRegister src1, KRegister src2) { 10783 switch(masklen) { 10784 case 8: 10785 ktestbl(src1, src2); 10786 break; 10787 case 16: 10788 ktestwl(src1, src2); 10789 break; 10790 case 32: 10791 ktestdl(src1, src2); 10792 break; 10793 case 64: 10794 ktestql(src1, src2); 10795 break; 10796 default: 10797 fatal("Unexpected mask length %d", masklen); 10798 break; 10799 } 10800 } 10801 10802 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) { 10803 switch(type) { 10804 case T_INT: 10805 evprold(dst, mask, src, shift, merge, vlen_enc); break; 10806 case T_LONG: 10807 evprolq(dst, mask, src, shift, merge, vlen_enc); break; 10808 default: 10809 fatal("Unexpected type argument %s", type2name(type)); break; 10810 break; 10811 } 10812 } 10813 10814 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src, int shift, bool merge, int vlen_enc) { 10815 switch(type) { 10816 case T_INT: 10817 evprord(dst, mask, src, shift, merge, vlen_enc); break; 10818 case T_LONG: 10819 evprorq(dst, mask, src, shift, merge, vlen_enc); break; 10820 default: 10821 fatal("Unexpected type argument %s", type2name(type)); break; 10822 } 10823 } 10824 10825 void MacroAssembler::evrold(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) { 10826 switch(type) { 10827 case T_INT: 10828 evprolvd(dst, mask, src1, src2, merge, vlen_enc); break; 10829 case T_LONG: 10830 evprolvq(dst, mask, src1, src2, merge, vlen_enc); break; 10831 default: 10832 fatal("Unexpected type argument %s", type2name(type)); break; 10833 } 10834 } 10835 10836 void MacroAssembler::evrord(BasicType type, XMMRegister dst, KRegister mask, XMMRegister src1, XMMRegister src2, bool merge, int vlen_enc) { 10837 switch(type) { 10838 case T_INT: 10839 evprorvd(dst, mask, src1, src2, merge, vlen_enc); break; 10840 case T_LONG: 10841 
evprorvq(dst, mask, src1, src2, merge, vlen_enc); break; 10842 default: 10843 fatal("Unexpected type argument %s", type2name(type)); break; 10844 } 10845 } 10846 10847 void MacroAssembler::evpandq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 10848 assert(rscratch != noreg || always_reachable(src), "missing"); 10849 10850 if (reachable(src)) { 10851 evpandq(dst, nds, as_Address(src), vector_len); 10852 } else { 10853 lea(rscratch, src); 10854 evpandq(dst, nds, Address(rscratch, 0), vector_len); 10855 } 10856 } 10857 10858 void MacroAssembler::evpaddq(XMMRegister dst, KRegister mask, XMMRegister nds, AddressLiteral src, bool merge, int vector_len, Register rscratch) { 10859 assert(rscratch != noreg || always_reachable(src), "missing"); 10860 10861 if (reachable(src)) { 10862 Assembler::evpaddq(dst, mask, nds, as_Address(src), merge, vector_len); 10863 } else { 10864 lea(rscratch, src); 10865 Assembler::evpaddq(dst, mask, nds, Address(rscratch, 0), merge, vector_len); 10866 } 10867 } 10868 10869 void MacroAssembler::evporq(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 10870 assert(rscratch != noreg || always_reachable(src), "missing"); 10871 10872 if (reachable(src)) { 10873 evporq(dst, nds, as_Address(src), vector_len); 10874 } else { 10875 lea(rscratch, src); 10876 evporq(dst, nds, Address(rscratch, 0), vector_len); 10877 } 10878 } 10879 10880 void MacroAssembler::vpshufb(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 10881 assert(rscratch != noreg || always_reachable(src), "missing"); 10882 10883 if (reachable(src)) { 10884 vpshufb(dst, nds, as_Address(src), vector_len); 10885 } else { 10886 lea(rscratch, src); 10887 vpshufb(dst, nds, Address(rscratch, 0), vector_len); 10888 } 10889 } 10890 10891 void MacroAssembler::vpor(XMMRegister dst, XMMRegister nds, AddressLiteral src, int vector_len, Register rscratch) { 10892 assert(rscratch != noreg || always_reachable(src), "missing"); 10893 10894 if (reachable(src)) { 10895 Assembler::vpor(dst, nds, as_Address(src), vector_len); 10896 } else { 10897 lea(rscratch, src); 10898 Assembler::vpor(dst, nds, Address(rscratch, 0), vector_len); 10899 } 10900 } 10901 10902 void MacroAssembler::vpternlogq(XMMRegister dst, int imm8, XMMRegister src2, AddressLiteral src3, int vector_len, Register rscratch) { 10903 assert(rscratch != noreg || always_reachable(src3), "missing"); 10904 10905 if (reachable(src3)) { 10906 vpternlogq(dst, imm8, src2, as_Address(src3), vector_len); 10907 } else { 10908 lea(rscratch, src3); 10909 vpternlogq(dst, imm8, src2, Address(rscratch, 0), vector_len); 10910 } 10911 } 10912 10913 #if COMPILER2_OR_JVMCI 10914 10915 void MacroAssembler::fill_masked(BasicType bt, Address dst, XMMRegister xmm, KRegister mask, 10916 Register length, Register temp, int vec_enc) { 10917 // Computing mask for predicated vector store. 10918 movptr(temp, -1); 10919 bzhiq(temp, temp, length); 10920 kmov(mask, temp); 10921 evmovdqu(bt, mask, dst, xmm, true, vec_enc); 10922 } 10923 10924 // Set memory operation for length "less than" 64 bytes. 
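// In the 32-byte-vector case below, the tail is written as one unmasked 32-byte store
// followed by a masked store for whatever length remains; in the 64-byte-vector case a
// single masked 64-byte store covers the entire (< 64 byte) tail.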
10925 void MacroAssembler::fill64_masked(uint shift, Register dst, int disp, 10926 XMMRegister xmm, KRegister mask, Register length, 10927 Register temp, bool use64byteVector) { 10928 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 10929 const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG}; 10930 if (!use64byteVector) { 10931 fill32(dst, disp, xmm); 10932 subptr(length, 32 >> shift); 10933 fill32_masked(shift, dst, disp + 32, xmm, mask, length, temp); 10934 } else { 10935 assert(MaxVectorSize == 64, "vector length != 64"); 10936 fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_512bit); 10937 } 10938 } 10939 10940 10941 void MacroAssembler::fill32_masked(uint shift, Register dst, int disp, 10942 XMMRegister xmm, KRegister mask, Register length, 10943 Register temp) { 10944 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 10945 const BasicType type[] = { T_BYTE, T_SHORT, T_INT, T_LONG}; 10946 fill_masked(type[shift], Address(dst, disp), xmm, mask, length, temp, Assembler::AVX_256bit); 10947 } 10948 10949 10950 void MacroAssembler::fill32(Address dst, XMMRegister xmm) { 10951 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 10952 vmovdqu(dst, xmm); 10953 } 10954 10955 void MacroAssembler::fill32(Register dst, int disp, XMMRegister xmm) { 10956 fill32(Address(dst, disp), xmm); 10957 } 10958 10959 void MacroAssembler::fill64(Address dst, XMMRegister xmm, bool use64byteVector) { 10960 assert(MaxVectorSize >= 32, "vector length should be >= 32"); 10961 if (!use64byteVector) { 10962 fill32(dst, xmm); 10963 fill32(dst.plus_disp(32), xmm); 10964 } else { 10965 evmovdquq(dst, xmm, Assembler::AVX_512bit); 10966 } 10967 } 10968 10969 void MacroAssembler::fill64(Register dst, int disp, XMMRegister xmm, bool use64byteVector) { 10970 fill64(Address(dst, disp), xmm, use64byteVector); 10971 } 10972 10973 #ifdef _LP64 10974 void MacroAssembler::generate_fill_avx3(BasicType type, Register to, Register value, 10975 Register count, Register rtmp, XMMRegister xtmp) { 10976 Label L_exit; 10977 Label L_fill_start; 10978 Label L_fill_64_bytes; 10979 Label L_fill_96_bytes; 10980 Label L_fill_128_bytes; 10981 Label L_fill_128_bytes_loop; 10982 Label L_fill_128_loop_header; 10983 Label L_fill_128_bytes_loop_header; 10984 Label L_fill_128_bytes_loop_pre_header; 10985 Label L_fill_zmm_sequence; 10986 10987 int shift = -1; 10988 int avx3threshold = VM_Version::avx3_threshold(); 10989 switch(type) { 10990 case T_BYTE: shift = 0; 10991 break; 10992 case T_SHORT: shift = 1; 10993 break; 10994 case T_INT: shift = 2; 10995 break; 10996 /* Uncomment when LONG fill stubs are supported. 
10997 case T_LONG: shift = 3; 10998 break; 10999 */ 11000 default: 11001 fatal("Unhandled type: %s\n", type2name(type)); 11002 } 11003 11004 if ((avx3threshold != 0) || (MaxVectorSize == 32)) { 11005 11006 if (MaxVectorSize == 64) { 11007 cmpq(count, avx3threshold >> shift); 11008 jcc(Assembler::greater, L_fill_zmm_sequence); 11009 } 11010 11011 evpbroadcast(type, xtmp, value, Assembler::AVX_256bit); 11012 11013 bind(L_fill_start); 11014 11015 cmpq(count, 32 >> shift); 11016 jccb(Assembler::greater, L_fill_64_bytes); 11017 fill32_masked(shift, to, 0, xtmp, k2, count, rtmp); 11018 jmp(L_exit); 11019 11020 bind(L_fill_64_bytes); 11021 cmpq(count, 64 >> shift); 11022 jccb(Assembler::greater, L_fill_96_bytes); 11023 fill64_masked(shift, to, 0, xtmp, k2, count, rtmp); 11024 jmp(L_exit); 11025 11026 bind(L_fill_96_bytes); 11027 cmpq(count, 96 >> shift); 11028 jccb(Assembler::greater, L_fill_128_bytes); 11029 fill64(to, 0, xtmp); 11030 subq(count, 64 >> shift); 11031 fill32_masked(shift, to, 64, xtmp, k2, count, rtmp); 11032 jmp(L_exit); 11033 11034 bind(L_fill_128_bytes); 11035 cmpq(count, 128 >> shift); 11036 jccb(Assembler::greater, L_fill_128_bytes_loop_pre_header); 11037 fill64(to, 0, xtmp); 11038 fill32(to, 64, xtmp); 11039 subq(count, 96 >> shift); 11040 fill32_masked(shift, to, 96, xtmp, k2, count, rtmp); 11041 jmp(L_exit); 11042 11043 bind(L_fill_128_bytes_loop_pre_header); 11044 { 11045 mov(rtmp, to); 11046 andq(rtmp, 31); 11047 jccb(Assembler::zero, L_fill_128_bytes_loop_header); 11048 negq(rtmp); 11049 addq(rtmp, 32); 11050 mov64(r8, -1L); 11051 bzhiq(r8, r8, rtmp); 11052 kmovql(k2, r8); 11053 evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_256bit); 11054 addq(to, rtmp); 11055 shrq(rtmp, shift); 11056 subq(count, rtmp); 11057 } 11058 11059 cmpq(count, 128 >> shift); 11060 jcc(Assembler::less, L_fill_start); 11061 11062 bind(L_fill_128_bytes_loop_header); 11063 subq(count, 128 >> shift); 11064 11065 align32(); 11066 bind(L_fill_128_bytes_loop); 11067 fill64(to, 0, xtmp); 11068 fill64(to, 64, xtmp); 11069 addq(to, 128); 11070 subq(count, 128 >> shift); 11071 jccb(Assembler::greaterEqual, L_fill_128_bytes_loop); 11072 11073 addq(count, 128 >> shift); 11074 jcc(Assembler::zero, L_exit); 11075 jmp(L_fill_start); 11076 } 11077 11078 if (MaxVectorSize == 64) { 11079 // Sequence using 64 byte ZMM register. 
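// This ZMM path mirrors the YMM sequence above: tails of up to 192 bytes are written
// with a mix of full and masked 64-byte stores, while larger fills first align 'to' to
// a 64-byte boundary using a masked store and then run the 192-bytes-per-iteration loop.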
11080 Label L_fill_128_bytes_zmm; 11081 Label L_fill_192_bytes_zmm; 11082 Label L_fill_192_bytes_loop_zmm; 11083 Label L_fill_192_bytes_loop_header_zmm; 11084 Label L_fill_192_bytes_loop_pre_header_zmm; 11085 Label L_fill_start_zmm_sequence; 11086 11087 bind(L_fill_zmm_sequence); 11088 evpbroadcast(type, xtmp, value, Assembler::AVX_512bit); 11089 11090 bind(L_fill_start_zmm_sequence); 11091 cmpq(count, 64 >> shift); 11092 jccb(Assembler::greater, L_fill_128_bytes_zmm); 11093 fill64_masked(shift, to, 0, xtmp, k2, count, rtmp, true); 11094 jmp(L_exit); 11095 11096 bind(L_fill_128_bytes_zmm); 11097 cmpq(count, 128 >> shift); 11098 jccb(Assembler::greater, L_fill_192_bytes_zmm); 11099 fill64(to, 0, xtmp, true); 11100 subq(count, 64 >> shift); 11101 fill64_masked(shift, to, 64, xtmp, k2, count, rtmp, true); 11102 jmp(L_exit); 11103 11104 bind(L_fill_192_bytes_zmm); 11105 cmpq(count, 192 >> shift); 11106 jccb(Assembler::greater, L_fill_192_bytes_loop_pre_header_zmm); 11107 fill64(to, 0, xtmp, true); 11108 fill64(to, 64, xtmp, true); 11109 subq(count, 128 >> shift); 11110 fill64_masked(shift, to, 128, xtmp, k2, count, rtmp, true); 11111 jmp(L_exit); 11112 11113 bind(L_fill_192_bytes_loop_pre_header_zmm); 11114 { 11115 movq(rtmp, to); 11116 andq(rtmp, 63); 11117 jccb(Assembler::zero, L_fill_192_bytes_loop_header_zmm); 11118 negq(rtmp); 11119 addq(rtmp, 64); 11120 mov64(r8, -1L); 11121 bzhiq(r8, r8, rtmp); 11122 kmovql(k2, r8); 11123 evmovdqu(T_BYTE, k2, Address(to, 0), xtmp, true, Assembler::AVX_512bit); 11124 addq(to, rtmp); 11125 shrq(rtmp, shift); 11126 subq(count, rtmp); 11127 } 11128 11129 cmpq(count, 192 >> shift); 11130 jcc(Assembler::less, L_fill_start_zmm_sequence); 11131 11132 bind(L_fill_192_bytes_loop_header_zmm); 11133 subq(count, 192 >> shift); 11134 11135 align32(); 11136 bind(L_fill_192_bytes_loop_zmm); 11137 fill64(to, 0, xtmp, true); 11138 fill64(to, 64, xtmp, true); 11139 fill64(to, 128, xtmp, true); 11140 addq(to, 192); 11141 subq(count, 192 >> shift); 11142 jccb(Assembler::greaterEqual, L_fill_192_bytes_loop_zmm); 11143 11144 addq(count, 192 >> shift); 11145 jcc(Assembler::zero, L_exit); 11146 jmp(L_fill_start_zmm_sequence); 11147 } 11148 bind(L_exit); 11149 } 11150 #endif 11151 #endif //COMPILER2_OR_JVMCI 11152 11153 11154 #ifdef _LP64 11155 void MacroAssembler::convert_f2i(Register dst, XMMRegister src) { 11156 Label done; 11157 cvttss2sil(dst, src); 11158 // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub 11159 cmpl(dst, 0x80000000); // float_sign_flip 11160 jccb(Assembler::notEqual, done); 11161 subptr(rsp, 8); 11162 movflt(Address(rsp, 0), src); 11163 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2i_fixup()))); 11164 pop(dst); 11165 bind(done); 11166 } 11167 11168 void MacroAssembler::convert_d2i(Register dst, XMMRegister src) { 11169 Label done; 11170 cvttsd2sil(dst, src); 11171 // Conversion instructions do not match JLS for overflow, underflow and NaN -> fixup in stub 11172 cmpl(dst, 0x80000000); // float_sign_flip 11173 jccb(Assembler::notEqual, done); 11174 subptr(rsp, 8); 11175 movdbl(Address(rsp, 0), src); 11176 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2i_fixup()))); 11177 pop(dst); 11178 bind(done); 11179 } 11180 11181 void MacroAssembler::convert_f2l(Register dst, XMMRegister src) { 11182 Label done; 11183 cvttss2siq(dst, src); 11184 cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip())); 11185 jccb(Assembler::notEqual, done); 11186 subptr(rsp, 8); 11187 
movflt(Address(rsp, 0), src);
11188 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::f2l_fixup())));
11189 pop(dst);
11190 bind(done);
11191 }
11192
11193 void MacroAssembler::round_float(Register dst, XMMRegister src, Register rtmp, Register rcx) {
11194 // The following code is a line-by-line assembly translation of the rounding algorithm.
11195 // Please refer to the java.lang.Math.round(float) algorithm for details.
11196 const int32_t FloatConsts_EXP_BIT_MASK = 0x7F800000;
11197 const int32_t FloatConsts_SIGNIFICAND_WIDTH = 24;
11198 const int32_t FloatConsts_EXP_BIAS = 127;
11199 const int32_t FloatConsts_SIGNIF_BIT_MASK = 0x007FFFFF;
11200 const int32_t MINUS_32 = 0xFFFFFFE0;
11201 Label L_special_case, L_block1, L_exit;
11202 movl(rtmp, FloatConsts_EXP_BIT_MASK);
11203 movdl(dst, src);
11204 andl(dst, rtmp);
11205 sarl(dst, FloatConsts_SIGNIFICAND_WIDTH - 1);
11206 movl(rtmp, FloatConsts_SIGNIFICAND_WIDTH - 2 + FloatConsts_EXP_BIAS);
11207 subl(rtmp, dst);
11208 movl(rcx, rtmp);
11209 movl(dst, MINUS_32);
11210 testl(rtmp, dst);
11211 jccb(Assembler::notEqual, L_special_case);
11212 movdl(dst, src);
11213 andl(dst, FloatConsts_SIGNIF_BIT_MASK);
11214 orl(dst, FloatConsts_SIGNIF_BIT_MASK + 1);
11215 movdl(rtmp, src);
11216 testl(rtmp, rtmp);
11217 jccb(Assembler::greaterEqual, L_block1);
11218 negl(dst);
11219 bind(L_block1);
11220 sarl(dst);
11221 addl(dst, 0x1);
11222 sarl(dst, 0x1);
11223 jmp(L_exit);
11224 bind(L_special_case);
11225 convert_f2i(dst, src);
11226 bind(L_exit);
11227 }
11228
11229 void MacroAssembler::round_double(Register dst, XMMRegister src, Register rtmp, Register rcx) {
11230 // The following code is a line-by-line assembly translation of the rounding algorithm.
11231 // Please refer to the java.lang.Math.round(double) algorithm for details.
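// For reference, the Java algorithm being translated has roughly this shape
// (paraphrased; see java.lang.Math.round(double) for the authoritative version):
//   long bits  = Double.doubleToRawLongBits(a);
//   long shift = (SIGNIFICAND_WIDTH - 2 + EXP_BIAS)
//                - ((bits & EXP_BIT_MASK) >> (SIGNIFICAND_WIDTH - 1));
//   if ((shift & -64) == 0) {          // 0 <= shift < 64: rounding is needed
//     long r = (bits & SIGNIF_BIT_MASK) | (SIGNIF_BIT_MASK + 1);  // restore implicit leading bit
//     if (bits < 0) r = -r;
//     return ((r >> shift) + 1) >> 1;  // floor(x + 0.5)
//   } else {
//     return (long) a;                 // special case, handled by convert_d2l below
//   }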
11232 const int64_t DoubleConsts_EXP_BIT_MASK = 0x7FF0000000000000L;
11233 const int64_t DoubleConsts_SIGNIFICAND_WIDTH = 53;
11234 const int64_t DoubleConsts_EXP_BIAS = 1023;
11235 const int64_t DoubleConsts_SIGNIF_BIT_MASK = 0x000FFFFFFFFFFFFFL;
11236 const int64_t MINUS_64 = 0xFFFFFFFFFFFFFFC0L;
11237 Label L_special_case, L_block1, L_exit;
11238 mov64(rtmp, DoubleConsts_EXP_BIT_MASK);
11239 movq(dst, src);
11240 andq(dst, rtmp);
11241 sarq(dst, DoubleConsts_SIGNIFICAND_WIDTH - 1);
11242 mov64(rtmp, DoubleConsts_SIGNIFICAND_WIDTH - 2 + DoubleConsts_EXP_BIAS);
11243 subq(rtmp, dst);
11244 movq(rcx, rtmp);
11245 mov64(dst, MINUS_64);
11246 testq(rtmp, dst);
11247 jccb(Assembler::notEqual, L_special_case);
11248 movq(dst, src);
11249 mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK);
11250 andq(dst, rtmp);
11251 mov64(rtmp, DoubleConsts_SIGNIF_BIT_MASK + 1);
11252 orq(dst, rtmp);
11253 movq(rtmp, src);
11254 testq(rtmp, rtmp);
11255 jccb(Assembler::greaterEqual, L_block1);
11256 negq(dst);
11257 bind(L_block1);
11258 sarq(dst);
11259 addq(dst, 0x1);
11260 sarq(dst, 0x1);
11261 jmp(L_exit);
11262 bind(L_special_case);
11263 convert_d2l(dst, src);
11264 bind(L_exit);
11265 }
11266
11267 void MacroAssembler::convert_d2l(Register dst, XMMRegister src) {
11268 Label done;
11269 cvttsd2siq(dst, src);
11270 cmp64(dst, ExternalAddress((address) StubRoutines::x86::double_sign_flip()));
11271 jccb(Assembler::notEqual, done);
11272 subptr(rsp, 8);
11273 movdbl(Address(rsp, 0), src);
11274 call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::d2l_fixup())));
11275 pop(dst);
11276 bind(done);
11277 }
11278
11279 void MacroAssembler::cache_wb(Address line)
11280 {
11281 // 64-bit CPUs always support clflush
11282 assert(VM_Version::supports_clflush(), "clflush should be available");
11283 bool optimized = VM_Version::supports_clflushopt();
11284 bool no_evict = VM_Version::supports_clwb();
11285
11286 // Prefer clwb (writeback without evict); otherwise
11287 // prefer clflushopt (potentially parallel writeback with evict);
11288 // otherwise fall back on clflush (serial writeback with evict).
11289
11290 if (optimized) {
11291 if (no_evict) {
11292 clwb(line);
11293 } else {
11294 clflushopt(line);
11295 }
11296 } else {
11297 // no need for a fence when using CLFLUSH
11298 clflush(line);
11299 }
11300 }
11301
11302 void MacroAssembler::cache_wbsync(bool is_pre)
11303 {
11304 assert(VM_Version::supports_clflush(), "clflush should be available");
11305 bool optimized = VM_Version::supports_clflushopt();
11306 bool no_evict = VM_Version::supports_clwb();
11307
11308 // pick the correct implementation
11309
11310 if (!is_pre && (optimized || no_evict)) {
11311 // need an sfence for post flush when using clflushopt or clwb
11312 // otherwise no need for any synchronization
11313
11314 sfence();
11315 }
11316 }
11317
11318 #endif // _LP64
11319
11320 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
11321 switch (cond) {
11322 // Note some conditions are synonyms for others
11323 case Assembler::zero: return Assembler::notZero;
11324 case Assembler::notZero: return Assembler::zero;
11325 case Assembler::less: return Assembler::greaterEqual;
11326 case Assembler::lessEqual: return Assembler::greater;
11327 case Assembler::greater: return Assembler::lessEqual;
11328 case Assembler::greaterEqual: return Assembler::less;
11329 case Assembler::below: return Assembler::aboveEqual;
11330 case Assembler::belowEqual: return Assembler::above;
11331 case Assembler::above: return
Assembler::belowEqual; 11332 case Assembler::aboveEqual: return Assembler::below; 11333 case Assembler::overflow: return Assembler::noOverflow; 11334 case Assembler::noOverflow: return Assembler::overflow; 11335 case Assembler::negative: return Assembler::positive; 11336 case Assembler::positive: return Assembler::negative; 11337 case Assembler::parity: return Assembler::noParity; 11338 case Assembler::noParity: return Assembler::parity; 11339 } 11340 ShouldNotReachHere(); return Assembler::overflow; 11341 } 11342 11343 // 32-bit Windows has its own fast-path implementation 11344 // of get_thread 11345 #if !defined(WIN32) || defined(_LP64) 11346 11347 // This is simply a call to Thread::current() 11348 void MacroAssembler::get_thread(Register thread) { 11349 if (thread != rax) { 11350 push(rax); 11351 } 11352 LP64_ONLY(push(rdi);) 11353 LP64_ONLY(push(rsi);) 11354 push(rdx); 11355 push(rcx); 11356 #ifdef _LP64 11357 push(r8); 11358 push(r9); 11359 push(r10); 11360 push(r11); 11361 #endif 11362 11363 MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, Thread::current), 0); 11364 11365 #ifdef _LP64 11366 pop(r11); 11367 pop(r10); 11368 pop(r9); 11369 pop(r8); 11370 #endif 11371 pop(rcx); 11372 pop(rdx); 11373 LP64_ONLY(pop(rsi);) 11374 LP64_ONLY(pop(rdi);) 11375 if (thread != rax) { 11376 mov(thread, rax); 11377 pop(rax); 11378 } 11379 } 11380 11381 11382 #endif // !WIN32 || _LP64 11383 11384 void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigned bias, Register tmp) { 11385 Label L_stack_ok; 11386 if (bias == 0) { 11387 testptr(sp, 2 * wordSize - 1); 11388 } else { 11389 // lea(tmp, Address(rsp, bias); 11390 mov(tmp, sp); 11391 addptr(tmp, bias); 11392 testptr(tmp, 2 * wordSize - 1); 11393 } 11394 jcc(Assembler::equal, L_stack_ok); 11395 block_comment(msg); 11396 stop(msg); 11397 bind(L_stack_ok); 11398 } 11399 11400 // Implements lightweight-locking. 11401 // 11402 // obj: the object to be locked 11403 // reg_rax: rax 11404 // thread: the thread which attempts to lock obj 11405 // tmp: a temporary register 11406 void MacroAssembler::lightweight_lock(Register basic_lock, Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) { 11407 assert(reg_rax == rax, ""); 11408 assert_different_registers(basic_lock, obj, reg_rax, thread, tmp); 11409 11410 Label push; 11411 const Register top = tmp; 11412 11413 // Preload the markWord. It is important that this is the first 11414 // instruction emitted as it is part of C1's null check semantics. 11415 movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes())); 11416 11417 if (UseObjectMonitorTable) { 11418 // Clear cache in case fast locking succeeds. 11419 movptr(Address(basic_lock, BasicObjectLock::lock_offset() + in_ByteSize((BasicLock::object_monitor_cache_offset_in_bytes()))), 0); 11420 } 11421 11422 // Load top. 11423 movl(top, Address(thread, JavaThread::lock_stack_top_offset())); 11424 11425 // Check if the lock-stack is full. 11426 cmpl(top, LockStack::end_offset()); 11427 jcc(Assembler::greaterEqual, slow); 11428 11429 // Check for recursion. 11430 cmpptr(obj, Address(thread, top, Address::times_1, -oopSize)); 11431 jcc(Assembler::equal, push); 11432 11433 // Check header for monitor (0b10). 11434 testptr(reg_rax, markWord::monitor_value); 11435 jcc(Assembler::notZero, slow); 11436 11437 // Try to lock. 
Transition lock bits 0b01 => 0b00 11438 movptr(tmp, reg_rax); 11439 andptr(tmp, ~(int32_t)markWord::unlocked_value); 11440 orptr(reg_rax, markWord::unlocked_value); 11441 if (EnableValhalla) { 11442 // Mask inline_type bit such that we go to the slow path if object is an inline type 11443 andptr(reg_rax, ~((int) markWord::inline_type_bit_in_place)); 11444 } 11445 lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes())); 11446 jcc(Assembler::notEqual, slow); 11447 11448 // Restore top, CAS clobbers register. 11449 movl(top, Address(thread, JavaThread::lock_stack_top_offset())); 11450 11451 bind(push); 11452 // After successful lock, push object on lock-stack. 11453 movptr(Address(thread, top), obj); 11454 incrementl(top, oopSize); 11455 movl(Address(thread, JavaThread::lock_stack_top_offset()), top); 11456 } 11457 11458 // Implements lightweight-unlocking. 11459 // 11460 // obj: the object to be unlocked 11461 // reg_rax: rax 11462 // thread: the thread 11463 // tmp: a temporary register 11464 void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) { 11465 assert(reg_rax == rax, ""); 11466 assert_different_registers(obj, reg_rax, thread, tmp); 11467 11468 Label unlocked, push_and_slow; 11469 const Register top = tmp; 11470 11471 // Check if obj is top of lock-stack. 11472 movl(top, Address(thread, JavaThread::lock_stack_top_offset())); 11473 cmpptr(obj, Address(thread, top, Address::times_1, -oopSize)); 11474 jcc(Assembler::notEqual, slow); 11475 11476 // Pop lock-stack. 11477 DEBUG_ONLY(movptr(Address(thread, top, Address::times_1, -oopSize), 0);) 11478 subl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize); 11479 11480 // Check if recursive. 11481 cmpptr(obj, Address(thread, top, Address::times_1, -2 * oopSize)); 11482 jcc(Assembler::equal, unlocked); 11483 11484 // Not recursive. Check header for monitor (0b10). 11485 movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes())); 11486 testptr(reg_rax, markWord::monitor_value); 11487 jcc(Assembler::notZero, push_and_slow); 11488 11489 #ifdef ASSERT 11490 // Check header not unlocked (0b01). 11491 Label not_unlocked; 11492 testptr(reg_rax, markWord::unlocked_value); 11493 jcc(Assembler::zero, not_unlocked); 11494 stop("lightweight_unlock already unlocked"); 11495 bind(not_unlocked); 11496 #endif 11497 11498 // Try to unlock. Transition lock bits 0b00 => 0b01 11499 movptr(tmp, reg_rax); 11500 orptr(tmp, markWord::unlocked_value); 11501 lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes())); 11502 jcc(Assembler::equal, unlocked); 11503 11504 bind(push_and_slow); 11505 // Restore lock-stack and handle the unlock in runtime. 11506 #ifdef ASSERT 11507 movl(top, Address(thread, JavaThread::lock_stack_top_offset())); 11508 movptr(Address(thread, top), obj); 11509 #endif 11510 addl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize); 11511 jmp(slow); 11512 11513 bind(unlocked); 11514 } 11515 11516 #ifdef _LP64 11517 // Saves legacy GPRs state on stack. 
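// Note: 16 slots are reserved but only 15 registers are stored; each register lands at
// offset (15 - encoding) * wordSize, so the slot at 11 * wordSize (rsp's position in
// that numbering) is left unused because rsp itself is not saved.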
11518 void MacroAssembler::save_legacy_gprs() {
11519 subq(rsp, 16 * wordSize);
11520 movq(Address(rsp, 15 * wordSize), rax);
11521 movq(Address(rsp, 14 * wordSize), rcx);
11522 movq(Address(rsp, 13 * wordSize), rdx);
11523 movq(Address(rsp, 12 * wordSize), rbx);
11524 movq(Address(rsp, 10 * wordSize), rbp);
11525 movq(Address(rsp, 9 * wordSize), rsi);
11526 movq(Address(rsp, 8 * wordSize), rdi);
11527 movq(Address(rsp, 7 * wordSize), r8);
11528 movq(Address(rsp, 6 * wordSize), r9);
11529 movq(Address(rsp, 5 * wordSize), r10);
11530 movq(Address(rsp, 4 * wordSize), r11);
11531 movq(Address(rsp, 3 * wordSize), r12);
11532 movq(Address(rsp, 2 * wordSize), r13);
11533 movq(Address(rsp, wordSize), r14);
11534 movq(Address(rsp, 0), r15);
11535 }
11536
11537 // Restores legacy GPRs state from stack.
11538 void MacroAssembler::restore_legacy_gprs() {
11539 movq(r15, Address(rsp, 0));
11540 movq(r14, Address(rsp, wordSize));
11541 movq(r13, Address(rsp, 2 * wordSize));
11542 movq(r12, Address(rsp, 3 * wordSize));
11543 movq(r11, Address(rsp, 4 * wordSize));
11544 movq(r10, Address(rsp, 5 * wordSize));
11545 movq(r9, Address(rsp, 6 * wordSize));
11546 movq(r8, Address(rsp, 7 * wordSize));
11547 movq(rdi, Address(rsp, 8 * wordSize));
11548 movq(rsi, Address(rsp, 9 * wordSize));
11549 movq(rbp, Address(rsp, 10 * wordSize));
11550 movq(rbx, Address(rsp, 12 * wordSize));
11551 movq(rdx, Address(rsp, 13 * wordSize));
11552 movq(rcx, Address(rsp, 14 * wordSize));
11553 movq(rax, Address(rsp, 15 * wordSize));
11554 addq(rsp, 16 * wordSize);
11555 }
11556
11557 void MacroAssembler::setcc(Assembler::Condition comparison, Register dst) {
11558 if (VM_Version::supports_apx_f()) {
11559 esetzucc(comparison, dst);
11560 } else {
11561 setb(comparison, dst);
11562 movzbl(dst, dst);
11563 }
11564 }
11565 #endif