
src/hotspot/cpu/x86/macroAssembler_x86.hpp


   1 /*
   2  * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef CPU_X86_MACROASSEMBLER_X86_HPP
  26 #define CPU_X86_MACROASSEMBLER_X86_HPP
  27 
  28 #include "asm/assembler.hpp"
  29 #include "asm/register.hpp"
  30 #include "code/vmreg.inline.hpp"
  31 #include "compiler/oopMap.hpp"
  32 #include "utilities/macros.hpp"

  33 #include "runtime/vm_version.hpp"
  34 #include "utilities/checkedCast.hpp"
  35 
  36 // MacroAssembler extends Assembler by frequently used macros.
  37 //
  38 // Instructions for which a 'better' code sequence exists depending
  39 // on arguments should also go in here.
  40 
  41 class MacroAssembler: public Assembler {
  42   friend class LIR_Assembler;
  43   friend class Runtime1;      // as_Address()
  44 
  45  public:
  46   // Support for VM calls
  47   //
  48   // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  49   // may customize this version by overriding it for its purposes (e.g., to save/restore
  50   // additional registers when doing a VM call).
  51 
  52   virtual void call_VM_leaf_base(
  53     address entry_point,               // the entry point
  54     int     number_of_arguments        // the number of arguments to pop after the call
  55   );

  85  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  86  // The implementation is only non-empty for the InterpreterMacroAssembler,
  87  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  88  virtual void check_and_handle_popframe(Register java_thread);
  89  virtual void check_and_handle_earlyret(Register java_thread);
  90 
  91   Address as_Address(AddressLiteral adr);
  92   Address as_Address(ArrayAddress adr, Register rscratch);
  93 
  94   // Support for null-checks
  95   //
  96   // Generates code that causes a null OS exception if the content of reg is null.
  97   // If the accessed location is M[reg + offset] and the offset is known, provide the
  98   // offset. No explicit code generation is needed if the offset is within a certain
  99   // range (0 <= offset <= page_size).
 100 
 101   void null_check(Register reg, int offset = -1);
 102   static bool needs_explicit_null_check(intptr_t offset);
 103   static bool uses_implicit_null_check(void* address);
 104 
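  // A minimal usage sketch (hypothetical caller code, not part of this header),
  // assuming the receiver oop is in rax:
  //
  //   masm->null_check(rax, oopDesc::klass_offset_in_bytes());
  //
  // With a small known offset nothing is emitted (the later access of
  // M[rax + offset] itself raises the OS null exception); with offset == -1 or a
  // large offset an explicit dummy access of M[rax] is emitted instead.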
 105   // Required platform-specific helpers for Label::patch_instructions.
 106   // They _shadow_ the declarations in AbstractAssembler, which are undefined.
 107   void pd_patch_instruction(address branch, address target, const char* file, int line) {
 108     unsigned char op = branch[0];
 109     assert(op == 0xE8 /* call */ ||
 110         op == 0xE9 /* jmp */ ||
 111         op == 0xEB /* short jmp */ ||
 112         (op & 0xF0) == 0x70 /* short jcc */ ||
 113         (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
 114         (op == 0xC7 && branch[1] == 0xF8) /* xbegin */ ||
 115         (op == 0x8D) /* lea */,
 116         "Invalid opcode at patch point");
 117 
 118     if (op == 0xEB || (op & 0xF0) == 0x70) {
 119       // short offset operators (jmp and jcc)
 120       char* disp = (char*) &branch[1];
 121       int imm8 = checked_cast<int>(target - (address) &disp[1]);
 122       guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
 123                 file == nullptr ? "<null>" : file, line);
 124       *disp = (char)imm8;

 354   void resolve_global_jobject(Register value, Register thread, Register tmp);
 355 
 356   // C 'boolean' to Java boolean: x == 0 ? 0 : 1
 357   void c2bool(Register x);
 358 
 359   // C++ bool manipulation
 360 
 361   void movbool(Register dst, Address src);
 362   void movbool(Address dst, bool boolconst);
 363   void movbool(Address dst, Register src);
 364   void testbool(Register dst);
 365 
 366   void resolve_oop_handle(Register result, Register tmp);
 367   void resolve_weak_handle(Register result, Register tmp);
 368   void load_mirror(Register mirror, Register method, Register tmp);
 369   void load_method_holder_cld(Register rresult, Register rmethod);
 370 
 371   void load_method_holder(Register holder, Register method);
 372 
 373   // oop manipulations
 374 #ifdef _LP64
 375   void load_narrow_klass_compact(Register dst, Register src);
 376 #endif
 377   void load_klass(Register dst, Register src, Register tmp);
 378   void store_klass(Register dst, Register src, Register tmp);
 379 
 380   // Compares the Klass pointer of an object to a given Klass (which might be narrow,
 381   // depending on UseCompressedClassPointers).
 382   void cmp_klass(Register klass, Register obj, Register tmp);
 383 
 384   // Compares the Klass pointer of two objects obj1 and obj2. Result is in the condition flags.
 385   // Uses tmp1 and tmp2 as temporary registers.
 386   void cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2);
 387 
 388   void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
 389                       Register tmp1, Register thread_tmp);
 390   void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
 391                        Register tmp1, Register tmp2, Register tmp3);
 392 
 393   void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
 394                      Register thread_tmp = noreg, DecoratorSet decorators = 0);
 395   void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
 396                               Register thread_tmp = noreg, DecoratorSet decorators = 0);
 397   void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
 398                       Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
 399 
 400   // Used for storing null. All other oop constants should be
 401   // stored using routines that take a jobject.
 402   void store_heap_oop_null(Address dst);
 403 
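  // A minimal usage sketch (hypothetical caller code, not part of this header) of a
  // GC-aware oop field load and store; the holder object in rdx, the field offset
  // 'f_offset' and the temp registers are illustrative assumptions:
  //
  //   masm->load_heap_oop(rax, Address(rdx, f_offset));
  //   masm->store_heap_oop(Address(rdx, f_offset), rax, rscratch1, rscratch2);
  //
  // Both routines route the access through the GC barrier code, so the temp
  // registers actually required depend on the collector in use.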
 404 #ifdef _LP64
 405   void store_klass_gap(Register dst, Register src);
 406 
 407   // This dummy is to prevent a call to store_heap_oop from
 408   // converting a zero (like null) into a Register by giving
 409   // the compiler two choices it can't resolve
 410 
 411   void store_heap_oop(Address dst, void* dummy);
 412 
 413   void encode_heap_oop(Register r);
 414   void decode_heap_oop(Register r);
 415   void encode_heap_oop_not_null(Register r);
 416   void decode_heap_oop_not_null(Register r);
 417   void encode_heap_oop_not_null(Register dst, Register src);
 418   void decode_heap_oop_not_null(Register dst, Register src);
 419 
 420   void set_narrow_oop(Register dst, jobject obj);
 421   void set_narrow_oop(Address dst, jobject obj);
 422   void cmp_narrow_oop(Register dst, jobject obj);
 423   void cmp_narrow_oop(Address dst, jobject obj);

 585 
 586 public:
 587   void push_set(RegSet set, int offset = -1);
 588   void pop_set(RegSet set, int offset = -1);
 589 
 590   // Push and pop everything that might be clobbered by a native
 591   // runtime call.
 592   // Only save the lower 64 bits of each vector register.
 593   // Additional registers can be excluded in a passed RegSet.
 594   void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
 595   void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);
 596 
 597   void push_call_clobbered_registers(bool save_fpu = true) {
 598     push_call_clobbered_registers_except(RegSet(), save_fpu);
 599   }
 600   void pop_call_clobbered_registers(bool restore_fpu = true) {
 601     pop_call_clobbered_registers_except(RegSet(), restore_fpu);
 602   }
 603 
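  // A minimal usage sketch (hypothetical caller code, not part of this header),
  // bracketing a call to a native runtime helper; 'runtime_entry' is a
  // hypothetical address, and call(RuntimeAddress(...)) refers to the
  // AddressLiteral-based call declared elsewhere in this class:
  //
  //   masm->push_call_clobbered_registers();
  //   masm->call(RuntimeAddress(runtime_entry));
  //   masm->pop_call_clobbered_registers();
  //
  // Only the lower 64 bits of each vector register are preserved, so values kept
  // in upper vector lanes must be saved separately by the caller.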
 604   // allocation
 605   void tlab_allocate(
 606     Register thread,                   // Current thread
 607     Register obj,                      // result: pointer to object after successful allocation
 608     Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
 609     int      con_size_in_bytes,        // object size in bytes if   known at compile time
 610     Register t1,                       // temp register
 611     Register t2,                       // temp register
 612     Label&   slow_case                 // continuation point if fast allocation fails
 613   );
 614   void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
 615 
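  // A minimal usage sketch (hypothetical caller code, not part of this header) of a
  // fixed-size fast-path allocation on LP64, assuming a compile-time constant
  // 'instance_size' and a caller-defined Label 'L_slow_case':
  //
  //   masm->tlab_allocate(r15_thread, rax, noreg, instance_size, rbx, rcx, L_slow_case);
  //
  // On success rax points to the uninitialized object; on TLAB exhaustion control
  // continues at L_slow_case.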
 616   void population_count(Register dst, Register src, Register scratch1, Register scratch2);
 617 
 618   // interface method calling
 619   void lookup_interface_method(Register recv_klass,
 620                                Register intf_klass,
 621                                RegisterOrConstant itable_index,
 622                                Register method_result,
 623                                Register scan_temp,
 624                                Label& no_such_interface,
 625                                bool return_method = true);
 626 
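  // A minimal usage sketch (hypothetical caller code, not part of this header),
  // resolving an itable entry for a constant index; the register choices,
  // 'itable_index' and L_no_such_interface are illustrative assumptions:
  //
  //   masm->lookup_interface_method(rdx /*recv_klass*/, rax /*intf_klass*/,
  //                                 RegisterOrConstant(itable_index),
  //                                 rbx /*method_result*/, rsi /*scan_temp*/,
  //                                 L_no_such_interface);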
 627   void lookup_interface_method_stub(Register recv_klass,
 628                                     Register holder_klass,
 629                                     Register resolved_klass,
 630                                     Register method_result,
 631                                     Register scan_temp,
 632                                     Register temp_reg2,
 633                                     Register receiver,
 634                                     int itable_index,
 635                                     Label& L_no_such_interface);

 846   // Instructions that use AddressLiteral operands. These instructions can handle 32-bit/64-bit
 847   // operands. In general the names are modified to avoid hiding the instruction in Assembler,
 848   // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
 849   // here in MacroAssembler. The major exception to this rule is call.
 850 
 851   // Arithmetics
 852 
 853 
 854   void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
 855   void addptr(Address dst, Register src);
 856 
 857   void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
 858   void addptr(Register dst, int32_t src);
 859   void addptr(Register dst, Register src);
 860   void addptr(Register dst, RegisterOrConstant src) {
 861     if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
 862     else                   addptr(dst, src.as_register());
 863   }
 864 
 865   void andptr(Register dst, int32_t src);
 866   void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }

 867 
 868 #ifdef _LP64
 869   using Assembler::andq;
 870   void andq(Register dst, AddressLiteral src, Register rscratch = noreg);
 871 #endif
 872 
 873   void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);
 874 
 875   // renamed to drag out the casting of address to int32_t/intptr_t
 876   void cmp32(Register src1, int32_t imm);
 877 
 878   void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
 879   // compare reg - mem, or reg - &mem
 880   void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);
 881 
 882   void cmp32(Register src1, Address src2);
 883 
 884 #ifndef _LP64
 885   void cmpklass(Address dst, Metadata* obj);
 886   void cmpklass(Register dst, Metadata* obj);

2062   void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
2063 
2064   using Assembler::movq;
2065   void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
2066 
2067   // Can push value or effective address
2068   void pushptr(AddressLiteral src, Register rscratch);
2069 
2070   void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
2071   void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
2072 
2073   void pushoop(jobject obj, Register rscratch);
2074   void pushklass(Metadata* obj, Register rscratch);
2075 
2076   // sign extend as needed: widen an 'l' (32-bit) value to a ptr-sized element
2077   void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
2078   void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
2079 
2080 
2081  public:
2082   // clear memory of size 'cnt' qwords, starting at 'base';
2083   // if 'is_large' is set, do not try to produce short loop
2084   void clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, bool is_large, KRegister mask=knoreg);
2085 
2086   // clear memory initialization sequence for constant size;
2087   void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
2088 
2089   // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
2090   void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
2091 
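  // A minimal usage sketch (hypothetical caller code, not part of this header),
  // clearing a run of 'cnt' qwords starting at 'base', both held in registers:
  //
  //   masm->clear_mem(rdi /*base*/, rcx /*cnt, in qwords*/, rax, xmm0, /*is_large*/ false);
  //
  // The KRegister mask argument defaults to knoreg and only comes into play on
  // AVX-512 code paths that use masked stores.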
2092   // Fill primitive arrays
2093   void generate_fill(BasicType t, bool aligned,
2094                      Register to, Register value, Register count,
2095                      Register rtmp, XMMRegister xtmp);
2096 
2097   void encode_iso_array(Register src, Register dst, Register len,
2098                         XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
2099                         XMMRegister tmp4, Register tmp5, Register result, bool ascii);
2100 
2101 #ifdef _LP64
2102   void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
2103   void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
2104                              Register y, Register y_idx, Register z,

   1 /*
   2  * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef CPU_X86_MACROASSEMBLER_X86_HPP
  26 #define CPU_X86_MACROASSEMBLER_X86_HPP
  27 
  28 #include "asm/assembler.hpp"
  29 #include "asm/register.hpp"
  30 #include "code/vmreg.inline.hpp"
  31 #include "compiler/oopMap.hpp"
  32 #include "utilities/macros.hpp"
  33 #include "runtime/signature.hpp"
  34 #include "runtime/vm_version.hpp"
  35 #include "utilities/checkedCast.hpp"
  36 
  37 class ciInlineKlass;
  38 
  39 // MacroAssembler extends Assembler by frequently used macros.
  40 //
  41 // Instructions for which a 'better' code sequence exists depending
  42 // on arguments should also go in here.
  43 
  44 class MacroAssembler: public Assembler {
  45   friend class LIR_Assembler;
  46   friend class Runtime1;      // as_Address()
  47 
  48  public:
  49   // Support for VM calls
  50   //
  51   // This is the base routine called by the different versions of call_VM_leaf. The interpreter
  52   // may customize this version by overriding it for its purposes (e.g., to save/restore
  53   // additional registers when doing a VM call).
  54 
  55   virtual void call_VM_leaf_base(
  56     address entry_point,               // the entry point
  57     int     number_of_arguments        // the number of arguments to pop after the call
  58   );

  88  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  89  // The implementation is only non-empty for the InterpreterMacroAssembler,
  90  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  91  virtual void check_and_handle_popframe(Register java_thread);
  92  virtual void check_and_handle_earlyret(Register java_thread);
  93 
  94   Address as_Address(AddressLiteral adr);
  95   Address as_Address(ArrayAddress adr, Register rscratch);
  96 
  97   // Support for null-checks
  98   //
  99   // Generates code that causes a null OS exception if the content of reg is null.
 100   // If the accessed location is M[reg + offset] and the offset is known, provide the
 101   // offset. No explicit code generation is needed if the offset is within a certain
 102   // range (0 <= offset <= page_size).
 103 
 104   void null_check(Register reg, int offset = -1);
 105   static bool needs_explicit_null_check(intptr_t offset);
 106   static bool uses_implicit_null_check(void* address);
 107 
 108   // markWord tests, kills markWord reg
 109   void test_markword_is_inline_type(Register markword, Label& is_inline_type);
 110 
 111   // inlineKlass queries, kills temp_reg
 112   void test_klass_is_inline_type(Register klass, Register temp_reg, Label& is_inline_type);
 113   void test_oop_is_not_inline_type(Register object, Register tmp, Label& not_inline_type);
 114 
 115   void test_field_is_null_free_inline_type(Register flags, Register temp_reg, Label& is_null_free);
 116   void test_field_is_not_null_free_inline_type(Register flags, Register temp_reg, Label& not_null_free);
 117   void test_field_is_flat(Register flags, Register temp_reg, Label& is_flat);
 118   void test_field_has_null_marker(Register flags, Register temp_reg, Label& has_null_marker);
 119 
 120   // Check oops for special arrays, i.e. flat arrays and/or null-free arrays
 121   void test_oop_prototype_bit(Register oop, Register temp_reg, int32_t test_bit, bool jmp_set, Label& jmp_label);
 122   void test_flat_array_oop(Register oop, Register temp_reg, Label& is_flat_array);
 123   void test_non_flat_array_oop(Register oop, Register temp_reg, Label& is_non_flat_array);
 124   void test_null_free_array_oop(Register oop, Register temp_reg, Label& is_null_free_array);
 125   void test_non_null_free_array_oop(Register oop, Register temp_reg, Label& is_non_null_free_array);
 126 
 127   // Check array klass layout helper for flat or null-free arrays...
 128   void test_flat_array_layout(Register lh, Label& is_flat_array);
 129   void test_non_flat_array_layout(Register lh, Label& is_non_flat_array);
 130 
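  // A minimal usage sketch (hypothetical caller code, not part of this header),
  // routing flat arrays to a separate path; the array oop is assumed to be in rdx
  // and L_flat is a caller-defined Label:
  //
  //   masm->test_flat_array_oop(rdx, rscratch1, L_flat);
  //
  // As with the other helpers above, the temp register is killed and control
  // transfers to the label when the tested property holds.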
 131   // Required platform-specific helpers for Label::patch_instructions.
 132   // They _shadow_ the declarations in AbstractAssembler, which are undefined.
 133   void pd_patch_instruction(address branch, address target, const char* file, int line) {
 134     unsigned char op = branch[0];
 135     assert(op == 0xE8 /* call */ ||
 136         op == 0xE9 /* jmp */ ||
 137         op == 0xEB /* short jmp */ ||
 138         (op & 0xF0) == 0x70 /* short jcc */ ||
 139         (op == 0x0F && (branch[1] & 0xF0) == 0x80) /* jcc */ ||
 140         (op == 0xC7 && branch[1] == 0xF8) /* xbegin */ ||
 141         (op == 0x8D) /* lea */,
 142         "Invalid opcode at patch point");
 143 
 144     if (op == 0xEB || (op & 0xF0) == 0x70) {
 145       // short offset operators (jmp and jcc)
 146       char* disp = (char*) &branch[1];
 147       int imm8 = checked_cast<int>(target - (address) &disp[1]);
 148       guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset at %s:%d",
 149                 file == nullptr ? "<null>" : file, line);
 150       *disp = (char)imm8;

 380   void resolve_global_jobject(Register value, Register thread, Register tmp);
 381 
 382   // C 'boolean' to Java boolean: x == 0 ? 0 : 1
 383   void c2bool(Register x);
 384 
 385   // C++ bool manipulation
 386 
 387   void movbool(Register dst, Address src);
 388   void movbool(Address dst, bool boolconst);
 389   void movbool(Address dst, Register src);
 390   void testbool(Register dst);
 391 
 392   void resolve_oop_handle(Register result, Register tmp);
 393   void resolve_weak_handle(Register result, Register tmp);
 394   void load_mirror(Register mirror, Register method, Register tmp);
 395   void load_method_holder_cld(Register rresult, Register rmethod);
 396 
 397   void load_method_holder(Register holder, Register method);
 398 
 399   // oop manipulations
 400 
 401   // Load oopDesc._metadata without decode (useful for direct Klass* compare from oops)
 402   void load_metadata(Register dst, Register src);
 403 #ifdef _LP64
 404   void load_narrow_klass_compact(Register dst, Register src);
 405 #endif
 406   void load_klass(Register dst, Register src, Register tmp);
 407   void store_klass(Register dst, Register src, Register tmp);
 408 
 409   // Compares the Klass pointer of an object to a given Klass (which might be narrow,
 410   // depending on UseCompressedClassPointers).
 411   void cmp_klass(Register klass, Register obj, Register tmp);
 412 
 413   // Compares the Klass pointer of two objects obj1 and obj2. Result is in the condition flags.
 414   // Uses tmp1 and tmp2 as temporary registers.
 415   void cmp_klasses_from_objects(Register obj1, Register obj2, Register tmp1, Register tmp2);
 416 
 417   void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
 418                       Register tmp1, Register thread_tmp);
 419   void access_store_at(BasicType type, DecoratorSet decorators, Address dst, Register val,
 420                        Register tmp1, Register tmp2, Register tmp3);
 421 
 422   void flat_field_copy(DecoratorSet decorators, Register src, Register dst, Register inline_layout_info);
 423 
 424   // inline type data payload offsets...
 425   void payload_offset(Register inline_klass, Register offset);
 426   void payload_addr(Register oop, Register data, Register inline_klass);
 427   // get data payload ptr of a flat value array at index, kills rcx and index
 428   void data_for_value_array_index(Register array, Register array_klass,
 429                                   Register index, Register data);
 430 
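  // A minimal usage sketch (hypothetical caller code, not part of this header),
  // computing the address of a buffered value's flat payload; the value oop is
  // assumed to be in rax and its inline klass in rbx:
  //
  //   masm->payload_addr(rax, rsi, rbx);   // rsi := address of rax's payload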
 431   void load_heap_oop(Register dst, Address src, Register tmp1 = noreg,
 432                      Register thread_tmp = noreg, DecoratorSet decorators = 0);
 433   void load_heap_oop_not_null(Register dst, Address src, Register tmp1 = noreg,
 434                               Register thread_tmp = noreg, DecoratorSet decorators = 0);
 435   void store_heap_oop(Address dst, Register val, Register tmp1 = noreg,
 436                       Register tmp2 = noreg, Register tmp3 = noreg, DecoratorSet decorators = 0);
 437 
 438   // Used for storing null. All other oop constants should be
 439   // stored using routines that take a jobject.
 440   void store_heap_oop_null(Address dst);
 441 
 442   void load_prototype_header(Register dst, Register src, Register tmp);
 443 
 444 #ifdef _LP64
 445   void store_klass_gap(Register dst, Register src);
 446 
 447   // This dummy is to prevent a call to store_heap_oop from
 448   // converting a zero (like null) into a Register by giving
 449   // the compiler two choices it can't resolve
 450 
 451   void store_heap_oop(Address dst, void* dummy);
 452 
 453   void encode_heap_oop(Register r);
 454   void decode_heap_oop(Register r);
 455   void encode_heap_oop_not_null(Register r);
 456   void decode_heap_oop_not_null(Register r);
 457   void encode_heap_oop_not_null(Register dst, Register src);
 458   void decode_heap_oop_not_null(Register dst, Register src);
 459 
 460   void set_narrow_oop(Register dst, jobject obj);
 461   void set_narrow_oop(Address dst, jobject obj);
 462   void cmp_narrow_oop(Register dst, jobject obj);
 463   void cmp_narrow_oop(Address dst, jobject obj);

 625 
 626 public:
 627   void push_set(RegSet set, int offset = -1);
 628   void pop_set(RegSet set, int offset = -1);
 629 
 630   // Push and pop everything that might be clobbered by a native
 631   // runtime call.
 632   // Only save the lower 64 bits of each vector register.
 633   // Additional registers can be excluded in a passed RegSet.
 634   void push_call_clobbered_registers_except(RegSet exclude, bool save_fpu = true);
 635   void pop_call_clobbered_registers_except(RegSet exclude, bool restore_fpu = true);
 636 
 637   void push_call_clobbered_registers(bool save_fpu = true) {
 638     push_call_clobbered_registers_except(RegSet(), save_fpu);
 639   }
 640   void pop_call_clobbered_registers(bool restore_fpu = true) {
 641     pop_call_clobbered_registers_except(RegSet(), restore_fpu);
 642   }
 643 
 644   // allocation
 645 
 646   // Object / value buffer allocation...
 647   // Allocate instance of klass, assumes klass initialized by caller
 648   // new_obj prefers to be rax
 649   // Kills t1 and t2, preserves klass, returns the allocation in new_obj (rsi on LP64)
 650   void allocate_instance(Register klass, Register new_obj,
 651                          Register t1, Register t2,
 652                          bool clear_fields, Label& alloc_failed);
 653 
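  // A minimal usage sketch (hypothetical caller code, not part of this header),
  // assuming the initialized klass is in rbx and L_alloc_failed is a
  // caller-defined Label:
  //
  //   masm->allocate_instance(rbx, rax, rcx, rdx, /*clear_fields*/ true, L_alloc_failed);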
 654   void tlab_allocate(
 655     Register thread,                   // Current thread
 656     Register obj,                      // result: pointer to object after successful allocation
 657     Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
 658     int      con_size_in_bytes,        // object size in bytes if   known at compile time
 659     Register t1,                       // temp register
 660     Register t2,                       // temp register
 661     Label&   slow_case                 // continuation point if fast allocation fails
 662   );
 663   void zero_memory(Register address, Register length_in_bytes, int offset_in_bytes, Register temp);
 664 
 665   // For field "index" within "klass", return inline_klass ...
 666   void get_inline_type_field_klass(Register klass, Register index, Register inline_klass);
 667 
 668   void inline_layout_info(Register klass, Register index, Register layout_info);
 669 
 670   void population_count(Register dst, Register src, Register scratch1, Register scratch2);
 671 
 672   // interface method calling
 673   void lookup_interface_method(Register recv_klass,
 674                                Register intf_klass,
 675                                RegisterOrConstant itable_index,
 676                                Register method_result,
 677                                Register scan_temp,
 678                                Label& no_such_interface,
 679                                bool return_method = true);
 680 
 681   void lookup_interface_method_stub(Register recv_klass,
 682                                     Register holder_klass,
 683                                     Register resolved_klass,
 684                                     Register method_result,
 685                                     Register scan_temp,
 686                                     Register temp_reg2,
 687                                     Register receiver,
 688                                     int itable_index,
 689                                     Label& L_no_such_interface);

 900   // Instructions that use AddressLiteral operands. These instructions can handle 32-bit/64-bit
 901   // operands. In general the names are modified to avoid hiding the instruction in Assembler,
 902   // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
 903   // here in MacroAssembler. The major exception to this rule is call.
 904 
 905   // Arithmetics
 906 
 907 
 908   void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
 909   void addptr(Address dst, Register src);
 910 
 911   void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
 912   void addptr(Register dst, int32_t src);
 913   void addptr(Register dst, Register src);
 914   void addptr(Register dst, RegisterOrConstant src) {
 915     if (src.is_constant()) addptr(dst, checked_cast<int>(src.as_constant()));
 916     else                   addptr(dst, src.as_register());
 917   }
 918 
 919   void andptr(Register dst, int32_t src);
 920   void andptr(Register dst, Register src) { LP64_ONLY(andq(dst, src)) NOT_LP64(andl(dst, src)) ; }
 921   void andptr(Register dst, Address src) { LP64_ONLY(andq(dst, src)) NOT_LP64(andl(dst, src)) ; }
 922 
 923 #ifdef _LP64
 924   using Assembler::andq;
 925   void andq(Register dst, AddressLiteral src, Register rscratch = noreg);
 926 #endif
 927 
 928   void cmp8(AddressLiteral src1, int imm, Register rscratch = noreg);
 929 
 930   // renamed to drag out the casting of address to int32_t/intptr_t
 931   void cmp32(Register src1, int32_t imm);
 932 
 933   void cmp32(AddressLiteral src1, int32_t imm, Register rscratch = noreg);
 934   // compare reg - mem, or reg - &mem
 935   void cmp32(Register src1, AddressLiteral src2, Register rscratch = noreg);
 936 
 937   void cmp32(Register src1, Address src2);
 938 
 939 #ifndef _LP64
 940   void cmpklass(Address dst, Metadata* obj);
 941   void cmpklass(Register dst, Metadata* obj);

2117   void movdl(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
2118 
2119   using Assembler::movq;
2120   void movq(XMMRegister dst, AddressLiteral src, Register rscratch = noreg);
2121 
2122   // Can push value or effective address
2123   void pushptr(AddressLiteral src, Register rscratch);
2124 
2125   void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
2126   void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
2127 
2128   void pushoop(jobject obj, Register rscratch);
2129   void pushklass(Metadata* obj, Register rscratch);
2130 
2131   // sign extend as needed: widen an 'l' (32-bit) value to a ptr-sized element
2132   void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
2133   void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
2134 
2135 
2136  public:
2137   // Inline type specific methods
2138   #include "asm/macroAssembler_common.hpp"
2139 
2140   int store_inline_type_fields_to_buf(ciInlineKlass* vk, bool from_interpreter = true);
2141   bool move_helper(VMReg from, VMReg to, BasicType bt, RegState reg_state[]);
2142   bool unpack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index,
2143                             VMReg from, int& from_index, VMRegPair* to, int to_count, int& to_index,
2144                             RegState reg_state[]);
2145   bool pack_inline_helper(const GrowableArray<SigEntry>* sig, int& sig_index, int vtarg_index,
2146                           VMRegPair* from, int from_count, int& from_index, VMReg to,
2147                           RegState reg_state[], Register val_array);
2148   int extend_stack_for_inline_args(int args_on_stack);
2149   void remove_frame(int initial_framesize, bool needs_stack_repair);
2150   VMReg spill_reg_for(VMReg reg);
2151 
2152   // clear memory of size 'cnt' qwords, starting at 'base';
2153   // if 'is_large' is set, do not try to produce short loop
2154   void clear_mem(Register base, Register cnt, Register val, XMMRegister xtmp, bool is_large, bool word_copy_only, KRegister mask=knoreg);
2155 
2156   // clear memory initialization sequence for constant size;
2157   void clear_mem(Register base, int cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
2158 
2159   // clear memory of size 'cnt' qwords, starting at 'base' using XMM/YMM registers
2160   void xmm_clear_mem(Register base, Register cnt, Register rtmp, XMMRegister xtmp, KRegister mask=knoreg);
2161 
2162   // Fill primitive arrays
2163   void generate_fill(BasicType t, bool aligned,
2164                      Register to, Register value, Register count,
2165                      Register rtmp, XMMRegister xtmp);
2166 
2167   void encode_iso_array(Register src, Register dst, Register len,
2168                         XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3,
2169                         XMMRegister tmp4, Register tmp5, Register result, bool ascii);
2170 
2171 #ifdef _LP64
2172   void add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2);
2173   void multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
2174                              Register y, Register y_idx, Register z,