1 //
    2 // Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
    3 // Copyright (c) 2014, 2024, Red Hat, Inc. All rights reserved.
    4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    5 //
    6 // This code is free software; you can redistribute it and/or modify it
    7 // under the terms of the GNU General Public License version 2 only, as
    8 // published by the Free Software Foundation.
    9 //
   10 // This code is distributed in the hope that it will be useful, but WITHOUT
   11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   12 // FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   13 // version 2 for more details (a copy is included in the LICENSE file that
   14 // accompanied this code).
   15 //
   16 // You should have received a copy of the GNU General Public License version
   17 // 2 along with this work; if not, write to the Free Software Foundation,
   18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   19 //
   20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   21 // or visit www.oracle.com if you need additional information or have any
   22 // questions.
   23 //
   24 //
   25 
   26 // AArch64 Architecture Description File
   27 
   28 //----------REGISTER DEFINITION BLOCK------------------------------------------
   29 // This information is used by the matcher and the register allocator to
   30 // describe individual registers and classes of registers within the target
   31 // architecture.
   32 
   33 register %{
   34 //----------Architecture Description Register Definitions----------------------
   35 // General Registers
   36 // "reg_def"  name ( register save type, C convention save type,
   37 //                   ideal register type, encoding );
   38 // Register Save Types:
   39 //
   40 // NS  = No-Save:       The register allocator assumes that these registers
   41 //                      can be used without saving upon entry to the method, &
   42 //                      that they do not need to be saved at call sites.
   43 //
   44 // SOC = Save-On-Call:  The register allocator assumes that these registers
   45 //                      can be used without saving upon entry to the method,
   46 //                      but that they must be saved at call sites.
   47 //
   48 // SOE = Save-On-Entry: The register allocator assumes that these registers
   49 //                      must be saved before using them upon entry to the
   50 //                      method, but they do not need to be saved at call
   51 //                      sites.
   52 //
   53 // AS  = Always-Save:   The register allocator assumes that these registers
   54 //                      must be saved before using them upon entry to the
   55 //                      method, & that they must be saved at call sites.
   56 //
   57 // Ideal Register Type is used to determine how to save & restore a
   58 // register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
   59 // spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
   60 //
   61 // The encoding number is the actual bit-pattern placed into the opcodes.
   62 
   63 // We must define the 64 bit int registers in two 32 bit halves, the
   64 // real lower register and a virtual upper half register. upper halves
   65 // are used by the register allocator but are not actually supplied as
   66 // operands to memory ops.
   67 //
   68 // follow the C1 compiler in making registers
   69 //
   70 //   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
   72 //   r8-r9 non-allocatable (so we can use them as scratch regs)
   73 //
// As regards Java usage, we don't use any callee-save registers
// because this makes it difficult to de-optimise a frame (see comment
// in x86 implementation of Deoptimization::unwind_callee_save_values)
   77 //
   78 
   79 // General Registers
   80 
// reg_def columns: ( Java save type, C-convention save type,
//                    ideal register type, encoding, underlying VMReg ).
// Each 64-bit register is split into the real low half (Rn) and a
// virtual high half (Rn_H) used only by the register allocator.
reg_def R0      ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()         );
reg_def R0_H    ( SOC, SOC, Op_RegI,  0, r0->as_VMReg()->next() );
reg_def R1      ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()         );
reg_def R1_H    ( SOC, SOC, Op_RegI,  1, r1->as_VMReg()->next() );
reg_def R2      ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()         );
reg_def R2_H    ( SOC, SOC, Op_RegI,  2, r2->as_VMReg()->next() );
reg_def R3      ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()         );
reg_def R3_H    ( SOC, SOC, Op_RegI,  3, r3->as_VMReg()->next() );
reg_def R4      ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()         );
reg_def R4_H    ( SOC, SOC, Op_RegI,  4, r4->as_VMReg()->next() );
reg_def R5      ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()         );
reg_def R5_H    ( SOC, SOC, Op_RegI,  5, r5->as_VMReg()->next() );
reg_def R6      ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()         );
reg_def R6_H    ( SOC, SOC, Op_RegI,  6, r6->as_VMReg()->next() );
reg_def R7      ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()         );
reg_def R7_H    ( SOC, SOC, Op_RegI,  7, r7->as_VMReg()->next() );
reg_def R8      ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()         ); // rscratch1, non-allocatable
reg_def R8_H    ( NS,  SOC, Op_RegI,  8, r8->as_VMReg()->next() );
reg_def R9      ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()         ); // rscratch2, non-allocatable
reg_def R9_H    ( NS,  SOC, Op_RegI,  9, r9->as_VMReg()->next() );
reg_def R10     ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()        );
reg_def R10_H   ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11     ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()        );
reg_def R11_H   ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12     ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()        );
reg_def R12_H   ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13     ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()        );
reg_def R13_H   ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14     ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()        );
reg_def R14_H   ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15     ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()        );
reg_def R15_H   ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16     ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()        );
reg_def R16_H   ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17     ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()        );
reg_def R17_H   ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
// r18 is the platform register (TLS on some OSes, hence r18_tls); it is
// defined here but excluded from allocation via the non_allocatable
// classes below when R18_RESERVED is set.
reg_def R18     ( SOC, SOC, Op_RegI, 18, r18_tls->as_VMReg()        );
reg_def R18_H   ( SOC, SOC, Op_RegI, 18, r18_tls->as_VMReg()->next());
// r19-r26: callee-save (SOE) under the C convention, but caller-save
// for Java (we deliberately use no callee-saves in Java frames; see the
// note on de-optimisation above).
reg_def R19     ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()        );
reg_def R19_H   ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20     ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()        ); // caller esp
reg_def R20_H   ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21     ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()        );
reg_def R21_H   ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22     ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()        );
reg_def R22_H   ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23     ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()        );
reg_def R23_H   ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24     ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()        );
reg_def R24_H   ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25     ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()        );
reg_def R25_H   ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26     ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()        );
reg_def R26_H   ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
// Fixed-role registers: defined so the allocator knows about them, but
// never handed out (see the non_allocatable classes below).
reg_def R27     ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()        ); // heapbase
reg_def R27_H   ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28     (  NS, SOE, Op_RegI, 28, r28->as_VMReg()        ); // thread
reg_def R28_H   (  NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29     (  NS,  NS, Op_RegI, 29, r29->as_VMReg()        ); // fp
reg_def R29_H   (  NS,  NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30     (  NS,  NS, Op_RegI, 30, r30->as_VMReg()        ); // lr
reg_def R30_H   (  NS,  NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31     (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()     ); // sp
reg_def R31_H   (  NS,  NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
  145 
  146 // ----------------------------
  147 // Float/Double/Vector Registers
  148 // ----------------------------
  149 
  150 // Double Registers
  151 
  152 // The rules of ADL require that double registers be defined in pairs.
  153 // Each pair must be two 32-bit values, but not necessarily a pair of
  154 // single float registers. In each pair, ADLC-assigned register numbers
  155 // must be adjacent, with the lower number even. Finally, when the
  156 // CPU stores such a register pair to memory, the word associated with
  157 // the lower ADLC-assigned number must be stored to the lower address.
  158 
  159 // AArch64 has 32 floating-point registers. Each can store a vector of
  160 // single or double precision floating-point values up to 8 * 32
  161 // floats, 4 * 64 bit floats or 2 * 128 bit floats.  We currently only
  162 // use the first float or double element of the vector.
  163 
// For Java use, float registers v0-v15 are always save-on-call, whereas
// the platform ABI treats v8-v15 as callee-save. Float registers
// v16-v31 are SOC as per the platform spec.
  167 
  168 // For SVE vector registers, we simply extend vector register size to 8
  169 // 'logical' slots. This is nominally 256 bits but it actually covers
  170 // all possible 'physical' SVE vector register lengths from 128 ~ 2048
  171 // bits. The 'physical' SVE vector register length is detected during
  172 // startup, so the register allocator is able to identify the correct
  173 // number of bytes needed for an SVE spill/unspill.
  174 // Note that a vector register with 4 slots denotes a 128-bit NEON
  175 // register allowing it to be distinguished from the corresponding SVE
  176 // vector register when the SVE vector length is 128 bits.
  177 
  // Each FP/SIMD register is described by four 32-bit slots
  // (Vn, Vn_H, Vn_J, Vn_K) covering the 128-bit NEON view; the SVE
  // note above explains how longer physical vector lengths are handled.
  reg_def V0   ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()          );
  reg_def V0_H ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next()  );
  reg_def V0_J ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(2) );
  reg_def V0_K ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(3) );

  reg_def V1   ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()          );
  reg_def V1_H ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next()  );
  reg_def V1_J ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(2) );
  reg_def V1_K ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(3) );

  reg_def V2   ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()          );
  reg_def V2_H ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next()  );
  reg_def V2_J ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(2) );
  reg_def V2_K ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(3) );

  reg_def V3   ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()          );
  reg_def V3_H ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next()  );
  reg_def V3_J ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(2) );
  reg_def V3_K ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(3) );

  reg_def V4   ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()          );
  reg_def V4_H ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next()  );
  reg_def V4_J ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(2) );
  reg_def V4_K ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(3) );

  reg_def V5   ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()          );
  reg_def V5_H ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next()  );
  reg_def V5_J ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(2) );
  reg_def V5_K ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(3) );

  reg_def V6   ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()          );
  reg_def V6_H ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next()  );
  reg_def V6_J ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(2) );
  reg_def V6_K ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(3) );

  reg_def V7   ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()          );
  reg_def V7_H ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next()  );
  reg_def V7_J ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(2) );
  reg_def V7_K ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(3) );

  // v8-v15: the platform ABI preserves only the low 64 bits across
  // calls, so the base and _H slots are SOE for the C convention while
  // the upper _J and _K slots remain SOC.
  reg_def V8   ( SOC, SOE, Op_RegF, 8, v8->as_VMReg()          );
  reg_def V8_H ( SOC, SOE, Op_RegF, 8, v8->as_VMReg()->next()  );
  reg_def V8_J ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(2) );
  reg_def V8_K ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(3) );

  reg_def V9   ( SOC, SOE, Op_RegF, 9, v9->as_VMReg()          );
  reg_def V9_H ( SOC, SOE, Op_RegF, 9, v9->as_VMReg()->next()  );
  reg_def V9_J ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(2) );
  reg_def V9_K ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(3) );

  reg_def V10   ( SOC, SOE, Op_RegF, 10, v10->as_VMReg()          );
  reg_def V10_H ( SOC, SOE, Op_RegF, 10, v10->as_VMReg()->next()  );
  reg_def V10_J ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2) );
  reg_def V10_K ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3) );

  reg_def V11   ( SOC, SOE, Op_RegF, 11, v11->as_VMReg()          );
  reg_def V11_H ( SOC, SOE, Op_RegF, 11, v11->as_VMReg()->next()  );
  reg_def V11_J ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2) );
  reg_def V11_K ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3) );

  reg_def V12   ( SOC, SOE, Op_RegF, 12, v12->as_VMReg()          );
  reg_def V12_H ( SOC, SOE, Op_RegF, 12, v12->as_VMReg()->next()  );
  reg_def V12_J ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2) );
  reg_def V12_K ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3) );

  reg_def V13   ( SOC, SOE, Op_RegF, 13, v13->as_VMReg()          );
  reg_def V13_H ( SOC, SOE, Op_RegF, 13, v13->as_VMReg()->next()  );
  reg_def V13_J ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2) );
  reg_def V13_K ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3) );

  reg_def V14   ( SOC, SOE, Op_RegF, 14, v14->as_VMReg()          );
  reg_def V14_H ( SOC, SOE, Op_RegF, 14, v14->as_VMReg()->next()  );
  reg_def V14_J ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2) );
  reg_def V14_K ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3) );

  reg_def V15   ( SOC, SOE, Op_RegF, 15, v15->as_VMReg()          );
  reg_def V15_H ( SOC, SOE, Op_RegF, 15, v15->as_VMReg()->next()  );
  reg_def V15_J ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2) );
  reg_def V15_K ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3) );

  // v16-v31: fully caller-save (SOC) in both conventions.
  reg_def V16   ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()          );
  reg_def V16_H ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next()  );
  reg_def V16_J ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2) );
  reg_def V16_K ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3) );

  reg_def V17   ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()          );
  reg_def V17_H ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next()  );
  reg_def V17_J ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2) );
  reg_def V17_K ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3) );

  reg_def V18   ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()          );
  reg_def V18_H ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next()  );
  reg_def V18_J ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2) );
  reg_def V18_K ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3) );

  reg_def V19   ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()          );
  reg_def V19_H ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next()  );
  reg_def V19_J ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2) );
  reg_def V19_K ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3) );

  reg_def V20   ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()          );
  reg_def V20_H ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next()  );
  reg_def V20_J ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2) );
  reg_def V20_K ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3) );

  reg_def V21   ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()          );
  reg_def V21_H ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next()  );
  reg_def V21_J ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2) );
  reg_def V21_K ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3) );

  reg_def V22   ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()          );
  reg_def V22_H ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next()  );
  reg_def V22_J ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2) );
  reg_def V22_K ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3) );

  reg_def V23   ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()          );
  reg_def V23_H ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next()  );
  reg_def V23_J ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2) );
  reg_def V23_K ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3) );

  reg_def V24   ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()          );
  reg_def V24_H ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next()  );
  reg_def V24_J ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2) );
  reg_def V24_K ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3) );

  reg_def V25   ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()          );
  reg_def V25_H ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next()  );
  reg_def V25_J ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2) );
  reg_def V25_K ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3) );

  reg_def V26   ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()          );
  reg_def V26_H ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next()  );
  reg_def V26_J ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2) );
  reg_def V26_K ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3) );

  reg_def V27   ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()          );
  reg_def V27_H ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next()  );
  reg_def V27_J ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2) );
  reg_def V27_K ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3) );

  reg_def V28   ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()          );
  reg_def V28_H ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next()  );
  reg_def V28_J ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2) );
  reg_def V28_K ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3) );

  reg_def V29   ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()          );
  reg_def V29_H ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next()  );
  reg_def V29_J ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2) );
  reg_def V29_K ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3) );

  reg_def V30   ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()          );
  reg_def V30_H ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next()  );
  reg_def V30_J ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2) );
  reg_def V30_K ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3) );

  reg_def V31   ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()          );
  reg_def V31_H ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next()  );
  reg_def V31_J ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2) );
  reg_def V31_K ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3) );
  337 
  338 // ----------------------------
  339 // SVE Predicate Registers
  340 // ----------------------------
  // SVE predicate registers p0-p15, all caller-save (SOC).
  // p7 is deliberately placed last in the chunk2 allocation order below
  // so it normally stays free to hold the all-true predicate.
  reg_def P0 (SOC, SOC, Op_RegVectMask, 0, p0->as_VMReg());
  reg_def P1 (SOC, SOC, Op_RegVectMask, 1, p1->as_VMReg());
  reg_def P2 (SOC, SOC, Op_RegVectMask, 2, p2->as_VMReg());
  reg_def P3 (SOC, SOC, Op_RegVectMask, 3, p3->as_VMReg());
  reg_def P4 (SOC, SOC, Op_RegVectMask, 4, p4->as_VMReg());
  reg_def P5 (SOC, SOC, Op_RegVectMask, 5, p5->as_VMReg());
  reg_def P6 (SOC, SOC, Op_RegVectMask, 6, p6->as_VMReg());
  reg_def P7 (SOC, SOC, Op_RegVectMask, 7, p7->as_VMReg());
  reg_def P8 (SOC, SOC, Op_RegVectMask, 8, p8->as_VMReg());
  reg_def P9 (SOC, SOC, Op_RegVectMask, 9, p9->as_VMReg());
  reg_def P10 (SOC, SOC, Op_RegVectMask, 10, p10->as_VMReg());
  reg_def P11 (SOC, SOC, Op_RegVectMask, 11, p11->as_VMReg());
  reg_def P12 (SOC, SOC, Op_RegVectMask, 12, p12->as_VMReg());
  reg_def P13 (SOC, SOC, Op_RegVectMask, 13, p13->as_VMReg());
  reg_def P14 (SOC, SOC, Op_RegVectMask, 14, p14->as_VMReg());
  reg_def P15 (SOC, SOC, Op_RegVectMask, 15, p15->as_VMReg());
  357 
  358 // ----------------------------
  359 // Special Registers
  360 // ----------------------------
  361 
  362 // the AArch64 CSPR status flag register is not directly accessible as
  363 // instruction operand. the FPSR status flag register is a system
  364 // register which can be written/read using MSR/MRS but again does not
  365 // appear as an operand (a code identifying the FSPR occurs as an
  366 // immediate value in the instruction).
  367 
// The status flags have no instruction-operand encoding (see the note
// above), hence VMRegImpl::Bad() for the underlying VMReg.
reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
  369 
  370 // Specify priority of register selection within phases of register
  371 // allocation.  Highest priority is first.  A useful heuristic is to
  372 // give registers a low priority when they are required by machine
  373 // instructions, like EAX and EDX on I486, and choose no-save registers
  374 // before save-on-call, & save-on-call before save-on-entry.  Registers
  375 // which participate in fixed calling sequences should come last.
  376 // Registers which are used as pairs must fall on an even boundary.
  377 
alloc_class chunk0(
    // Allocation priority for general registers (highest first):
    // volatile scratch registers, then the Java argument registers,
    // then callee-saves, with the fixed-role registers last.
    // volatiles
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,

    // arg registers
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,

    // non-volatiles
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,

    // non-allocatable registers

    R27, R27_H, // heapbase
    R28, R28_H, // thread
    R29, R29_H, // fp
    R30, R30_H, // lr
    R31, R31_H, // sp
    R8, R8_H,   // rscratch1
    R9, R9_H,   // rscratch2
);
  420 
alloc_class chunk1(

    // Allocation priority for FP/SIMD registers (highest first):
    // fully caller-save v16-v31, then the FP argument registers v0-v7,
    // then v8-v15 whose low 64 bits are callee-save under the C ABI.
    // no save
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,

    // arg registers
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,

    // non-volatiles
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
);
  461 
alloc_class chunk2 (
    // Predicate register allocation order; p7 is listed last so that it
    // normally stays free to hold the all-true predicate.
    // Governing predicates for load/store and arithmetic
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,

    // Extra predicates
    P8,
    P9,
    P10,
    P11,
    P12,
    P13,
    P14,
    P15,

    // Preserved for all-true predicate
    P7,
);
  485 
alloc_class chunk3(RFLAGS); // the flags register is a chunk of its own
  487 
  488 //----------Architecture Description Register Classes--------------------------
  489 // Several register classes are automatically defined based upon information in
  490 // this architecture description.
  491 // 1) reg_class inline_cache_reg           ( /* as def'd in frame section */ )
  492 // 2) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
  493 //
  494 
  495 // Class for all 32 bit general purpose registers
reg_class all_reg32(
    // Every 32-bit general register low word, including the fixed-role
    // ones (heapbase, thread, fp, lr, sp); more restrictive classes
    // below are derived from this set via masks.
    R0,
    R1,
    R2,
    R3,
    R4,
    R5,
    R6,
    R7,
    R10,
    R11,
    R12,
    R13,
    R14,
    R15,
    R16,
    R17,
    R18,
    R19,
    R20,
    R21,
    R22,
    R23,
    R24,
    R25,
    R26,
    R27,
    R28,
    R29,
    R30,
    R31
);
  528 
  529 
  530 // Class for all 32 bit integer registers (excluding SP which
  531 // will never be used as an integer register)
reg_class any_reg32 %{
  // Mask computed in the accompanying C++ source, not statically listed.
  return _ANY_REG32_mask;
%}
  535 
// Singleton classes pin an operand to one specific register, for
// instructions and calling sequences that require a fixed register.

// Singleton class for R0 int register
reg_class int_r0_reg(R0);

// Singleton class for R2 int register
reg_class int_r2_reg(R2);

// Singleton class for R3 int register
reg_class int_r3_reg(R3);

// Singleton class for R4 int register
reg_class int_r4_reg(R4);

// Singleton class for R31 int register
reg_class int_r31_reg(R31);
  550 
  551 // Class for all 64 bit general purpose registers
reg_class all_reg(
    // Every 64-bit general register (low half + virtual high half),
    // including the fixed-role registers; restricted variants below are
    // derived from this set via masks.
    R0, R0_H,
    R1, R1_H,
    R2, R2_H,
    R3, R3_H,
    R4, R4_H,
    R5, R5_H,
    R6, R6_H,
    R7, R7_H,
    R10, R10_H,
    R11, R11_H,
    R12, R12_H,
    R13, R13_H,
    R14, R14_H,
    R15, R15_H,
    R16, R16_H,
    R17, R17_H,
    R18, R18_H,
    R19, R19_H,
    R20, R20_H,
    R21, R21_H,
    R22, R22_H,
    R23, R23_H,
    R24, R24_H,
    R25, R25_H,
    R26, R26_H,
    R27, R27_H,
    R28, R28_H,
    R29, R29_H,
    R30, R30_H,
    R31, R31_H
);
  584 
  585 // Class for all long integer registers (including SP)
reg_class any_reg %{
  // Mask computed in the accompanying C++ source, not statically listed.
  return _ANY_REG_mask;
%}
  589 
  590 // Class for non-allocatable 32 bit registers
reg_class non_allocatable_reg32(
    // Fixed-role low words that must never be handed out by the
    // register allocator.
#ifdef R18_RESERVED
    // See comment in register_aarch64.hpp
    R18,                        // tls on Windows
#endif
    R28,                        // thread
    R30,                        // lr
    R31                         // sp
);
  600 
  601 // Class for non-allocatable 64 bit registers
reg_class non_allocatable_reg(
    // 64-bit counterpart of non_allocatable_reg32: fixed-role registers
    // that must never be handed out by the register allocator.
#ifdef R18_RESERVED
    // See comment in register_aarch64.hpp
    R18, R18_H,                 // tls on Windows, platform register on macOS
#endif
    R28, R28_H,                 // thread
    R30, R30_H,                 // lr
    R31, R31_H                  // sp
);
  611 
  612 // Class for all non-special integer registers
reg_class no_special_reg32 %{
  // Mask computed in the accompanying C++ source, not statically listed.
  return _NO_SPECIAL_REG32_mask;
%}

// Class for all non-special long integer registers
reg_class no_special_reg %{
  // Mask computed in the accompanying C++ source, not statically listed.
  return _NO_SPECIAL_REG_mask;
%}
  621 
// Singleton 64-bit classes (low half + virtual high half), used to pin
// operands to specific registers in fixed calling/encoding sequences.

// Class for 64 bit register r0
reg_class r0_reg(
    R0, R0_H
);

// Class for 64 bit register r1
reg_class r1_reg(
    R1, R1_H
);

// Class for 64 bit register r2
reg_class r2_reg(
    R2, R2_H
);

// Class for 64 bit register r3
reg_class r3_reg(
    R3, R3_H
);

// Class for 64 bit register r4
reg_class r4_reg(
    R4, R4_H
);

// Class for 64 bit register r5
reg_class r5_reg(
    R5, R5_H
);

// Class for 64 bit register r10
reg_class r10_reg(
    R10, R10_H
);

// Class for 64 bit register r11
reg_class r11_reg(
    R11, R11_H
);

// Class for method register
reg_class method_reg(
    R12, R12_H
);

// Class for thread register
reg_class thread_reg(
    R28, R28_H
);

// Class for frame pointer register
reg_class fp_reg(
    R29, R29_H
);

// Class for link register
reg_class lr_reg(
    R30, R30_H
);

// Class for long sp register
reg_class sp_reg(
  R31, R31_H
);
  686 
// Class for all pointer registers
reg_class ptr_reg %{
  // Mask computed in the accompanying C++ source, not statically listed.
  return _PTR_REG_mask;
%}

// Class for all non_special pointer registers
reg_class no_special_ptr_reg %{
  // As ptr_reg, but with the special-purpose registers removed.
  return _NO_SPECIAL_PTR_REG_mask;
%}

// Class for all non_special pointer registers (excluding rfp)
reg_class no_special_no_rfp_ptr_reg %{
  // As no_special_ptr_reg, additionally excluding the frame pointer.
  return _NO_SPECIAL_NO_RFP_PTR_REG_mask;
%}
  701 
// Class for all float registers
reg_class float_reg(
    // One 32-bit slot per register: only the first (single-precision)
    // element of each vector register is used.
    V0,
    V1,
    V2,
    V3,
    V4,
    V5,
    V6,
    V7,
    V8,
    V9,
    V10,
    V11,
    V12,
    V13,
    V14,
    V15,
    V16,
    V17,
    V18,
    V19,
    V20,
    V21,
    V22,
    V23,
    V24,
    V25,
    V26,
    V27,
    V28,
    V29,
    V30,
    V31
);
  737 
// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers
reg_class double_reg(
    // Two 32-bit slots (base + _H) per register = one 64-bit double.
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
  775 
// Class for all SVE vector registers.
reg_class vectora_reg (
    // Four 32-bit slots per register; the actual SVE spill size is
    // determined by the vector length detected at startup (see the SVE
    // note above).
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K,
);
  811 
// Class for all 64bit vector registers
// Two 32-bit slots (VN, VN_H) per register = 64 bits.
reg_class vectord_reg(
    V0, V0_H,
    V1, V1_H,
    V2, V2_H,
    V3, V3_H,
    V4, V4_H,
    V5, V5_H,
    V6, V6_H,
    V7, V7_H,
    V8, V8_H,
    V9, V9_H,
    V10, V10_H,
    V11, V11_H,
    V12, V12_H,
    V13, V13_H,
    V14, V14_H,
    V15, V15_H,
    V16, V16_H,
    V17, V17_H,
    V18, V18_H,
    V19, V19_H,
    V20, V20_H,
    V21, V21_H,
    V22, V22_H,
    V23, V23_H,
    V24, V24_H,
    V25, V25_H,
    V26, V26_H,
    V27, V27_H,
    V28, V28_H,
    V29, V29_H,
    V30, V30_H,
    V31, V31_H
);
  847 
// Class for all 128bit vector registers
// Four 32-bit slots (VN, VN_H, VN_J, VN_K) per register = 128 bits.
reg_class vectorx_reg(
    V0, V0_H, V0_J, V0_K,
    V1, V1_H, V1_J, V1_K,
    V2, V2_H, V2_J, V2_K,
    V3, V3_H, V3_J, V3_K,
    V4, V4_H, V4_J, V4_K,
    V5, V5_H, V5_J, V5_K,
    V6, V6_H, V6_J, V6_K,
    V7, V7_H, V7_J, V7_K,
    V8, V8_H, V8_J, V8_K,
    V9, V9_H, V9_J, V9_K,
    V10, V10_H, V10_J, V10_K,
    V11, V11_H, V11_J, V11_K,
    V12, V12_H, V12_J, V12_K,
    V13, V13_H, V13_J, V13_K,
    V14, V14_H, V14_J, V14_K,
    V15, V15_H, V15_J, V15_K,
    V16, V16_H, V16_J, V16_K,
    V17, V17_H, V17_J, V17_K,
    V18, V18_H, V18_J, V18_K,
    V19, V19_H, V19_J, V19_K,
    V20, V20_H, V20_J, V20_K,
    V21, V21_H, V21_J, V21_K,
    V22, V22_H, V22_J, V22_K,
    V23, V23_H, V23_J, V23_K,
    V24, V24_H, V24_J, V24_K,
    V25, V25_H, V25_J, V25_K,
    V26, V26_H, V26_J, V26_K,
    V27, V27_H, V27_J, V27_K,
    V28, V28_H, V28_J, V28_K,
    V29, V29_H, V29_J, V29_K,
    V30, V30_H, V30_J, V30_K,
    V31, V31_H, V31_J, V31_K
);
  883 
// Fixed-register classes: each pins a single vN register, for use by
// operands/instructions that require their vector argument in one
// specific register (e.g. stub calling conventions).
// NOTE(review): each class lists only the VN/VN_H slot pair despite the
// "128 bit" label -- this is consistent across all 32 classes; confirm
// it matches the intended allocator granularity for these operands.
// Class for 128 bit register v0
reg_class v0_reg(
    V0, V0_H
);

// Class for 128 bit register v1
reg_class v1_reg(
    V1, V1_H
);

// Class for 128 bit register v2
reg_class v2_reg(
    V2, V2_H
);

// Class for 128 bit register v3
reg_class v3_reg(
    V3, V3_H
);

// Class for 128 bit register v4
reg_class v4_reg(
    V4, V4_H
);

// Class for 128 bit register v5
reg_class v5_reg(
    V5, V5_H
);

// Class for 128 bit register v6
reg_class v6_reg(
    V6, V6_H
);

// Class for 128 bit register v7
reg_class v7_reg(
    V7, V7_H
);

// Class for 128 bit register v8
reg_class v8_reg(
    V8, V8_H
);

// Class for 128 bit register v9
reg_class v9_reg(
    V9, V9_H
);

// Class for 128 bit register v10
reg_class v10_reg(
    V10, V10_H
);

// Class for 128 bit register v11
reg_class v11_reg(
    V11, V11_H
);

// Class for 128 bit register v12
reg_class v12_reg(
    V12, V12_H
);

// Class for 128 bit register v13
reg_class v13_reg(
    V13, V13_H
);

// Class for 128 bit register v14
reg_class v14_reg(
    V14, V14_H
);

// Class for 128 bit register v15
reg_class v15_reg(
    V15, V15_H
);

// Class for 128 bit register v16
reg_class v16_reg(
    V16, V16_H
);

// Class for 128 bit register v17
reg_class v17_reg(
    V17, V17_H
);

// Class for 128 bit register v18
reg_class v18_reg(
    V18, V18_H
);

// Class for 128 bit register v19
reg_class v19_reg(
    V19, V19_H
);

// Class for 128 bit register v20
reg_class v20_reg(
    V20, V20_H
);

// Class for 128 bit register v21
reg_class v21_reg(
    V21, V21_H
);

// Class for 128 bit register v22
reg_class v22_reg(
    V22, V22_H
);

// Class for 128 bit register v23
reg_class v23_reg(
    V23, V23_H
);

// Class for 128 bit register v24
reg_class v24_reg(
    V24, V24_H
);

// Class for 128 bit register v25
reg_class v25_reg(
    V25, V25_H
);

// Class for 128 bit register v26
reg_class v26_reg(
    V26, V26_H
);

// Class for 128 bit register v27
reg_class v27_reg(
    V27, V27_H
);

// Class for 128 bit register v28
reg_class v28_reg(
    V28, V28_H
);

// Class for 128 bit register v29
reg_class v29_reg(
    V29, V29_H
);

// Class for 128 bit register v30
reg_class v30_reg(
    V30, V30_H
);

// Class for 128 bit register v31
reg_class v31_reg(
    V31, V31_H
);
 1043 
// Class for all SVE predicate registers.
// P7 is deliberately omitted: it is kept preset to all-true (see the
// inline comment below) and must never be handed to the allocator.
reg_class pr_reg (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    // P7, non-allocatable, preserved with all elements preset to TRUE.
    P8,
    P9,
    P10,
    P11,
    P12,
    P13,
    P14,
    P15
);
 1063 
// Class for SVE governing predicate registers, which are used
// to determine the active elements of a predicated instruction.
// Restricted to the low predicates: most SVE predicated instructions
// can only encode p0-p7 as their governing predicate, and p7 itself is
// reserved (all-true), leaving p0-p6 allocatable here.
reg_class gov_pr (
    P0,
    P1,
    P2,
    P3,
    P4,
    P5,
    P6,
    // P7, non-allocatable, preserved with all elements preset to TRUE.
);
 1076 
// Singleton classes pinning individual predicate registers, for
// instructions that require a result in a specific predicate.
reg_class p0_reg(P0);
reg_class p1_reg(P1);

// Singleton class for condition codes
reg_class int_flags(RFLAGS);
 1082 
 1083 %}
 1084 
 1085 //----------DEFINITION BLOCK---------------------------------------------------
 1086 // Define name --> value mappings to inform the ADLC of an integer valued name
 1087 // Current support includes integer values in the range [0, 0x7FFFFFFF]
 1088 // Format:
 1089 //        int_def  <name>         ( <int_value>, <expression>);
 1090 // Generated Code in ad_<arch>.hpp
 1091 //        #define  <name>   (<expression>)
 1092 //        // value == <int_value>
 1093 // Generated code in ad_<arch>.cpp adlc_verification()
 1094 //        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
 1095 //
 1096 
 1097 // we follow the ppc-aix port in using a simple cost model which ranks
 1098 // register operations as cheap, memory ops as more expensive and
 1099 // branches as most expensive. the first two have a low as well as a
 1100 // normal cost. huge cost appears to be a way of saying don't do
 1101 // something
 1102 
definitions %{
  // The default cost (of a register move instruction).
  int_def INSN_COST            (    100,     100);
  // Branches cost twice a register operation.
  int_def BRANCH_COST          (    200,     2 * INSN_COST);
  // Calls are costed the same as branches.
  int_def CALL_COST            (    200,     2 * INSN_COST);
  // Volatile references (which need barriers) are an order of magnitude
  // more expensive than plain register operations.
  int_def VOLATILE_REF_COST    (   1000,     10 * INSN_COST);
%}
 1110 
 1111 
 1112 //----------SOURCE BLOCK-------------------------------------------------------
 1113 // This is a block of C++ code which provides values, functions, and
 1114 // definitions necessary in the rest of the architecture description
 1115 
 1116 source_hpp %{
 1117 
 1118 #include "asm/macroAssembler.hpp"
 1119 #include "gc/shared/barrierSetAssembler.hpp"
 1120 #include "gc/shared/cardTable.hpp"
 1121 #include "gc/shared/cardTableBarrierSet.hpp"
 1122 #include "gc/shared/collectedHeap.hpp"
 1123 #include "opto/addnode.hpp"
 1124 #include "opto/convertnode.hpp"
 1125 #include "runtime/objectMonitor.hpp"
 1126 
 1127 extern RegMask _ANY_REG32_mask;
 1128 extern RegMask _ANY_REG_mask;
 1129 extern RegMask _PTR_REG_mask;
 1130 extern RegMask _NO_SPECIAL_REG32_mask;
 1131 extern RegMask _NO_SPECIAL_REG_mask;
 1132 extern RegMask _NO_SPECIAL_PTR_REG_mask;
 1133 extern RegMask _NO_SPECIAL_NO_RFP_PTR_REG_mask;
 1134 
// Platform hooks describing call trampoline stubs to the generic code.
// AArch64 emits none, so both queries return zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
 1152 
// Platform hooks for emitting and sizing the exception and deopt
// handler stubs appended to compiled methods.
class HandlerImpl {

 public:

  static int emit_exception_handler(C2_MacroAssembler *masm);
  static int emit_deopt_handler(C2_MacroAssembler* masm);

  // Exception handler is a single (possibly far) branch to the stub.
  static uint size_exception_handler() {
    return MacroAssembler::far_codestub_branch_size();
  }

  static uint size_deopt_handler() {
    // count one adr and one far branch instruction
    return NativeInstruction::instruction_size + MacroAssembler::far_codestub_branch_size();
  }
};
 1169 
// Platform-dependent node flags. AArch64 defines no extra flags beyond
// the shared ones, so _last_flag simply aliases Node::_last_flag.
class Node::PD {
public:
  enum NodeFlags {
    _last_flag = Node::_last_flag
  };
};
 1176 
  // Forward declarations for the volatile-access matching predicates
  // defined in the source block below.

  bool is_CAS(int opcode, bool maybe_volatile);

  // predicates controlling emit of ldr<x>/ldar<x> and associated dmb

  bool unnecessary_acquire(const Node *barrier);
  bool needs_acquiring_load(const Node *load);

  // predicates controlling emit of str<x>/stlr<x> and associated dmbs

  bool unnecessary_release(const Node *barrier);
  bool unnecessary_volatile(const Node *barrier);
  bool needs_releasing_store(const Node *store);

  // predicate controlling translation of CompareAndSwapX
  bool needs_acquiring_load_exclusive(const Node *load);

  // predicate controlling addressing modes
  bool size_fits_all_mem_uses(AddPNode* addp, int shift);

  // Convert BoolTest condition to Assembler condition.
  // Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
  Assembler::Condition to_assembler_cond(BoolTest::mask cond);
 1199 %}
 1200 
 1201 source %{
 1202 
 1203   // Derived RegMask with conditionally allocatable registers
 1204 
  // No platform-specific mach node analysis is needed on AArch64.
  void PhaseOutput::pd_perform_mach_node_analysis() {
  }
 1207 
  // Machine nodes need no special code alignment on AArch64.
  int MachNode::pd_alignment_required() const {
    return 1;
  }
 1211 
  // No padding is ever inserted before a machine node on AArch64.
  int MachNode::compute_padding(int current_offset) const {
    return 0;
  }
 1215 
 1216   RegMask _ANY_REG32_mask;
 1217   RegMask _ANY_REG_mask;
 1218   RegMask _PTR_REG_mask;
 1219   RegMask _NO_SPECIAL_REG32_mask;
 1220   RegMask _NO_SPECIAL_REG_mask;
 1221   RegMask _NO_SPECIAL_PTR_REG_mask;
 1222   RegMask _NO_SPECIAL_NO_RFP_PTR_REG_mask;
 1223 
  // Build the derived register masks declared above. Called once at VM
  // startup, after the adlc-generated masks (_ALL_REG_mask etc.) exist.
  // Note the ordering below: the NO_RFP mask is derived last, from the
  // already-trimmed _NO_SPECIAL_PTR_REG_mask.
  void reg_mask_init() {
    // We derive below RegMask(s) from the ones which are auto-generated from
    // adlc register classes to make AArch64 rheapbase (r27) and rfp (r29)
    // registers conditionally reserved.

    _ANY_REG32_mask = _ALL_REG32_mask;
    _ANY_REG32_mask.Remove(OptoReg::as_OptoReg(r31_sp->as_VMReg()));

    _ANY_REG_mask = _ALL_REG_mask;

    _PTR_REG_mask = _ALL_REG_mask;

    _NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
    _NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);

    _NO_SPECIAL_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    _NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
    _NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);

    // r27 is not allocatable when compressed oops is on and heapbase is not
    // zero, compressed klass pointers doesn't use r27 after JDK-8234794
    if (UseCompressedOops && (CompressedOops::base() != nullptr)) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
      _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
    }

    // r29 is not allocatable when PreserveFramePointer is on
    if (PreserveFramePointer) {
      _NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
      _NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
    }

    // The NO_RFP variant always excludes r29, regardless of flags.
    _NO_SPECIAL_NO_RFP_PTR_REG_mask = _NO_SPECIAL_PTR_REG_mask;
    _NO_SPECIAL_NO_RFP_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
  }
 1263 
  // Optimization of volatile gets and puts
 1265   // -------------------------------------
 1266   //
 1267   // AArch64 has ldar<x> and stlr<x> instructions which we can safely
 1268   // use to implement volatile reads and writes. For a volatile read
 1269   // we simply need
 1270   //
 1271   //   ldar<x>
 1272   //
 1273   // and for a volatile write we need
 1274   //
 1275   //   stlr<x>
 1276   //
 1277   // Alternatively, we can implement them by pairing a normal
 1278   // load/store with a memory barrier. For a volatile read we need
 1279   //
 1280   //   ldr<x>
 1281   //   dmb ishld
 1282   //
 1283   // for a volatile write
 1284   //
 1285   //   dmb ish
 1286   //   str<x>
 1287   //   dmb ish
 1288   //
 1289   // We can also use ldaxr and stlxr to implement compare and swap CAS
 1290   // sequences. These are normally translated to an instruction
 1291   // sequence like the following
 1292   //
 1293   //   dmb      ish
 1294   // retry:
 1295   //   ldxr<x>   rval raddr
 1296   //   cmp       rval rold
 1297   //   b.ne done
 1298   //   stlxr<x>  rval, rnew, rold
 1299   //   cbnz      rval retry
 1300   // done:
 1301   //   cset      r0, eq
 1302   //   dmb ishld
 1303   //
 1304   // Note that the exclusive store is already using an stlxr
 1305   // instruction. That is required to ensure visibility to other
 1306   // threads of the exclusive write (assuming it succeeds) before that
 1307   // of any subsequent writes.
 1308   //
 1309   // The following instruction sequence is an improvement on the above
 1310   //
 1311   // retry:
 1312   //   ldaxr<x>  rval raddr
 1313   //   cmp       rval rold
 1314   //   b.ne done
 1315   //   stlxr<x>  rval, rnew, rold
 1316   //   cbnz      rval retry
 1317   // done:
 1318   //   cset      r0, eq
 1319   //
 1320   // We don't need the leading dmb ish since the stlxr guarantees
 1321   // visibility of prior writes in the case that the swap is
 1322   // successful. Crucially we don't have to worry about the case where
 1323   // the swap is not successful since no valid program should be
 1324   // relying on visibility of prior changes by the attempting thread
 1325   // in the case where the CAS fails.
 1326   //
 1327   // Similarly, we don't need the trailing dmb ishld if we substitute
 1328   // an ldaxr instruction since that will provide all the guarantees we
 1329   // require regarding observation of changes made by other threads
 1330   // before any change to the CAS address observed by the load.
 1331   //
 1332   // In order to generate the desired instruction sequence we need to
 1333   // be able to identify specific 'signature' ideal graph node
  // sequences which i) occur as a translation of volatile reads or
  // writes or CAS operations and ii) do not occur through any other
 1336   // translation or graph transformation. We can then provide
  // alternative adlc matching rules which translate these node
 1338   // sequences to the desired machine code sequences. Selection of the
 1339   // alternative rules can be implemented by predicates which identify
 1340   // the relevant node sequences.
 1341   //
 1342   // The ideal graph generator translates a volatile read to the node
 1343   // sequence
 1344   //
 1345   //   LoadX[mo_acquire]
 1346   //   MemBarAcquire
 1347   //
 1348   // As a special case when using the compressed oops optimization we
 1349   // may also see this variant
 1350   //
 1351   //   LoadN[mo_acquire]
 1352   //   DecodeN
 1353   //   MemBarAcquire
 1354   //
 1355   // A volatile write is translated to the node sequence
 1356   //
 1357   //   MemBarRelease
 1358   //   StoreX[mo_release] {CardMark}-optional
 1359   //   MemBarVolatile
 1360   //
 1361   // n.b. the above node patterns are generated with a strict
 1362   // 'signature' configuration of input and output dependencies (see
 1363   // the predicates below for exact details). The card mark may be as
 1364   // simple as a few extra nodes or, in a few GC configurations, may
 1365   // include more complex control flow between the leading and
 1366   // trailing memory barriers. However, whatever the card mark
 1367   // configuration these signatures are unique to translated volatile
 1368   // reads/stores -- they will not appear as a result of any other
 1369   // bytecode translation or inlining nor as a consequence of
 1370   // optimizing transforms.
 1371   //
 1372   // We also want to catch inlined unsafe volatile gets and puts and
 1373   // be able to implement them using either ldar<x>/stlr<x> or some
 1374   // combination of ldr<x>/stlr<x> and dmb instructions.
 1375   //
 1376   // Inlined unsafe volatiles puts manifest as a minor variant of the
 1377   // normal volatile put node sequence containing an extra cpuorder
 1378   // membar
 1379   //
 1380   //   MemBarRelease
 1381   //   MemBarCPUOrder
 1382   //   StoreX[mo_release] {CardMark}-optional
 1383   //   MemBarCPUOrder
 1384   //   MemBarVolatile
 1385   //
 1386   // n.b. as an aside, a cpuorder membar is not itself subject to
 1387   // matching and translation by adlc rules.  However, the rule
 1388   // predicates need to detect its presence in order to correctly
 1389   // select the desired adlc rules.
 1390   //
 1391   // Inlined unsafe volatile gets manifest as a slightly different
 1392   // node sequence to a normal volatile get because of the
 1393   // introduction of some CPUOrder memory barriers to bracket the
  // Load. However, the same basic skeleton of a LoadX feeding a
 1395   // MemBarAcquire, possibly through an optional DecodeN, is still
 1396   // present
 1397   //
 1398   //   MemBarCPUOrder
 1399   //        ||       \\
 1400   //   MemBarCPUOrder LoadX[mo_acquire]
 1401   //        ||            |
 1402   //        ||       {DecodeN} optional
 1403   //        ||       /
 1404   //     MemBarAcquire
 1405   //
 1406   // In this case the acquire membar does not directly depend on the
 1407   // load. However, we can be sure that the load is generated from an
 1408   // inlined unsafe volatile get if we see it dependent on this unique
 1409   // sequence of membar nodes. Similarly, given an acquire membar we
 1410   // can know that it was added because of an inlined unsafe volatile
 1411   // get if it is fed and feeds a cpuorder membar and if its feed
 1412   // membar also feeds an acquiring load.
 1413   //
 1414   // Finally an inlined (Unsafe) CAS operation is translated to the
 1415   // following ideal graph
 1416   //
 1417   //   MemBarRelease
 1418   //   MemBarCPUOrder
 1419   //   CompareAndSwapX {CardMark}-optional
 1420   //   MemBarCPUOrder
 1421   //   MemBarAcquire
 1422   //
 1423   // So, where we can identify these volatile read and write
 1424   // signatures we can choose to plant either of the above two code
 1425   // sequences. For a volatile read we can simply plant a normal
 1426   // ldr<x> and translate the MemBarAcquire to a dmb. However, we can
 1427   // also choose to inhibit translation of the MemBarAcquire and
 1428   // inhibit planting of the ldr<x>, instead planting an ldar<x>.
 1429   //
 1430   // When we recognise a volatile store signature we can choose to
  // plant a dmb ish as a translation for the MemBarRelease, a
 1432   // normal str<x> and then a dmb ish for the MemBarVolatile.
 1433   // Alternatively, we can inhibit translation of the MemBarRelease
 1434   // and MemBarVolatile and instead plant a simple stlr<x>
 1435   // instruction.
 1436   //
 1437   // when we recognise a CAS signature we can choose to plant a dmb
 1438   // ish as a translation for the MemBarRelease, the conventional
 1439   // macro-instruction sequence for the CompareAndSwap node (which
 1440   // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
 1441   // Alternatively, we can elide generation of the dmb instructions
 1442   // and plant the alternative CompareAndSwap macro-instruction
 1443   // sequence (which uses ldaxr<x>).
 1444   //
 1445   // Of course, the above only applies when we see these signature
 1446   // configurations. We still want to plant dmb instructions in any
 1447   // other cases where we may see a MemBarAcquire, MemBarRelease or
 1448   // MemBarVolatile. For example, at the end of a constructor which
 1449   // writes final/volatile fields we will see a MemBarRelease
 1450   // instruction and this needs a 'dmb ish' lest we risk the
 1451   // constructed object being visible without making the
 1452   // final/volatile field writes visible.
 1453   //
 1454   // n.b. the translation rules below which rely on detection of the
 1455   // volatile signatures and insert ldar<x> or stlr<x> are failsafe.
 1456   // If we see anything other than the signature configurations we
 1457   // always just translate the loads and stores to ldr<x> and str<x>
 1458   // and translate acquire, release and volatile membars to the
 1459   // relevant dmb instructions.
 1460   //
 1461 
 1462   // is_CAS(int opcode, bool maybe_volatile)
 1463   //
 1464   // return true if opcode is one of the possible CompareAndSwapX
 1465   // values otherwise false.
 1466 
  // Return true if opcode is a CAS-like LoadStore operation. Opcodes in
  // the first group (strong CAS and GetAndX) always qualify; opcodes in
  // the second group (weak CAS and CompareAndExchange variants) qualify
  // only when maybe_volatile is true.
  bool is_CAS(int opcode, bool maybe_volatile)
  {
    switch(opcode) {
      // We handle these
    case Op_CompareAndSwapI:
    case Op_CompareAndSwapL:
    case Op_CompareAndSwapP:
    case Op_CompareAndSwapN:
    case Op_ShenandoahCompareAndSwapP:
    case Op_ShenandoahCompareAndSwapN:
    case Op_CompareAndSwapB:
    case Op_CompareAndSwapS:
    case Op_GetAndSetI:
    case Op_GetAndSetL:
    case Op_GetAndSetP:
    case Op_GetAndSetN:
    case Op_GetAndAddI:
    case Op_GetAndAddL:
      return true;
      // These only count when the caller allows maybe-volatile forms.
    case Op_CompareAndExchangeI:
    case Op_CompareAndExchangeN:
    case Op_CompareAndExchangeB:
    case Op_CompareAndExchangeS:
    case Op_CompareAndExchangeL:
    case Op_CompareAndExchangeP:
    case Op_WeakCompareAndSwapB:
    case Op_WeakCompareAndSwapS:
    case Op_WeakCompareAndSwapI:
    case Op_WeakCompareAndSwapL:
    case Op_WeakCompareAndSwapP:
    case Op_WeakCompareAndSwapN:
    case Op_ShenandoahWeakCompareAndSwapP:
    case Op_ShenandoahWeakCompareAndSwapN:
    case Op_ShenandoahCompareAndExchangeP:
    case Op_ShenandoahCompareAndExchangeN:
      return maybe_volatile;
    default:
      return false;
    }
  }
 1507 
 1508   // helper to determine the maximum number of Phi nodes we may need to
 1509   // traverse when searching from a card mark membar for the merge mem
 1510   // feeding a trailing membar or vice versa
 1511 
 1512 // predicates controlling emit of ldr<x>/ldar<x>
 1513 
// Return true when the acquire membar is redundant because the access
// it trails will itself be emitted with acquire semantics (ldar<x> for
// a trailing load, or the ldaxr form for a trailing CAS).
bool unnecessary_acquire(const Node *barrier)
{
  assert(barrier->is_MemBar(), "expecting a membar");

  MemBarNode* mb = barrier->as_MemBar();

  // Trailing a volatile load: the ldar<x> already provides acquire.
  if (mb->trailing_load()) {
    return true;
  }

  // Trailing a LoadStore (CAS): redundant only for the CAS-like
  // opcodes recognised by is_CAS.
  if (mb->trailing_load_store()) {
    Node* load_store = mb->in(MemBarNode::Precedent);
    assert(load_store->is_LoadStore(), "unexpected graph shape");
    return is_CAS(load_store->Opcode(), true);
  }

  return false;
}
 1532 
 1533 bool needs_acquiring_load(const Node *n)
 1534 {
 1535   assert(n->is_Load(), "expecting a load");
 1536   LoadNode *ld = n->as_Load();
 1537   return ld->is_acquire();
 1538 }
 1539 
// Return true when a leading MemBarRelease is redundant because its
// trailing access will be emitted with release semantics (stlr<x> for
// a store, the stlxr form for a CAS).
bool unnecessary_release(const Node *n)
{
  assert((n->is_MemBar() &&
          n->Opcode() == Op_MemBarRelease),
         "expecting a release membar");

  MemBarNode *barrier = n->as_MemBar();
  if (!barrier->leading()) {
    // Not part of a matched leading/trailing pair: keep the dmb.
    return false;
  } else {
    Node* trailing = barrier->trailing_membar();
    MemBarNode* trailing_mb = trailing->as_MemBar();
    assert(trailing_mb->trailing(), "Not a trailing membar?");
    assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");

    // The precedent input identifies the guarded access.
    Node* mem = trailing_mb->in(MemBarNode::Precedent);
    if (mem->is_Store()) {
      assert(mem->as_Store()->is_release(), "");
      assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
      return true;
    } else {
      assert(mem->is_LoadStore(), "");
      assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
      return is_CAS(mem->Opcode(), true);
    }
  }
  // Not reached: both branches above return. Kept to satisfy compilers
  // that cannot prove the if/else is exhaustive.
  return false;
}
 1568 
// Return true when a MemBarVolatile is redundant because it trails a
// releasing store that will be emitted as stlr<x>.
bool unnecessary_volatile(const Node *n)
{
  // assert n->is_MemBar();
  MemBarNode *mbvol = n->as_MemBar();

  bool release = mbvol->trailing_store();
  assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
  // Cross-check the leading/trailing membar pairing.
  if (release) {
    Node* leading = mbvol->leading_membar();
    assert(leading->Opcode() == Op_MemBarRelease, "");
    assert(leading->as_MemBar()->leading_store(), "");
    assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
  }
#endif

  return release;
}
 1587 
 1588 // predicates controlling emit of str<x>/stlr<x>
 1589 
 1590 bool needs_releasing_store(const Node *n)
 1591 {
 1592   // assert n->is_Store();
 1593   StoreNode *st = n->as_Store();
 1594   return st->trailing_membar() != nullptr;
 1595 }
 1596 
 1597 // predicate controlling translation of CAS
 1598 //
 1599 // returns true if CAS needs to use an acquiring load otherwise false
 1600 
 1601 bool needs_acquiring_load_exclusive(const Node *n)
 1602 {
 1603   assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
 1604   LoadStoreNode* ldst = n->as_LoadStore();
 1605   if (is_CAS(n->Opcode(), false)) {
 1606     assert(ldst->trailing_membar() != nullptr, "expected trailing membar");
 1607   } else {
 1608     return ldst->trailing_membar() != nullptr;
 1609   }
 1610 
 1611   // so we can just return true here
 1612   return true;
 1613 }
 1614 
 1615 #define __ masm->
 1616 
 1617 // advance declarations for helper functions to convert register
 1618 // indices to register objects
 1619 
 1620 // the ad file has to provide implementations of certain methods
 1621 // expected by the generic code
 1622 //
 1623 // REQUIRED FUNCTIONALITY
 1624 
 1625 //=============================================================================
 1626 
 1627 // !!!!! Special hack to get all types of calls to specify the byte offset
 1628 //       from the start of the call to the point where the return address
 1629 //       will point.
 1630 
 1631 int MachCallStaticJavaNode::ret_addr_offset()
 1632 {
 1633   // call should be a simple bl
 1634   int off = 4;
 1635   return off;
 1636 }
 1637 
// Dynamic call sequence is four instructions, so the return address is
// 16 bytes past the start.
int MachCallDynamicJavaNode::ret_addr_offset()
{
  return 16; // movz, movk, movk, bl
}
 1642 
 1643 int MachCallRuntimeNode::ret_addr_offset() {
 1644   // for generated stubs the call will be
 1645   //   bl(addr)
 1646   // or with far branches
 1647   //   bl(trampoline_stub)
 1648   // for real runtime callouts it will be six instructions
 1649   // see aarch64_enc_java_to_runtime
 1650   //   adr(rscratch2, retaddr)
 1651   //   str(rscratch2, Address(rthread, JavaThread::last_Java_pc_offset()));
 1652   //   lea(rscratch1, RuntimeAddress(addr)
 1653   //   blr(rscratch1)
 1654   CodeBlob *cb = CodeCache::find_blob(_entry_point);
 1655   if (cb) {
 1656     return 1 * NativeInstruction::instruction_size;
 1657   } else {
 1658     return 6 * NativeInstruction::instruction_size;
 1659   }
 1660 }
 1661 
 1662 //=============================================================================
 1663 
#ifndef PRODUCT
// Debug-only textual form of the breakpoint node.
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif
 1669 
// Emit the AArch64 breakpoint instruction (brk #0).
void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  __ brk(0);
}
 1673 
// Defer to the generic size computation for the emitted code.
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
 1677 
 1678 //=============================================================================
 1679 
#ifndef PRODUCT
  // Debug-only textual form: shows how many pad bytes the nop covers.
  void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
    st->print("nop \t# %d bytes pad for loops and calls", _count);
  }
#endif
 1685 
 1686   void MachNopNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc*) const {
 1687     for (int i = 0; i < _count; i++) {
 1688       __ nop();
 1689     }
 1690   }
 1691 
  // Padding size: one machine instruction per requested nop.
  uint MachNopNode::size(PhaseRegAlloc*) const {
    return _count * NativeInstruction::instruction_size;
  }
 1695 
 1696 //=============================================================================
// MachConstantBaseNode produces no value in a register on AArch64, so
// its output mask is empty.
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

// Constants are addressed absolutely, so the table base needs no offset.
int ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}
 1702 
// No post-register-allocation expansion is needed for the constant
// base node, so postalloc_expand must never be reached.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
 1707 
// The constant base node emits no code on AArch64.
void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
  // Empty encoding
}
 1711 
// Zero bytes, matching the empty encoding above.
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}
 1715 
#ifndef PRODUCT
// Debug-only textual form of the (empty) constant base node.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
 1721 
#ifndef PRODUCT
// Pretty-print the method prologue for -XX:+PrintOptoAssembly; the
// shapes shown mirror the code produced by MachPrologNode::emit().
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  if (C->output()->need_stack_bang(framesize))
    st->print("# stack bang size=%d\n\t", framesize);

  // ROP protection: probe lr's target, then sign lr (paciaz).
  if (VM_Version::use_rop_protection()) {
    st->print("ldr  zr, [lr]\n\t");
    st->print("paciaz\n\t");
  }
  // Small frames use a single sub + stp; larger frames must first
  // materialize the frame size in a scratch register.
  if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("sub  sp, sp, #%d\n\t", framesize);
    st->print("stp  rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
    if (PreserveFramePointer) st->print("\n\tadd  rfp, sp, #%d", framesize - 2 * wordSize);
  } else {
    st->print("stp  lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
    if (PreserveFramePointer) st->print("mov  rfp, sp\n\t");
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("sub  sp, sp, rscratch1");
  }
  // nmethod entry barrier sequence (normal compilations only, not stubs).
  if (C->stub_function() == nullptr) {
    st->print("\n\t");
    st->print("ldr  rscratch1, [guard]\n\t");
    st->print("dmb ishld\n\t");
    st->print("ldr  rscratch2, [rthread, #thread_disarmed_guard_value_offset]\n\t");
    st->print("cmp  rscratch1, rscratch2\n\t");
    st->print("b.eq skip");
    st->print("\n\t");
    st->print("blr #nmethod_entry_barrier_stub\n\t");
    st->print("b skip\n\t");
    st->print("guard: int\n\t");
    st->print("\n\t");
    st->print("skip:\n\t");
  }
}
#endif
 1761 
// Emit the method prologue: patchable nop, optional clinit barrier,
// optional SVE ptrue re-init, stack bang, frame build, and the
// nmethod entry barrier for normal (non-stub) compilations.
void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;

  // n.b. frame size includes space for return pc and rfp
  const int framesize = C->output()->frame_size_in_bytes();

  // insert a nop at the start of the prolog so we can patch in a
  // branch if we need to invalidate the method later
  __ nop();

  if (C->clinit_barrier_on_entry()) {
    assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");

    Label L_skip_barrier;

    // Jump to the wrong-method stub if the holder class is still
    // being initialized by another thread.
    __ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
    __ bind(L_skip_barrier);
  }

  if (C->max_vector_size() > 0) {
    // Re-establish the all-true SVE predicate register.
    __ reinitialize_ptrue();
  }

  int bangsize = C->output()->bang_size_in_bytes();
  if (C->output()->need_stack_bang(bangsize))
    __ generate_stack_overflow_check(bangsize);

  __ build_frame(framesize);

  if (C->stub_function() == nullptr) {
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    // Dummy labels for just measuring the code size
    Label dummy_slow_path;
    Label dummy_continuation;
    Label dummy_guard;
    Label* slow_path = &dummy_slow_path;
    Label* continuation = &dummy_continuation;
    Label* guard = &dummy_guard;
    if (!Compile::current()->output()->in_scratch_emit_size()) {
      // Use real labels from actual stub when not emitting code for the purpose of measuring its size
      C2EntryBarrierStub* stub = new (Compile::current()->comp_arena()) C2EntryBarrierStub();
      Compile::current()->output()->add_stub(stub);
      slow_path = &stub->entry();
      continuation = &stub->continuation();
      guard = &stub->guard();
    }
    // In the C2 code, we move the non-hot part of nmethod entry barriers out-of-line to a stub.
    bs->nmethod_entry_barrier(masm, slow_path, continuation, guard);
  }

  if (VerifyStackAtCalls) {
    Unimplemented();
  }

  C->output()->set_frame_complete(__ offset());

  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    ConstantTable& constant_table = C->output()->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
 1827 
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
  return MachNode::size(ra_); // too many variables; just compute it
                              // the hard way
}

// Number of relocatable values in the prologue itself: none.
int MachPrologNode::reloc() const
{
  return 0;
}
 1838 
 1839 //=============================================================================
 1840 
#ifndef PRODUCT
// Pretty-print the method epilogue for -XX:+PrintOptoAssembly; mirrors
// the frame teardown and safepoint poll emitted below.
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  st->print("# pop frame %d\n\t",framesize);

  if (framesize == 0) {
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  } else if (framesize < ((1 << 9) + 2 * wordSize)) {
    st->print("ldp  lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, #%d\n\t", framesize);
  } else {
    // Large frames: frame size does not fit an add immediate.
    st->print("mov  rscratch1, #%d\n\t", framesize - 2 * wordSize);
    st->print("add  sp, sp, rscratch1\n\t");
    st->print("ldp  lr, rfp, [sp],#%d\n\t", (2 * wordSize));
  }
  // ROP protection: authenticate lr, then probe its target.
  if (VM_Version::use_rop_protection()) {
    st->print("autiaz\n\t");
    st->print("ldr  zr, [lr]\n\t");
  }

  if (do_polling() && C->is_method_compilation()) {
    st->print("# test polling word\n\t");
    st->print("ldr  rscratch1, [rthread],#%d\n\t", in_bytes(JavaThread::polling_word_offset()));
    st->print("cmp  sp, rscratch1\n\t");
    st->print("bhi #slow_path");
  }
}
#endif
 1871 
// Emit the method epilogue: tear down the frame, optionally check the
// reserved stack area, and perform the return safepoint poll.
void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  int framesize = C->output()->frame_slots() << LogBytesPerInt;

  __ remove_frame(framesize);

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check();
  }

  if (do_polling() && C->is_method_compilation()) {
    Label dummy_label;
    Label* code_stub = &dummy_label;
    if (!C->output()->in_scratch_emit_size()) {
      // Use a real stub entry only when emitting for keeps; scratch
      // (size-measuring) emission keeps the dummy label.
      C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset());
      C->output()->add_stub(stub);
      code_stub = &stub->entry();
    }
    __ relocate(relocInfo::poll_return_type);
    __ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */);
  }
}

uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for polling page.
}

const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
 1908 
 1909 //=============================================================================
 1910 
 1911 static enum RC rc_class(OptoReg::Name reg) {
 1912 
 1913   if (reg == OptoReg::Bad) {
 1914     return rc_bad;
 1915   }
 1916 
 1917   // we have 32 int registers * 2 halves
 1918   int slots_of_int_registers = Register::number_of_registers * Register::max_slots_per_register;
 1919 
 1920   if (reg < slots_of_int_registers) {
 1921     return rc_int;
 1922   }
 1923 
 1924   // we have 32 float register * 8 halves
 1925   int slots_of_float_registers = FloatRegister::number_of_registers * FloatRegister::max_slots_per_register;
 1926   if (reg < slots_of_int_registers + slots_of_float_registers) {
 1927     return rc_float;
 1928   }
 1929 
 1930   int slots_of_predicate_registers = PRegister::number_of_registers * PRegister::max_slots_per_register;
 1931   if (reg < slots_of_int_registers + slots_of_float_registers + slots_of_predicate_registers) {
 1932     return rc_predicate;
 1933   }
 1934 
 1935   // Between predicate regs & stack is the flags.
 1936   assert(OptoReg::is_stack(reg), "blow up if spilling flags");
 1937 
 1938   return rc_stack;
 1939 }
 1940 
// Emit (when masm != nullptr) and/or format (when st != nullptr) a
// register-allocator copy between any legal pair of locations:
// general registers, float/vector registers, SVE predicate registers,
// and stack slots.  Always returns 0; size is measured elsewhere.
uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
  Compile* C = ra_->C;

  // Get registers to move.
  OptoReg::Name src_hi = ra_->get_reg_second(in(1));
  OptoReg::Name src_lo = ra_->get_reg_first(in(1));
  OptoReg::Name dst_hi = ra_->get_reg_second(this);
  OptoReg::Name dst_lo = ra_->get_reg_first(this);

  // Classify each endpoint as int / float / predicate / stack.
  enum RC src_hi_rc = rc_class(src_hi);
  enum RC src_lo_rc = rc_class(src_lo);
  enum RC dst_hi_rc = rc_class(dst_hi);
  enum RC dst_lo_rc = rc_class(dst_lo);

  assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");

  if (src_hi != OptoReg::Bad && !bottom_type()->isa_vectmask()) {
    assert((src_lo&1)==0 && src_lo+1==src_hi &&
           (dst_lo&1)==0 && dst_lo+1==dst_hi,
           "expected aligned-adjacent pairs");
  }

  if (src_lo == dst_lo && src_hi == dst_hi) {
    return 0;            // Self copy, no move.
  }

  // 64-bit move: both halves present and aligned-adjacent.
  bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
              (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
  int src_offset = ra_->reg2offset(src_lo);
  int dst_offset = ra_->reg2offset(dst_lo);

  if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
    uint ireg = ideal_reg();
    if (ireg == Op_VecA && masm) {
      // Scalable (SVE) vector copy/spill.
      int sve_vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
      if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
        // stack->stack
        __ spill_copy_sve_vector_stack_to_stack(src_offset, dst_offset,
                                                sve_vector_reg_size_in_bytes);
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
        __ spill_sve_vector(as_FloatRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo),
                            sve_vector_reg_size_in_bytes);
      } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
        __ unspill_sve_vector(as_FloatRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo),
                              sve_vector_reg_size_in_bytes);
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
        // reg->reg: ORR with identical sources is a register move.
        __ sve_orr(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                   as_FloatRegister(Matcher::_regEncode[src_lo]),
                   as_FloatRegister(Matcher::_regEncode[src_lo]));
      } else {
        ShouldNotReachHere();
      }
    } else if (masm) {
      // NEON (64-bit VecD or 128-bit VecX) vector copy/spill.
      assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
      assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
      if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
        // stack->stack
        assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
        if (ireg == Op_VecD) {
          // 64-bit payload bounces through a scratch GPR.
          __ unspill(rscratch1, true, src_offset);
          __ spill(rscratch1, true, dst_offset);
        } else {
          __ spill_copy128(src_offset, dst_offset);
        }
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
        __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
               ireg == Op_VecD ? __ T8B : __ T16B,
               as_FloatRegister(Matcher::_regEncode[src_lo]));
      } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
        __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
                 ireg == Op_VecD ? __ D : __ Q,
                 ra_->reg2offset(dst_lo));
      } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
        __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                   ireg == Op_VecD ? __ D : __ Q,
                   ra_->reg2offset(src_lo));
      } else {
        ShouldNotReachHere();
      }
    }
  } else if (masm) {
    // Scalar and predicate copies, dispatched on the source class.
    switch (src_lo_rc) {
    case rc_int:
      if (dst_lo_rc == rc_int) {  // gpr --> gpr copy
        if (is64) {
            __ mov(as_Register(Matcher::_regEncode[dst_lo]),
                   as_Register(Matcher::_regEncode[src_lo]));
        } else {
            __ movw(as_Register(Matcher::_regEncode[dst_lo]),
                    as_Register(Matcher::_regEncode[src_lo]));
        }
      } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
        if (is64) {
            __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_Register(Matcher::_regEncode[src_lo]));
        } else {
            __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_Register(Matcher::_regEncode[src_lo]));
        }
      } else {                    // gpr --> stack spill
        assert(dst_lo_rc == rc_stack, "spill to bad register class");
        __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
      }
      break;
    case rc_float:
      if (dst_lo_rc == rc_int) {  // fpr --> gpr copy
        if (is64) {
            __ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        } else {
            __ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        }
      } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
        if (is64) {
            __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        } else {
            __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                     as_FloatRegister(Matcher::_regEncode[src_lo]));
        }
      } else {                    // fpr --> stack spill
        assert(dst_lo_rc == rc_stack, "spill to bad register class");
        __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
                 is64 ? __ D : __ S, dst_offset);
      }
      break;
    case rc_stack:
      if (dst_lo_rc == rc_int) {  // stack --> gpr load
        __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
      } else if (dst_lo_rc == rc_float) { // stack --> fpr load
        __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
                   is64 ? __ D : __ S, src_offset);
      } else if (dst_lo_rc == rc_predicate) {
        __ unspill_sve_predicate(as_PRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo),
                                 Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
      } else {                    // stack --> stack copy
        assert(dst_lo_rc == rc_stack, "spill to bad register class");
        if (ideal_reg() == Op_RegVectMask) {
          __ spill_copy_sve_predicate_stack_to_stack(src_offset, dst_offset,
                                                     Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
        } else {
          // Bounce through a scratch GPR.
          __ unspill(rscratch1, is64, src_offset);
          __ spill(rscratch1, is64, dst_offset);
        }
      }
      break;
    case rc_predicate:
      if (dst_lo_rc == rc_predicate) {
        __ sve_mov(as_PRegister(Matcher::_regEncode[dst_lo]), as_PRegister(Matcher::_regEncode[src_lo]));
      } else if (dst_lo_rc == rc_stack) {
        __ spill_sve_predicate(as_PRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo),
                               Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
      } else {
        assert(false, "bad src and dst rc_class combination.");
        ShouldNotReachHere();
      }
      break;
    default:
      assert(false, "bad rc_class for spill");
      ShouldNotReachHere();
    }
  }

  // Human-readable description for -XX:+PrintOptoAssembly.
  if (st) {
    st->print("spill ");
    if (src_lo_rc == rc_stack) {
      st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
    } else {
      st->print("%s -> ", Matcher::regName[src_lo]);
    }
    if (dst_lo_rc == rc_stack) {
      st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
    } else {
      st->print("%s", Matcher::regName[dst_lo]);
    }
    if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
      int vsize = 0;
      switch (ideal_reg()) {
      case Op_VecD:
        vsize = 64;
        break;
      case Op_VecX:
        vsize = 128;
        break;
      case Op_VecA:
        vsize = Matcher::scalable_vector_reg_size(T_BYTE) * 8;
        break;
      default:
        assert(false, "bad register type for spill");
        ShouldNotReachHere();
      }
      st->print("\t# vector spill size = %d", vsize);
    } else if (ideal_reg() == Op_RegVectMask) {
      assert(Matcher::supports_scalable_vector(), "bad register type for spill");
      int vsize = Matcher::scalable_predicate_reg_slots() * 32;
      st->print("\t# predicate spill size = %d", vsize);
    } else {
      st->print("\t# spill size = %d", is64 ? 64 : 32);
    }
  }

  return 0;

}
 2146 
#ifndef PRODUCT
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    // Format only: implementation() prints when given a stream and no masm.
    implementation(nullptr, ra_, false, st);
}
#endif

void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  implementation(masm, ra_, false, nullptr);
}

uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  // Variable size; measure the emitted code.
  return MachNode::size(ra_);
}
 2163 
 2164 //=============================================================================
 2165 
 2166 #ifndef PRODUCT
 2167 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
 2168   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
 2169   int reg = ra_->get_reg_first(this);
 2170   st->print("add %s, rsp, #%d]\t# box lock",
 2171             Matcher::regName[reg], offset);
 2172 }
 2173 #endif
 2174 
// Materialize the address of the lock's stack slot into the output register.
void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg    = ra_->get_encode(this);

  // This add will handle any 24-bit signed offset. 24 bits allows an
  // 8 megabyte stack frame.
  __ add(as_Register(reg), sp, offset);
}

uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());

  // One instruction if the offset fits an add immediate, two otherwise.
  if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
    return NativeInstruction::instruction_size;
  } else {
    return 2 * NativeInstruction::instruction_size;
  }
}
 2194 
 2195 //=============================================================================
 2196 
 2197 #ifndef PRODUCT
 2198 void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
 2199 {
 2200   st->print_cr("# MachUEPNode");
 2201   if (UseCompressedClassPointers) {
 2202     st->print_cr("\tldrw rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2203     st->print_cr("\tldrw r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
 2204     st->print_cr("\tcmpw rscratch1, r10");
 2205   } else {
 2206     st->print_cr("\tldr rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
 2207     st->print_cr("\tldr r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
 2208     st->print_cr("\tcmp rscratch1, r10");
 2209   }
 2210   st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
 2211 }
 2212 #endif
 2213 
// Emit the inline-cache check at the unverified entry point.
void MachUEPNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
{
  __ ic_check(InteriorEntryAlignment);
}

uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
  // Variable size; measure the emitted code.
  return MachNode::size(ra_);
}
 2223 
 2224 // REQUIRED EMIT CODE
 2225 
 2226 //=============================================================================
 2227 
 2228 // Emit exception handler code.
// Emit the exception handler stub: a far jump to the exception blob.
// Returns the handler's offset within the stub section, or 0 if the
// code cache is full.
int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm)
{
  // mov rscratch1 #exception_blob_entry_point
  // br rscratch1
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  address base = __ start_a_stub(size_exception_handler());
  if (base == nullptr) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();
  __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
 2246 
 2247 // Emit deopt handler code.
// Emit the deopt handler stub.  Returns the handler's offset within
// the stub section, or 0 if the code cache is full.
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm)
{
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  address base = __ start_a_stub(size_deopt_handler());
  if (base == nullptr) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0;  // CodeBuffer::expand failed
  }
  int offset = __ offset();

  // Set lr to the current pc before jumping to the deopt blob's
  // unpack entry.
  __ adr(lr, __ pc());
  __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));

  assert(__ offset() - offset == (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
 2266 
 2267 // REQUIRED MATCHER CODE
 2268 
 2269 //=============================================================================
 2270 
 2271 bool Matcher::match_rule_supported(int opcode) {
 2272   if (!has_match_rule(opcode))
 2273     return false;
 2274 
 2275   switch (opcode) {
 2276     case Op_OnSpinWait:
 2277       return VM_Version::supports_on_spin_wait();
 2278     case Op_CacheWB:
 2279     case Op_CacheWBPreSync:
 2280     case Op_CacheWBPostSync:
 2281       if (!VM_Version::supports_data_cache_line_flush()) {
 2282         return false;
 2283       }
 2284       break;
 2285     case Op_ExpandBits:
 2286     case Op_CompressBits:
 2287       if (!VM_Version::supports_svebitperm()) {
 2288         return false;
 2289       }
 2290       break;
 2291     case Op_FmaF:
 2292     case Op_FmaD:
 2293     case Op_FmaVF:
 2294     case Op_FmaVD:
 2295       if (!UseFMA) {
 2296         return false;
 2297       }
 2298       break;
 2299   }
 2300 
 2301   return true; // Per default match rules are supported.
 2302 }
 2303 
const RegMask* Matcher::predicate_reg_mask(void) {
  return &_PR_REG_mask;
}

bool Matcher::supports_vector_calling_convention(void) {
  return EnableVectorSupport && UseVectorStubs;
}

// Vector values are returned in v0; wider (VecX/VecA) values occupy
// more slots of the same register.
OptoRegPair Matcher::vector_return_value(uint ideal_reg) {
  assert(EnableVectorSupport && UseVectorStubs, "sanity");
  int lo = V0_num;
  int hi = V0_H_num;
  if (ideal_reg == Op_VecX || ideal_reg == Op_VecA) {
    hi = V0_K_num;
  }
  return OptoRegPair(hi, lo);
}
 2321 
 2322 // Is this branch offset short enough that a short branch can be used?
 2323 //
 2324 // NOTE: If the platform does not provide any short branch variants, then
 2325 //       this method should return false for offset 0.
 2326 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
 2327   // The passed offset is relative to address of the branch.
 2328 
 2329   return (-32768 <= offset && offset < 32768);
 2330 }
 2331 
 2332 // Vector width in bytes.
// Vector width in bytes: the hardware maximum (SVE or NEON) capped by
// -XX:MaxVectorSize, or 0 if the type cannot be vectorized usefully.
int Matcher::vector_width_in_bytes(BasicType bt) {
  // The MaxVectorSize should have been set by detecting SVE max vector register size.
  int size = MIN2((UseSVE > 0) ? (int)FloatRegister::sve_vl_max : (int)FloatRegister::neon_vl, (int)MaxVectorSize);
  // Minimum 2 values in vector
  if (size < 2*type2aelembytes(bt)) size = 0;
  // But never < 4
  if (size < 4) size = 0;
  return size;
}

// Limits on vector size (number of elements) loaded into vector.
int Matcher::max_vector_size(const BasicType bt) {
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
 2347 
 2348 int Matcher::min_vector_size(const BasicType bt) {
 2349   int max_size = max_vector_size(bt);
 2350   // Limit the min vector size to 8 bytes.
 2351   int size = 8 / type2aelembytes(bt);
 2352   if (bt == T_BYTE) {
 2353     // To support vector api shuffle/rearrange.
 2354     size = 4;
 2355   } else if (bt == T_BOOLEAN) {
 2356     // To support vector api load/store mask.
 2357     size = 2;
 2358   }
 2359   if (size < 2) size = 2;
 2360   return MIN2(size, max_size);
 2361 }
 2362 
// Auto-vectorization may use vectors up to the regular maximum size.
int Matcher::max_vector_size_auto_vectorization(const BasicType bt) {
  return Matcher::max_vector_size(bt);
}

// Actual max scalable vector register length.
int Matcher::scalable_vector_reg_size(const BasicType bt) {
  return Matcher::max_vector_size(bt);
}
 2371 
 2372 // Vector ideal reg.
 2373 uint Matcher::vector_ideal_reg(int len) {
 2374   if (UseSVE > 0 && FloatRegister::neon_vl < len && len <= FloatRegister::sve_vl_max) {
 2375     return Op_VecA;
 2376   }
 2377   switch(len) {
 2378     // For 16-bit/32-bit mask vector, reuse VecD.
 2379     case  2:
 2380     case  4:
 2381     case  8: return Op_VecD;
 2382     case 16: return Op_VecX;
 2383   }
 2384   ShouldNotReachHere();
 2385   return 0;
 2386 }
 2387 
// Replace a generic vector operand with the concrete operand class
// matching the ideal register type.
MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* generic_opnd, uint ideal_reg, bool is_temp) {
  assert(Matcher::is_generic_vector(generic_opnd), "not generic");
  switch (ideal_reg) {
    case Op_VecA: return new vecAOper();
    case Op_VecD: return new vecDOper();
    case Op_VecX: return new vecXOper();
  }
  ShouldNotReachHere();
  return nullptr;
}

// No machine nodes are treated as simple reg-to-reg moves on AArch64.
bool Matcher::is_reg2reg_move(MachNode* m) {
  return false;
}

// A generic vector operand is one using the VREG operand class.
bool Matcher::is_generic_vector(MachOper* opnd)  {
  return opnd->opcode() == VREG;
}
 2406 
 2407 // Return whether or not this register is ever used as an argument.
 2408 // This function is used on startup to build the trampoline stubs in
 2409 // generateOptoStub.  Registers not mentioned will be killed by the VM
 2410 // call in the trampoline, and arguments in those registers not be
 2411 // available to the callee.
bool Matcher::can_be_java_arg(int reg)
{
  // Java arguments are passed in r0-r7 and v0-v7 (both halves of each).
  return
    reg ==  R0_num || reg == R0_H_num ||
    reg ==  R1_num || reg == R1_H_num ||
    reg ==  R2_num || reg == R2_H_num ||
    reg ==  R3_num || reg == R3_H_num ||
    reg ==  R4_num || reg == R4_H_num ||
    reg ==  R5_num || reg == R5_H_num ||
    reg ==  R6_num || reg == R6_H_num ||
    reg ==  R7_num || reg == R7_H_num ||
    reg ==  V0_num || reg == V0_H_num ||
    reg ==  V1_num || reg == V1_H_num ||
    reg ==  V2_num || reg == V2_H_num ||
    reg ==  V3_num || reg == V3_H_num ||
    reg ==  V4_num || reg == V4_H_num ||
    reg ==  V5_num || reg == V5_H_num ||
    reg ==  V6_num || reg == V6_H_num ||
    reg ==  V7_num || reg == V7_H_num;
}

// Every Java argument register may also be spilled.
bool Matcher::is_spillable_arg(int reg)
{
  return can_be_java_arg(reg);
}
 2437 
// Integer register-pressure threshold used by the register allocator;
// -XX:INTPRESSURE overrides the derived default.
uint Matcher::int_pressure_limit()
{
  // JDK-8183543: When taking the number of available registers as int
  // register pressure threshold, the jtreg test:
  // test/hotspot/jtreg/compiler/regalloc/TestC2IntPressure.java
  // failed due to C2 compilation failure with
  // "COMPILE SKIPPED: failed spill-split-recycle sanity check".
  //
  // A derived pointer is live at CallNode and then is flagged by RA
  // as a spilled LRG. Spilling heuristics(Spill-USE) explicitly skip
  // derived pointers and lastly fail to spill after reaching maximum
  // number of iterations. Lowering the default pressure threshold to
  // (_NO_SPECIAL_REG32_mask.Size() minus 1) forces CallNode to become
  // a high register pressure area of the code so that split_DEF can
  // generate DefinitionSpillCopy for the derived pointer.
  uint default_int_pressure_threshold = _NO_SPECIAL_REG32_mask.Size() - 1;
  if (!PreserveFramePointer) {
    // When PreserveFramePointer is off, frame pointer is allocatable,
    // but different from other SOC registers, it is excluded from
    // fatproj's mask because its save type is No-Save. Decrease 1 to
    // ensure high pressure at fatproj when PreserveFramePointer is off.
    // See check_pressure_at_fatproj().
    default_int_pressure_threshold--;
  }
  return (INTPRESSURE == -1) ? default_int_pressure_threshold : INTPRESSURE;
}

// Float register-pressure threshold; -XX:FLOATPRESSURE overrides it.
uint Matcher::float_pressure_limit()
{
  // _FLOAT_REG_mask is generated by adlc from the float_reg register class.
  return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.Size() : FLOATPRESSURE;
}
 2470 
// No special assembler sequence is used for long division by a constant.
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}

// Register for DIVI projection of divmodI; never used on AArch64.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// SP is saved in the frame pointer register across method-handle invokes.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}
 2501 
 2502 bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
 2503   for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
 2504     Node* u = addp->fast_out(i);
 2505     if (u->is_LoadStore()) {
 2506       // On AArch64, LoadStoreNodes (i.e. compare and swap
 2507       // instructions) only take register indirect as an operand, so
 2508       // any attempt to use an AddPNode as an input to a LoadStoreNode
 2509       // must fail.
 2510       return false;
 2511     }
 2512     if (u->is_Mem()) {
 2513       int opsize = u->as_Mem()->memory_size();
 2514       assert(opsize > 0, "unexpected memory operand size");
 2515       if (u->as_Mem()->memory_size() != (1<<shift)) {
 2516         return false;
 2517       }
 2518     }
 2519   }
 2520   return true;
 2521 }
 2522 
 2523 // Convert BootTest condition to Assembler condition.
 2524 // Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
 2525 Assembler::Condition to_assembler_cond(BoolTest::mask cond) {
 2526   Assembler::Condition result;
 2527   switch(cond) {
 2528     case BoolTest::eq:
 2529       result = Assembler::EQ; break;
 2530     case BoolTest::ne:
 2531       result = Assembler::NE; break;
 2532     case BoolTest::le:
 2533       result = Assembler::LE; break;
 2534     case BoolTest::ge:
 2535       result = Assembler::GE; break;
 2536     case BoolTest::lt:
 2537       result = Assembler::LT; break;
 2538     case BoolTest::gt:
 2539       result = Assembler::GT; break;
 2540     case BoolTest::ule:
 2541       result = Assembler::LS; break;
 2542     case BoolTest::uge:
 2543       result = Assembler::HS; break;
 2544     case BoolTest::ult:
 2545       result = Assembler::LO; break;
 2546     case BoolTest::ugt:
 2547       result = Assembler::HI; break;
 2548     case BoolTest::overflow:
 2549       result = Assembler::VS; break;
 2550     case BoolTest::no_overflow:
 2551       result = Assembler::VC; break;
 2552     default:
 2553       ShouldNotReachHere();
 2554       return Assembler::Condition(-1);
 2555   }
 2556 
 2557   // Check conversion
 2558   if (cond & BoolTest::unsigned_compare) {
 2559     assert(cmpOpUOper((BoolTest::mask)((int)cond & ~(BoolTest::unsigned_compare))).ccode() == result, "Invalid conversion");
 2560   } else {
 2561     assert(cmpOpOper(cond).ccode() == result, "Invalid conversion");
 2562   }
 2563 
 2564   return result;
 2565 }
 2566 
// Binary src (Replicate con)
//
// Returns true if vector node 'n' with input 'm' (a Replicate of a
// constant) can be matched to an SVE instruction that takes the
// constant as an immediate, i.e. the constant is encodable in the
// corresponding SVE immediate field.
static bool is_valid_sve_arith_imm_pattern(Node* n, Node* m) {
  if (n == nullptr || m == nullptr) {
    return false;
  }

  // Only relevant when SVE is available and 'm' replicates a scalar.
  if (UseSVE == 0 || m->Opcode() != Op_Replicate) {
    return false;
  }

  // The replicated scalar must be a compile-time constant ...
  Node* imm_node = m->in(1);
  if (!imm_node->is_Con()) {
    return false;
  }

  // ... of integral type (int or long).
  const Type* t = imm_node->bottom_type();
  if (!(t->isa_int() || t->isa_long())) {
    return false;
  }

  switch (n->Opcode()) {
  case Op_AndV:
  case Op_OrV:
  case Op_XorV: {
    // Bitwise ops: the value must be encodable as an SVE logical
    // immediate for the vector's element width.
    Assembler::SIMD_RegVariant T = Assembler::elemType_to_regVariant(Matcher::vector_element_basic_type(n));
    uint64_t value = t->isa_long() ? (uint64_t)imm_node->get_long() : (uint64_t)imm_node->get_int();
    return Assembler::operand_valid_for_sve_logical_immediate(Assembler::regVariant_to_elemBits(T), value);
  }
  case Op_AddVB:
    // Byte add: accept [-255, 255]; presumably negative values are
    // encoded as a subtract -- TODO confirm against the assembler's
    // add/sub immediate handling for the B variant.
    return (imm_node->get_int() <= 255 && imm_node->get_int() >= -255);
  case Op_AddVS:
  case Op_AddVI:
    // Short/int add: defer to the assembler's add/sub immediate check.
    return Assembler::operand_valid_for_sve_add_sub_immediate((int64_t)imm_node->get_int());
  case Op_AddVL:
    return Assembler::operand_valid_for_sve_add_sub_immediate(imm_node->get_long());
  default:
    return false;
  }
}
 2606 
 2607 // (XorV src (Replicate m1))
 2608 // (XorVMask src (MaskAll m1))
 2609 static bool is_vector_bitwise_not_pattern(Node* n, Node* m) {
 2610   if (n != nullptr && m != nullptr) {
 2611     return (n->Opcode() == Op_XorV || n->Opcode() == Op_XorVMask) &&
 2612            VectorNode::is_all_ones_vector(m);
 2613   }
 2614   return false;
 2615 }
 2616 
 2617 // Should the matcher clone input 'm' of node 'n'?
 2618 bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
 2619   if (is_vshift_con_pattern(n, m) ||
 2620       is_vector_bitwise_not_pattern(n, m) ||
 2621       is_valid_sve_arith_imm_pattern(n, m) ||
 2622       is_encode_and_store_pattern(n, m)) {
 2623     mstack.push(m, Visit);
 2624     return true;
 2625   }
 2626   return false;
 2627 }
 2628 
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {

  // Loads and stores with indirect memory input (e.g., volatile loads and
  // stores) do not subsume the input into complex addressing expressions. If
  // the addressing expression is input to at least one such load or store, do
  // not clone the addressing expression. Query needs_acquiring_load and
  // needs_releasing_store as a proxy for indirect memory input, as it is not
  // possible to directly query for indirect memory input at this stage.
  for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
    Node* n = m->fast_out(i);
    if (n->is_Load() && needs_acquiring_load(n)) {
      return false;
    }
    if (n->is_Store() && needs_releasing_store(n)) {
      return false;
    }
  }

  // Simple base + constant offset addressing is handled generically.
  if (clone_base_plus_offset_address(m, mstack, address_visited)) {
    return true;
  }

  // (AddP base address (LShiftL (ConvI2L idx) con)): clone the shifted
  // (and possibly sign-extended) index so it can be folded into a scaled
  // addressing mode.  NOTE: the order of the mstack pushes and the
  // visited-flag updates below is significant.
  Node *off = m->in(AddPNode::Offset);
  if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
      // The shift amount must match the operand size of every memory user.
      size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
      // Are there other uses besides address expressions?
      !is_visited(off)) {
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(2), Visit);
    Node *conv = off->in(1);
    if (conv->Opcode() == Op_ConvI2L &&
        // Are there other uses besides address expressions?
        !is_visited(conv)) {
      address_visited.set(conv->_idx); // Flag as address_visited
      mstack.push(conv->in(1), Pre_Visit);
    } else {
      mstack.push(conv, Pre_Visit);
    }
    address_visited.test_set(m->_idx); // Flag as address_visited
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  } else if (off->Opcode() == Op_ConvI2L &&
             // Are there other uses besides address expressions?
             !is_visited(off)) {
    // (AddP base address (ConvI2L idx)): clone the sign-extension into
    // the address expression (matches the sxtw addressing forms).
    address_visited.test_set(m->_idx); // Flag as address_visited
    address_visited.set(off->_idx); // Flag as address_visited
    mstack.push(off->in(1), Pre_Visit);
    mstack.push(m->in(AddPNode::Address), Pre_Visit);
    mstack.push(m->in(AddPNode::Base), Pre_Visit);
    return true;
  }
  return false;
}
 2686 
// Emit a volatile memory access INSN (e.g. ldar/stlr variants).  These
// instructions only support plain register-indirect addressing, so any
// operand with an index, displacement or scale is rejected.  SCRATCH is
// unused in the expansion; it is kept for interface symmetry with the
// call sites.
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN)      \
  {                                                                     \
    guarantee(INDEX == -1, "mode not permitted for volatile");          \
    guarantee(DISP == 0, "mode not permitted for volatile");            \
    guarantee(SCALE == 0, "mode not permitted for volatile");           \
    __ INSN(REG, as_Register(BASE));                                    \
  }
 2694 
 2695 
 2696 static Address mem2address(int opcode, Register base, int index, int size, int disp)
 2697   {
 2698     Address::extend scale;
 2699 
 2700     // Hooboy, this is fugly.  We need a way to communicate to the
 2701     // encoder that the index needs to be sign extended, so we have to
 2702     // enumerate all the cases.
 2703     switch (opcode) {
 2704     case INDINDEXSCALEDI2L:
 2705     case INDINDEXSCALEDI2LN:
 2706     case INDINDEXI2L:
 2707     case INDINDEXI2LN:
 2708       scale = Address::sxtw(size);
 2709       break;
 2710     default:
 2711       scale = Address::lsl(size);
 2712     }
 2713 
 2714     if (index == -1) {
 2715       return Address(base, disp);
 2716     } else {
 2717       assert(disp == 0, "unsupported address mode: disp = %d", disp);
 2718       return Address(base, as_Register(index), scale);
 2719     }
 2720   }
 2721 
 2722 
// Member-function-pointer types for the MacroAssembler access emitters
// dispatched through the loadStore() helpers below.
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_insn2)(Register Rt, Register adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
                                  MacroAssembler::SIMD_RegVariant T, const Address &adr);
 2728 
  // Used for all non-volatile memory accesses.  The use of
  // $mem->opcode() to discover whether this pattern uses sign-extended
  // offsets is something of a kludge.
  //
  // Emits 'insn' on integer register 'reg' at the address formed from
  // (base, index, scale, disp).  For base+offset addresses the
  // displacement is legitimized first (out-of-range offsets go through
  // rscratch1), hence both 'base' and 'reg' must differ from rscratch1.
  // 'size_in_memory' is the access size in bytes, passed to
  // legitimize_address.
  static void loadStore(C2_MacroAssembler* masm, mem_insn insn,
                        Register reg, int opcode,
                        Register base, int index, int scale, int disp,
                        int size_in_memory)
  {
    Address addr = mem2address(opcode, base, index, scale, disp);
    if (addr.getMode() == Address::base_plus_offset) {
      /* Fix up any out-of-range offsets. */
      assert_different_registers(rscratch1, base);
      assert_different_registers(rscratch1, reg);
      addr = __ legitimize_address(addr, size_in_memory, rscratch1);
    }
    (masm->*insn)(reg, addr);
  }
 2746 
  // Float/double counterpart of loadStore() above: emits 'insn' on FP
  // register 'reg'.  The addressing logic mirrors mem2address(), except
  // that only the scaled-index opcodes require sign extension here.
  static void loadStore(C2_MacroAssembler* masm, mem_float_insn insn,
                        FloatRegister reg, int opcode,
                        Register base, int index, int size, int disp,
                        int size_in_memory)
  {
    Address::extend scale;

    // Select sign-extended (sxtw) vs. plain shifted (lsl) index
    // extension based on the memory operand's opcode.
    switch (opcode) {
    case INDINDEXSCALEDI2L:
    case INDINDEXSCALEDI2LN:
      scale = Address::sxtw(size);
      break;
    default:
      scale = Address::lsl(size);
    }

    if (index == -1) {
      // Fix up any out-of-range offsets.
      assert_different_registers(rscratch1, base);
      Address addr = Address(base, disp);
      addr = __ legitimize_address(addr, size_in_memory, rscratch1);
      (masm->*insn)(reg, addr);
    } else {
      assert(disp == 0, "unsupported address mode: disp = %d", disp);
      (masm->*insn)(reg, Address(base, as_Register(index), scale));
    }
  }
 2774 
 2775   static void loadStore(C2_MacroAssembler* masm, mem_vector_insn insn,
 2776                         FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
 2777                         int opcode, Register base, int index, int size, int disp)
 2778   {
 2779     if (index == -1) {
 2780       (masm->*insn)(reg, T, Address(base, disp));
 2781     } else {
 2782       assert(disp == 0, "unsupported address mode");
 2783       (masm->*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
 2784     }
 2785   }
 2786 
 2787 %}
 2788 
 2789 
 2790 
 2791 //----------ENCODING BLOCK-----------------------------------------------------
 2792 // This block specifies the encoding classes used by the compiler to
 2793 // output byte streams.  Encoding classes are parameterized macros
 2794 // used by Machine Instruction Nodes in order to generate the bit
 2795 // encoding of the instruction.  Operands specify their base encoding
// interface with the interface keyword.  Four interfaces are
// currently supported: REG_INTER, CONST_INTER, MEMORY_INTER, and
// COND_INTER.  REG_INTER causes an operand to generate a function
 2799 // which returns its register number when queried.  CONST_INTER causes
 2800 // an operand to generate a function which returns the value of the
 2801 // constant when queried.  MEMORY_INTER causes an operand to generate
 2802 // four functions which return the Base Register, the Index Register,
 2803 // the Scale Value, and the Offset Value of the operand when queried.
 2804 // COND_INTER causes an operand to generate six functions which return
 2805 // the encoding code (ie - encoding bits for the instruction)
 2806 // associated with each basic boolean condition for a conditional
 2807 // instruction.
 2808 //
 2809 // Instructions specify two basic values for encoding.  Again, a
 2810 // function is available to check if the constant displacement is an
 2811 // oop. They use the ins_encode keyword to specify their encoding
 2812 // classes (which must be a sequence of enc_class names, and their
 2813 // parameters, specified in the encoding block), and they use the
 2814 // opcode keyword to specify, in order, their primary, secondary, and
 2815 // tertiary opcode.  Only the opcode sections which a particular
 2816 // instruction needs for encoding need to be specified.
 2817 encode %{
 2818   // Build emit functions for each basic byte or larger field in the
 2819   // intel encoding scheme (opcode, rm, sib, immediate), and call them
 2820   // from C++ code in the enc_class source block.  Emit functions will
 2821   // live in the main source block for now.  In future, we can
 2822   // generalize this by adding a syntax that specifies the sizes of
 2823   // fields in an order, so that the adlc can build the emit functions
 2824   // automagically
 2825 
  // catch all for unimplemented encodings
  enc_class enc_unimplemented %{
    // Reports an encoding that has no implementation yet.
    __ unimplemented("C2 catch all");
  %}
 2830 
 2831   // BEGIN Non-volatile memory access
 2832 
 2833   // This encoding class is generated automatically from ad_encode.m4.
 2834   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2835   enc_class aarch64_enc_ldrsbw(iRegI dst, memory1 mem) %{
 2836     Register dst_reg = as_Register($dst$$reg);
 2837     loadStore(masm, &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
 2838                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2839   %}
 2840 
 2841   // This encoding class is generated automatically from ad_encode.m4.
 2842   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2843   enc_class aarch64_enc_ldrsb(iRegI dst, memory1 mem) %{
 2844     Register dst_reg = as_Register($dst$$reg);
 2845     loadStore(masm, &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
 2846                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2847   %}
 2848 
 2849   // This encoding class is generated automatically from ad_encode.m4.
 2850   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2851   enc_class aarch64_enc_ldrb(iRegI dst, memory1 mem) %{
 2852     Register dst_reg = as_Register($dst$$reg);
 2853     loadStore(masm, &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
 2854                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2855   %}
 2856 
 2857   // This encoding class is generated automatically from ad_encode.m4.
 2858   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2859   enc_class aarch64_enc_ldrb(iRegL dst, memory1 mem) %{
 2860     Register dst_reg = as_Register($dst$$reg);
 2861     loadStore(masm, &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
 2862                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2863   %}
 2864 
 2865   // This encoding class is generated automatically from ad_encode.m4.
 2866   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2867   enc_class aarch64_enc_ldrshw(iRegI dst, memory2 mem) %{
 2868     Register dst_reg = as_Register($dst$$reg);
 2869     loadStore(masm, &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
 2870                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2871   %}
 2872 
 2873   // This encoding class is generated automatically from ad_encode.m4.
 2874   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2875   enc_class aarch64_enc_ldrsh(iRegI dst, memory2 mem) %{
 2876     Register dst_reg = as_Register($dst$$reg);
 2877     loadStore(masm, &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
 2878                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2879   %}
 2880 
 2881   // This encoding class is generated automatically from ad_encode.m4.
 2882   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2883   enc_class aarch64_enc_ldrh(iRegI dst, memory2 mem) %{
 2884     Register dst_reg = as_Register($dst$$reg);
 2885     loadStore(masm, &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
 2886                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2887   %}
 2888 
 2889   // This encoding class is generated automatically from ad_encode.m4.
 2890   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2891   enc_class aarch64_enc_ldrh(iRegL dst, memory2 mem) %{
 2892     Register dst_reg = as_Register($dst$$reg);
 2893     loadStore(masm, &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
 2894                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2895   %}
 2896 
 2897   // This encoding class is generated automatically from ad_encode.m4.
 2898   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2899   enc_class aarch64_enc_ldrw(iRegI dst, memory4 mem) %{
 2900     Register dst_reg = as_Register($dst$$reg);
 2901     loadStore(masm, &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
 2902                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2903   %}
 2904 
 2905   // This encoding class is generated automatically from ad_encode.m4.
 2906   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2907   enc_class aarch64_enc_ldrw(iRegL dst, memory4 mem) %{
 2908     Register dst_reg = as_Register($dst$$reg);
 2909     loadStore(masm, &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
 2910                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2911   %}
 2912 
 2913   // This encoding class is generated automatically from ad_encode.m4.
 2914   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2915   enc_class aarch64_enc_ldrsw(iRegL dst, memory4 mem) %{
 2916     Register dst_reg = as_Register($dst$$reg);
 2917     loadStore(masm, &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
 2918                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2919   %}
 2920 
 2921   // This encoding class is generated automatically from ad_encode.m4.
 2922   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2923   enc_class aarch64_enc_ldr(iRegL dst, memory8 mem) %{
 2924     Register dst_reg = as_Register($dst$$reg);
 2925     loadStore(masm, &MacroAssembler::ldr, dst_reg, $mem->opcode(),
 2926                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 2927   %}
 2928 
 2929   // This encoding class is generated automatically from ad_encode.m4.
 2930   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2931   enc_class aarch64_enc_ldrs(vRegF dst, memory4 mem) %{
 2932     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
 2933     loadStore(masm, &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
 2934                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2935   %}
 2936 
 2937   // This encoding class is generated automatically from ad_encode.m4.
 2938   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2939   enc_class aarch64_enc_ldrd(vRegD dst, memory8 mem) %{
 2940     FloatRegister dst_reg = as_FloatRegister($dst$$reg);
 2941     loadStore(masm, &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
 2942                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 2943   %}
 2944 
 2945   // This encoding class is generated automatically from ad_encode.m4.
 2946   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2947   enc_class aarch64_enc_strb(iRegI src, memory1 mem) %{
 2948     Register src_reg = as_Register($src$$reg);
 2949     loadStore(masm, &MacroAssembler::strb, src_reg, $mem->opcode(),
 2950                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2951   %}
 2952 
 2953   // This encoding class is generated automatically from ad_encode.m4.
 2954   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2955   enc_class aarch64_enc_strb0(memory1 mem) %{
 2956     loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(),
 2957                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 2958   %}
 2959 
 2960   // This encoding class is generated automatically from ad_encode.m4.
 2961   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2962   enc_class aarch64_enc_strh(iRegI src, memory2 mem) %{
 2963     Register src_reg = as_Register($src$$reg);
 2964     loadStore(masm, &MacroAssembler::strh, src_reg, $mem->opcode(),
 2965                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2966   %}
 2967 
 2968   // This encoding class is generated automatically from ad_encode.m4.
 2969   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2970   enc_class aarch64_enc_strh0(memory2 mem) %{
 2971     loadStore(masm, &MacroAssembler::strh, zr, $mem->opcode(),
 2972                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
 2973   %}
 2974 
 2975   // This encoding class is generated automatically from ad_encode.m4.
 2976   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2977   enc_class aarch64_enc_strw(iRegI src, memory4 mem) %{
 2978     Register src_reg = as_Register($src$$reg);
 2979     loadStore(masm, &MacroAssembler::strw, src_reg, $mem->opcode(),
 2980                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2981   %}
 2982 
 2983   // This encoding class is generated automatically from ad_encode.m4.
 2984   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2985   enc_class aarch64_enc_strw0(memory4 mem) %{
 2986     loadStore(masm, &MacroAssembler::strw, zr, $mem->opcode(),
 2987                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 2988   %}
 2989 
 2990   // This encoding class is generated automatically from ad_encode.m4.
 2991   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 2992   enc_class aarch64_enc_str(iRegL src, memory8 mem) %{
 2993     Register src_reg = as_Register($src$$reg);
 2994     // we sometimes get asked to store the stack pointer into the
 2995     // current thread -- we cannot do that directly on AArch64
 2996     if (src_reg == r31_sp) {
 2997       assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
 2998       __ mov(rscratch2, sp);
 2999       src_reg = rscratch2;
 3000     }
 3001     loadStore(masm, &MacroAssembler::str, src_reg, $mem->opcode(),
 3002                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 3003   %}
 3004 
 3005   // This encoding class is generated automatically from ad_encode.m4.
 3006   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3007   enc_class aarch64_enc_str0(memory8 mem) %{
 3008     loadStore(masm, &MacroAssembler::str, zr, $mem->opcode(),
 3009                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 3010   %}
 3011 
 3012   // This encoding class is generated automatically from ad_encode.m4.
 3013   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3014   enc_class aarch64_enc_strs(vRegF src, memory4 mem) %{
 3015     FloatRegister src_reg = as_FloatRegister($src$$reg);
 3016     loadStore(masm, &MacroAssembler::strs, src_reg, $mem->opcode(),
 3017                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
 3018   %}
 3019 
 3020   // This encoding class is generated automatically from ad_encode.m4.
 3021   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3022   enc_class aarch64_enc_strd(vRegD src, memory8 mem) %{
 3023     FloatRegister src_reg = as_FloatRegister($src$$reg);
 3024     loadStore(masm, &MacroAssembler::strd, src_reg, $mem->opcode(),
 3025                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
 3026   %}
 3027 
 3028   // This encoding class is generated automatically from ad_encode.m4.
 3029   // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 3030   enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
 3031       __ membar(Assembler::StoreStore);
 3032       loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(),
 3033                as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
 3034   %}
 3035 
 3036   // END Non-volatile memory access
 3037 
  // Vector loads and stores
  //
  // Each of these emits an FP/SIMD ldr or str through loadStore(); the
  // SIMD_RegVariant (H/S/D/Q) selects the 16/32/64/128-bit form.

  // Load 16 bits (H) into a vector register.
  enc_class aarch64_enc_ldrvH(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 32 bits (S) into a vector register.
  enc_class aarch64_enc_ldrvS(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 64 bits (D) into a vector register.
  enc_class aarch64_enc_ldrvD(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Load 128 bits (Q) into a vector register.
  enc_class aarch64_enc_ldrvQ(vReg dst, memory mem) %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store 16 bits (H) from a vector register.
  enc_class aarch64_enc_strvH(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::H,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store 32 bits (S) from a vector register.
  enc_class aarch64_enc_strvS(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::S,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store 64 bits (D) from a vector register.
  enc_class aarch64_enc_strvD(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::D,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}

  // Store 128 bits (Q) from a vector register.
  enc_class aarch64_enc_strvQ(vReg src, memory mem) %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::Q,
       $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
  %}
 3086 
  // volatile loads and stores
  //
  // These use store-release (stlr*) instructions emitted via
  // MOV_VOLATILE, which only permits register-indirect addressing.

  // Store-release byte from a register.
  enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  // Store-release zero byte (sources zr, no register needed).
  enc_class aarch64_enc_stlrb0(memory mem) %{
    MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrb);
  %}

  // Store-release halfword from a register.
  enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  // Store-release zero halfword.
  enc_class aarch64_enc_stlrh0(memory mem) %{
    MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrh);
  %}

  // Store-release word from a register.
  enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
    MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // Store-release zero word.
  enc_class aarch64_enc_stlrw0(memory mem) %{
    MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}
 3118 
  // Load-acquire byte, then sign-extend to 32 bits.
  enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtbw(dst_reg, dst_reg);
  %}

  // Load-acquire byte, then sign-extend to 64 bits.
  enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
    __ sxtb(dst_reg, dst_reg);
  %}

  // Load-acquire byte into an int register.
  enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Load-acquire byte into a long register.
  enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarb);
  %}

  // Load-acquire halfword, then sign-extend to 32 bits.
  enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxthw(dst_reg, dst_reg);
  %}

  // Load-acquire halfword, then sign-extend to 64 bits.
  enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
    Register dst_reg = as_Register($dst$$reg);
    MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
    __ sxth(dst_reg, dst_reg);
  %}

  // Load-acquire halfword into an int register.
  enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Load-acquire halfword into a long register.
  enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarh);
  %}

  // Load-acquire word into an int register.
  enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // Load-acquire word into a long register.
  enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
  %}

  // Load-acquire doubleword.
  enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
    MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
  %}

  // Load-acquire a word into rscratch1, then move it to a float register.
  enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldarw);
    __ fmovs(as_FloatRegister($dst$$reg), rscratch1);
  %}

  // Load-acquire a doubleword into rscratch1, then move it to a double register.
  enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
    MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
             rscratch1, ldar);
    __ fmovd(as_FloatRegister($dst$$reg), rscratch1);
  %}
 3193 
  // Store-release doubleword from a register.
  enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
    Register src_reg = as_Register($src$$reg);
    // we sometimes get asked to store the stack pointer into the
    // current thread -- we cannot do that directly on AArch64
    if (src_reg == r31_sp) {
      assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
      __ mov(rscratch2, sp);
      src_reg = rscratch2;
    }
    MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // Store-release zero doubleword.
  enc_class aarch64_enc_stlr0(memory mem) %{
    MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}

  // Move a float into rscratch2, then store-release the 32-bit word.
  enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
    {
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovs(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlrw);
  %}

  // Move a double into rscratch2, then store-release the 64-bit word.
  enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
    {
      FloatRegister src_reg = as_FloatRegister($src$$reg);
      __ fmovd(rscratch2, src_reg);
    }
    MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
                 rscratch1, stlr);
  %}
 3229 
 3230   // synchronized read/update encodings
 3231 
  // Load-acquire exclusive (ldaxr). ldaxr takes only a base register, so
  // any displacement or index must be folded into rscratch1 with lea first.
  enc_class aarch64_enc_ldaxr(iRegL dst, memory8 mem) %{
    Register dst_reg = as_Register($dst$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch1, Address(base, disp));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ ldaxr(dst_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      } else {
        // base + disp first, then add the scaled index
        __ lea(rscratch1, Address(base, disp));
        __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
        __ ldaxr(dst_reg, rscratch1);
      }
    }
  %}
 3259 
  // Store-release exclusive (stlxr). Like ldaxr above, the effective
  // address must be a bare register, so complex addresses are formed in
  // rscratch2; rscratch1 receives the status result.
  enc_class aarch64_enc_stlxr(iRegLNoSp src, memory8 mem) %{
    Register src_reg = as_Register($src$$reg);
    Register base = as_Register($mem$$base);
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    if (index == -1) {
       if (disp != 0) {
        __ lea(rscratch2, Address(base, disp));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        // TODO
        // should we ever get anything other than this case?
        __ stlxr(rscratch1, src_reg, base);
      }
    } else {
      Register index_reg = as_Register(index);
      if (disp == 0) {
        __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      } else {
        __ lea(rscratch2, Address(base, disp));
        __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
        __ stlxr(rscratch1, src_reg, rscratch2);
      }
    }
    // stlxr writes 0 to the status register on success; set the condition
    // flags so EQ means the store succeeded.
    __ cmpw(rscratch1, zr);
  %}
 3288 
  // Compare-and-swap encodings (relaxed acquire: acquire=false,
  // release=true). The memory operand must be a bare base register --
  // guaranteed by the matcher and asserted here. Variants differ only in
  // operand size (xword/word/halfword/byte).
  enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3316 
 3317 
 3318   // The only difference between aarch64_enc_cmpxchg and
 3319   // aarch64_enc_cmpxchg_acq is that we use load-acquire in the
 3320   // CompareAndSwap sequence to serve as a barrier on acquiring a
 3321   // lock.
  // Acquiring variants: identical to the encodings above except that
  // acquire=true, per the comment preceding this group.
  enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}

  enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
    guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
    __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, noreg);
  %}
 3349 
  // auxiliary used for CompareAndSwapX to set result register:
  // res = 1 if the preceding compare left EQ set, else 0.
  enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
    Register res_reg = as_Register($res$$reg);
    __ cset(res_reg, Assembler::EQ);
  %}
 3355 
 3356   // prefetch encodings
 3357 
 3358   enc_class aarch64_enc_prefetchw(memory mem) %{
 3359     Register base = as_Register($mem$$base);
 3360     int index = $mem$$index;
 3361     int scale = $mem$$scale;
 3362     int disp = $mem$$disp;
 3363     if (index == -1) {
 3364       // Fix up any out-of-range offsets.
 3365       assert_different_registers(rscratch1, base);
 3366       Address addr = Address(base, disp);
 3367       addr = __ legitimize_address(addr, 8, rscratch1);
 3368       __ prfm(addr, PSTL1KEEP);
 3369     } else {
 3370       Register index_reg = as_Register(index);
 3371       if (disp == 0) {
 3372         __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3373       } else {
 3374         __ lea(rscratch1, Address(base, disp));
 3375 	__ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
 3376       }
 3377     }
 3378   %}
 3379 
 3380   // mov encodings
 3381 
  // 32-bit immediate move; zero gets the cheaper move-from-zr form.
  enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
    uint32_t con = (uint32_t)$src$$constant;
    Register dst_reg = as_Register($dst$$reg);
    if (con == 0) {
      __ movw(dst_reg, zr);
    } else {
      __ movw(dst_reg, con);
    }
  %}

  // 64-bit immediate move; zero gets the cheaper move-from-zr form.
  enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
    Register dst_reg = as_Register($dst$$reg);
    uint64_t con = (uint64_t)$src$$constant;
    if (con == 0) {
      __ mov(dst_reg, zr);
    } else {
      __ mov(dst_reg, con);
    }
  %}
 3401 
  // Move a pointer constant, dispatching on its relocation type:
  // oops and metadata go through the relocation-aware helpers; plain
  // addresses use adrp+add when PC-relative addressing is valid, else a
  // full immediate move. null and 1 are handled by dedicated encodings
  // below, so they must not reach here.
  enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == nullptr || con == (address)1) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      if (rtype == relocInfo::oop_type) {
        __ movoop(dst_reg, (jobject)con);
      } else if (rtype == relocInfo::metadata_type) {
        __ mov_metadata(dst_reg, (Metadata*)con);
      } else {
        assert(rtype == relocInfo::none, "unexpected reloc type");
        // Addresses below the first page or outside the valid address
        // range cannot use adrp; fall back to a literal move.
        if (! __ is_valid_AArch64_address(con) ||
            con < (address)(uintptr_t)os::vm_page_size()) {
          __ mov(dst_reg, con);
        } else {
          uint64_t offset;
          __ adrp(dst_reg, con, offset);
          __ add(dst_reg, dst_reg, offset);
        }
      }
    }
  %}
 3426 
  // Pointer constant null.
  enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Pointer constant 1 (used as a marker value by callers elsewhere).
  enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, (uint64_t)1);
  %}

  // Card-table byte map base (GC barrier constant).
  enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
    __ load_byte_map_base($dst$$Register);
  %}
 3440 
  // Narrow (compressed) oop constant; must carry an oop relocation.
  enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == nullptr) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::oop_type, "unexpected reloc type");
      __ set_narrow_oop(dst_reg, (jobject)con);
    }
  %}

  // Narrow oop constant zero (compressed null).
  enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
    Register dst_reg = as_Register($dst$$reg);
    __ mov(dst_reg, zr);
  %}

  // Narrow (compressed) klass constant; must carry a metadata relocation.
  enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
    Register dst_reg = as_Register($dst$$reg);
    address con = (address)$src$$constant;
    if (con == nullptr) {
      ShouldNotReachHere();
    } else {
      relocInfo::relocType rtype = $src->constant_reloc();
      assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
      __ set_narrow_klass(dst_reg, (Klass *)con);
    }
  %}
 3469 
 3470   // arithmetic encodings
 3471 
  // 32-bit add/subtract of an immediate. One encoding serves both ops:
  // $primary distinguishes them, and subtraction is implemented by
  // negating the constant, then a final sign check picks addw vs subw so
  // the emitted immediate is always non-negative.
  enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ subw(dst_reg, src_reg, -con);
    } else {
      __ addw(dst_reg, src_reg, con);
    }
  %}

  // 64-bit variant of the encoding above. The constant is still 32-bit
  // (immLAddSub restricts the range; presumably to an encodable add/sub
  // immediate -- confirm against the operand definition).
  enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
    Register dst_reg = as_Register($dst$$reg);
    Register src_reg = as_Register($src1$$reg);
    int32_t con = (int32_t)$src2$$constant;
    // add has primary == 0, subtract has primary == 1
    if ($primary) { con = -con; }
    if (con < 0) {
      __ sub(dst_reg, src_reg, -con);
    } else {
      __ add(dst_reg, src_reg, con);
    }
  %}
 3497 
  // Division and remainder. corrected_idivl/idivq handle the Java
  // corner cases (the boolean selects remainder: false = quotient,
  // true = remainder).

  // 32-bit divide
  enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 64-bit divide
  enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
  %}

  // 32-bit remainder
  enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}

  // 64-bit remainder
  enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
   Register dst_reg = as_Register($dst$$reg);
   Register src1_reg = as_Register($src1$$reg);
   Register src2_reg = as_Register($src2$$reg);
    __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
  %}
 3525 
 3526   // compare instruction encodings
 3527 
  // 32-bit register-register compare.
  enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // 32-bit compare against an add/sub-encodable immediate: negative
  // constants are compared by adding their magnitude instead, writing
  // only the flags (destination zr).
  enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
    Register reg = as_Register($src1$$reg);
    int32_t val = $src2$$constant;
    if (val >= 0) {
      __ subsw(zr, reg, val);
    } else {
      __ addsw(zr, reg, -val);
    }
  %}

  // 32-bit compare against an arbitrary immediate, materialized in
  // rscratch1 first.
  enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
    Register reg1 = as_Register($src1$$reg);
    uint32_t val = (uint32_t)$src2$$constant;
    __ movw(rscratch1, val);
    __ cmpw(reg1, rscratch1);
  %}
 3550 
  // 64-bit register-register compare.
  enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // 64-bit compare against an immL12 immediate. Non-negative values use
  // subs directly; negative values use adds with the negated magnitude.
  enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
    Register reg = as_Register($src1$$reg);
    int64_t val = $src2$$constant;
    if (val >= 0) {
      __ subs(zr, reg, val);
    } else if (val != -val) {
      __ adds(zr, reg, -val);
    } else {
      // aargh, Long.MIN_VALUE is a special case: it is the only negative
      // value with val == -val (negation overflows), so materialize it
      // in rscratch1 and compare against the register instead.
      __ orr(rscratch1, zr, (uint64_t)val);
      __ subs(zr, reg, rscratch1);
    }
  %}

  // 64-bit compare against an arbitrary immediate, materialized in
  // rscratch1 first.
  enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
    Register reg1 = as_Register($src1$$reg);
    uint64_t val = (uint64_t)$src2$$constant;
    __ mov(rscratch1, val);
    __ cmp(reg1, rscratch1);
  %}
 3577 
  // Pointer compare (full-width).
  enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmp(reg1, reg2);
  %}

  // Narrow-oop compare (32-bit).
  enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
    Register reg1 = as_Register($src1$$reg);
    Register reg2 = as_Register($src2$$reg);
    __ cmpw(reg1, reg2);
  %}

  // Pointer test against null.
  enc_class aarch64_enc_testp(iRegP src) %{
    Register reg = as_Register($src$$reg);
    __ cmp(reg, zr);
  %}

  // Narrow-oop test against null.
  enc_class aarch64_enc_testn(iRegN src) %{
    Register reg = as_Register($src$$reg);
    __ cmpw(reg, zr);
  %}
 3599 
  // Unconditional branch to a label.
  enc_class aarch64_enc_b(label lbl) %{
    Label *L = $lbl$$label;
    __ b(*L);
  %}

  // Conditional branch; the condition code comes from the cmpOp operand.
  enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}

  // Unsigned-condition variant; identical emission, the cmpOpU operand
  // supplies unsigned condition codes.
  enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
    Label *L = $lbl$$label;
    __ br ((Assembler::Condition)$cmp$$cmpcode, *L);
  %}
 3614 
  // Slow-path subtype check. On a miss, control reaches the `miss` label
  // with result untouched; $primary selects a variant that zeroes the
  // result register on the hit path before falling through.
  enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
  %{
     Register sub_reg = as_Register($sub$$reg);
     Register super_reg = as_Register($super$$reg);
     Register temp_reg = as_Register($temp$$reg);
     Register result_reg = as_Register($result$$reg);

     Label miss;
     __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
                                     nullptr, &miss,
                                     /*set_cond_codes:*/ true);
     if ($primary) {
       __ mov(result_reg, zr);
     }
     __ bind(miss);
  %}
 3631 
  // Static Java call. Three cases:
  //   1. no resolved _method: a call to a runtime wrapper;
  //   2. the ensureMaterializedForStackWalk intrinsic, which is elided
  //      (a nop keeps the code size unchanged);
  //   3. a real static/opt-virtual call, which needs a to-interpreter
  //      stub (possibly shared between call sites of the same method).
  // Any trampoline_call returning null means the code cache is full and
  // the compilation is abandoned.
  enc_class aarch64_enc_java_static_call(method meth) %{
    address addr = (address)$meth$$method;
    address call;
    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type));
      if (call == nullptr) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    } else if (_method->intrinsic_id() == vmIntrinsicID::_ensureMaterializedForStackWalk) {
      // The NOP here is purely to ensure that eliding a call to
      // JVM_EnsureMaterializedForStackWalk doesn't change the code size.
      __ nop();
      __ block_comment("call JVM_EnsureMaterializedForStackWalk (elided)");
    } else {
      int method_index = resolved_method_index(masm);
      RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                                  : static_call_Relocation::spec(method_index);
      call = __ trampoline_call(Address(addr, rspec));
      if (call == nullptr) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
      if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
        // Calls of the same statically bound method can share
        // a stub to the interpreter.
        __ code()->shared_stub_to_interp_for(_method, call - __ begin());
      } else {
        // Emit stub for static call
        address stub = CompiledDirectCall::emit_to_interp_stub(masm, call);
        if (stub == nullptr) {
          ciEnv::current()->record_failure("CodeCache is full");
          return;
        }
      }
    }

    __ post_call_nop();

    // Only non uncommon_trap calls need to reinitialize ptrue.
    // (max_vector_size() > 0 presumably means SVE is in use and the
    // callee may have clobbered the all-true predicate -- confirm.)
    if (Compile::current()->max_vector_size() > 0 && uncommon_trap_request() == 0) {
      __ reinitialize_ptrue();
    }
  %}
 3677 
  // Dynamic (inline-cache) Java call; bails out if the code cache fills.
  enc_class aarch64_enc_java_dynamic_call(method meth) %{
    int method_index = resolved_method_index(masm);
    address call = __ ic_call((address)$meth$$method, method_index);
    if (call == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
    __ post_call_nop();
    // Restore the SVE all-true predicate the callee may have clobbered.
    if (Compile::current()->max_vector_size() > 0) {
      __ reinitialize_ptrue();
    }
  %}

  // Call epilogue: optional stack-depth verification (currently
  // unimplemented on AArch64).
  enc_class aarch64_enc_call_epilog() %{
    if (VerifyStackAtCalls) {
      // Check that stack depth is unchanged: find majik cookie on stack
      __ call_Unimplemented();
    }
  %}
 3697 
  enc_class aarch64_enc_java_to_runtime(method meth) %{
    // some calls to generated routines (arraycopy code) are scheduled
    // by C2 as runtime calls. if so we can call them using a br (they
    // will be in a reachable segment) otherwise we have to use a blr
    // which loads the absolute address into a register.
    address entry = (address)$meth$$method;
    CodeBlob *cb = CodeCache::find_blob(entry);
    if (cb) {
      // Target lives in the code cache: reachable via trampoline call.
      address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
      if (call == nullptr) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
      __ post_call_nop();
    } else {
      // Arbitrary address: record the return PC in the thread anchor so
      // the frame stays walkable across the call, then blr through
      // rscratch1.
      Label retaddr;
      // Make the anchor frame walkable
      __ adr(rscratch2, retaddr);
      __ str(rscratch2, Address(rthread, JavaThread::last_Java_pc_offset()));
      __ lea(rscratch1, RuntimeAddress(entry));
      __ blr(rscratch1);
      __ bind(retaddr);
      __ post_call_nop();
    }
    // Restore the SVE all-true predicate the callee may have clobbered.
    if (Compile::current()->max_vector_size() > 0) {
      __ reinitialize_ptrue();
    }
  %}
 3726 
  // Jump to the rethrow stub (never returns here).
  enc_class aarch64_enc_rethrow() %{
    __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
  %}

  // Method return; in debug builds, first check the SVE ptrue predicate
  // is still intact.
  enc_class aarch64_enc_ret() %{
#ifdef ASSERT
    if (Compile::current()->max_vector_size() > 0) {
      __ verify_ptrue();
    }
#endif
    __ ret(lr);
  %}

  // Tail call: indirect jump to the target.
  enc_class aarch64_enc_tail_call(iRegP jump_target) %{
    Register target_reg = as_Register($jump_target$$reg);
    __ br(target_reg);
  %}

  // Tail jump used for exception forwarding.
  enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
    Register target_reg = as_Register($jump_target$$reg);
    // exception oop should be in r0
    // ret addr has been popped into lr
    // callee expects it in r3
    __ mov(r3, lr);
    __ br(target_reg);
  %}
 3753 
 3754 %}
 3755 
 3756 //----------FRAME--------------------------------------------------------------
 3757 // Definition of frame structure and management information.
 3758 //
 3759 //  S T A C K   L A Y O U T    Allocators stack-slot number
 3760 //                             |   (to get allocators register number
 3761 //  G  Owned by    |        |  v    add OptoReg::stack0())
 3762 //  r   CALLER     |        |
 3763 //  o     |        +--------+      pad to even-align allocators stack-slot
 3764 //  w     V        |  pad0  |        numbers; owned by CALLER
 3765 //  t   -----------+--------+----> Matcher::_in_arg_limit, unaligned
 3766 //  h     ^        |   in   |  5
 3767 //        |        |  args  |  4   Holes in incoming args owned by SELF
 3768 //  |     |        |        |  3
 3769 //  |     |        +--------+
 3770 //  V     |        | old out|      Empty on Intel, window on Sparc
 3771 //        |    old |preserve|      Must be even aligned.
 3772 //        |     SP-+--------+----> Matcher::_old_SP, even aligned
 3773 //        |        |   in   |  3   area for Intel ret address
 3774 //     Owned by    |preserve|      Empty on Sparc.
 3775 //       SELF      +--------+
 3776 //        |        |  pad2  |  2   pad to align old SP
 3777 //        |        +--------+  1
 3778 //        |        | locks  |  0
 3779 //        |        +--------+----> OptoReg::stack0(), even aligned
 3780 //        |        |  pad1  | 11   pad to align new SP
 3781 //        |        +--------+
 3782 //        |        |        | 10
 3783 //        |        | spills |  9   spills
 3784 //        V        |        |  8   (pad0 slot for callee)
 3785 //      -----------+--------+----> Matcher::_out_arg_limit, unaligned
 3786 //        ^        |  out   |  7
 3787 //        |        |  args  |  6   Holes in outgoing args owned by CALLEE
 3788 //     Owned by    +--------+
 3789 //      CALLEE     | new out|  6   Empty on Intel, window on Sparc
 3790 //        |    new |preserve|      Must be even-aligned.
 3791 //        |     SP-+--------+----> Matcher::_new_SP, even aligned
 3792 //        |        |        |
 3793 //
 3794 // Note 1: Only region 8-11 is determined by the allocator.  Region 0-5 is
 3795 //         known from SELF's arguments and the Java calling convention.
 3796 //         Region 6-7 is determined per call site.
 3797 // Note 2: If the calling convention leaves holes in the incoming argument
 3798 //         area, those holes are owned by SELF.  Holes in the outgoing area
 3799 //         are owned by the CALLEE.  Holes should not be necessary in the
 3800 //         incoming area, as the Java calling convention is completely under
 3801 //         the control of the AD file.  Doubles can be sorted and packed to
 3802 //         avoid holes.  Holes in the outgoing arguments may be necessary for
 3803 //         varargs C calling conventions.
 3804 // Note 3: Region 0-3 is even aligned, with pad2 as needed.  Region 3-5 is
 3805 //         even aligned with pad0 as needed.
 3806 //         Region 6 is even aligned.  Region 6-7 is NOT even aligned;
 3807 //           (the latter is true on Intel but is it false on AArch64?)
 3808 //         region 6-11 is even aligned; it may be padded out more so that
 3809 //         the region from SP to FP meets the minimum stack alignment.
 3810 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
 3811 //         alignment.  Region 11, pad1, may be dynamically extended so that
 3812 //         SP meets the minimum alignment.
 3813 
frame %{
  // These three registers define part of the calling convention
  // between compiled code and the interpreter.

  // Inline Cache Register or Method for I2C.
  inline_cache_reg(R12);

  // Number of stack slots consumed by locking an object
  sync_stack_slots(2);

  // Compiled code's Frame Pointer
  frame_pointer(R31);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(R29);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  // TODO this may well be correct but need to check why that - 2 is there
  // ppc port uses 0 but we definitely need to allow for fixed_slots
  // which folds in the space used for monitors
  return_addr(STACK - 2 +
              align_up((Compile::current()->in_preserve_stack_slots() +
                        Compile::current()->fixed_slots()),
                       stack_alignment_in_slots()));

  // Location of compiled Java return values.  Same as C for now.
  return_value
  %{
    // TODO do we allow ideal_reg == Op_RegN???
    assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
           "only return normal values");

    // Low half of the return register pair, indexed by ideal reg opcode.
    static const int lo[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      R0_num,                            // Op_RegN
      R0_num,                            // Op_RegI
      R0_num,                            // Op_RegP
      V0_num,                            // Op_RegF
      V0_num,                            // Op_RegD
      R0_num                             // Op_RegL
    };

    // High half; OptoReg::Bad marks single-slot (32-bit) values.
    static const int hi[Op_RegL + 1] = { // enum name
      0,                                 // Op_Node
      0,                                 // Op_Set
      OptoReg::Bad,                      // Op_RegN
      OptoReg::Bad,                      // Op_RegI
      R0_H_num,                          // Op_RegP
      OptoReg::Bad,                      // Op_RegF
      V0_H_num,                          // Op_RegD
      R0_H_num                           // Op_RegL
    };

    return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
  %}
%}
 3885 
 3886 //----------ATTRIBUTES---------------------------------------------------------
 3887 //----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits);
                                // default: one 4-byte A64 instruction
ins_attrib ins_short_branch(0); // Required flag: is this instruction
                                // a non-matching short branch variant
                                // of some long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must
                                // be a power of 2) specifies the
                                // alignment that some part of the
                                // instruction (not necessarily the
                                // start) requires.  If > 1, a
                                // compute_padding() function must be
                                // provided for the instruction
 3903 
 3904 //----------OPERANDS-----------------------------------------------------------
 3905 // Operand definitions must precede instruction definitions for correct parsing
 3906 // in the ADLC because operands constitute user defined types which are used in
 3907 // instruction definitions.
 3908 
 3909 //----------Simple Operands----------------------------------------------------
 3910 
 3911 // Integer operands 32 bit
 3912 // 32 bit immediate
// Integer operands 32 bit
// 32 bit immediate (any value)
operand immI()
%{
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit zero
operand immI0()
%{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit increment (constant 1)
operand immI_1()
%{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unit decrement (constant -1)
operand immI_M1()
%{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 3954 
 3955 // Shift values for add/sub extension shift
 3956 operand immIExt()
 3957 %{
 3958   predicate(0 <= n->get_int() && (n->get_int() <= 4));
 3959   match(ConI);
 3960 
 3961   op_cost(0);
 3962   format %{ %}
 3963   interface(CONST_INTER);
 3964 %}
 3965 
// 32 bit integer strictly greater than one
operand immI_gt_1()
%{
  predicate(n->get_int() > 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer no larger than four
operand immI_le_4()
%{
  predicate(n->get_int() <= 4);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 16
operand immI_16()
%{
  predicate(n->get_int() == 16);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 24
operand immI_24()
%{
  predicate(n->get_int() == 24);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 32
operand immI_32()
%{
  predicate(n->get_int() == 32);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 48
operand immI_48()
%{
  predicate(n->get_int() == 48);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit constant 56
operand immI_56()
%{
  predicate(n->get_int() == 56);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8-bit mask, 0xff
operand immI_255()
%{
  predicate(n->get_int() == 255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit mask, 0xffff
operand immI_65535()
%{
  predicate(n->get_int() == 65535);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// strictly positive 32 bit integer
operand immI_positive()
%{
  predicate(n->get_int() > 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4065 
// BoolTest condition for signed compare
// (any BoolTest code that is NOT one of the unsigned predicates)
operand immI_cmp_cond()
%{
  predicate(!Matcher::is_unsigned_booltest_pred(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// BoolTest condition for unsigned compare
operand immI_cmpU_cond()
%{
  predicate(Matcher::is_unsigned_booltest_pred(n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long 8-bit mask, 0xff
operand immL_255()
%{
  predicate(n->get_long() == 255L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long 16-bit mask, 0xffff
operand immL_65535()
%{
  predicate(n->get_long() == 65535L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long 32-bit mask, 0xffffffff
operand immL_4294967295()
%{
  predicate(n->get_long() == 4294967295L);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4117 
// Long bitmask of the form 2^k - 1 (contiguous low-order ones,
// top two bits clear, so k <= 62); zero is excluded.
operand immL_bitmask()
%{
  predicate((n->get_long() != 0)
            && ((n->get_long() & 0xc000000000000000l) == 0)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Int bitmask of the form 2^k - 1 (top two bits clear, so k <= 30);
// zero is excluded.
operand immI_bitmask()
%{
  predicate((n->get_int() != 0)
            && ((n->get_int() & 0xc0000000) == 0)
            && is_power_of_2(n->get_int() + 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long bitmask of the form 2^k - 1 that also fits in 31 bits,
// i.e. usable as a positive int bitmask; zero is excluded.
operand immL_positive_bitmaskI()
%{
  predicate((n->get_long() != 0)
            && ((julong)n->get_long() < 0x80000000ULL)
            && is_power_of_2(n->get_long() + 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4153 
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 5 bit signed integer
operand immI5()
%{
  predicate(Assembler::is_simm(n->get_int(), 5));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 7 bit unsigned integer
operand immIU7()
%{
  predicate(Assembler::is_uimm(n->get_int(), 7));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset for scaled or unscaled immediate loads and stores.
// The second argument to offset_ok_for_immed is the log2 of the
// access size in bytes (the scaling shift).
operand immIOffset()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 1-byte access (shift 0)
operand immIOffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 2-byte access (shift 1)
operand immIOffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 1));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 4-byte access (shift 2)
operand immIOffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 2));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for an 8-byte access (shift 3)
operand immIOffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 3));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Offset valid for a 16-byte access (shift 4)
operand immIOffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_int(), 4));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4247 
// Long offset in the range [-256, 65520]
operand immLOffset()
%{
  predicate(n->get_long() >= -256 && n->get_long() <= 65520);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 1-byte access (shift 0)
operand immLoffset1()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 2-byte access (shift 1)
operand immLoffset2()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 1));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 4-byte access (shift 2)
operand immLoffset4()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 2));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for an 8-byte access (shift 3)
operand immLoffset8()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 3));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long offset valid for a 16-byte access (shift 4)
operand immLoffset16()
%{
  predicate(Address::offset_ok_for_immed(n->get_long(), 4));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 5 bit signed long integer
operand immL5()
%{
  predicate(Assembler::is_simm(n->get_long(), 5));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 7 bit unsigned long integer
operand immLU7()
%{
  predicate(Assembler::is_uimm(n->get_long(), 7));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4329 
// 8 bit signed value.
operand immI8()
%{
  predicate(n->get_int() <= 127 && n->get_int() >= -128);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit signed value (simm8), or #simm8 LSL 8.
// The shifted form covers multiples of 256 in [-32768, 32512].
operand immI8_shift8()
%{
  predicate((n->get_int() <= 127 && n->get_int() >= -128) ||
            (n->get_int() <= 32512 && n->get_int() >= -32768 && (n->get_int() & 0xff) == 0));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit signed value (simm8), or #simm8 LSL 8 — long variant.
operand immL8_shift8()
%{
  predicate((n->get_long() <= 127 && n->get_long() >= -128) ||
            (n->get_long() <= 32512 && n->get_long() >= -32768 && (n->get_long() & 0xff) == 0));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 8 bit integer valid for vector add sub immediate
// (note the symmetric range -255..255, not -128..127)
operand immBAddSubV()
%{
  predicate(n->get_int() <= 255 && n->get_int() >= -255);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4375 
// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate((int64_t)n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit integer valid for SVE vector add sub immediate
operand immIAddSubV()
%{
  predicate(Assembler::operand_valid_for_sve_add_sub_immediate((int64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Unsigned integers valid for logical immediates, by element size

// 8 bit value valid for SVE logical immediate
operand immBLog()
%{
  predicate(Assembler::operand_valid_for_sve_logical_immediate(BitsPerByte, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 16 bit value valid for SVE logical immediate
operand immSLog()
%{
  predicate(Assembler::operand_valid_for_sve_logical_immediate(BitsPerShort, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 32 bit unsigned integer valid for logical immediate
operand immILog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (uint64_t)n->get_int()));
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4428 
// Integer operands 64 bit
// 64 bit immediate (any value)
operand immL()
%{
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit zero
operand immL0()
%{
  predicate(n->get_long() == 0);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit unit decrement
operand immL_M1()
%{
  predicate(n->get_long() == -1);
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
  predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for SVE addv subv immediate
operand immLAddSubV()
%{
  predicate(Assembler::operand_valid_for_sve_add_sub_immediate(n->get_long()));
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// 64 bit integer valid for logical immediate
operand immLLog()
%{
  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (uint64_t)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask (0xFFFFFFFF)
operand immL_32bits()
%{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4502 
// Pointer operands
// Pointer Immediate (any value)
operand immP()
%{
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// nullptr Pointer Immediate
operand immP0()
%{
  predicate(n->get_ptr() == 0);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
  predicate(n->get_ptr() == 1);
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Card Table Byte Map Base
// NOTE: in SHENANDOAHGC builds, Shenandoah's barrier set is explicitly
// excluded even when the CardTableBarrierSet test would pass.
operand immByteMapBase()
%{
  // Get base of card map
  predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
            SHENANDOAHGC_ONLY(!BarrierSet::barrier_set()->is_a(BarrierSet::ShenandoahBarrierSet) &&)
            (CardTable::CardValue*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
  match(ConP);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4550 
// Float and Double operands
// Double Immediate (any value)
operand immD()
%{
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d
// (bit pattern zero; note this excludes -0.0d, whose bits are nonzero)
operand immD0()
%{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4571 
// Double immediate encodable as an FP immediate — see
// Assembler::operand_valid_for_float_immediate.  (The previous comment
// here, "constant 'double +0.0'.", did not match the predicate.)
operand immDPacked()
%{
  predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
  match(ConD);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4581 
// Float Immediate (any value)
operand immF()
%{
  match(ConF);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
// (bit pattern zero; note this excludes -0.0f, whose bits are nonzero)
operand immF0()
%{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4601 
 4602 //
 4603 operand immFPacked()
 4604 %{
 4605   predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
 4606   match(ConF);
 4607   op_cost(0);
 4608   format %{ %}
 4609   interface(CONST_INTER);
 4610 %}
 4611 
// Narrow pointer operands
// Narrow Pointer Immediate (any value)
operand immN()
%{
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow nullptr Pointer Immediate
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Pointer Immediate
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
 4642 
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register not Special
operand iRegINoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegI);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
  constraint(ALLOC_IN_RC(any_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 64 bit Register not Special
// NOTE(review): unlike the sibling register operands this one has no
// op_cost(0) line — confirm that is intentional.
operand iRegLNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg));
  match(RegL);
  match(iRegL_R0);
  format %{ %}
  interface(REG_INTER);
%}
 4686 
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  match(iRegP_R0);
  //match(iRegP_R2);
  //match(iRegP_R4);
  match(iRegP_R5);
  match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_ptr_reg));
  match(RegP);
  // match(iRegP);
  // match(iRegP_R0);
  // match(iRegP_R2);
  // match(iRegP_R4);
  // match(iRegP_R5);
  // match(thread_RegP);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// This operand is not allowed to use rfp even if
// rfp is not used to hold the frame pointer.
operand iRegPNoSpNoRfp()
%{
  constraint(ALLOC_IN_RC(no_special_no_rfp_ptr_reg));
  match(RegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4731 
// Pointer operands constrained to one specific general register.

// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
  constraint(ALLOC_IN_RC(r1_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
  constraint(ALLOC_IN_RC(r2_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
  constraint(ALLOC_IN_RC(r3_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
  constraint(ALLOC_IN_RC(r4_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
  constraint(ALLOC_IN_RC(r5_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
  constraint(ALLOC_IN_RC(r10_reg));
  match(RegP);
  // match(iRegP);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4815 
// Long 64 bit Register R0 only
operand iRegL_R0()
%{
  constraint(ALLOC_IN_RC(r0_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Long 64 bit Register R11 only
operand iRegL_R11()
%{
  constraint(ALLOC_IN_RC(r11_reg));
  match(RegL);
  match(iRegLNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register R0 only
operand iRegI_R0()
%{
  constraint(ALLOC_IN_RC(int_r0_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register R2 only
operand iRegI_R2()
%{
  constraint(ALLOC_IN_RC(int_r2_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Integer 32 bit Register R3 only
operand iRegI_R3()
%{
  constraint(ALLOC_IN_RC(int_r3_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}


// Integer 32 bit Register R4 only
operand iRegI_R4()
%{
  constraint(ALLOC_IN_RC(int_r4_reg));
  match(RegI);
  match(iRegINoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4882 
 4883 
// Narrow Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
  constraint(ALLOC_IN_RC(any_reg32));
  match(RegN);
  match(iRegNNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Narrow Pointer Register not Special
// (previous comment said "Integer 64 bit Register not Special", which
// did not match this RegN operand)
operand iRegNNoSp()
%{
  constraint(ALLOC_IN_RC(no_special_reg32));
  match(RegN);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4905 
// Float Register
// Float register operands
operand vRegF()
%{
  constraint(ALLOC_IN_RC(float_reg));
  match(RegF);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Double Register
// Double register operands
operand vRegD()
%{
  constraint(ALLOC_IN_RC(double_reg));
  match(RegD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Generic vector class. This will be used for
// all vector operands, including NEON and SVE.
operand vReg()
%{
  constraint(ALLOC_IN_RC(dynamic));
  match(VecA);
  match(VecD);
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// VecA vector operand (length-agnostic; register class vectora_reg)
operand vecA()
%{
  constraint(ALLOC_IN_RC(vectora_reg));
  match(VecA);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// VecD vector operand (register class vectord_reg)
operand vecD()
%{
  constraint(ALLOC_IN_RC(vectord_reg));
  match(VecD);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// VecX vector operand (register class vectorx_reg)
operand vecX()
%{
  constraint(ALLOC_IN_RC(vectorx_reg));
  match(VecX);

  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 4973 
// Double register operands constrained to one specific V register.
operand vRegD_V0()
%{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V1()
%{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V2()
%{
  constraint(ALLOC_IN_RC(v2_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V3()
%{
  constraint(ALLOC_IN_RC(v3_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V4()
%{
  constraint(ALLOC_IN_RC(v4_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V5()
%{
  constraint(ALLOC_IN_RC(v5_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V6()
%{
  constraint(ALLOC_IN_RC(v6_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V7()
%{
  constraint(ALLOC_IN_RC(v7_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V12()
%{
  constraint(ALLOC_IN_RC(v12_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

operand vRegD_V13()
%{
  constraint(ALLOC_IN_RC(v13_reg));
  match(RegD);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5063 
// Predicate register operand (register class pr_reg)
operand pReg()
%{
  constraint(ALLOC_IN_RC(pr_reg));
  match(RegVectMask);
  match(pRegGov);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Predicate register from the governing class (register class gov_pr)
operand pRegGov()
%{
  constraint(ALLOC_IN_RC(gov_pr));
  match(RegVectMask);
  match(pReg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Governing predicate register P0 only
operand pRegGov_P0()
%{
  constraint(ALLOC_IN_RC(p0_reg));
  match(RegVectMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Governing predicate register P1 only
operand pRegGov_P1()
%{
  constraint(ALLOC_IN_RC(p1_reg));
  match(RegVectMask);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5101 
// Flags register, used as output of signed compare instructions

// note that on AArch64 we also use this register as the output for
// floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.

operand rFlagsReg()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGS" %}
  interface(REG_INTER);
%}

// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  op_cost(0);
  format %{ "RFLAGSU" %}
  interface(REG_INTER);
%}
 5141 
// Special Registers

// Method Register
operand inline_cache_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
  match(reg);
  match(iRegPNoSp);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}

// Thread Register
operand thread_RegP(iRegP reg)
%{
  constraint(ALLOC_IN_RC(thread_reg)); // NOTE(review): was tagged 'link_reg', which looks stale — class is thread_reg
  match(reg);
  op_cost(0);
  format %{ %}
  interface(REG_INTER);
%}
 5164 
//----------Memory Operands----------------------------------------------------
// In the MEMORY_INTER descriptions below, index(0xffffffff) is the ADLC
// convention for "no index register".

// [base] — base register only, no index, no displacement
operand indirect(iRegP reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(reg);
  op_cost(0);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// [base, index sxtw, lsl #scale] — 32-bit index sign-extended then scaled;
// only legal when the scaled access size suits every memory user of this
// address (size_fits_all_mem_uses)
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// [base, index, lsl #scale] — 64-bit index scaled; same
// size_fits_all_mem_uses restriction as above
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  match(AddP reg (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [base, index sxtw] — 32-bit index sign-extended, unscaled
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// [base, index] — 64-bit index, unscaled
operand indIndex(iRegP reg, iRegL lreg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg lreg);
  op_cost(0);
  format %{ "$reg, $lreg" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
 5238 
// [base, #off] — int offset valid for a 1-byte access
operand indOffI1(iRegP reg, immIOffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [base, #off] — int offset valid for a 2-byte access
operand indOffI2(iRegP reg, immIOffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [base, #off] — int offset valid for a 4-byte access
operand indOffI4(iRegP reg, immIOffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [base, #off] — int offset valid for an 8-byte access
operand indOffI8(iRegP reg, immIOffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [base, #off] — int offset valid for a 16-byte access
operand indOffI16(iRegP reg, immIOffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5308 
// [base, #off] — long offset valid for a 1-byte access
operand indOffL1(iRegP reg, immLoffset1 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [base, #off] — long offset valid for a 2-byte access
operand indOffL2(iRegP reg, immLoffset2 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [base, #off] — long offset valid for a 4-byte access
operand indOffL4(iRegP reg, immLoffset4 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [base, #off] — long offset valid for an 8-byte access
operand indOffL8(iRegP reg, immLoffset8 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// [base, #off] — long offset valid for a 16-byte access
operand indOffL16(iRegP reg, immLoffset16 off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP reg off);
  op_cost(0);
  format %{ "[$reg, $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5378 
// A long value reinterpreted as a pointer (CastX2P) and used directly
// as the base address, with no offset or index.
operand indirectX2P(iRegL reg)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(CastX2P reg);
  op_cost(0);
  format %{ "[$reg]\t# long -> ptr" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// A long value reinterpreted as a pointer, plus a long-constant
// displacement (range enforced by immLOffset, defined elsewhere).
operand indOffX2P(iRegL reg, immLOffset off)
%{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (CastX2P reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# long -> ptr" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5406 
// The following operands address memory through a narrow (compressed)
// oop.  All of them are guarded by CompressedOops::shift() == 0, i.e.
// they are only legal when decoding does not require a shift, so the
// narrow register can feed the address computation directly.

// Narrow oop used directly as the base address.
operand indirectN(iRegN reg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(DecodeN reg);
  op_cost(0);
  format %{ "[$reg]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow-oop base plus a sign-extended int index shifted by a constant
// scale.  size_fits_all_mem_uses checks the scale suits every memory
// user of the AddP.
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
  op_cost(0);
  format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp(0x0);
  %}
%}

// Narrow-oop base plus a long index shifted by a constant scale.
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
  predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (LShiftL lreg scale));
  op_cost(0);
  format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// Narrow-oop base plus a sign-extended int index (no scaling).
operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) (ConvI2L ireg));
  op_cost(0);
  format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow-oop base plus a long index (no scaling).
operand indIndexN(iRegN reg, iRegL lreg)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) lreg);
  op_cost(0);
  format %{ "$reg, $lreg\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Narrow-oop base plus an int-constant displacement (immIOffset range).
operand indOffIN(iRegN reg, immIOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}

// Narrow-oop base plus a long-constant displacement (immLOffset range).
operand indOffLN(iRegN reg, immLOffset off)
%{
  predicate(CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP (DecodeN reg) off);
  op_cost(0);
  format %{ "[$reg, $off]\t# narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0xffffffff);
    scale(0x0);
    disp($off);
  %}
%}
 5511 
 5512 
 5513 //----------Special Memory Operands--------------------------------------------
 5514 // Stack Slot Operand - This operand is used for loading and storing temporary
 5515 //                      values on the stack where a match requires a value to
 5516 //                      flow through memory.
// Stack slot holding a pointer.  These stackSlot* operands have no
// match rule: the matcher synthesizes them for values spilled to the
// stack.  The memory interface encodes [sp + slot-offset]: base 0x1e
// selects the stack pointer (see the register definitions at the top
// of this file) and disp($reg) is the slot's stack offset.
operand stackSlotP(sRegP reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  // No match rule because this operand is only generated in matching
  // match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding an int.
operand stackSlotI(sRegI reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a float.
operand stackSlotF(sRegF reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a double.
operand stackSlotD(sRegD reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

// Stack slot holding a long.
operand stackSlotL(sRegL reg)
%{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  // match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1e);  // RSP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
 5587 
 5588 // Operands for expressing Control Flow
 5589 // NOTE: Label is a predefined operand which should not be redefined in
 5590 //       the AD file. It is generically handled within the ADLC.
 5591 
 5592 //----------Conditional Branch Operands----------------------------------------
 5593 // Comparison Op  - This is the operation of the comparison, and is limited to
 5594 //                  the following set of codes:
 5595 //                  L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
 5596 //
 5597 // Other attributes of the comparison, such as unsignedness, are specified
 5598 // by the comparison instruction that sets a condition code flags register.
 5599 // That result is represented by a flags operand whose subtype is appropriate
 5600 // to the unsignedness (etc.) of the comparison.
 5601 //
 5602 // Later, the instruction which matches both the Comparison Op (a Bool) and
 5603 // the flags (produced by the Cmp) specifies the coding of the comparison op
 5604 // by matching a specific subtype of Bool operand below, such as cmpOpU.
 5605 
// used for signed integral comparisons and fp comparisons

// The hex value given for each branch of the COND_INTER is the AArch64
// condition-code encoding matching the mnemonic string (eq=0x0, ne=0x1,
// lt=0xb, ge=0xa, le=0xd, gt=0xc, vs=0x6, vc=0x7).
operand cmpOp()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for unsigned integral comparisons

// As cmpOp, but the ordered tests map to the unsigned condition codes
// (lo=0x3, hs=0x2, ls=0x9, hi=0x8).
operand cmpOpU()
%{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

// Restricted to eq/ne tests only (see predicate); same encodings as
// cmpOp.
operand cmpOpEqNe()
%{
  match(Bool);
  op_cost(0);
  predicate(n->as_Bool()->_test._test == BoolTest::ne
            || n->as_Bool()->_test._test == BoolTest::eq);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions

// Restricted to lt/ge tests only (see predicate); same encodings as
// cmpOp.
operand cmpOpLtGe()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::lt
            || n->as_Bool()->_test._test == BoolTest::ge);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0xb, "lt");
    greater_equal(0xa, "ge");
    less_equal(0xd, "le");
    greater(0xc, "gt");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}

// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions

// Restricted to eq/ne/le/gt tests (see predicate), with the unsigned
// condition-code encodings of cmpOpU.
operand cmpOpUEqNeLeGt()
%{
  match(Bool);
  op_cost(0);

  predicate(n->as_Bool()->_test._test == BoolTest::eq ||
            n->as_Bool()->_test._test == BoolTest::ne ||
            n->as_Bool()->_test._test == BoolTest::le ||
            n->as_Bool()->_test._test == BoolTest::gt);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x0, "eq");
    not_equal(0x1, "ne");
    less(0x3, "lo");
    greater_equal(0x2, "hs");
    less_equal(0x9, "ls");
    greater(0x8, "hi");
    overflow(0x6, "vs");
    no_overflow(0x7, "vc");
  %}
%}
 5716 
// Special operand allowing long args to int ops to be truncated for free

// Matches (ConvL2I reg) as a plain register operand, so 32-bit
// instructions can consume the low half of a long with no explicit
// truncation instruction.
operand iRegL2I(iRegL reg) %{

  op_cost(0);

  match(ConvL2I reg);

  format %{ "l2i($reg)" %}

  interface(REG_INTER)
%}

// Matches (CastX2P reg) as a plain register operand, so a long used as
// a pointer needs no separate conversion instruction.
operand iRegL2P(iRegL reg) %{

  op_cost(0);

  match(CastX2P reg);

  format %{ "l2p($reg)" %}

  interface(REG_INTER)
%}
 5740 
// Vector memory operand classes: the addressing modes legal for a
// vector access of the given size in bytes (the immediate-offset
// members are the ones sized for that access).
opclass vmem2(indirect, indIndex, indOffI2, indOffL2);
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
 5745 
 5746 //----------OPERAND CLASSES----------------------------------------------------
 5747 // Operand Classes are groups of operands that are used as to simplify
 5748 // instruction definitions by not requiring the AD writer to specify
 5749 // separate instructions for every form of operand when the
 5750 // instruction accepts multiple operand types with the same basic
 5751 // encoding and format. The classic case of this is memory operands.
 5752 
 5753 // memory is used to define read/write location for load/store
 5754 // instruction defs. we can turn a memory op into an Address
 5755 
// memory<N> groups the addressing modes legal for an N-byte access;
// they differ in which size-specific immediate-offset operands they
// include.  Note that memory1/memory2 omit the narrow-oop immediate
// forms (indOffIN/indOffLN) that memory4/memory8 include.
opclass memory1(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI1, indOffL1,
                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indirectX2P, indOffX2P);

opclass memory2(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI2, indOffL2,
                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indirectX2P, indOffX2P);

opclass memory4(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI4, indOffL4,
                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P);

opclass memory8(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI8, indOffL8,
                indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P);

// All of the memory operands. For the pipeline description.
opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex,
               indOffI1, indOffL1, indOffI2, indOffL2, indOffI4, indOffL4, indOffI8, indOffL8,
               indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P);

// Memory operands that use no index register.
opclass memory_noindex(indirect,
                       indOffI1, indOffL1,indOffI2, indOffL2, indOffI4, indOffL4, indOffI8, indOffL8,
                       indirectN, indOffIN, indOffLN, indirectX2P, indOffX2P);
 5776 
 5777 // iRegIorL2I is used for src inputs in rules for 32 bit int (I)
 5778 // operations. it allows the src to be either an iRegI or a (ConvL2I
 5779 // iRegL). in the latter case the l2i normally planted for a ConvL2I
 5780 // can be elided because the 32-bit instruction will just employ the
 5781 // lower 32 bits anyway.
 5782 //
 5783 // n.b. this does not elide all L2I conversions. if the truncated
 5784 // value is consumed by more than one operation then the ConvL2I
 5785 // cannot be bundled into the consuming nodes so an l2i gets planted
 5786 // (actually a movw $dst $src) and the downstream instructions consume
 5787 // the result of the l2i as an iRegI input. That's a shame since the
 5788 // movw is actually redundant but its not too costly.
 5789 
 5790 opclass iRegIorL2I(iRegI, iRegL2I);
 5791 opclass iRegPorL2P(iRegP, iRegL2P);
 5792 
 5793 //----------PIPELINE-----------------------------------------------------------
 5794 // Rules which define the behavior of the target architectures pipeline.
 5795 
// For specific pipelines, eg A53, define the stages of that pipeline
//pipe_desc(ISS, EX1, EX2, WR);
// Map the A53-style stage names (issue, execute 1/2, writeback) onto
// the generic S0..S5 stages declared by pipe_desc below.
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR  S3
 5802 
 5803 // Integer ALU reg operation
 5804 pipeline %{
 5805 
attributes %{
  // ARM instructions are of fixed length
  fixed_size_instructions;        // all AArch64 instructions are fixed size
  max_instructions_per_bundle = 4;   // A53 = 2, A57 = 4
  // ARM instructions come in 32-bit word units
  instruction_unit_size = 4;         // An instruction is 4 bytes long
  instruction_fetch_unit_size = 64;  // The processor fetches one line
  instruction_fetch_units = 1;       // of 64 bytes

  // List of nop instructions
  nops( MachNop );
%}
 5818 
 5819 // We don't use an actual pipeline model so don't care about resources
 5820 // or description. we do use pipeline classes to introduce fixed
 5821 // latencies
 5822 
 5823 //----------RESOURCES----------------------------------------------------------
 5824 // Resources are the functional units available to the machine
 5825 
// Functional units: two issue slots (INS0/INS1, with INS01 meaning
// "either slot"), two ALUs, a multiply-accumulate unit, a divider,
// a branch unit, a load/store unit and a NEON/FP unit.
resources( INS0, INS1, INS01 = INS0 | INS1,
           ALU0, ALU1, ALU = ALU0 | ALU1,
           MAC,
           DIV,
           BRANCH,
           LDST,
           NEON_FP);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);
 5839 
 5840 //----------PIPELINE CLASSES---------------------------------------------------
 5841 // Pipeline Classes describe the stages in which input and output are
 5842 // referenced by the hardware pipeline.
 5843 
//------- Floating point operations ------------------------------------
// The FP classes below all follow the same shape: sources read early
// (S1, or S2 for the second operand of a dyadic op), result written in
// S5, issued on either slot (INS01) and occupying the NEON/FP unit
// through S5.

// FP dyadic op, single precision (e.g. FADD s0, s1, s2).
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP dyadic op, double precision.
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, single precision (e.g. FNEG s0, s1).
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// FP unary op, double precision.
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> float conversion.
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> double conversion.
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> int conversion.
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// float -> long conversion.
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> float conversion.
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> float conversion.
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> int conversion.
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// double -> long conversion.
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// int -> double conversion.
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}

// long -> double conversion.
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
  single_instruction;
  src    : S1(read);
  dst    : S5(write);
  INS01  : ISS;
  NEON_FP : S5;
%}
 5971 
// FP divide, single precision.  Divides can only issue in slot 0
// (INS0), unlike the dual-issue classes above.
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP divide, double precision; issue slot 0 only.
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
  single_instruction;
  src1   : S1(read);
  src2   : S2(read);
  dst    : S5(write);
  INS0   : ISS;
  NEON_FP : S5;
%}

// FP conditional select, single precision: reads the flags and both
// sources in S1, result available earlier (S3) than the arithmetic
// classes.
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP conditional select, double precision.
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : S1(read);
  src1   : S1(read);
  src2   : S1(read);
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move of an immediate, single precision (no source operands).
pipe_class fp_imm_s(vRegF dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// FP move of an immediate, double precision.
pipe_class fp_imm_d(vRegD dst)
%{
  single_instruction;
  dst    : S3(write);
  INS01  : ISS;
  NEON_FP : S3;
%}

// Load of an FP constant, single precision.
pipe_class fp_load_constant_s(vRegF dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}

// Load of an FP constant, double precision.
pipe_class fp_load_constant_d(vRegD dst)
%{
  single_instruction;
  dst    : S4(write);
  INS01  : ISS;
  NEON_FP : S4;
%}
 6045 
//------- Integer ALU operations --------------------------

// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg.  ADD     x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : EX1(read);
  INS01  : ISS; // Dual issue as instruction 0 or 1
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg.  ADD     x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg operation with constant shift
// Eg.  LSL     x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg.  LSLV    x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX1(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  ALU    : EX1;
%}

// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg.  EXTR    x0, x1, x2, #N
// NOTE(review): dst is written at EX2 but the ALU resource is booked
// at EX1, which does not match the comment above — confirm intended.
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS1   : ISS; // Can only dual issue as Instruction 1
  ALU    : EX1;
%}

// Integer ALU reg operation
// Eg.  NEG     x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : EX2(write);
  src    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU reg-immediate operation
// Eg.  ADD     x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
  single_instruction;
  dst    : EX2(write);
  src1   : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Integer ALU immediate operation (no source operands)
// Eg.  MOV     x0, #N
pipe_class ialu_imm(iRegI dst)
%{
  single_instruction;
  dst    : EX1(write);
  INS01  : ISS;
  ALU    : EX1;
%}
 6143 
//------- Compare operation -------------------------------

// Compare reg-reg
// Eg.  CMP     x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  op2    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}

// Compare reg-immediate
// Eg.  CMP     x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
  single_instruction;
//  fixed_latency(16);
  cr     : EX2(write);
  op1    : EX1(read);
  INS01  : ISS;
  ALU    : EX2;
%}
 6170 
//------- Conditional instructions ------------------------

// Conditional no operands
// Eg.  CSINC   x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 2 operand
// EG.  CSEL    X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src1   : EX1(read);
  src2   : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}

// Conditional 1 operand
// Eg.  CSINC   x0, x1, zr, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  src    : EX1(read);
  dst    : EX2(write);
  INS01  : ISS;
  ALU    : EX2;
%}
 6208 
//------- Multiply pipeline operations --------------------

// Multiply reg-reg (32 bit)
// Eg.  MUL     w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (32 bit)
// Eg.  MADD    w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply reg-reg (64 bit)
// Eg.  MUL     x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}

// Multiply accumulate (64 bit)
// Eg.  MADD    x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
  single_instruction;
  fixed_latency(3); // Maximum latency for 64 bit mul
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  src3   : ISS(read);
  INS01  : ISS;
  MAC    : WR;
%}
 6261 
//------- Divide pipeline operations --------------------

// 32-bit divide; can only issue in slot 0.
// Eg.  SDIV    w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(8); // Maximum latency for 32 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}

// 64-bit divide; can only issue in slot 0.
// Eg.  SDIV    x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
  single_instruction;
  fixed_latency(16); // Maximum latency for 64 bit divide
  dst    : WR(write);
  src1   : ISS(read);
  src2   : ISS(read);
  INS0   : ISS; // Can only dual issue as instruction 0
  DIV    : WR;
%}
 6287 
//------- Load pipeline operations ------------------------

// Load - prefetch
// Eg.  PFRM    <mem>
pipe_class iload_prefetch(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, mem
// Eg.  LDR     x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
  single_instruction;
  dst    : WR(write);
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Load - reg, reg
// Eg.  LDR     x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : WR(write);
  src    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}
 6321 
//------- Store pipeline operations -----------------------

// Store - zr, mem
// Eg.  STR     zr, <mem>
pipe_class istore_mem(memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, mem
// Eg.  STR     x0, <mem>
// The stored value is not needed until EX2, later than the address.
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
  single_instruction;
  mem    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}

// Store - reg, reg
// Eg. STR      x0, [sp, x1]
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
  single_instruction;
  dst    : ISS(read);
  src    : EX2(read);
  INS01  : ISS;
  LDST   : WR;
%}
 6355 
//------- Branch pipeline operations ----------------------

// Branch
pipe_class pipe_branch()
%{
  single_instruction;
  INS01  : ISS;
  BRANCH : EX1;
%}

// Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
  single_instruction;
  cr     : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}

// Compare & Branch
// EG.  CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
  single_instruction;
  op1    : EX1(read);
  INS01  : ISS;
  BRANCH : EX1;
%}
 6384 
 6385 //------- Synchronisation operations ----------------------
 6386 
 6387 // Any operation requiring serialization.
 6388 // EG.  DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
  single_instruction;
  // Serializing operations may not be reordered with surrounding
  // instructions; the fixed latency is a conservative estimate.
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
 6397 
 6398 // Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
  // Catch-all for multi-instruction expansions; modelled as ten
  // instructions spanning multiple bundles.
  instruction_count(10);
  multiple_bundles;
  force_serialization;
  fixed_latency(16);
  INS01  : ISS(2); // Cannot dual issue with any other instruction
  LDST   : WR;
%}
 6408 
 6409 // Empty pipeline class
pipe_class pipe_class_empty()
%{
  // Zero-latency placeholder (used for MachNop below).
  single_instruction;
  fixed_latency(0);
%}
 6415 
 6416 // Default pipeline class.
pipe_class pipe_class_default()
%{
  // Generic single-cycle-issue instruction with a 2-cycle result latency.
  single_instruction;
  fixed_latency(2);
%}
 6422 
 6423 // Pipeline class for compares.
pipe_class pipe_class_compare()
%{
  single_instruction;
  // Conservative fixed latency for compares.
  fixed_latency(16);
%}
 6429 
 6430 // Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
  single_instruction;
  // Conservative fixed latency for memory accesses.
  fixed_latency(16);
%}
 6436 
 6437 // Pipeline class for call.
pipe_class pipe_class_call()
%{
  single_instruction;
  // Calls are modelled with a large latency so the scheduler does not
  // expect their results early.
  fixed_latency(100);
%}
 6443 
 6444 // Define the class for the Nop node.
define %{
   // Nops consume no pipeline resources and have zero latency.
   MachNop = pipe_class_empty;
%}
 6448 
 6449 %}
 6450 //----------INSTRUCTIONS-------------------------------------------------------
 6451 //
 6452 // match      -- States which machine-independent subtree may be replaced
 6453 //               by this instruction.
 6454 // ins_cost   -- The estimated cost of this instruction is used by instruction
 6455 //               selection to identify a minimum cost tree of machine
 6456 //               instructions that matches a tree of machine-independent
 6457 //               instructions.
 6458 // format     -- A string providing the disassembly for this instruction.
 6459 //               The value of an instruction's operand may be inserted
 6460 //               by referring to it with a '$' prefix.
 6461 // opcode     -- Three instruction opcodes may be provided.  These are referred
 6462 //               to within an encode class as $primary, $secondary, and $tertiary
//               respectively.  The primary opcode is commonly used to
 6464 //               indicate the type of machine instruction, while secondary
 6465 //               and tertiary are often used for prefix options or addressing
 6466 //               modes.
 6467 // ins_encode -- A list of encode classes with parameters. The encode class
 6468 //               name must have been defined in an 'enc_class' specification
 6469 //               in the encode section of the architecture description.
 6470 
 6471 // ============================================================================
 6472 // Memory (Load/Store) Instructions
 6473 
 6474 // Load Instructions
 6475 
 6476 // Load Byte (8 bit signed)
instruct loadB(iRegINoSp dst, memory1 mem)
%{
  match(Set dst (LoadB mem));
  // Plain (non-acquiring) loads only; acquiring loads use the ldar forms below.
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsbw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6489 
 6490 // Load Byte (8 bit signed) into long
instruct loadB2L(iRegLNoSp dst, memory1 mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));
  // The load is the inner node, so query it via n->in(1).
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  format %{ "ldrsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrsb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6503 
 6504 // Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory1 mem)
%{
  match(Set dst (LoadUB mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // ldrb zero-extends, giving the unsigned byte value.
  format %{ "ldrbw  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6517 
 6518 // Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory1 mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  // Zero-extension to 64 bits is implicit in the 32-bit ldrb.
  format %{ "ldrb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldrb(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6531 
 6532 // Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory2 mem)
%{
  match(Set dst (LoadS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // Sign-extending halfword load into a 32-bit register.
  format %{ "ldrshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrshw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6545 
 6546 // Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory2 mem)
%{
  match(Set dst (ConvI2L (LoadS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  // Sign-extending halfword load directly into a 64-bit register.
  format %{ "ldrsh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrsh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6559 
 6560 // Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory2 mem)
%{
  match(Set dst (LoadUS mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  // Zero-extending halfword load (Java char).
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6573 
 6574 // Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory2 mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  // Zero-extension to 64 bits is implicit in the 32-bit ldrh.
  format %{ "ldrh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldrh(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6587 
 6588 // Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory4 mem)
%{
  match(Set dst (LoadI mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6601 
 6602 // Load Integer (32 bit signed) into long
instruct loadI2L(iRegLNoSp dst, memory4 mem)
%{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(!needs_acquiring_load(n->in(1)));

  ins_cost(4 * INSN_COST);
  // ldrsw performs the sign extension that ConvI2L requires.
  format %{ "ldrsw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrsw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6615 
 6616 // Load Integer (32 bit unsigned) into long
instruct loadUI2L(iRegLNoSp dst, memory4 mem, immL_32bits mask)
%{
  // (int-load, widen, mask-to-32-bits) collapses to a plain zero-extending
  // 32-bit load.
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6629 
 6630 // Load Long (64 bit signed)
 6631 instruct loadL(iRegLNoSp dst, memory8 mem)
 6632 %{
 6633   match(Set dst (LoadL mem));
 6634   predicate(!needs_acquiring_load(n));
 6635 
 6636   ins_cost(4 * INSN_COST);
 6637   format %{ "ldr  $dst, $mem\t# int" %}
 6638 
 6639   ins_encode(aarch64_enc_ldr(dst, mem));
 6640 
 6641   ins_pipe(iload_reg_mem);
 6642 %}
 6643 
 6644 // Load Range
instruct loadRange(iRegINoSp dst, memory4 mem)
%{
  // Array length load; never needs acquire semantics.
  match(Set dst (LoadRange mem));

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# range" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6656 
 6657 // Load Pointer
instruct loadP(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadP mem));
  // barrier_data() == 0: loads needing GC barriers are matched elsewhere.
  predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6670 
 6671 // Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadN mem));
  // barrier_data() == 0: loads needing GC barriers are matched elsewhere.
  predicate(!needs_acquiring_load(n) && n->as_Load()->barrier_data() == 0);

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6684 
 6685 // Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory8 mem)
%{
  match(Set dst (LoadKlass mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldr  $dst, $mem\t# class" %}

  ins_encode(aarch64_enc_ldr(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6698 
 6699 // Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory4 mem)
%{
  match(Set dst (LoadNKlass mem));
  // With compact object headers the klass bits live in the mark word and
  // are handled by loadNKlassCompactHeaders below.
  predicate(!needs_acquiring_load(n) && !UseCompactObjectHeaders);

  ins_cost(4 * INSN_COST);
  format %{ "ldrw  $dst, $mem\t# compressed class ptr" %}

  ins_encode(aarch64_enc_ldrw(dst, mem));

  ins_pipe(iload_reg_mem);
%}
 6712 
// Load the narrow klass from the mark word when compact object headers are
// in use: load from the mark word and shift the klass bits into place.
instruct loadNKlassCompactHeaders(iRegNNoSp dst, memory_noindex mem)
%{
  match(Set dst (LoadNKlass mem));
  predicate(!needs_acquiring_load(n) && UseCompactObjectHeaders);

  ins_cost(4 * INSN_COST);
  format %{
    "ldrw  $dst, $mem\t# compressed class ptr, shifted\n\t"
    "lsrw  $dst, $dst, markWord::klass_shift"
  %}
  ins_encode %{
    // memory_noindex guarantees a base+displacement address only.
    assert($mem$$index$$Register == noreg, "must not have indexed address");
    // The incoming address is pointing into obj-start + klass_offset_in_bytes. We need to extract
    // obj-start, so that we can load from the object's mark-word instead.
    __ ldrw($dst$$Register, Address($mem$$base$$Register, $mem$$disp - Type::klass_offset()));
    __ lsrw($dst$$Register, $dst$$Register, markWord::klass_shift);
  %}
  ins_pipe(iload_reg_mem);
%}
 6732 
 6733 // Load Float
instruct loadF(vRegF dst, memory4 mem)
%{
  match(Set dst (LoadF mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrs  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_ldrs(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
 6746 
 6747 // Load Double
instruct loadD(vRegD dst, memory8 mem)
%{
  match(Set dst (LoadD mem));
  predicate(!needs_acquiring_load(n));

  ins_cost(4 * INSN_COST);
  format %{ "ldrd  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_ldrd(dst, mem) );

  ins_pipe(pipe_class_memory);
%}
 6760 
 6761 
 6762 // Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# int" %}

  ins_encode( aarch64_enc_movw_imm(dst, src) );

  ins_pipe(ialu_imm);
%}
 6774 
 6775 // Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
  match(Set dst src);

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long" %}

  ins_encode( aarch64_enc_mov_imm(dst, src) );

  ins_pipe(ialu_imm);
%}
 6787 
 6788 // Load Pointer Constant
 6789 
instruct loadConP(iRegPNoSp dst, immP con)
%{
  match(Set dst con);

  // May expand to a multi-instruction mov sequence, hence the higher cost.
  ins_cost(INSN_COST * 4);
  format %{
    "mov  $dst, $con\t# ptr\n\t"
  %}

  ins_encode(aarch64_enc_mov_p(dst, con));

  ins_pipe(ialu_imm);
%}
 6803 
 6804 // Load Null Pointer Constant
 6805 
instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# nullptr ptr" %}

  ins_encode(aarch64_enc_mov_p0(dst, con));

  ins_pipe(ialu_imm);
%}
 6817 
 6818 // Load Pointer Constant One
 6819 
 6820 instruct loadConP1(iRegPNoSp dst, immP_1 con)
 6821 %{
 6822   match(Set dst con);
 6823 
 6824   ins_cost(INSN_COST);
 6825   format %{ "mov  $dst, $con\t# nullptr ptr" %}
 6826 
 6827   ins_encode(aarch64_enc_mov_p1(dst, con));
 6828 
 6829   ins_pipe(ialu_imm);
 6830 %}
 6831 
 6832 // Load Byte Map Base Constant
 6833 
instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  // Materialized with an adr-style sequence rather than a full mov.
  format %{ "adr  $dst, $con\t# Byte Map Base" %}

  ins_encode(aarch64_enc_mov_byte_map_base(dst, con));

  ins_pipe(ialu_imm);
%}
 6845 
 6846 // Load Narrow Pointer Constant
 6847 
instruct loadConN(iRegNNoSp dst, immN con)
%{
  match(Set dst con);

  ins_cost(INSN_COST * 4);
  format %{ "mov  $dst, $con\t# compressed ptr" %}

  ins_encode(aarch64_enc_mov_n(dst, con));

  ins_pipe(ialu_imm);
%}
 6859 
 6860 // Load Narrow Null Pointer Constant
 6861 
instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed nullptr ptr" %}

  ins_encode(aarch64_enc_mov_n0(dst, con));

  ins_pipe(ialu_imm);
%}
 6873 
 6874 // Load Narrow Klass Constant
 6875 
instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
  match(Set dst con);

  ins_cost(INSN_COST);
  format %{ "mov  $dst, $con\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_mov_nk(dst, con));

  ins_pipe(ialu_imm);
%}
 6887 
 6888 // Load Packed Float Constant
 6889 
instruct loadConF_packed(vRegF dst, immFPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST * 4);
  format %{ "fmovs  $dst, $con"%}
  ins_encode %{
    // fmovs takes the immediate as a double; the constant is known to be
    // representable because immFPacked restricts it.
    __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
  %}

  ins_pipe(fp_imm_s);
%}
 6900 
 6901 // Load Float Constant
 6902 
instruct loadConF(vRegF dst, immF con) %{
  match(Set dst con);

  ins_cost(INSN_COST * 4);

  format %{
    "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
  %}

  ins_encode %{
    // Non-packed float constants come from the constant table.
    __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
  %}

  ins_pipe(fp_load_constant_s);
%}
 6918 
 6919 // Load Packed Double Constant
 6920 
instruct loadConD_packed(vRegD dst, immDPacked con) %{
  match(Set dst con);
  ins_cost(INSN_COST);
  format %{ "fmovd  $dst, $con"%}
  ins_encode %{
    // immDPacked guarantees the value fits the fmov immediate encoding.
    __ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
  %}

  ins_pipe(fp_imm_d);
%}
 6931 
 6932 // Load Double Constant
 6933 
 6934 instruct loadConD(vRegD dst, immD con) %{
 6935   match(Set dst con);
 6936 
 6937   ins_cost(INSN_COST * 5);
 6938   format %{
 6939     "ldrd $dst, [$constantaddress]\t# load from constant table: float=$con\n\t"
 6940   %}
 6941 
 6942   ins_encode %{
 6943     __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
 6944   %}
 6945 
 6946   ins_pipe(fp_load_constant_d);
 6947 %}
 6948 
 6949 // Store Instructions
 6950 
 6951 // Store Byte
instruct storeB(iRegIorL2I src, memory1 mem)
%{
  match(Set mem (StoreB mem src));
  // Plain (non-releasing) stores only; releasing stores use stlr forms below.
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_strb(src, mem));

  ins_pipe(istore_reg_mem);
%}
 6964 
 6965 
 6966 instruct storeimmB0(immI0 zero, memory1 mem)
 6967 %{
 6968   match(Set mem (StoreB mem zero));
 6969   predicate(!needs_releasing_store(n));
 6970 
 6971   ins_cost(INSN_COST);
 6972   format %{ "strb rscractch2, $mem\t# byte" %}
 6973 
 6974   ins_encode(aarch64_enc_strb0(mem));
 6975 
 6976   ins_pipe(istore_mem);
 6977 %}
 6978 
 6979 // Store Char/Short
instruct storeC(iRegIorL2I src, memory2 mem)
%{
  match(Set mem (StoreC mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_strh(src, mem));

  ins_pipe(istore_reg_mem);
%}
 6992 
// Store immediate zero halfword via the zero register.
instruct storeimmC0(immI0 zero, memory2 mem)
%{
  match(Set mem (StoreC mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_strh0(mem));

  ins_pipe(istore_mem);
%}
 7005 
 7006 // Store Integer
 7007 
instruct storeI(iRegIorL2I src, memory4 mem)
%{
  match(Set mem(StoreI mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7020 
// Store immediate zero word via the zero register.
instruct storeimmI0(immI0 zero, memory4 mem)
%{
  match(Set mem(StoreI mem zero));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
 7033 
 7034 // Store Long (64 bit signed)
 7035 instruct storeL(iRegL src, memory8 mem)
 7036 %{
 7037   match(Set mem (StoreL mem src));
 7038   predicate(!needs_releasing_store(n));
 7039 
 7040   ins_cost(INSN_COST);
 7041   format %{ "str  $src, $mem\t# int" %}
 7042 
 7043   ins_encode(aarch64_enc_str(src, mem));
 7044 
 7045   ins_pipe(istore_reg_mem);
 7046 %}
 7047 
// Store Long (64 bit) immediate zero
 7049 instruct storeimmL0(immL0 zero, memory8 mem)
 7050 %{
 7051   match(Set mem (StoreL mem zero));
 7052   predicate(!needs_releasing_store(n));
 7053 
 7054   ins_cost(INSN_COST);
 7055   format %{ "str  zr, $mem\t# int" %}
 7056 
 7057   ins_encode(aarch64_enc_str0(mem));
 7058 
 7059   ins_pipe(istore_mem);
 7060 %}
 7061 
 7062 // Store Pointer
instruct storeP(iRegP src, memory8 mem)
%{
  match(Set mem (StoreP mem src));
  // barrier_data() == 0: stores needing GC barriers are matched elsewhere.
  predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);

  ins_cost(INSN_COST);
  format %{ "str  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7075 
 7076 // Store Pointer
// Store null pointer via the zero register.
instruct storeimmP0(immP0 zero, memory8 mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);

  ins_cost(INSN_COST);
  format %{ "str zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_str0(mem));

  ins_pipe(istore_mem);
%}
 7089 
 7090 // Store Compressed Pointer
instruct storeN(iRegN src, memory4 mem)
%{
  match(Set mem (StoreN mem src));
  predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7103 
// Store compressed null pointer via the zero register.
instruct storeImmN0(immN0 zero, memory4 mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);

  ins_cost(INSN_COST);
  format %{ "strw  zr, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_strw0(mem));

  ins_pipe(istore_mem);
%}
 7116 
 7117 // Store Float
instruct storeF(vRegF src, memory4 mem)
%{
  match(Set mem (StoreF mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_strs(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 7130 
 7131 // TODO
 7132 // implement storeImmF0 and storeFImmPacked
 7133 
 7134 // Store Double
instruct storeD(vRegD src, memory8 mem)
%{
  match(Set mem (StoreD mem src));
  predicate(!needs_releasing_store(n));

  ins_cost(INSN_COST);
  format %{ "strd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_strd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 7147 
 7148 // Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory4 mem)
%{
  predicate(!needs_releasing_store(n));
  match(Set mem (StoreNKlass mem src));

  ins_cost(INSN_COST);
  format %{ "strw  $src, $mem\t# compressed klass ptr" %}

  ins_encode(aarch64_enc_strw(src, mem));

  ins_pipe(istore_reg_mem);
%}
 7161 
 7162 // TODO
 7163 // implement storeImmD0 and storeDImmPacked
 7164 
 7165 // prefetch instructions
 7166 // Must be safe to execute with invalid address (cannot fault).
 7167 
instruct prefetchalloc( memory8 mem ) %{
  match(PrefetchAllocation mem);

  ins_cost(INSN_COST);
  // prfm never faults, so an invalid address is safe here.
  format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}

  ins_encode( aarch64_enc_prefetchw(mem) );

  ins_pipe(iload_prefetch);
%}
 7178 
 7179 //  ---------------- volatile loads and stores ----------------
 7180 
 7181 // Load Byte (8 bit signed)
// Acquiring byte load (ldarsb) for volatile accesses; address must be a
// plain register-indirect.
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}
 7193 
 7194 // Load Byte (8 bit signed) into long
// Acquiring byte load widened to long.
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarsb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarsb(dst, mem));

  ins_pipe(pipe_serial);
%}
 7206 
 7207 // Load Byte (8 bit unsigned)
// Acquiring unsigned byte load.
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUB mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}
 7219 
 7220 // Load Byte (8 bit unsigned) into long
// Acquiring unsigned byte load widened to long.
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarb  $dst, $mem\t# byte" %}

  ins_encode(aarch64_enc_ldarb(dst, mem));

  ins_pipe(pipe_serial);
%}
 7232 
 7233 // Load Short (16 bit signed)
// Acquiring signed halfword load.
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarshw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarshw(dst, mem));

  ins_pipe(pipe_serial);
%}
 7245 
// Acquiring unsigned halfword (char) load.
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadUS mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarhw  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarhw(dst, mem));

  ins_pipe(pipe_serial);
%}
 7257 
 7258 // Load Short/Char (16 bit unsigned) into long
// Acquiring unsigned halfword load widened to long.
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarh  $dst, $mem\t# short" %}

  ins_encode(aarch64_enc_ldarh(dst, mem));

  ins_pipe(pipe_serial);
%}
 7270 
 7271 // Load Short/Char (16 bit signed) into long
 7272 instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7273 %{
 7274   match(Set dst (ConvI2L (LoadS mem)));
 7275 
 7276   ins_cost(VOLATILE_REF_COST);
 7277   format %{ "ldarh  $dst, $mem\t# short" %}
 7278 
 7279   ins_encode(aarch64_enc_ldarsh(dst, mem));
 7280 
 7281   ins_pipe(pipe_serial);
 7282 %}
 7283 
 7284 // Load Integer (32 bit signed)
// Acquiring 32-bit integer load.
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadI mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
 7296 
 7297 // Load Integer (32 bit unsigned) into long
// Acquiring unsigned-int-to-long: the zero-extending ldarw subsumes the mask.
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# int" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
 7309 
 7310 // Load Long (64 bit signed)
 7311 instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
 7312 %{
 7313   match(Set dst (LoadL mem));
 7314 
 7315   ins_cost(VOLATILE_REF_COST);
 7316   format %{ "ldar  $dst, $mem\t# int" %}
 7317 
 7318   ins_encode(aarch64_enc_ldar(dst, mem));
 7319 
 7320   ins_pipe(pipe_serial);
 7321 %}
 7322 
 7323 // Load Pointer
// Acquiring pointer load (no GC barrier).
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadP mem));
  predicate(n->as_Load()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldar  $dst, $mem\t# ptr" %}

  ins_encode(aarch64_enc_ldar(dst, mem));

  ins_pipe(pipe_serial);
%}
 7336 
 7337 // Load Compressed Pointer
// Acquiring compressed pointer load (no GC barrier).
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadN mem));
  predicate(n->as_Load()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldarw  $dst, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_ldarw(dst, mem));

  ins_pipe(pipe_serial);
%}
 7350 
 7351 // Load Float
// Acquiring float load.
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadF mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldars  $dst, $mem\t# float" %}

  ins_encode( aarch64_enc_fldars(dst, mem) );

  ins_pipe(pipe_serial);
%}
 7363 
 7364 // Load Double
// Acquiring double load.
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
  match(Set dst (LoadD mem));

  ins_cost(VOLATILE_REF_COST);
  format %{ "ldard  $dst, $mem\t# double" %}

  ins_encode( aarch64_enc_fldard(dst, mem) );

  ins_pipe(pipe_serial);
%}
 7376 
 7377 // Store Byte
// Releasing byte store (stlrb).
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  $src, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb(src, mem));

  ins_pipe(pipe_class_memory);
%}
 7389 
// Releasing store of zero byte via the zero register.
instruct storeimmB0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreB mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrb  zr, $mem\t# byte" %}

  ins_encode(aarch64_enc_stlrb0(mem));

  ins_pipe(pipe_class_memory);
%}
 7401 
 7402 // Store Char/Short
// Releasing halfword store.
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  $src, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh(src, mem));

  ins_pipe(pipe_class_memory);
%}
 7414 
// Releasing store of zero halfword via the zero register.
instruct storeimmC0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreC mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrh  zr, $mem\t# short" %}

  ins_encode(aarch64_enc_stlrh0(mem));

  ins_pipe(pipe_class_memory);
%}
 7426 
 7427 // Store Integer
 7428 
// Releasing word store.
instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
 7440 
// Releasing store of zero word via the zero register.
instruct storeimmI0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem(StoreI mem zero));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  zr, $mem\t# int" %}

  ins_encode(aarch64_enc_stlrw0(mem));

  ins_pipe(pipe_class_memory);
%}
 7452 
 7453 // Store Long (64 bit signed)
 7454 instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
 7455 %{
 7456   match(Set mem (StoreL mem src));
 7457 
 7458   ins_cost(VOLATILE_REF_COST);
 7459   format %{ "stlr  $src, $mem\t# int" %}
 7460 
 7461   ins_encode(aarch64_enc_stlr(src, mem));
 7462 
 7463   ins_pipe(pipe_class_memory);
 7464 %}
 7465 
 7466 instruct storeimmL0_volatile(immL0 zero, /* sync_memory*/indirect mem)
 7467 %{
 7468   match(Set mem (StoreL mem zero));
 7469 
 7470   ins_cost(VOLATILE_REF_COST);
 7471   format %{ "stlr  zr, $mem\t# int" %}
 7472 
 7473   ins_encode(aarch64_enc_stlr0(mem));
 7474 
 7475   ins_pipe(pipe_class_memory);
 7476 %}
 7477 
 7478 // Store Pointer
// Releasing pointer store (no GC barrier).
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem src));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  $src, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr(src, mem));

  ins_pipe(pipe_class_memory);
%}
 7491 
// Releasing store of null pointer via the zero register.
instruct storeimmP0_volatile(immP0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreP mem zero));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlr  zr, $mem\t# ptr" %}

  ins_encode(aarch64_enc_stlr0(mem));

  ins_pipe(pipe_class_memory);
%}
 7504 
 7505 // Store Compressed Pointer
// Releasing compressed pointer store (no GC barrier).
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem src));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  $src, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw(src, mem));

  ins_pipe(pipe_class_memory);
%}
 7518 
// Releasing store of compressed null pointer via the zero register.
instruct storeimmN0_volatile(immN0 zero, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreN mem zero));
  predicate(n->as_Store()->barrier_data() == 0);

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrw  zr, $mem\t# compressed ptr" %}

  ins_encode(aarch64_enc_stlrw0(mem));

  ins_pipe(pipe_class_memory);
%}
 7531 
 7532 // Store Float
// Releasing store of a float to a volatile field (FP-register source).
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreF mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrs  $src, $mem\t# float" %}

  ins_encode( aarch64_enc_fstlrs(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 7544 
 7545 // TODO
 7546 // implement storeImmF0 and storeFImmPacked
 7547 
 7548 // Store Double
// Releasing store of a double to a volatile field (FP-register source).
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
  match(Set mem (StoreD mem src));

  ins_cost(VOLATILE_REF_COST);
  format %{ "stlrd  $src, $mem\t# double" %}

  ins_encode( aarch64_enc_fstlrd(src, mem) );

  ins_pipe(pipe_class_memory);
%}
 7560 
 7561 //  ---------------- end of volatile loads and stores ----------------
 7562 
// Write back (flush) the data cache line containing $addr.
// Used to implement java.nio persistent-memory cache-line flushing.
instruct cacheWB(indirect addr)
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWB addr);

  ins_cost(100);
  format %{"cache wb $addr" %}
  ins_encode %{
    // The indirect operand must be a plain base register: no index, no offset.
    assert($addr->index_position() < 0, "should be");
    assert($addr$$disp == 0, "should be");
    __ cache_wb(Address($addr$$base$$Register, 0));
  %}
  ins_pipe(pipe_slow); // XXX
%}
 7577 
// Ordering barrier emitted before a sequence of cache-line write-backs.
instruct cacheWBPreSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPreSync);

  ins_cost(100);
  format %{"cache wb presync" %}
  ins_encode %{
    __ cache_wbsync(true);
  %}
  ins_pipe(pipe_slow); // XXX
%}
 7590 
// Ordering barrier emitted after a sequence of cache-line write-backs.
instruct cacheWBPostSync()
%{
  predicate(VM_Version::supports_data_cache_line_flush());
  match(CacheWBPostSync);

  ins_cost(100);
  format %{"cache wb postsync" %}
  ins_encode %{
    __ cache_wbsync(false);
  %}
  ins_pipe(pipe_slow); // XXX
%}
 7603 
 7604 // ============================================================================
 7605 // BSWAP Instructions
 7606 
// Reverse the byte order of a 32-bit value (Integer.reverseBytes).
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesI src));

  ins_cost(INSN_COST);
  format %{ "revw  $dst, $src" %}

  ins_encode %{
    __ revw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
 7619 
// Reverse the byte order of a 64-bit value (Long.reverseBytes).
instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
  match(Set dst (ReverseBytesL src));

  ins_cost(INSN_COST);
  format %{ "rev  $dst, $src" %}

  ins_encode %{
    __ rev(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
 7632 
// Byte-swap an unsigned 16-bit value (Character.reverseBytes).
// rev16w swaps bytes within each halfword; the upper halfword of the
// 32-bit int input is zero, so no extension step is needed.
instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesUS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
 7645 
// Byte-swap a signed 16-bit value (Short.reverseBytes): swap the bytes,
// then sign-extend bits [15:0] back to 32 bits with sbfmw.
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (ReverseBytesS src));

  ins_cost(INSN_COST);
  format %{ "rev16w  $dst, $src\n\t"
            "sbfmw $dst, $dst, #0, #15" %}

  ins_encode %{
    __ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
    __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
  %}

  ins_pipe(ialu_reg);
%}
 7660 
 7661 // ============================================================================
 7662 // Zero Count Instructions
 7663 
// Integer.numberOfLeadingZeros: single clzw instruction.
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountLeadingZerosI src));

  ins_cost(INSN_COST);
  format %{ "clzw  $dst, $src" %}
  ins_encode %{
    __ clzw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
 7675 
// Long.numberOfLeadingZeros: single clz instruction (result is an int).
instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountLeadingZerosL src));

  ins_cost(INSN_COST);
  format %{ "clz   $dst, $src" %}
  ins_encode %{
    __ clz(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
 7687 
// Integer.numberOfTrailingZeros: AArch64 has no ctz, so reverse the
// bits (rbitw) and count leading zeros instead.
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
  match(Set dst (CountTrailingZerosI src));

  ins_cost(INSN_COST * 2);
  format %{ "rbitw  $dst, $src\n\t"
            "clzw   $dst, $dst" %}
  ins_encode %{
    __ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
    __ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
 7701 
// Long.numberOfTrailingZeros: bit-reverse (rbit) then count leading zeros.
instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
  match(Set dst (CountTrailingZerosL src));

  ins_cost(INSN_COST * 2);
  format %{ "rbit   $dst, $src\n\t"
            "clz    $dst, $dst" %}
  ins_encode %{
    __ rbit(as_Register($dst$$reg), as_Register($src$$reg));
    __ clz(as_Register($dst$$reg), as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg);
%}
 7715 
 7716 //---------- Population Count Instructions -------------------------------------
 7717 //
 7718 
// Integer.bitCount: no scalar popcount on AArch64, so move the value to a
// SIMD register, count bits per byte (cnt), sum the bytes (addv), move back.
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
  match(Set dst (PopCountI src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "movw   $src, $src\n\t"
            "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    // movw of src onto itself zero-extends: the int value is unchanged,
    // only the (possibly garbage) upper 32 bits are cleared.
    __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0
    __ mov($tmp$$FloatRegister, __ D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
 7739 
// Integer.bitCount of an int loaded from memory: load straight into the
// SIMD register (ldrs), avoiding the GPR round trip of the register form.
instruct popCountI_mem(iRegINoSp dst, memory4 mem, vRegF tmp) %{
  match(Set dst (PopCountI (LoadI mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrs   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(masm, &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
 7760 
 7761 // Note: Long.bitCount(long) returns an int.
// Note: Long.bitCount(long) returns an int.
// Same SIMD cnt/addv strategy as popCountI, on the full 64-bit value.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
  match(Set dst (PopCountL src));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "mov    $tmp, $src\t# vector (1D)\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    __ mov($tmp$$FloatRegister, __ D, 0, $src$$Register);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
 7780 
// Long.bitCount of a long loaded from memory: load directly into the
// SIMD register (ldrd), then cnt/addv as in the register form.
instruct popCountL_mem(iRegINoSp dst, memory8 mem, vRegD tmp) %{
  match(Set dst (PopCountL (LoadL mem)));
  effect(TEMP tmp);
  ins_cost(INSN_COST * 13);

  format %{ "ldrd   $tmp, $mem\n\t"
            "cnt    $tmp, $tmp\t# vector (8B)\n\t"
            "addv   $tmp, $tmp\t# vector (8B)\n\t"
            "mov    $dst, $tmp\t# vector (1D)" %}
  ins_encode %{
    FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
    loadStore(masm, &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
    __ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
  %}

  ins_pipe(pipe_class_default);
%}
 7801 
 7802 // ============================================================================
 7803 // VerifyVectorAlignment Instruction
 7804 
// Debug check: stop the VM if any of the mask bits of a vector address are
// set (i.e. the access is misaligned). tst clobbers the flags, hence KILL cr.
instruct verify_vector_alignment(iRegP addr, immL_positive_bitmaskI mask, rFlagsReg cr) %{
  match(Set addr (VerifyVectorAlignment addr mask));
  effect(KILL cr);
  format %{ "verify_vector_alignment $addr $mask \t! verify alignment" %}
  ins_encode %{
    Label Lskip;
    // check if masked bits of addr are zero
    __ tst($addr$$Register, $mask$$constant);
    __ br(Assembler::EQ, Lskip);
    __ stop("verify_vector_alignment found a misaligned vector memory access");
    __ bind(Lskip);
  %}
  ins_pipe(pipe_slow);
%}
 7819 
 7820 // ============================================================================
 7821 // MemBar Instruction
 7822 
// LoadFence: orders prior loads before subsequent loads and stores
// (assembles to dmb ishld).
instruct load_fence() %{
  match(LoadFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "load_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}
 7834 
// MemBarAcquire elided: the predicate detects that a preceding ldar/ldaxr
// already provides the acquire semantics, so only a comment is emitted.
instruct unnecessary_membar_acquire() %{
  predicate(unnecessary_acquire(n));
  match(MemBarAcquire);
  ins_cost(0);

  format %{ "membar_acquire (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire (elided)");
  %}

  ins_pipe(pipe_class_empty);
%}
 7848 
// MemBarAcquire: LoadLoad|LoadStore barrier (dmb ishld).
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire\n\t"
            "dmb ishld" %}

  ins_encode %{
    __ block_comment("membar_acquire");
    __ membar(Assembler::LoadLoad|Assembler::LoadStore);
  %}

  ins_pipe(pipe_serial);
%}
 7863 
 7864 
// MemBarAcquireLock: no barrier needed — the lock acquisition itself
// provides the required ordering, so only a block comment is emitted.
instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_acquire_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_acquire_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}
 7877 
// StoreFence: orders prior loads and stores before subsequent stores.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "store_fence" %}

  ins_encode %{
    __ membar(Assembler::LoadStore|Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
 7889 
// MemBarRelease elided: the predicate detects that a following stlr/stlxr
// already provides the release semantics, so only a comment is emitted.
instruct unnecessary_membar_release() %{
  predicate(unnecessary_release(n));
  match(MemBarRelease);
  ins_cost(0);

  format %{ "membar_release (elided)" %}

  ins_encode %{
    __ block_comment("membar_release (elided)");
  %}
  ins_pipe(pipe_serial);
%}
 7902 
// MemBarRelease: StoreStore + LoadStore, emitted as two separate membar
// calls so the assembler can merge them when AlwaysMergeDMB is enabled.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release\n\t"
            "dmb ishst\n\tdmb ishld" %}

  ins_encode %{
    __ block_comment("membar_release");
    // These will be merged if AlwaysMergeDMB is enabled.
    __ membar(Assembler::StoreStore);
    __ membar(Assembler::LoadStore);
  %}
  ins_pipe(pipe_serial);
%}
 7918 
// MemBarStoreStore / StoreStoreFence: dmb ishst.
instruct membar_storestore() %{
  match(MemBarStoreStore);
  match(StoreStoreFence);
  ins_cost(VOLATILE_REF_COST);

  format %{ "MEMBAR-store-store" %}

  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_serial);
%}
 7931 
// MemBarReleaseLock: no barrier needed — the lock release itself provides
// the required ordering, so only a block comment is emitted.
instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(VOLATILE_REF_COST);

  format %{ "membar_release_lock (elided)" %}

  ins_encode %{
    __ block_comment("membar_release_lock (elided)");
  %}

  ins_pipe(pipe_serial);
%}
 7944 
// MemBarVolatile elided: the predicate detects that surrounding acquire/
// release instructions already enforce the ordering.
instruct unnecessary_membar_volatile() %{
  predicate(unnecessary_volatile(n));
  match(MemBarVolatile);
  ins_cost(0);

  format %{ "membar_volatile (elided)" %}

  ins_encode %{
    __ block_comment("membar_volatile (elided)");
  %}

  ins_pipe(pipe_serial);
%}
 7958 
// MemBarVolatile: full StoreLoad barrier (dmb ish). High cost discourages
// the matcher from choosing it when an elidable form applies.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(VOLATILE_REF_COST*100);

  format %{ "membar_volatile\n\t"
             "dmb ish"%}

  ins_encode %{
    __ block_comment("membar_volatile");
    __ membar(Assembler::StoreLoad);
  %}

  ins_pipe(pipe_serial);
%}
 7973 
 7974 // ============================================================================
 7975 // Cast/Convert Instructions
 7976 
// Reinterpret a long as a pointer; a plain register move, elided when
// source and destination were allocated to the same register.
instruct castX2P(iRegPNoSp dst, iRegL src) %{
  match(Set dst (CastX2P src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# long -> ptr" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
 7991 
// Reinterpret a pointer as a long; a plain register move, elided when
// source and destination were allocated to the same register.
instruct castP2X(iRegLNoSp dst, iRegP src) %{
  match(Set dst (CastP2X src));

  ins_cost(INSN_COST);
  format %{ "mov $dst, $src\t# ptr -> long" %}

  ins_encode %{
    if ($dst$$reg != $src$$reg) {
      __ mov(as_Register($dst$$reg), as_Register($src$$reg));
    }
  %}

  ins_pipe(ialu_reg);
%}
 8006 
 8007 // Convert oop into int for vectors alignment masking
// Convert oop into int for vectors alignment masking:
// movw keeps the low 32 bits of the pointer and zeroes the rest.
instruct convP2I(iRegINoSp dst, iRegP src) %{
  match(Set dst (ConvL2I (CastP2X src)));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# ptr -> int" %}
  ins_encode %{
    __ movw($dst$$Register, $src$$Register);
  %}

  ins_pipe(ialu_reg);
%}
 8019 
 8020 // Convert compressed oop into int for vectors alignment masking
 8021 // in case of 32bit oops (heap < 4Gb).
 8022 instruct convN2I(iRegINoSp dst, iRegN src)
 8023 %{
 8024   predicate(CompressedOops::shift() == 0);
 8025   match(Set dst (ConvL2I (CastP2X (DecodeN src))));
 8026 
 8027   ins_cost(INSN_COST);
 8028   format %{ "mov dst, $src\t# compressed ptr -> int" %}
 8029   ins_encode %{
 8030     __ movw($dst$$Register, $src$$Register);
 8031   %}
 8032 
 8033   ins_pipe(ialu_reg);
 8034 %}
 8035 
 8036 
 8037 // Convert oop pointer into compressed form
// Compress a possibly-null oop. encode_heap_oop may test for null,
// hence the KILL cr effect (contrast the not_null variant below).
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  effect(KILL cr);
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ encode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}
 8051 
// Compress an oop statically known to be non-null: no null check needed.
// NOTE(review): cr is declared as an operand but no effect is listed —
// presumably encode_heap_oop_not_null leaves flags intact; confirm.
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  ins_cost(INSN_COST * 3);
  format %{ "encode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
 8062 
// Decompress a possibly-null narrow oop.
// NOTE(review): cr is declared as an operand but no effect is listed —
// presumably decode_heap_oop leaves flags intact; confirm.
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop(d, s);
  %}
  ins_pipe(ialu_reg);
%}
 8076 
// Decompress a narrow oop known to be non-null (or a constant):
// skips the null check of the general decode.
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  ins_cost(INSN_COST * 3);
  format %{ "decode_heap_oop_not_null $dst, $src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    __ decode_heap_oop_not_null(d, s);
  %}
  ins_pipe(ialu_reg);
%}
 8090 
 8091 // n.b. AArch64 implementations of encode_klass_not_null and
 8092 // decode_klass_not_null do not modify the flags register so, unlike
 8093 // Intel, we don't kill CR as a side effect here
 8094 
// Compress a klass pointer. Per the note above, the AArch64
// implementation does not modify flags, so no KILL cr effect.
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
  match(Set dst (EncodePKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "encode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ encode_klass_not_null(dst_reg, src_reg);
  %}

   ins_pipe(ialu_reg);
%}
 8109 
// Decompress a narrow klass pointer; flags are not modified (see note
// above). The in-place (single-register) overload is used when the
// allocator assigned dst and src to the same register.
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));

  ins_cost(INSN_COST * 3);
  format %{ "decode_klass_not_null $dst,$src" %}

  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    if (dst_reg != src_reg) {
      __ decode_klass_not_null(dst_reg, src_reg);
    } else {
      __ decode_klass_not_null(dst_reg);
    }
  %}

   ins_pipe(ialu_reg);
%}
 8128 
// CheckCastPP is a compile-time type assertion only: no code is emitted.
instruct checkCastPP(iRegPNoSp dst)
%{
  match(Set dst (CheckCastPP dst));

  size(0);
  format %{ "# checkcastPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
 8138 
// CastPP narrows the compile-time type of a pointer: no code is emitted.
instruct castPP(iRegPNoSp dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(pipe_class_empty);
%}
 8148 
// CastII is a value-range assertion on an int: no code is emitted.
instruct castII(iRegI dst)
%{
  match(Set dst (CastII dst));

  size(0);
  format %{ "# castII of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
 8159 
// CastLL is a value-range assertion on a long: no code is emitted.
instruct castLL(iRegL dst)
%{
  match(Set dst (CastLL dst));

  size(0);
  format %{ "# castLL of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
 8170 
// CastFF is a compile-time-only cast of a float: no code is emitted.
instruct castFF(vRegF dst)
%{
  match(Set dst (CastFF dst));

  size(0);
  format %{ "# castFF of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
 8181 
// CastDD is a compile-time-only cast of a double: no code is emitted.
instruct castDD(vRegD dst)
%{
  match(Set dst (CastDD dst));

  size(0);
  format %{ "# castDD of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
 8192 
// CastVV for a vector value held in a vector register: no code is emitted.
instruct castVV(vReg dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
 8203 
// CastVV for a vector mask held in an SVE governing predicate register:
// no code is emitted.
instruct castVVMask(pRegGov dst)
%{
  match(Set dst (CastVV dst));

  size(0);
  format %{ "# castVV of $dst" %}
  ins_encode(/* empty encoding */);
  ins_cost(0);
  ins_pipe(pipe_class_empty);
%}
 8214 
 8215 // ============================================================================
 8216 // Atomic operation instructions
 8217 //
 8218 
 8219 // standard CompareAndSwapX when we are using barriers
 8220 // these have higher priority than the rules selected by a predicate
 8221 
 8222 // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher
 8223 // can't match them
 8224 
// CAS on a byte field; res <- 1 if the exchange happened, else 0.
// The cmpxchg sets flags, read by cset, hence KILL cr.
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8242 
// CAS on a short field; res <- 1 if the exchange happened, else 0.
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8260 
// CAS on an int field; res <- 1 if the exchange happened, else 0.
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8278 
// CAS on a long field; res <- 1 if the exchange happened, else 0.
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8296 
// CAS on a pointer field; only when no GC barrier data is attached.
// res <- 1 if the exchange happened, else 0.
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  predicate(n->as_LoadStore()->barrier_data() == 0);
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8315 
// CAS on a narrow-oop field; only when no GC barrier data is attached.
// res <- 1 if the exchange happened, else 0.
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  predicate(n->as_LoadStore()->barrier_data() == 0);
  ins_cost(2 * VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8334 
 8335 // alternative CompareAndSwapX when we are eliding barriers
 8336 
// Acquiring byte CAS: selected (via lower cost and predicate) when the
// CAS needs acquire semantics, so ldaxr replaces a separate barrier.
instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8355 
// Acquiring short CAS (see compareAndSwapBAcq for selection rationale).
instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

  format %{
    "cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}

  ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8374 
// Acquiring int CAS (see compareAndSwapBAcq for selection rationale).
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8393 
// Acquiring long CAS (see compareAndSwapBAcq for selection rationale).
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8412 
// Acquiring pointer CAS; additionally requires no attached GC barrier data.
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8431 
// Acquiring narrow-oop CAS; additionally requires no attached GC barrier data.
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{

  predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);

  effect(KILL cr);

 format %{
    "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
    "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
 %}

 ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
            aarch64_enc_cset_eq(res));

  ins_pipe(pipe_slow);
%}
 8450 
 8451 
 8452 // ---------------------------------------------------------------------
 8453 
 8454 // BEGIN This section of the file is automatically generated. Do not edit --------------
 8455 
 8456 // Sundry CAS operations.  Note that release is always true,
 8457 // regardless of the memory ordering of the CAS.  This is because we
 8458 // need the volatile case to be sequentially consistent but there is
 8459 // no trailing StoreLoad barrier emitted by C2.  Unfortunately we
 8460 // can't check the type of memory ordering here, so we always emit a
 8461 // STLXR.
 8462 
 8463 // This section is generated from cas.m4
 8464 
 8465 
 8466 // This pattern is generated automatically from cas.m4.
 8467 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Byte compare-and-exchange: res receives the value found in memory,
// sign-extended to int via sxtbw. TEMP_DEF res because cmpxchg writes
// res before all inputs are dead.
// NOTE(review): the format text says "weak" but the cmpxchg call passes
// /*weak*/ false — if wrong, the fix belongs in cas.m4, not here.
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 8483 
 8484 // This pattern is generated automatically from cas.m4.
 8485 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Short compare-and-exchange: res receives the value found in memory,
// sign-extended to int via sxthw.
// NOTE(review): the format text says "weak" but the cmpxchg call passes
// /*weak*/ false — if wrong, the fix belongs in cas.m4, not here.
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 8501 
 8502 // This pattern is generated automatically from cas.m4.
 8503 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Int compare-and-exchange: res receives the value found in memory.
// NOTE(review): the format text says "weak" but the cmpxchg call passes
// /*weak*/ false — if wrong, the fix belongs in cas.m4, not here.
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 8518 
 8519 // This pattern is generated automatically from cas.m4.
 8520 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8521 instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
 8522   match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
 8523   ins_cost(2 * VOLATILE_REF_COST);
 8524   effect(TEMP_DEF res, KILL cr);
 8525   format %{
 8526     "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
 8527   %}
 8528   ins_encode %{
 8529     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8530                Assembler::xword, /*acquire*/ false, /*release*/ true,
 8531                /*weak*/ false, $res$$Register);
 8532   %}
 8533   ins_pipe(pipe_slow);
 8534 %}
 8535 
 8536 // This pattern is generated automatically from cas.m4.
 8537 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8538 instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
 8539   predicate(n->as_LoadStore()->barrier_data() == 0);
 8540   match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
 8541   ins_cost(2 * VOLATILE_REF_COST);
 8542   effect(TEMP_DEF res, KILL cr);
 8543   format %{
 8544     "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
 8545   %}
 8546   ins_encode %{
 8547     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8548                Assembler::word, /*acquire*/ false, /*release*/ true,
 8549                /*weak*/ false, $res$$Register);
 8550   %}
 8551   ins_pipe(pipe_slow);
 8552 %}
 8553 
 8554 // This pattern is generated automatically from cas.m4.
 8555 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 8556 instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
 8557   predicate(n->as_LoadStore()->barrier_data() == 0);
 8558   match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
 8559   ins_cost(2 * VOLATILE_REF_COST);
 8560   effect(TEMP_DEF res, KILL cr);
 8561   format %{
 8562     "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
 8563   %}
 8564   ins_encode %{
 8565     __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
 8566                Assembler::xword, /*acquire*/ false, /*release*/ true,
 8567                /*weak*/ false, $res$$Register);
 8568   %}
 8569   ins_pipe(pipe_slow);
 8570 %}
 8571 
// ---- Acquiring strong CompareAndExchange variants ---------------------------
// Same as the patterns above but matched only when the node needs an
// acquiring load-exclusive (needs_acquiring_load_exclusive(n)), so cmpxchg is
// emitted with /*acquire*/ true.  The lower ins_cost (VOLATILE_REF_COST vs
// 2 * VOLATILE_REF_COST) makes the matcher prefer these when the predicate
// holds.

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Acquiring byte CAS; result sign-extended (sxtbw).
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxtbw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Acquiring short CAS; result sign-extended (sxthw).
instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
    __ sxthw($res$$Register, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Acquiring int CAS.
instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Acquiring long CAS.
instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Acquiring narrow-oop CAS; no GC barrier data.
instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
  match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Acquiring pointer CAS; no GC barrier data.
instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(TEMP_DEF res, KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ false, $res$$Register);
  %}
  ins_pipe(pipe_slow);
%}
 8681 
// ---- Weak CompareAndSwap (boolean result) -----------------------------------
// These use /*weak*/ true (the CAS may fail spuriously) and discard the loaded
// value (noreg); instead $res is materialized from the flags via
// "csetw $res, EQ": 1 on success, 0 on failure.  Ordering is relaxed-load /
// releasing-store.  Flags clobbered (KILL cr).

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Narrow-oop weak CAS; no GC barrier data.
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Pointer weak CAS; no GC barrier data.
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(2 * VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ false, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 8797 
// ---- Acquiring weak CompareAndSwap variants ---------------------------------
// Weak CAS (boolean result via csetw EQ, value discarded with noreg) emitted
// with /*acquire*/ true; matched only when needs_acquiring_load_exclusive(n)
// holds.  Lower ins_cost than the relaxed forms so they win when applicable.

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::byte, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::halfword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Narrow-oop acquiring weak CAS; no GC barrier data.
instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
  match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::word, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}

// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Pointer acquiring weak CAS; no GC barrier data.
instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  ins_cost(VOLATILE_REF_COST);
  effect(KILL cr);
  format %{
    "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
    "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
  %}
  ins_encode %{
    __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
               Assembler::xword, /*acquire*/ true, /*release*/ true,
               /*weak*/ true, noreg);
    __ csetw($res$$Register, Assembler::EQ);
  %}
  ins_pipe(pipe_slow);
%}
 8917 
 8918 // END This section of the file is automatically generated. Do not edit --------------
 8919 // ---------------------------------------------------------------------
 8920 
// ---- Atomic exchange (GetAndSet) --------------------------------------------
// $prev receives the value previously held at [$mem], $newv is stored.
// Relaxed forms use atomic_xchg/atomic_xchgw; the *Acq forms (matched when
// needs_acquiring_load_exclusive(n)) use the acquiring atomic_xchgal*
// variants and carry a lower ins_cost so they are preferred when applicable.
// N/P forms require no GC barrier data (barrier_data() == 0).

instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
  match(Set prev (GetAndSetI mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
  match(Set prev (GetAndSetL mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetN mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetP mem newv));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "atomic_xchg  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetI mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set prev (GetAndSetL mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
  match(Set prev (GetAndSetN mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
  predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
  match(Set prev (GetAndSetP mem newv));
  ins_cost(VOLATILE_REF_COST);
  format %{ "atomic_xchg_acq  $prev, $newv, [$mem]" %}
  ins_encode %{
    __ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9006 
 9007 
// ---- Atomic fetch-and-add (GetAndAdd), relaxed forms ------------------------
// Result register (named $newval here) receives the value returned by
// MacroAssembler::atomic_add*.
// NOTE(review): GetAndAdd nodes are fetch-and-add, so this is presumably the
// PRE-add value despite the "newval" operand name — confirm against
// MacroAssembler::atomic_add before relying on the name.
// *_no_res variants match when the result is unused
// (result_not_used()) and discard it (noreg) at slightly lower cost.
// *i variants take an add/sub-encodable immediate instead of a register.

instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  match(Set newval (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addL $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addL [$mem], $incr" %}
  ins_encode %{
    __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  match(Set newval (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST + 1);
  format %{ "get_and_addI $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used());
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(2 * VOLATILE_REF_COST);
  format %{ "get_and_addI [$mem], $incr" %}
  ins_encode %{
    __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9091 
// ---- Atomic fetch-and-add, acquiring forms ----------------------------------
// Matched only when needs_acquiring_load_exclusive(n); these emit the
// acquiring atomic_addal/atomic_addalw variants and carry a lower ins_cost
// than the relaxed forms above.  *_no_res variants additionally require
// result_not_used() and discard the result (noreg).

instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddL mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addL_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
  predicate(needs_acquiring_load_exclusive(n));
  match(Set newval (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST + 1);
  format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}

instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
  predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
  match(Set dummy (GetAndAddI mem incr));
  ins_cost(VOLATILE_REF_COST);
  format %{ "get_and_addI_acq [$mem], $incr" %}
  ins_encode %{
    __ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
  %}
  ins_pipe(pipe_serial);
%}
 9179 
// Manifest a CmpU result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// Sequence: an unsigned 32-bit compare sets flags; "csetw $dst, NE" gives
// 0 when equal and 1 otherwise; "cnegw ... LO" negates that 1 to -1 when
// src1 is unsigned-below src2, yielding the -1/0/1 three-way result.
instruct cmpU3_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg flags)
%{
  match(Set dst (CmpU3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmpw $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpU3(reg)"
  %}
  ins_encode %{
    __ cmpw($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}

// Same three-way unsigned compare, but against an add/sub-encodable
// immediate; the compare is expressed as "subsw zr, src1, imm".
instruct cmpU3_reg_imm(iRegINoSp dst, iRegI src1, immIAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpU3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subsw zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpU3(imm)"
  %}
  ins_encode %{
    __ subsw(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}
 9221 
// Manifest a CmpUL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
// 64-bit unsigned three-way compare: cset NE then cneg on LO (unsigned
// below), producing -1/0/1 in a 32-bit destination.
instruct cmpUL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpUL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "cmp $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpUL3(reg)"
  %}
  ins_encode %{
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
  %}

  ins_pipe(pipe_class_default);
%}

// Immediate form: compare via "subs zr, src1, imm" with an
// add/sub-encodable long immediate.
instruct cmpUL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpUL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subs zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lo\t# CmpUL3(imm)"
  %}
  ins_encode %{
    __ subs(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
 9263 
// Manifest a CmpL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);   // the compare clobbers the condition flags

  ins_cost(INSN_COST * 3);
  format %{
      "cmp $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lt\t# CmpL3(reg)"
  %}
  ins_encode %{
    // signed variant of the CmpU3/CmpUL3 idiom: csetw writes 1 on NE,
    // 0 on EQ; cnegw negates to -1 on signed less-than (LT).
    __ cmp($src1$$Register, $src2$$Register);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}

// As above but against an add/sub immediate (subs zr, src1, imm).
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL flags);

  ins_cost(INSN_COST * 3);
  format %{
      "subs zr, $src1, $src2\n\t"
      "csetw $dst, ne\n\t"
      "cnegw $dst, lt\t# CmpL3(imm)"
  %}
  ins_encode %{
    __ subs(zr, $src1$$Register, (int32_t)$src2$$constant);
    __ csetw($dst$$Register, Assembler::NE);
    __ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
  %}

  ins_pipe(pipe_class_default);
%}
 9305 
 9306 // ============================================================================
 9307 // Conditional Move Instructions
 9308 
 9309 // n.b. we have identical rules for both a signed compare op (cmpOp)
 9310 // and an unsigned compare op (cmpOpU). it would be nice if we could
 9311 // define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
 9316 // which throws a ShouldNotHappen. So, we have to provide two flavours
 9317 // of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
 9318 
// Conditional move, int: dst = cmp(cr) ? src2 : src1.
// n.b. the csel operand order is swapped relative to the match rule:
// src2 is selected when the condition holds.
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// Unsigned-compare flavour of the rule above (cmpOpU/rFlagsRegU).
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9350 
 9351 // special cases where one arg is zero
 9352 
 9353 // n.b. this is selected in preference to the rule above because it
 9354 // avoids loading constant 0 into a source register
 9355 
 9356 // TODO
 9357 // we ought only to be able to cull one of these variants as the ideal
 9358 // transforms ought always to order the zero consistently (to left/right?)
 9359 
// zero on the left: dst = cmp ? src : 0 (zr supplies the zero).
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// zero on the right: dst = cmp ? 0 : src.
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9423 
 9424 // special case for creating a boolean 0 or 1
 9425 
 9426 // n.b. this is selected in preference to the rule above because it
 9427 // avoids loading constants 0 and 1 into a source register
 9428 
// dst = cmp ? 0 : 1, done in one instruction:
// csincw dst, zr, zr, cond computes cond ? zr : zr + 1.
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# signed, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}

// unsigned-compare flavour of the rule above
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
  match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int"  %}

  ins_encode %{
    // equivalently
    // cset(as_Register($dst$$reg),
    //      negate_condition((Assembler::Condition)$cmp$$cmpcode));
    __ csincw(as_Register($dst$$reg),
             zr,
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_none);
%}
 9466 
// Conditional move, long: dst = cmp(cr) ? src2 : src1 (64-bit csel).
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9498 
 9499 // special cases where one arg is zero
 9500 
// zero on the right: dst = cmp ? 0 : src.
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// zero on the left: dst = cmp ? src : 0.
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
  match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, long"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9564 
// Conditional move, pointer: dst = cmp(cr) ? src2 : src1.
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src2$$reg),
            as_Register($src1$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9596 
 9597 // special cases where one arg is zero
 9598 
// zero (null) on the right: dst = cmp ? null : src.
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            zr,
            as_Register($src$$reg),
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// zero (null) on the left: dst = cmp ? src : null.
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# signed, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
  match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr"  %}

  ins_encode %{
    __ csel(as_Register($dst$$reg),
            as_Register($src$$reg),
            zr,
            (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9662 
// Conditional move, compressed pointer (32-bit cselw): dst = cmp ? src2 : src1.
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src2$$reg),
             as_Register($src1$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg_reg);
%}
 9678 
 9679 instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
 9680   match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));
 9681 
 9682   ins_cost(INSN_COST * 2);
 9683   format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr"  %}
 9684 
 9685   ins_encode %{
 9686     __ cselw(as_Register($dst$$reg),
 9687              as_Register($src2$$reg),
 9688              as_Register($src1$$reg),
 9689              (Assembler::Condition)$cmp$$cmpcode);
 9690   %}
 9691 
 9692   ins_pipe(icond_reg_reg);
 9693 %}
 9694 
 9695 // special cases where one arg is zero
 9696 
// zero (null) on the right: dst = cmp ? 0 : src.
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             zr,
             as_Register($src$$reg),
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// zero (null) on the left: dst = cmp ? src : 0.
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}

// unsigned-compare flavour of the rule above
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
  match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));

  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr"  %}

  ins_encode %{
    __ cselw(as_Register($dst$$reg),
             as_Register($src$$reg),
             zr,
             (Assembler::Condition)$cmp$$cmpcode);
  %}

  ins_pipe(icond_reg);
%}
 9760 
// Conditional move, float: dst = cmp(cr) ? src2 : src1.
// n.b. fcsels takes the "true" operand first, hence src2 before src1.
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}

// unsigned-compare flavour of the rule above
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1,  vRegF src2)
%{
  match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));

  ins_cost(INSN_COST * 3);

  format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
  ins_encode %{
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ fcsels(as_FloatRegister($dst$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src1$$reg),
              cond);
  %}

  ins_pipe(fp_cond_reg_reg_s);
%}
 9796 
 9797 instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1,  vRegD src2)
 9798 %{
 9799   match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));
 9800 
 9801   ins_cost(INSN_COST * 3);
 9802 
 9803   format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %}
 9804   ins_encode %{
 9805     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
 9806     __ fcseld(as_FloatRegister($dst$$reg),
 9807               as_FloatRegister($src2$$reg),
 9808               as_FloatRegister($src1$$reg),
 9809               cond);
 9810   %}
 9811 
 9812   ins_pipe(fp_cond_reg_reg_d);
 9813 %}
 9814 
 9815 instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1,  vRegD src2)
 9816 %{
 9817   match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));
 9818 
 9819   ins_cost(INSN_COST * 3);
 9820 
 9821   format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %}
 9822   ins_encode %{
 9823     Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
 9824     __ fcseld(as_FloatRegister($dst$$reg),
 9825               as_FloatRegister($src2$$reg),
 9826               as_FloatRegister($src1$$reg),
 9827               cond);
 9828   %}
 9829 
 9830   ins_pipe(fp_cond_reg_reg_d);
 9831 %}
 9832 
 9833 // ============================================================================
 9834 // Arithmetic Instructions
 9835 //
 9836 
 9837 // Integer Addition
 9838 
 9839 // TODO
 9840 // these currently employ operations which do not set CR and hence are
 9841 // not flagged as killing CR but we would like to isolate the cases
 9842 // where we want to set flags from those where we don't. need to work
 9843 // out how to do that.
 9844 
// 32-bit register-register add.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// 32-bit add with an add/sub immediate; shares the add/sub-immediate
// encoding, selected by opcode (0x0 = add, 0x1 = sub).
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (AddI src1 src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}

// As above but the int operand is the low word of a long (ConvL2I);
// the 32-bit addw reads just the low 32 bits of src1, so no explicit
// narrowing instruction is needed.
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
  match(Set dst (AddI (ConvL2I src1) src2));

  ins_cost(INSN_COST);
  format %{ "addw $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
 9887 
 9888 // Pointer Addition
// Pointer Addition
instruct addP_reg_reg(iRegPNoSp dst, iRegPorL2P src1, iRegL src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer plus a sign-extended int offset, folded into add's sxtw
// extended-register form.
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegPorL2P src1, iRegIorL2I src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg), ext::sxtw);
  %}

  ins_pipe(ialu_reg_reg);
%}

// Pointer plus a shifted long index, folded into a single lea with a
// scaled (lsl) register offset.
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegPorL2P src1, iRegL src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL src2 scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::lsl($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// Pointer plus a sign-extended, scaled int index, folded into lea's
// sxtw-extended scaled addressing mode.
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegPorL2P src1, iRegIorL2I src2, immIScale scale) %{
  match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}

  ins_encode %{
    __ lea(as_Register($dst$$reg),
           Address(as_Register($src1$$reg), as_Register($src2$$reg),
                   Address::sxtw($scale$$constant)));
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
 9948 
// Fuse (ConvI2L src) << scale into a single sbfiz: insert the
// sign-extended source field at bit position (scale & 63). The field
// width (-scale & 63, i.e. 64 - scale) is clamped to 32 because the
// int source has only 32 significant bits.
// NOTE(review): cr is declared but sbfiz does not appear to set flags;
// presumably a leftover operand -- confirm against the matcher rules.
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
  match(Set dst (LShiftL (ConvI2L src) scale));

  ins_cost(INSN_COST);
  format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}

  ins_encode %{
    __ sbfiz(as_Register($dst$$reg),
          as_Register($src$$reg),
          $scale$$constant & 63, MIN2(32, (int)((-$scale$$constant) & 63)));
  %}

  ins_pipe(ialu_reg_shift);
%}
 9963 
 9964 // Pointer Immediate Addition
 9965 // n.b. this needs to be more expensive than using an indirect memory
 9966 // operand
// Pointer plus add/sub immediate; shares the add/sub-immediate encoding,
// selected by opcode (0x0 = add, 0x1 = sub).
instruct addP_reg_imm(iRegPNoSp dst, iRegPorL2P src1, immLAddSub src2) %{
  match(Set dst (AddP src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2\t# ptr" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
 9980 
 9981 // Long Addition
// 64-bit register-register add.
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
 9997 
// Long Immediate Addition -- no constant pool entries required.
// 64-bit add with an add/sub immediate.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (AddL src1 src2));

  ins_cost(INSN_COST);
  format %{ "add $dst, $src1, $src2" %}

  // use opcode to indicate that this is an add not a sub
  opcode(0x0);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10012 
10013 // Integer Subtraction
// 32-bit register-register subtract.
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Immediate Subtraction
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
  match(Set dst (SubI src1 src2));

  ins_cost(INSN_COST);
  format %{ "subw $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));

  ins_pipe(ialu_reg_imm);
%}
10043 
10044 // Long Subtraction
// 64-bit register-register subtract.
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{

  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
10060 
// Long Immediate Subtraction -- no constant pool entries required.
// 64-bit subtract with an add/sub immediate.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
  match(Set dst (SubL src1 src2));

  ins_cost(INSN_COST);
  // fixed: was "sub$dst, ..." -- the mnemonic was glued to the operand
  // in the disassembly text; matches the sibling add/sub rules' spacing
  format %{ "sub  $dst, $src1, $src2" %}

  // use opcode to indicate that this is a sub not an add
  opcode(0x1);

  ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );

  ins_pipe(ialu_reg_imm);
%}
10075 
10076 // Integer Negation (special case for sub)
10077 
// 32-bit negate, matched from (0 - src).
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
  match(Set dst (SubI zero src));

  ins_cost(INSN_COST);
  format %{ "negw $dst, $src\t# int" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg),
            as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}

// 64-bit negate, matched from (0 - src).
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
  match(Set dst (SubL zero src));

  ins_cost(INSN_COST);
  format %{ "neg $dst, $src\t# long" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg),
           as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
10107 
10108 // Integer Multiply
10109 
// 32-bit multiply.
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(INSN_COST * 3);
  format %{ "mulw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mulw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}

// 32x32 -> 64 signed widening multiply: MulL of two sign-extended ints
// maps onto a single smull.
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smull  $dst, $src1, $src2" %}

  ins_encode %{
    __ smull(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imul_reg_reg);
%}
10139 
10140 // Long Multiply
10141 
// 64-bit multiply.
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (MulL src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "mul  $dst, $src1, $src2" %}

  ins_encode %{
    __ mul(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10156 
// High 64 bits of a signed 64x64 multiply (smulh).
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (MulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "smulh   $dst, $src1, $src2\t# mulhi" %}

  ins_encode %{
    __ smulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}

// High 64 bits of an unsigned 64x64 multiply (umulh).
instruct umulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
  match(Set dst (UMulHiL src1 src2));

  ins_cost(INSN_COST * 7);
  format %{ "umulh   $dst, $src1, $src2\t# umulhi" %}

  ins_encode %{
    __ umulh(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(lmul_reg_reg);
%}
10188 
10189 // Combined Integer Multiply & Add/Sub
10190 
// Fused 32-bit multiply-add: dst = src3 + src1 * src2.
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (AddI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // fixed: the format said "madd" but the encoding emits the 32-bit
  // maddw; the text only affects PrintOptoAssembly output
  format %{ "maddw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ maddw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10206 
// Fused 32-bit multiply-subtract: dst = src3 - src1 * src2.
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
  match(Set dst (SubI src3 (MulI src1 src2)));

  ins_cost(INSN_COST * 3);
  // fixed: the format said "msub" but the encoding emits the 32-bit
  // msubw; the text only affects PrintOptoAssembly output
  format %{ "msubw  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msubw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg),
             as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10222 
10223 // Combined Integer Multiply & Neg
10224 
// Negated 32-bit multiply: dst = -(src1 * src2), matched from the
// Ideal shape (0 - src1) * src2 (A64 MNEG, W form).
// Fix: format said "mneg" but the encoder emits mnegw (see maddI).
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
  match(Set dst (MulI (SubI zero src1) src2));

  ins_cost(INSN_COST * 3);
  format %{ "mnegw  $dst, $src1, $src2" %}

  ins_encode %{
    __ mnegw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10239 
10240 // Combined Long Multiply & Add/Sub
10241 
// Fused 64-bit multiply-add: dst = src3 + src1 * src2 (A64 MADD, X form).
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (AddL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "madd  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ madd(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10257 
// Fused 64-bit multiply-subtract: dst = src3 - src1 * src2 (A64 MSUB, X form).
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
  match(Set dst (SubL src3 (MulL src1 src2)));

  ins_cost(INSN_COST * 5);
  format %{ "msub  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ msub(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg),
            as_Register($src3$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10273 
10274 // Combined Long Multiply & Neg
10275 
// Negated 64-bit multiply: dst = -(src1 * src2), matched from the
// Ideal shape (0 - src1) * src2 (A64 MNEG, X form).
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero src1) src2));

  ins_cost(INSN_COST * 5);
  format %{ "mneg  $dst, $src1, $src2" %}

  ins_encode %{
    __ mneg(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(lmac_reg_reg);
%}
10290 
// Combined Integer Signed Multiply & Add/Sub/Neg Long
10292 
// Widening signed multiply-add: dst = src3 + sext32(src1) * sext32(src2)
// in one SMADDL, avoiding explicit ConvI2L instructions.
instruct smaddL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (AddL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smaddl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smaddl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10308 
// Widening signed multiply-subtract: dst = src3 - sext32(src1) * sext32(src2)
// in one SMSUBL.
instruct smsubL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
  match(Set dst (SubL src3 (MulL (ConvI2L src1) (ConvI2L src2))));

  ins_cost(INSN_COST * 3);
  format %{ "smsubl  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    __ smsubl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              as_Register($src3$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10324 
// Widening negated signed multiply: dst = -(sext32(src1) * sext32(src2))
// in one SMNEGL; matched from (0 - ConvI2L(src1)) * ConvI2L(src2).
instruct smnegL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, immL0 zero) %{
  match(Set dst (MulL (SubL zero (ConvI2L src1)) (ConvI2L src2)));

  ins_cost(INSN_COST * 3);
  format %{ "smnegl  $dst, $src1, $src2" %}

  ins_encode %{
    __ smnegl(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg));
  %}

  ins_pipe(imac_reg_reg);
%}
10339 
10340 // Combined Multiply-Add Shorts into Integer (dst = src1 * src2 + src3 * src4)
10341 
// dst = src1 * src2 + src3 * src4 (all 32-bit); the first product is
// held in rscratch1, then folded in with maddw.
// Fix: put the encode block's closing %} on its own line, consistent
// with every other ins_encode block in this file.
instruct muladdS2I(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3, iRegIorL2I src4) %{
  match(Set dst (MulAddS2I (Binary src1 src2) (Binary src3 src4)));

  ins_cost(INSN_COST * 5);
  format %{ "mulw  rscratch1, $src1, $src2\n\t"
            "maddw $dst, $src3, $src4, rscratch1" %}

  ins_encode %{
    __ mulw(rscratch1, as_Register($src1$$reg), as_Register($src2$$reg));
    __ maddw(as_Register($dst$$reg), as_Register($src3$$reg), as_Register($src4$$reg), rscratch1);
  %}

  ins_pipe(imac_reg_reg);
%}
10355 
10356 // Integer Divide
10357 
// 32-bit signed divide (A64 SDIV, W form; truncates toward zero).
// Expanded via encoding class aarch64_enc_divw, defined elsewhere in
// this file.
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "sdivw  $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_divw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10367 
10368 // Long Divide
10369 
// 64-bit signed divide (A64 SDIV, X form; truncates toward zero).
// Expanded via encoding class aarch64_enc_div.
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (DivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "sdiv   $dst, $src1, $src2" %}

  ins_encode(aarch64_enc_div(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10379 
10380 // Integer Remainder
10381 
// 32-bit signed remainder: quotient via sdivw into rscratch1, then
// msubw reconstructs dst = src1 - (src1 / src2) * src2.
// Expanded via encoding class aarch64_enc_modw.
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (ModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "sdivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_modw(dst, src1, src2));
  ins_pipe(idiv_reg_reg);
%}
10392 
10393 // Long Remainder
10394 
// 64-bit signed remainder: quotient via sdiv into rscratch1, then
// msub reconstructs dst = src1 - (src1 / src2) * src2.
// Expanded via encoding class aarch64_enc_mod.
// Fix: use "\n\t" (as modI does) so the second line of the two-line
// format comment is indented consistently in disassembly listings.
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (ModL src1 src2));

  ins_cost(INSN_COST * 38);
  format %{ "sdiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode(aarch64_enc_mod(dst, src1, src2));
  ins_pipe(ldiv_reg_reg);
%}
10405 
10406 // Unsigned Integer Divide
10407 
// 32-bit unsigned divide (A64 UDIV, W form).
instruct UdivI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (UDivI src1 src2));

  ins_cost(INSN_COST * 19);
  format %{ "udivw  $dst, $src1, $src2" %}

  ins_encode %{
    __ udivw($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(idiv_reg_reg);
%}
10420 
10421 //  Unsigned Long Divide
10422 
// 64-bit unsigned divide (A64 UDIV, X form).
instruct UdivL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (UDivL src1 src2));

  ins_cost(INSN_COST * 35);
  format %{ "udiv   $dst, $src1, $src2" %}

  ins_encode %{
    __ udiv($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(ldiv_reg_reg);
%}
10435 
10436 // Unsigned Integer Remainder
10437 
// 32-bit unsigned remainder: udivw into rscratch1, then msubw
// reconstructs dst = src1 - (src1 / src2) * src2.
instruct UmodI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (UModI src1 src2));

  ins_cost(INSN_COST * 22);
  format %{ "udivw  rscratch1, $src1, $src2\n\t"
            "msubw  $dst, rscratch1, $src2, $src1" %}

  ins_encode %{
    __ udivw(rscratch1, $src1$$Register, $src2$$Register);
    __ msubw($dst$$Register, rscratch1, $src2$$Register, $src1$$Register);
  %}

  ins_pipe(idiv_reg_reg);
%}
10452 
10453 // Unsigned Long Remainder
10454 
// 64-bit unsigned remainder: udiv into rscratch1, then msub
// reconstructs dst = src1 - (src1 / src2) * src2.
// Fix: use "\n\t" (as UmodI_reg_reg does) so the second line of the
// format comment is indented consistently in disassembly listings.
instruct UModL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (UModL src1 src2));

  ins_cost(INSN_COST * 38);
  format %{ "udiv   rscratch1, $src1, $src2\n\t"
            "msub   $dst, rscratch1, $src2, $src1" %}

  ins_encode %{
    __ udiv(rscratch1, $src1$$Register, $src2$$Register);
    __ msub($dst$$Register, rscratch1, $src2$$Register, $src1$$Register);
  %}

  ins_pipe(ldiv_reg_reg);
%}
10469 
10470 // Integer Shifts
10471 
10472 // Shift Left Register
// Variable 32-bit shift left: dst = src1 << (src2 & 0x1f) via LSLV(W).
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslvw  $dst, $src1, $src2" %}

  ins_encode %{
    // $…$$Register is the adlc shorthand for as_Register($…$$reg).
    __ lslvw($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10487 
10488 // Shift Left Immediate
// Immediate 32-bit shift left; the count is masked to 5 bits,
// matching Ideal LShiftI semantics.
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lslw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10503 
10504 // Shift Right Logical Register
// Variable 32-bit logical shift right (LSRV, W form).
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10519 
10520 // Shift Right Logical Immediate
// Immediate 32-bit logical shift right; count masked to 5 bits.
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ lsrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10535 
10536 // Shift Right Arithmetic Register
// Variable 32-bit arithmetic shift right (ASRV, W form).
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrvw  $dst, $src1, $src2" %}

  ins_encode %{
    __ asrvw(as_Register($dst$$reg),
             as_Register($src1$$reg),
             as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10551 
10552 // Shift Right Arithmetic Immediate
// Immediate 32-bit arithmetic shift right; count masked to 5 bits.
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  ins_cost(INSN_COST);
  format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10567 
10568 // Combined Int Mask and Right Shift (using UBFM)
10569 // TODO
10570 
10571 // Long Shifts
10572 
10573 // Shift Left Register
// Variable 64-bit shift left (LSLV, X form); the shift count comes
// from a 32-bit register (iRegIorL2I), as in the Ideal LShiftL node.
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lslv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lslv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10588 
10589 // Shift Left Immediate
// Immediate 64-bit shift left; count masked to 6 bits.
// Fix: the continuation arguments were indented one column too far,
// breaking the alignment used by every sibling pattern in this file.
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsl(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10604 
10605 // Shift Right Logical Register
// Variable 64-bit logical shift right (LSRV, X form).
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "lsrv  $dst, $src1, $src2" %}

  ins_encode %{
    __ lsrv(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10620 
10621 // Shift Right Logical Immediate
// Immediate 64-bit logical shift right; count masked to 6 bits.
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10636 
10637 // A special-case pattern for card table stores.
// Logical right shift of a pointer reinterpreted as a long (CastP2X);
// same encoding as urShiftL_reg_imm, count masked to 6 bits.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  ins_cost(INSN_COST);
  format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}

  ins_encode %{
    __ lsr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10652 
10653 // Shift Right Arithmetic Register
// Variable 64-bit arithmetic shift right (ASRV, X form).
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST * 2);
  format %{ "asrv  $dst, $src1, $src2" %}

  ins_encode %{
    // $…$$Register is the adlc shorthand for as_Register($…$$reg).
    __ asrv($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe(ialu_reg_reg_vshift);
%}
10668 
10669 // Shift Right Arithmetic Immediate
// Immediate 64-bit arithmetic shift right; count masked to 6 bits.
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  ins_cost(INSN_COST);
  format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}

  ins_encode %{
    __ asr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10684 
// BEGIN This section of the file is automatically generated. Do not edit --------------
// This section is generated from aarch64_ad.m4
// NOTE(review): comment additions below should be mirrored in
// aarch64_ad.m4 so a regeneration does not drop them.

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ -1 ==> eon src1, zr  (bitwise NOT via exclusive-OR-NOT with zero)
instruct regL_not_reg(iRegLNoSp dst,
                         iRegL src1, immL_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorL src1 m1));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, zr" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ -1 ==> eonw src1, zr  (32-bit bitwise NOT)
instruct regI_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, immI_M1 m1,
                         rFlagsReg cr) %{
  match(Set dst (XorI src1 m1));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, zr" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              zr,
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg);
%}
10725 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 0 - (val >>> shift) ==> negw dst, src1, LSR #shift
instruct NegI_reg_URShift_reg(iRegINoSp dst,
                              immI0 zero, iRegIorL2I src1, immI src2) %{
  match(Set dst (SubI zero (URShiftI src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "negw  $dst, $src1, LSR $src2" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSR, $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 0 - (val >> shift) ==> negw dst, src1, ASR #shift
instruct NegI_reg_RShift_reg(iRegINoSp dst,
                              immI0 zero, iRegIorL2I src1, immI src2) %{
  match(Set dst (SubI zero (RShiftI src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "negw  $dst, $src1, ASR $src2" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::ASR, $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 0 - (val << shift) ==> negw dst, src1, LSL #shift
instruct NegI_reg_LShift_reg(iRegINoSp dst,
                              immI0 zero, iRegIorL2I src1, immI src2) %{
  match(Set dst (SubI zero (LShiftI src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "negw  $dst, $src1, LSL $src2" %}

  ins_encode %{
    __ negw(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSL, $src2$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 0 - (val >>> shift) ==> neg dst, src1, LSR #shift (64-bit)
instruct NegL_reg_URShift_reg(iRegLNoSp dst,
                              immL0 zero, iRegL src1, immI src2) %{
  match(Set dst (SubL zero (URShiftL src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "neg  $dst, $src1, LSR $src2" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSR, $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 0 - (val >> shift) ==> neg dst, src1, ASR #shift (64-bit)
instruct NegL_reg_RShift_reg(iRegLNoSp dst,
                              immL0 zero, iRegL src1, immI src2) %{
  match(Set dst (SubL zero (RShiftL src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "neg  $dst, $src1, ASR $src2" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::ASR, $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 0 - (val << shift) ==> neg dst, src1, LSL #shift (64-bit)
instruct NegL_reg_LShift_reg(iRegLNoSp dst,
                              immL0 zero, iRegL src1, immI src2) %{
  match(Set dst (SubL zero (LShiftL src1 src2)));

  ins_cost(1.9 * INSN_COST);
  format %{ "neg  $dst, $src1, LSL $src2" %}

  ins_encode %{
    __ neg(as_Register($dst$$reg), as_Register($src1$$reg),
            Assembler::LSL, $src2$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_shift);
%}
10827 
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ val2) ==> bicw
instruct AndI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
  match(Set dst (AndI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bicw  $dst, $src1, $src2" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ val2) ==> bic
instruct AndL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1) %{
  match(Set dst (AndL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "bic  $dst, $src1, $src2" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ val2) ==> ornw
instruct OrI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
  match(Set dst (OrI src1 (XorI src2 m1)));
  ins_cost(INSN_COST);
  format %{ "ornw  $dst, $src1, $src2" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ val2) ==> orn
instruct OrL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1) %{
  match(Set dst (OrL src1 (XorL src2 m1)));
  ins_cost(INSN_COST);
  format %{ "orn  $dst, $src1, $src2" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// -1 ^ (val2 ^ val1) ==> eonw
instruct XorI_reg_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
  match(Set dst (XorI m1 (XorI src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eonw  $dst, $src1, $src2" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// -1 ^ (val2 ^ val1) ==> eon
instruct XorL_reg_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2, immL_M1 m1) %{
  match(Set dst (XorL m1 (XorL src2 src1)));
  ins_cost(INSN_COST);
  format %{ "eon  $dst, $src1, $src2" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL, 0);
  %}

  ins_pipe(ialu_reg_reg);
%}
10935 
// NOTE(review): in the patterns below the immediate shift count is
// masked with 0x1f (32-bit forms) / 0x3f (64-bit forms), matching
// Ideal shift-count semantics. Comment changes belong in aarch64_ad.m4.
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val >>> shift)) ==> bicw
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val >>> shift)) ==> bic
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val >> shift)) ==> bicw
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val >> shift)) ==> bic
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11019 
// NOTE(review): the immediate rotate/shift count below is masked with
// 0x1f (32-bit forms) / 0x3f (64-bit forms). Comment changes belong
// in aarch64_ad.m4.
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val ror shift)) ==> bicw
instruct AndI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val ror shift)) ==> bic
instruct AndL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val << shift)) ==> bicw
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bicw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bicw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val << shift)) ==> bic
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "bic  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ bic(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11103 
// NOTE(review): the immediate shift/rotate count below is masked with
// 0x1f (32-bit forms) / 0x3f (64-bit forms). Comment changes belong
// in aarch64_ad.m4.
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val >>> shift)) ==> eonw
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val >>> shift)) ==> eon
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val >> shift)) ==> eonw
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val >> shift)) ==> eon
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eon  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eon(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val ror shift)) ==> eonw
instruct XorI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (XorI src4 (XorI(RotateRight src2 src3) src1)));
  ins_cost(1.9 * INSN_COST);
  format %{ "eonw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eonw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11208 
11209 // This pattern is automatically generated from aarch64_ad.m4
11210 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11211 // val ^ (-1 ^ (val ror shift)) ==> eon
11212 instruct XorL_reg_RotateRight_not_reg(iRegLNoSp dst,
11213                          iRegL src1, iRegL src2,
11214                          immI src3, immL_M1 src4) %{
11215   match(Set dst (XorL src4 (XorL(RotateRight src2 src3) src1)));
11216   ins_cost(1.9 * INSN_COST);
11217   format %{ "eon  $dst, $src1, $src2, ROR $src3" %}
11218 
11219   ins_encode %{
11220     __ eon(as_Register($dst$$reg),
11221               as_Register($src1$$reg),
11222               as_Register($src2$$reg),
11223               Assembler::ROR,
11224               $src3$$constant & 0x3f);
11225   %}
11226 
11227   ins_pipe(ialu_reg_reg_shift);
11228 %}
11229 
11230 // This pattern is automatically generated from aarch64_ad.m4
11231 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11232 // val ^ (-1 ^ (val << shift)) ==> eonw
11233 instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
11234                          iRegIorL2I src1, iRegIorL2I src2,
11235                          immI src3, immI_M1 src4) %{
11236   match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
11237   ins_cost(1.9 * INSN_COST);
11238   format %{ "eonw  $dst, $src1, $src2, LSL $src3" %}
11239 
11240   ins_encode %{
11241     __ eonw(as_Register($dst$$reg),
11242               as_Register($src1$$reg),
11243               as_Register($src2$$reg),
11244               Assembler::LSL,
11245               $src3$$constant & 0x1f);
11246   %}
11247 
11248   ins_pipe(ialu_reg_reg_shift);
11249 %}
11250 
11251 // This pattern is automatically generated from aarch64_ad.m4
11252 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11253 // val ^ (-1 ^ (val << shift)) ==> eon
11254 instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
11255                          iRegL src1, iRegL src2,
11256                          immI src3, immL_M1 src4) %{
11257   match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
11258   ins_cost(1.9 * INSN_COST);
11259   format %{ "eon  $dst, $src1, $src2, LSL $src3" %}
11260 
11261   ins_encode %{
11262     __ eon(as_Register($dst$$reg),
11263               as_Register($src1$$reg),
11264               as_Register($src2$$reg),
11265               Assembler::LSL,
11266               $src3$$constant & 0x3f);
11267   %}
11268 
11269   ins_pipe(ialu_reg_reg_shift);
11270 %}
11271 
// ---- OR-with-NOT (ORN/ORNW) patterns ----
// NOTE(review): this section is generated from aarch64_ad.m4 — permanent
// changes belong in the .m4 source, not here.
// "src1 | ~(src2 <shift> amt)" reaches the matcher as an OR whose second input
// is an XOR with an all-ones immediate (src4: immI_M1/immL_M1) of the shifted
// value. Each shift kind (LSR, ASR, ROR, LSL) folds into one ornw/orn
// shifted-register instruction; the shift amount is masked to the operand
// width (0x1f for 32-bit, 0x3f for 64-bit).
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val >>> shift)) ==> ornw
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val >>> shift)) ==> orn
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val >> shift)) ==> ornw
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val >> shift)) ==> orn
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val ror shift)) ==> ornw
instruct OrI_reg_RotateRight_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val ror shift)) ==> orn
instruct OrL_reg_RotateRight_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(RotateRight src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val << shift)) ==> ornw
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3, immI_M1 src4) %{
  match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "ornw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ ornw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val << shift)) ==> orn
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3, immL_M1 src4) %{
  match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
  ins_cost(1.9 * INSN_COST);
  format %{ "orn  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orn(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11439 
// ---- AND with shifted second operand ----
// NOTE(review): this section is generated from aarch64_ad.m4 — permanent
// changes belong in the .m4 source, not here.
// Folds a constant shift/rotate of the second operand into the AND's
// shifted-register form (one instruction instead of shift + and). The shift
// amount is masked to the operand width: 0x1f for 32-bit, 0x3f for 64-bit.
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 >>> shift) ==> andw
instruct AndI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 >>> shift) ==> andr
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 >> shift) ==> andw
instruct AndI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 >> shift) ==> andr
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 << shift) ==> andw
instruct AndI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 << shift) ==> andr
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 ror shift) ==> andw
instruct AndI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AndI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ andw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 & (src2 ror shift) ==> andr
instruct AndL_reg_RotateRight_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AndL src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "andr  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ andr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11607 
// ---- XOR with shifted second operand ----
// NOTE(review): this section is generated from aarch64_ad.m4 — permanent
// changes belong in the .m4 source, not here.
// Folds a constant shift/rotate of the second operand into the EOR's
// shifted-register form. Shift amount masked to the operand width
// (0x1f for 32-bit, 0x3f for 64-bit).
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 >>> shift) ==> eorw
instruct XorI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 >>> shift) ==> eor
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 >> shift) ==> eorw
instruct XorI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 >> shift) ==> eor
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 << shift) ==> eorw
instruct XorI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 << shift) ==> eor
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 ror shift) ==> eorw
instruct XorI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (XorI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eorw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eorw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 ^ (src2 ror shift) ==> eor
instruct XorL_reg_RotateRight_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (XorL src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "eor  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11775 
// ---- OR with shifted second operand ----
// NOTE(review): this section is generated from aarch64_ad.m4 — permanent
// changes belong in the .m4 source, not here.
// Folds a constant shift/rotate of the second operand into the ORR's
// shifted-register form. Shift amount masked to the operand width
// (0x1f for 32-bit, 0x3f for 64-bit).
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 >>> shift) ==> orrw
instruct OrI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 >>> shift) ==> orr
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 >> shift) ==> orrw
instruct OrI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 >> shift) ==> orr
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 << shift) ==> orrw
instruct OrI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 << shift) ==> orr
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 ror shift) ==> orrw
instruct OrI_reg_RotateRight_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (OrI src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orrw  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ orrw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}

// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// src1 | (src2 ror shift) ==> orr
instruct OrL_reg_RotateRight_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (OrL src1 (RotateRight src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "orr  $dst, $src1, $src2, ROR $src3" %}

  ins_encode %{
    __ orr(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ROR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
11943 
11944 // This pattern is automatically generated from aarch64_ad.m4
11945 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11946 instruct AddI_reg_URShift_reg(iRegINoSp dst,
11947                          iRegIorL2I src1, iRegIorL2I src2,
11948                          immI src3) %{
11949   match(Set dst (AddI src1 (URShiftI src2 src3)));
11950 
11951   ins_cost(1.9 * INSN_COST);
11952   format %{ "addw  $dst, $src1, $src2, LSR $src3" %}
11953 
11954   ins_encode %{
11955     __ addw(as_Register($dst$$reg),
11956               as_Register($src1$$reg),
11957               as_Register($src2$$reg),
11958               Assembler::LSR,
11959               $src3$$constant & 0x1f);
11960   %}
11961 
11962   ins_pipe(ialu_reg_reg_shift);
11963 %}
11964 
11965 // This pattern is automatically generated from aarch64_ad.m4
11966 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11967 instruct AddL_reg_URShift_reg(iRegLNoSp dst,
11968                          iRegL src1, iRegL src2,
11969                          immI src3) %{
11970   match(Set dst (AddL src1 (URShiftL src2 src3)));
11971 
11972   ins_cost(1.9 * INSN_COST);
11973   format %{ "add  $dst, $src1, $src2, LSR $src3" %}
11974 
11975   ins_encode %{
11976     __ add(as_Register($dst$$reg),
11977               as_Register($src1$$reg),
11978               as_Register($src2$$reg),
11979               Assembler::LSR,
11980               $src3$$constant & 0x3f);
11981   %}
11982 
11983   ins_pipe(ialu_reg_reg_shift);
11984 %}
11985 
11986 // This pattern is automatically generated from aarch64_ad.m4
11987 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
11988 instruct AddI_reg_RShift_reg(iRegINoSp dst,
11989                          iRegIorL2I src1, iRegIorL2I src2,
11990                          immI src3) %{
11991   match(Set dst (AddI src1 (RShiftI src2 src3)));
11992 
11993   ins_cost(1.9 * INSN_COST);
11994   format %{ "addw  $dst, $src1, $src2, ASR $src3" %}
11995 
11996   ins_encode %{
11997     __ addw(as_Register($dst$$reg),
11998               as_Register($src1$$reg),
11999               as_Register($src2$$reg),
12000               Assembler::ASR,
12001               $src3$$constant & 0x1f);
12002   %}
12003 
12004   ins_pipe(ialu_reg_reg_shift);
12005 %}
12006 
12007 // This pattern is automatically generated from aarch64_ad.m4
12008 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fuse a long add with a constant arithmetic-right-shifted operand into a
// single add (shifted-register form); shift count is masked to 0..63.
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12027 
12028 // This pattern is automatically generated from aarch64_ad.m4
12029 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fuse an int add with a constant left-shifted operand into a single addw
// (shifted-register form); shift count is masked to 0..31.
instruct AddI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (AddI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ addw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12048 
12049 // This pattern is automatically generated from aarch64_ad.m4
12050 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fuse a long add with a constant left-shifted operand into a single add
// (shifted-register form); shift count is masked to 0..63.
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (AddL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ add(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12069 
12070 // This pattern is automatically generated from aarch64_ad.m4
12071 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fuse an int subtract with a constant logical-right-shifted operand into a
// single subw (shifted-register form); shift count is masked to 0..31.
instruct SubI_reg_URShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (URShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12090 
12091 // This pattern is automatically generated from aarch64_ad.m4
12092 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fuse a long subtract with a constant logical-right-shifted operand into a
// single sub (shifted-register form); shift count is masked to 0..63.
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (URShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12111 
12112 // This pattern is automatically generated from aarch64_ad.m4
12113 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fuse an int subtract with a constant arithmetic-right-shifted operand into
// a single subw (shifted-register form); shift count is masked to 0..31.
instruct SubI_reg_RShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (RShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12132 
12133 // This pattern is automatically generated from aarch64_ad.m4
12134 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fuse a long subtract with a constant arithmetic-right-shifted operand into
// a single sub (shifted-register form); shift count is masked to 0..63.
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (RShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, ASR $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::ASR,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12153 
12154 // This pattern is automatically generated from aarch64_ad.m4
12155 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fuse an int subtract with a constant left-shifted operand into a single
// subw (shifted-register form); shift count is masked to 0..31.
instruct SubI_reg_LShift_reg(iRegINoSp dst,
                         iRegIorL2I src1, iRegIorL2I src2,
                         immI src3) %{
  match(Set dst (SubI src1 (LShiftI src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ subw(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x1f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12174 
12175 // This pattern is automatically generated from aarch64_ad.m4
12176 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Fuse a long subtract with a constant left-shifted operand into a single
// sub (shifted-register form); shift count is masked to 0..63.
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                         iRegL src1, iRegL src2,
                         immI src3) %{
  match(Set dst (SubL src1 (LShiftL src2 src3)));

  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, LSL $src3" %}

  ins_encode %{
    __ sub(as_Register($dst$$reg),
              as_Register($src1$$reg),
              as_Register($src2$$reg),
              Assembler::LSL,
              $src3$$constant & 0x3f);
  %}

  ins_pipe(ialu_reg_reg_shift);
%}
12195 
12196 // This pattern is automatically generated from aarch64_ad.m4
12197 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12198 
12199 // Shift Left followed by Shift Right.
12200 // This idiom is used by the compiler for the i2b bytecode etc.
// Collapse a constant left-shift followed by an arithmetic right-shift into
// one sbfm (signed bitfield move). immr = (rshift - lshift) & 63 and
// imms = 63 - lshift per the SBFM encoding of this idiom.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ sbfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12218 
12219 // This pattern is automatically generated from aarch64_ad.m4
12220 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12221 
12222 // Shift Left followed by Shift Right.
12223 // This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of sbfmL: constant left-shift then arithmetic right-shift
// becomes one sbfmw; counts are masked to 0..31.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "sbfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ sbfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12241 
12242 // This pattern is automatically generated from aarch64_ad.m4
12243 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12244 
12245 // Shift Left followed by Shift Right.
12246 // This idiom is used by the compiler for the i2b bytecode etc.
// Collapse a constant left-shift followed by a logical (unsigned) right-shift
// into one ubfm (unsigned bitfield move); same immr/imms derivation as sbfmL.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfm  $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 63;
    int rshift = $rshift_count$$constant & 63;
    int s = 63 - lshift;
    int r = (rshift - lshift) & 63;
    __ ubfm(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12264 
12265 // This pattern is automatically generated from aarch64_ad.m4
12266 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12267 
12268 // Shift Left followed by Shift Right.
12269 // This idiom is used by the compiler for the i2b bytecode etc.
// 32-bit variant of ubfmL: constant left-shift then logical right-shift
// becomes one ubfmw; counts are masked to 0..31.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
  match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
  ins_cost(INSN_COST * 2);
  format %{ "ubfmw  $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
  ins_encode %{
    int lshift = $lshift_count$$constant & 31;
    int rshift = $rshift_count$$constant & 31;
    int s = 31 - lshift;
    int r = (rshift - lshift) & 31;
    __ ubfmw(as_Register($dst$$reg),
            as_Register($src$$reg),
            r, s);
  %}

  ins_pipe(ialu_reg_shift);
%}
12287 
12288 // Bitfield extract with shift & mask
12289 
12290 // This pattern is automatically generated from aarch64_ad.m4
12291 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// (unsigned-shift right, then AND with a contiguous low-bit mask) is a
// bitfield extract: ubfxw. Field width = log2(mask+1); the predicate rejects
// combinations where rshift+width would exceed the 32-bit register.
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (AndI (URShiftI src rshift) mask));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfxw(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12309 
12310 // This pattern is automatically generated from aarch64_ad.m4
12311 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit bitfield extract: (URShiftL then AndL with a contiguous mask)
// becomes ubfx; predicate rejects rshift+width beyond 64 bits.
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
  match(Set dst (AndL (URShiftL src rshift) mask));
  // Make sure we are not going to exceed what ubfx can do.
  predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12329 
12330 
12331 // This pattern is automatically generated from aarch64_ad.m4
12332 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12333 
12334 // We can use ubfx when extending an And with a mask when we know mask
12335 // is positive.  We know that because immI_bitmask guarantees it.
// Same extract idiom wrapped in a ConvI2L: since the mask forces a
// non-negative value, a 64-bit ubfx also performs the widening for free.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
  // Make sure we are not going to exceed what ubfxw can do.
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST * 2);
  format %{ "ubfx $dst, $src, $rshift, $mask" %}
  ins_encode %{
    int rshift = $rshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfx(as_Register($dst$$reg),
            as_Register($src$$reg), rshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12353 
12354 
12355 // This pattern is automatically generated from aarch64_ad.m4
12356 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12357 
12358 // We can use ubfiz when masking by a positive number and then left shifting the result.
12359 // We know that the mask is positive because immI_bitmask guarantees it.
// (AND with a contiguous low-bit mask, then left-shift) is a bitfield
// insert-in-zero: ubfizw. Predicate rejects lshift+width beyond 32 bits.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftI (AndI src mask) lshift));
  predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12376 
12377 // This pattern is automatically generated from aarch64_ad.m4
12378 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12379 
12380 // We can use ubfiz when masking by a positive number and then left shifting the result.
12381 // We know that the mask is positive because immL_bitmask guarantees it.
// 64-bit variant of ubfizwI: (AndL mask, then LShiftL) becomes ubfiz;
// predicate rejects lshift+width beyond 64 bits.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
  match(Set dst (LShiftL (AndL src mask) lshift));
  predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12398 
12399 // This pattern is automatically generated from aarch64_ad.m4
12400 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12401 
12402 // We can use ubfiz when masking by a positive number and then left shifting the result.
12403 // We know that the mask is positive because immI_bitmask guarantees it.
// ubfiz idiom (mask then shift) whose result is widened by ConvI2L; the
// stricter <= 31 bound keeps the field inside the low 32 bits so the
// zero-extending ubfizw result equals the I2L conversion.
instruct ubfizwIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (ConvI2L (LShiftI (AndI src mask) lshift)));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfizw(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12420 
12421 // This pattern is automatically generated from aarch64_ad.m4
12422 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12423 
12424 // We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_positive_bitmaskI guarantees it.
// Long mask+shift narrowed by ConvL2I; the <= 31 bound confines the field
// to the low 32 bits so the truncation loses nothing and ubfiz suffices.
instruct ubfizLConvL2I(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
%{
  match(Set dst (ConvL2I (LShiftL (AndL src mask) lshift)));
  predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2_long(mask+1);
    __ ubfiz(as_Register($dst$$reg),
          as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12442 
12443 
12444 // This pattern is automatically generated from aarch64_ad.m4
12445 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12446 
// If there is a convert I to L block between an AndI and a LShiftL, we can also match ubfiz
// ubfiz with the I2L conversion sitting between the AndI and the LShiftL;
// the 64-bit shift bound (<= 64) applies because the shift is a long shift.
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
  match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
  predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 63;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12464 
12465 // This pattern is automatically generated from aarch64_ad.m4
12466 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12467 
// If there is a convert L to I block between an AndL and a LShiftI, we can also match ubfiz
// ubfiz with the L2I conversion sitting between the AndL and the LShiftI;
// field confined to the low 32 bits by the <= 31 predicate.
instruct ubfizLConvL2Ix(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
%{
  match(Set dst (LShiftI (ConvL2I (AndL src mask)) lshift));
  predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 31)) <= 31);

  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
  ins_encode %{
    int lshift = $lshift$$constant & 31;
    intptr_t mask = $mask$$constant;
    int width = exact_log2(mask+1);
    __ ubfiz(as_Register($dst$$reg),
             as_Register($src$$reg), lshift, width);
  %}
  ins_pipe(ialu_reg_shift);
%}
12485 
12486 // This pattern is automatically generated from aarch64_ad.m4
12487 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
12488 
12489 // Can skip int2long conversions after AND with small bitmask
// ConvI2L of (AndI with a contiguous mask) needs no separate extension:
// ubfiz with lshift 0 both masks and zero-extends in one instruction.
instruct ubfizIConvI2LAndI(iRegLNoSp dst, iRegI src, immI_bitmask msk)
%{
  match(Set dst (ConvI2L (AndI src msk)));
  ins_cost(INSN_COST);
  format %{ "ubfiz $dst, $src, 0, exact_log2($msk + 1) " %}
  ins_encode %{
    __ ubfiz(as_Register($dst$$reg), as_Register($src$$reg), 0, exact_log2($msk$$constant + 1));
  %}
  ins_pipe(ialu_reg_shift);
%}
12500 
12501 
12502 // Rotations
12503 
12504 // This pattern is automatically generated from aarch64_ad.m4
12505 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// OrL of complementary constant shifts (lshift + rshift == 64, enforced by
// the predicate) is a double-register extract: extr with the right-shift
// amount as the lsb index.
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12520 
12521 
12522 // This pattern is automatically generated from aarch64_ad.m4
12523 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit variant of extrOrL: OrI of complementary shifts (sum == 32)
// becomes extrw.
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12538 
12539 
12540 // This pattern is automatically generated from aarch64_ad.m4
12541 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AddL of complementary constant shifts: with disjoint bit ranges the add
// equals the or, so the same extr encoding applies (sum of shifts == 64).
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 63);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12556 
12557 
12558 // This pattern is automatically generated from aarch64_ad.m4
12559 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 32-bit variant of extrAddL: AddI of complementary shifts (sum == 32)
// becomes extrw.
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
  match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
  predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));

  ins_cost(INSN_COST);
  format %{ "extr $dst, $src1, $src2, #$rshift" %}

  ins_encode %{
    __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
            $rshift$$constant & 31);
  %}
  ins_pipe(ialu_reg_reg_extr);
%}
12574 
12575 // This pattern is automatically generated from aarch64_ad.m4
12576 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate-right by an immediate: implemented as extrw with src as both
// operands (the standard ROR-immediate alias); shift masked to 0..31.
instruct rorI_imm(iRegINoSp dst, iRegI src, immI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ extrw(as_Register($dst$$reg), as_Register($src$$reg), as_Register($src$$reg),
               $shift$$constant & 0x1f);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12590 
12591 // This pattern is automatically generated from aarch64_ad.m4
12592 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit rotate-right by an immediate via extr with src duplicated;
// shift masked to 0..63.
instruct rorL_imm(iRegLNoSp dst, iRegL src, immI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ extr(as_Register($dst$$reg), as_Register($src$$reg), as_Register($src$$reg),
               $shift$$constant & 0x3f);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12606 
12607 // This pattern is automatically generated from aarch64_ad.m4
12608 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate-right by a register amount: rorvw (variable rotate, 32-bit).
instruct rorI_reg(iRegINoSp dst, iRegI src, iRegI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg));
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12621 
12622 // This pattern is automatically generated from aarch64_ad.m4
12623 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit rotate-right by a register amount: rorv.
instruct rorL_reg(iRegLNoSp dst, iRegL src, iRegI shift)
%{
  match(Set dst (RotateRight src shift));

  ins_cost(INSN_COST);
  format %{ "ror    $dst, $src, $shift" %}

  ins_encode %{
     __ rorv(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg));
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12636 
12637 // This pattern is automatically generated from aarch64_ad.m4
12638 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Rotate-left by a register amount: negate the count into rscratch1 and
// rotate right (rol x, n == ror x, -n); clobbers rscratch1.
instruct rolI_reg(iRegINoSp dst, iRegI src, iRegI shift)
%{
  match(Set dst (RotateLeft src shift));

  ins_cost(INSN_COST);
  format %{ "rol    $dst, $src, $shift" %}

  ins_encode %{
     __ subw(rscratch1, zr, as_Register($shift$$reg));
     __ rorvw(as_Register($dst$$reg), as_Register($src$$reg), rscratch1);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12652 
12653 // This pattern is automatically generated from aarch64_ad.m4
12654 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// 64-bit rotate-left via negated count and rorv; clobbers rscratch1.
instruct rolL_reg(iRegLNoSp dst, iRegL src, iRegI shift)
%{
  match(Set dst (RotateLeft src shift));

  ins_cost(INSN_COST);
  format %{ "rol    $dst, $src, $shift" %}

  ins_encode %{
     __ subw(rscratch1, zr, as_Register($shift$$reg));
     __ rorv(as_Register($dst$$reg), as_Register($src$$reg), rscratch1);
  %}
  ins_pipe(ialu_reg_reg_vshift);
%}
12668 
12669 
12670 // Add/subtract (extended)
12671 
12672 // This pattern is automatically generated from aarch64_ad.m4
12673 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long add of a sign-extended int: single add with sxtw extended-register
// operand instead of a separate widening instruction.
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12686 
12687 // This pattern is automatically generated from aarch64_ad.m4
12688 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long subtract of a sign-extended int: single sub with sxtw operand.
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12701 
12702 // This pattern is automatically generated from aarch64_ad.m4
12703 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// The shift-pair sign-extension idiom (<<16 then >>16) on the addend is
// recognized as a short sign-extension: add with sxth operand.
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
12716 
12717 // This pattern is automatically generated from aarch64_ad.m4
12718 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Shift-pair (<<24 then signed >>24) on the addend is a byte sign-extension:
// add with sxtb operand.
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12731 
12732 // This pattern is automatically generated from aarch64_ad.m4
12733 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Shift-pair (<<24 then unsigned >>24) on the addend is a byte
// zero-extension: add with uxtb operand.
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12746 
12747 // This pattern is automatically generated from aarch64_ad.m4
12748 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long shift-pair (<<48 then signed >>48) on the addend is a short
// sign-extension: add with sxth operand.
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
12761 
12762 // This pattern is automatically generated from aarch64_ad.m4
12763 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long shift-pair (<<32 then signed >>32) on the addend is a word
// sign-extension: add with sxtw operand.
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12776 
12777 // This pattern is automatically generated from aarch64_ad.m4
12778 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long shift-pair (<<56 then signed >>56) on the addend is a byte
// sign-extension: add with sxtb operand.
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12791 
12792 // This pattern is automatically generated from aarch64_ad.m4
12793 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Long shift-pair (<<56 then unsigned >>56) on the addend is a byte
// zero-extension: add with uxtb operand.
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
  match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12806 
12807 // This pattern is automatically generated from aarch64_ad.m4
12808 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AND with 0xff on the addend is a byte zero-extension: addw with uxtb.
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12821 
12822 // This pattern is automatically generated from aarch64_ad.m4
12823 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// AND with 0xffff on the addend is a halfword zero-extension: addw with uxth.
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  match(Set dst (AddI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
12836 
12837 // This pattern is automatically generated from aarch64_ad.m4
12838 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  // dst = src1 + (src2 & 0xff) (64-bit): the AND with 255 becomes a uxtb
  // extended-register operand of ADD.
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12851 
12852 // This pattern is automatically generated from aarch64_ad.m4
12853 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  // dst = src1 + (src2 & 0xffff) (64-bit): the AND with 65535 becomes a uxth
  // extended-register operand of ADD.
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
12866 
12867 // This pattern is automatically generated from aarch64_ad.m4
12868 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  // dst = src1 + (src2 & 0xffffffff) (64-bit): the AND with 2^32-1 becomes a
  // uxtw extended-register operand of ADD.
  match(Set dst (AddL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12881 
12882 // This pattern is automatically generated from aarch64_ad.m4
12883 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
  // dst = src1 - (src2 & 0xff) (32-bit): the AND with 255 becomes a uxtb
  // extended-register operand of SUBW.
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12896 
12897 // This pattern is automatically generated from aarch64_ad.m4
12898 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
  // dst = src1 - (src2 & 0xffff) (32-bit): the AND with 65535 becomes a uxth
  // extended-register operand of SUBW.
  match(Set dst (SubI src1 (AndI src2 mask)));
  ins_cost(INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
12911 
12912 // This pattern is automatically generated from aarch64_ad.m4
12913 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
  // dst = src1 - (src2 & 0xff) (64-bit): the AND with 255 becomes a uxtb
  // extended-register operand of SUB.
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb);
   %}
  ins_pipe(ialu_reg_reg);
%}
12926 
12927 // This pattern is automatically generated from aarch64_ad.m4
12928 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
  // dst = src1 - (src2 & 0xffff) (64-bit): the AND with 65535 becomes a uxth
  // extended-register operand of SUB.
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth);
   %}
  ins_pipe(ialu_reg_reg);
%}
12941 
12942 // This pattern is automatically generated from aarch64_ad.m4
12943 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
  // dst = src1 - (src2 & 0xffffffff) (64-bit): the AND with 2^32-1 becomes a
  // uxtw extended-register operand of SUB.
  match(Set dst (SubL src1 (AndL src2 mask)));
  ins_cost(INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw);
   %}
  ins_pipe(ialu_reg_reg);
%}
12956 
12957 
12958 // This pattern is automatically generated from aarch64_ad.m4
12959 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  // (src2 << 56) >> 56 sign-extends the low byte (RShiftL is arithmetic);
  // the outer left shift by lshift2 (range bounded by immIExt) folds into the
  // extended-register form: ADD with sxtb and a shift amount.
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12972 
12973 // This pattern is automatically generated from aarch64_ad.m4
12974 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  // (src2 << 48) >> 48 sign-extends the low halfword; the outer shift folds
  // into ADD with sxth and a shift amount.
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
12987 
12988 // This pattern is automatically generated from aarch64_ad.m4
12989 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  // (src2 << 32) >> 32 sign-extends the low word; the outer shift folds
  // into ADD with sxtw and a shift amount.
  match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13002 
13003 // This pattern is automatically generated from aarch64_ad.m4
13004 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
  // Subtract form of AddExtL_sxtb_shift: dst = src1 - (sxtb(src2) << lshift2)
  // via SUB with an sxtb extended-register operand and shift amount.
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13017 
13018 // This pattern is automatically generated from aarch64_ad.m4
13019 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
  // dst = src1 - (sxth(src2) << lshift2) via SUB with an sxth
  // extended-register operand and shift amount.
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13032 
13033 // This pattern is automatically generated from aarch64_ad.m4
13034 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
  // dst = src1 - (sxtw(src2) << lshift2) via SUB with an sxtw
  // extended-register operand and shift amount.
  match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift2" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13047 
13048 // This pattern is automatically generated from aarch64_ad.m4
13049 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  // 32-bit variant: (src2 << 24) >> 24 sign-extends the low byte; the outer
  // shift folds into ADDW with sxtb and a shift amount.
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13062 
13063 // This pattern is automatically generated from aarch64_ad.m4
13064 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  // 32-bit variant: (src2 << 16) >> 16 sign-extends the low halfword; the
  // outer shift folds into ADDW with sxth and a shift amount.
  match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13077 
13078 // This pattern is automatically generated from aarch64_ad.m4
13079 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
  // dst = src1 - (sxtb(src2) << lshift2) (32-bit) via SUBW with sxtb and a
  // shift amount.
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxtb #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13092 
13093 // This pattern is automatically generated from aarch64_ad.m4
13094 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
  // dst = src1 - (sxth(src2) << lshift2) (32-bit) via SUBW with sxth and a
  // shift amount.
  match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, sxth #lshift2" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13107 
13108 // This pattern is automatically generated from aarch64_ad.m4
13109 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  // dst = src1 + ((long)src2 << lshift): the ConvI2L becomes an sxtw
  // extended-register operand of ADD with a shift amount.
  match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13122 
13123 // This pattern is automatically generated from aarch64_ad.m4
13124 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
  // dst = src1 - ((long)src2 << lshift): the ConvI2L becomes an sxtw
  // extended-register operand of SUB with a shift amount.
  match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, sxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13137 
13138 // This pattern is automatically generated from aarch64_ad.m4
13139 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  // dst = src1 + ((src2 & 0xff) << lshift): mask-then-shift becomes ADD with
  // uxtb and a shift amount.
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13152 
13153 // This pattern is automatically generated from aarch64_ad.m4
13154 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  // dst = src1 + ((src2 & 0xffff) << lshift): mask-then-shift becomes ADD
  // with uxth and a shift amount.
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13167 
13168 // This pattern is automatically generated from aarch64_ad.m4
13169 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  // dst = src1 + ((src2 & 0xffffffff) << lshift): mask-then-shift becomes ADD
  // with uxtw and a shift amount.
  match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "add  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ add(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13182 
13183 // This pattern is automatically generated from aarch64_ad.m4
13184 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
  // dst = src1 - ((src2 & 0xff) << lshift): mask-then-shift becomes SUB with
  // uxtb and a shift amount.
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13197 
13198 // This pattern is automatically generated from aarch64_ad.m4
13199 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  // dst = src1 - ((src2 & 0xffff) << lshift): mask-then-shift becomes SUB
  // with uxth and a shift amount.
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13212 
13213 // This pattern is automatically generated from aarch64_ad.m4
13214 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
  // dst = src1 - ((src2 & 0xffffffff) << lshift): mask-then-shift becomes SUB
  // with uxtw and a shift amount.
  match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "sub  $dst, $src1, $src2, uxtw #lshift" %}

   ins_encode %{
     __ sub(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13227 
13228 // This pattern is automatically generated from aarch64_ad.m4
13229 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  // dst = src1 + ((src2 & 0xff) << lshift) (32-bit): mask-then-shift becomes
  // ADDW with uxtb and a shift amount.
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13242 
13243 // This pattern is automatically generated from aarch64_ad.m4
13244 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  // dst = src1 + ((src2 & 0xffff) << lshift) (32-bit): mask-then-shift
  // becomes ADDW with uxth and a shift amount.
  match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "addw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ addw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13257 
13258 // This pattern is automatically generated from aarch64_ad.m4
13259 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
  // dst = src1 - ((src2 & 0xff) << lshift) (32-bit): mask-then-shift becomes
  // SUBW with uxtb and a shift amount.
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxtb #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13272 
13273 // This pattern is automatically generated from aarch64_ad.m4
13274 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
  // dst = src1 - ((src2 & 0xffff) << lshift) (32-bit): mask-then-shift
  // becomes SUBW with uxth and a shift amount.
  match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
  ins_cost(1.9 * INSN_COST);
  format %{ "subw  $dst, $src1, $src2, uxth #lshift" %}

   ins_encode %{
     __ subw(as_Register($dst$$reg), as_Register($src1$$reg),
            as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
   %}
  ins_pipe(ialu_reg_reg_shift);
%}
13287 
13288 // This pattern is automatically generated from aarch64_ad.m4
13289 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct cmovI_reg_reg_lt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  // Expand-only building block (no match rule; used by min/max expands
  // below): dst = LT(cr) ? src1 : src2 via CSELW.
  effect(DEF dst, USE src1, USE src2, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 lt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             $src2$$Register,
             Assembler::LT);
  %}
  ins_pipe(icond_reg_reg);
%}
13304 
13305 // This pattern is automatically generated from aarch64_ad.m4
13306 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct cmovI_reg_reg_gt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
  // Expand-only building block (no match rule): dst = GT(cr) ? src1 : src2
  // via CSELW.
  effect(DEF dst, USE src1, USE src2, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, $src2 gt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             $src2$$Register,
             Assembler::GT);
  %}
  ins_pipe(icond_reg_reg);
%}
13321 
13322 // This pattern is automatically generated from aarch64_ad.m4
13323 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct cmovI_reg_imm0_lt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  // Expand-only building block: dst = LT(cr) ? src1 : 0 via CSELW against
  // the zero register.
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, zr lt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LT);
  %}
  ins_pipe(icond_reg);
%}
13338 
13339 // This pattern is automatically generated from aarch64_ad.m4
13340 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct cmovI_reg_imm0_gt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  // Expand-only building block: dst = GT(cr) ? src1 : 0 via CSELW against
  // the zero register.
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "cselw $dst, $src1, zr gt\t"  %}

  ins_encode %{
    __ cselw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GT);
  %}
  ins_pipe(icond_reg);
%}
13355 
13356 // This pattern is automatically generated from aarch64_ad.m4
13357 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct cmovI_reg_imm1_le(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  // Expand-only building block: dst = LE(cr) ? src1 : 1, using CSINCW whose
  // else-operand is zr + 1.
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, $src1, zr le\t"  %}

  ins_encode %{
    __ csincw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LE);
  %}
  ins_pipe(icond_reg);
%}
13372 
13373 // This pattern is automatically generated from aarch64_ad.m4
13374 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct cmovI_reg_imm1_gt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  // Expand-only building block: dst = GT(cr) ? src1 : 1, using CSINCW whose
  // else-operand is zr + 1.
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csincw $dst, $src1, zr gt\t"  %}

  ins_encode %{
    __ csincw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GT);
  %}
  ins_pipe(icond_reg);
%}
13389 
13390 // This pattern is automatically generated from aarch64_ad.m4
13391 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct cmovI_reg_immM1_lt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  // Expand-only building block: dst = LT(cr) ? src1 : -1, using CSINVW whose
  // else-operand is ~zr.
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csinvw $dst, $src1, zr lt\t"  %}

  ins_encode %{
    __ csinvw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::LT);
  %}
  ins_pipe(icond_reg);
%}
13406 
13407 // This pattern is automatically generated from aarch64_ad.m4
13408 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct cmovI_reg_immM1_ge(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
  // Expand-only building block: dst = GE(cr) ? src1 : -1, using CSINVW whose
  // else-operand is ~zr.
  effect(DEF dst, USE src1, USE cr);
  ins_cost(INSN_COST * 2);
  format %{ "csinvw $dst, $src1, zr ge\t"  %}

  ins_encode %{
    __ csinvw($dst$$Register,
             $src1$$Register,
             zr,
             Assembler::GE);
  %}
  ins_pipe(icond_reg);
%}
13423 
13424 // This pattern is automatically generated from aarch64_ad.m4
13425 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct minI_reg_imm0(iRegINoSp dst, iRegIorL2I src, immI0 imm)
%{
  // min(src, 0): set flags from src vs 0 (compI_reg_imm0, defined elsewhere),
  // then select src when lt, else 0.
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_lt(dst, src, cr);
  %}
%}
13436 
13437 // This pattern is automatically generated from aarch64_ad.m4
13438 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct minI_imm0_reg(iRegINoSp dst, immI0 imm, iRegIorL2I src)
%{
  // Commuted operand order of minI_reg_imm0: min(0, src), same expansion.
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_lt(dst, src, cr);
  %}
%}
13449 
13450 // This pattern is automatically generated from aarch64_ad.m4
13451 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct minI_reg_imm1(iRegINoSp dst, iRegIorL2I src, immI_1 imm)
%{
  // min(src, 1): compare src with 0; csincw/le yields src when src <= 0
  // (then src <= 0 < 1) and 1 otherwise (then src >= 1).
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_le(dst, src, cr);
  %}
%}
13462 
13463 // This pattern is automatically generated from aarch64_ad.m4
13464 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct minI_imm1_reg(iRegINoSp dst, immI_1 imm, iRegIorL2I src)
%{
  // Commuted operand order of minI_reg_imm1: min(1, src), same expansion.
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_le(dst, src, cr);
  %}
%}
13475 
13476 // This pattern is automatically generated from aarch64_ad.m4
13477 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct minI_reg_immM1(iRegINoSp dst, iRegIorL2I src, immI_M1 imm)
%{
  // min(src, -1): compare src with 0; csinvw/lt yields src when src < 0
  // (correct: for src < 0, min(src, -1) == src iff src <= -1, which holds
  // for all integer src < 0) and -1 otherwise.
  match(Set dst (MinI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_lt(dst, src, cr);
  %}
%}
13488 
13489 // This pattern is automatically generated from aarch64_ad.m4
13490 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct minI_immM1_reg(iRegINoSp dst, immI_M1 imm, iRegIorL2I src)
%{
  // Commuted operand order of minI_reg_immM1: min(-1, src), same expansion.
  match(Set dst (MinI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_lt(dst, src, cr);
  %}
%}
13501 
13502 // This pattern is automatically generated from aarch64_ad.m4
13503 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct maxI_reg_imm0(iRegINoSp dst, iRegIorL2I src, immI0 imm)
%{
  // max(src, 0): set flags from src vs 0, then select src when gt, else 0.
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_gt(dst, src, cr);
  %}
%}
13514 
13515 // This pattern is automatically generated from aarch64_ad.m4
13516 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct maxI_imm0_reg(iRegINoSp dst, immI0 imm, iRegIorL2I src)
%{
  // Commuted operand order of maxI_reg_imm0: max(0, src), same expansion.
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm0_gt(dst, src, cr);
  %}
%}
13527 
13528 // This pattern is automatically generated from aarch64_ad.m4
13529 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct maxI_reg_imm1(iRegINoSp dst, iRegIorL2I src, immI_1 imm)
%{
  // max(src, 1): compare src with 0; csincw/gt yields src when src > 0
  // (then src >= 1) and 1 otherwise (then src <= 0 < 1).
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_gt(dst, src, cr);
  %}
%}
13540 
13541 // This pattern is automatically generated from aarch64_ad.m4
13542 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct maxI_imm1_reg(iRegINoSp dst, immI_1 imm, iRegIorL2I src)
%{
  // Commuted operand order of maxI_reg_imm1: max(1, src), same expansion.
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_imm1_gt(dst, src, cr);
  %}
%}
13553 
13554 // This pattern is automatically generated from aarch64_ad.m4
13555 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct maxI_reg_immM1(iRegINoSp dst, iRegIorL2I src, immI_M1 imm)
%{
  // max(src, -1): compare src with 0; csinvw/ge yields src when src >= 0
  // (then src > -1) and -1 otherwise (for all integer src < 0,
  // max(src, -1) == -1).
  match(Set dst (MaxI src imm));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_ge(dst, src, cr);
  %}
%}
13566 
13567 // This pattern is automatically generated from aarch64_ad.m4
13568 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct maxI_immM1_reg(iRegINoSp dst, immI_M1 imm, iRegIorL2I src)
%{
  // Commuted operand order of maxI_reg_immM1: max(-1, src), same expansion.
  match(Set dst (MaxI imm src));
  ins_cost(INSN_COST * 3);
  expand %{
    rFlagsReg cr;
    compI_reg_imm0(cr, src);
    cmovI_reg_immM1_ge(dst, src, cr);
  %}
%}
13579 
13580 // This pattern is automatically generated from aarch64_ad.m4
13581 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct bits_reverse_I(iRegINoSp dst, iRegIorL2I src)
%{
  // Reverse the bit order of a 32-bit value with a single RBITW.
  match(Set dst (ReverseI src));
  ins_cost(INSN_COST);
  format %{ "rbitw  $dst, $src" %}
  ins_encode %{
    __ rbitw($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
13592 
13593 // This pattern is automatically generated from aarch64_ad.m4
13594 // DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct bits_reverse_L(iRegLNoSp dst, iRegL src)
%{
  // Reverse the bit order of a 64-bit value with a single RBIT.
  match(Set dst (ReverseL src));
  ins_cost(INSN_COST);
  format %{ "rbit  $dst, $src" %}
  ins_encode %{
    __ rbit($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_reg);
%}
13605 
13606 
13607 // END This section of the file is automatically generated. Do not edit --------------
13608 
13609 
13610 // ============================================================================
13611 // Floating Point Arithmetic Instructions
13612 
// Single-precision float add: dst = src1 + src2 via FADDS.
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AddF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fadds   $dst, $src1, $src2" %}

  ins_encode %{
    __ fadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
13627 
// Double-precision float add: dst = src1 + src2 via FADDD.
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AddD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "faddd   $dst, $src1, $src2" %}

  ins_encode %{
    __ faddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13642 
// Single-precision float subtract: dst = src1 - src2 via FSUBS.
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (SubF src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
13657 
// Double-precision float subtract: dst = src1 - src2 via FSUBD.
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (SubD src1 src2));

  ins_cost(INSN_COST * 5);
  format %{ "fsubd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fsubd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13672 
// Single-precision FP multiply: dst = src1 * src2 (emits FMULS).
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MulF src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuls   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuls(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
13687 
// Double-precision FP multiply: dst = src1 * src2 (emits FMULD).
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MulD src1 src2));

  ins_cost(INSN_COST * 6);
  format %{ "fmuld   $dst, $src1, $src2" %}

  ins_encode %{
    __ fmuld(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13702 
// src1 * src2 + src3
// Fused multiply-add, single precision (FMADDS). The encoder asserts
// UseFMA; FmaF nodes are presumably only created when FMA is enabled.
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "fmadds   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmadds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13719 
// src1 * src2 + src3
// Fused multiply-add, double precision (FMADDD); requires UseFMA.
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "fmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmaddd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg),
             as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13736 
// src1 * (-src2) + src3
// "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
// FMSUBS computes src3 - src1*src2, which equals src1*(-src2) + src3.
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "fmsubs   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmsubs(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13754 
// src1 * (-src2) + src3
// "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
// FMSUBD computes src3 - src1*src2, which equals src1*(-src2) + src3.
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "fmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmsubd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13772 
// src1 * (-src2) - src3
// "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
// FNMADDS computes -(src1*src2) - src3, i.e. src1*(-src2) - src3.
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "fnmadds  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmadds(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13790 
// src1 * (-src2) - src3
// "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
// FNMADDD computes -(src1*src2) - src3, i.e. src1*(-src2) - src3.
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "fnmaddd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmaddd(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13808 
// src1 * src2 - src3
// FNMSUBS computes src1*src2 - src3, matching the negated-src3 FmaF shape.
// The former "immF0 zero" operand was dead (never referenced by the match
// rule, format, encoding, or effects) and has been removed.
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "fnmsubs  $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmsubs(as_FloatRegister($dst$$reg),
               as_FloatRegister($src1$$reg),
               as_FloatRegister($src2$$reg),
               as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13825 
// src1 * src2 - src3
// Double-precision counterpart of mnsubF_reg_reg. The former "immD0 zero"
// operand was dead (never referenced anywhere in the rule) and was removed.
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "fnmsubd   $dst, $src1, $src2, $src3" %}

  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    // n.b. insn name should be fnmsubd
    __ fnmsub(as_FloatRegister($dst$$reg),
              as_FloatRegister($src1$$reg),
              as_FloatRegister($src2$$reg),
              as_FloatRegister($src3$$reg));
  %}

  ins_pipe(pipe_class_default);
%}
13843 
13844 
// Math.max(FF)F
// dst = max(src1, src2) via FMAXS.
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MaxF src1 src2));

  format %{ "fmaxs   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
13858 
// Math.min(FF)F
// dst = min(src1, src2) via FMINS.
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (MinF src1 src2));

  format %{ "fmins   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmins(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_s);
%}
13872 
// Math.max(DD)D
// dst = max(src1, src2) via FMAXD.
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MaxD src1 src2));

  format %{ "fmaxd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmaxd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13886 
// Math.min(DD)D
// dst = min(src1, src2) via FMIND.
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (MinD src1 src2));

  format %{ "fmind   $dst, $src1, $src2" %}
  ins_encode %{
    __ fmind(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_dop_reg_reg_d);
%}
13900 
13901 
// Single-precision FP divide: dst = src1 / src2 (FDIVS). High cost
// reflects the multi-cycle, non-pipelined divide unit.
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (DivF src1  src2));

  ins_cost(INSN_COST * 18);
  format %{ "fdivs   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_s);
%}
13916 
// Double-precision FP divide: dst = src1 / src2 (FDIVD). Costed higher
// than the single-precision form (longer divide latency).
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (DivD src1  src2));

  ins_cost(INSN_COST * 32);
  format %{ "fdivd   $dst, $src1, $src2" %}

  ins_encode %{
    __ fdivd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_div_d);
%}
13931 
// Single-precision FP negate: dst = -src (FNEGS).
instruct negF_reg_reg(vRegF dst, vRegF src) %{
  match(Set dst (NegF src));

  ins_cost(INSN_COST * 3);
  // Print the exact mnemonic that is emitted (was "fneg"; the encoder
  // emits FNEGS, and the sibling negD rule already prints "fnegd").
  format %{ "fnegs   $dst, $src" %}

  ins_encode %{
    __ fnegs(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
13945 
// Double-precision FP negate: dst = -src (FNEGD).
instruct negD_reg_reg(vRegD dst, vRegD src) %{
  match(Set dst (NegD src));

  ins_cost(INSN_COST * 3);
  format %{ "fnegd   $dst, $src" %}

  ins_encode %{
    __ fnegd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
13959 
// Integer absolute value: compare against zero, then conditionally negate
// when negative (LT). Clobbers the flags, hence KILL cr. Note that
// abs(Integer.MIN_VALUE) wraps to MIN_VALUE, matching Java semantics.
instruct absI_reg(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
  match(Set dst (AbsI src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $src, zr\n\t"
            "cnegw $dst, $src, Assembler::LT\t# int abs"
  %}

  ins_encode %{
    __ cmpw(as_Register($src$$reg), zr);
    __ cnegw(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
13976 
// Long absolute value: compare against zero, then conditionally negate
// when negative (LT). Clobbers the flags, hence KILL cr. Note that
// abs(Long.MIN_VALUE) wraps to MIN_VALUE, matching Java semantics.
instruct absL_reg(iRegLNoSp dst, iRegL src, rFlagsReg cr)
%{
  match(Set dst (AbsL src));

  effect(KILL cr);
  ins_cost(INSN_COST * 2);
  format %{ "cmp  $src, zr\n\t"
            "cneg $dst, $src, Assembler::LT\t# long abs"
  %}

  ins_encode %{
    __ cmp(as_Register($src$$reg), zr);
    __ cneg(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
  %}
  ins_pipe(pipe_class_default);
%}
13993 
// Single-precision FP absolute value: dst = |src| (FABSS clears the sign bit).
instruct absF_reg(vRegF dst, vRegF src) %{
  match(Set dst (AbsF src));

  ins_cost(INSN_COST * 3);
  format %{ "fabss   $dst, $src" %}
  ins_encode %{
    __ fabss(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
14006 
// Double-precision FP absolute value: dst = |src| (FABSD clears the sign bit).
instruct absD_reg(vRegD dst, vRegD src) %{
  match(Set dst (AbsD src));

  ins_cost(INSN_COST * 3);
  format %{ "fabsd   $dst, $src" %}
  ins_encode %{
    __ fabsd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
14019 
// Fused absolute difference: dst = |src1 - src2| in a single FABDS,
// matched from the AbsF(SubF ...) subtree.
instruct absdF_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (AbsF (SubF src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "fabds   $dst, $src1, $src2" %}
  ins_encode %{
    __ fabds(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_uop_s);
%}
14033 
// Fused absolute difference: dst = |src1 - src2| in a single FABDD,
// matched from the AbsD(SubD ...) subtree.
instruct absdD_reg(vRegD dst, vRegD src1, vRegD src2) %{
  match(Set dst (AbsD (SubD src1 src2)));

  ins_cost(INSN_COST * 3);
  format %{ "fabdd   $dst, $src1, $src2" %}
  ins_encode %{
    __ fabdd(as_FloatRegister($dst$$reg),
             as_FloatRegister($src1$$reg),
             as_FloatRegister($src2$$reg));
  %}

  ins_pipe(fp_uop_d);
%}
14047 
// Double-precision square root: dst = sqrt(src) (FSQRTD).
instruct sqrtD_reg(vRegD dst, vRegD src) %{
  match(Set dst (SqrtD src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrtd  $dst, $src" %}
  ins_encode %{
    __ fsqrtd(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  // Schedule on the double-precision divide/sqrt pipe; this previously
  // named the single-precision pipe (fp_div_s), which misinformed the
  // scheduler's latency model.
  ins_pipe(fp_div_d);
%}
14060 
// Single-precision square root: dst = sqrt(src) (FSQRTS).
instruct sqrtF_reg(vRegF dst, vRegF src) %{
  match(Set dst (SqrtF src));

  ins_cost(INSN_COST * 50);
  format %{ "fsqrts  $dst, $src" %}
  ins_encode %{
    __ fsqrts(as_FloatRegister($dst$$reg),
              as_FloatRegister($src$$reg));
  %}

  // Schedule on the single-precision divide/sqrt pipe; this previously
  // named the double-precision pipe (fp_div_d) — swapped with sqrtD_reg.
  ins_pipe(fp_div_s);
%}
14073 
// Math.rint, floor, ceil
// Round a double per the RoundDoubleMode constant:
//   rmode_rint  -> FRINTN (round to nearest, ties to even)
//   rmode_floor -> FRINTM (round toward minus infinity)
//   rmode_ceil  -> FRINTP (round toward plus infinity)
instruct roundD_reg(vRegD dst, vRegD src, immI rmode) %{
  match(Set dst (RoundDoubleMode src rmode));
  format %{ "frint  $dst, $src, $rmode" %}
  ins_encode %{
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        __ frintnd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_floor:
        __ frintmd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      case RoundDoubleModeNode::rmode_ceil:
        __ frintpd(as_FloatRegister($dst$$reg),
                   as_FloatRegister($src$$reg));
        break;
      default:
        // Any other mode constant is a compiler bug: previously this fell
        // through silently and emitted no instruction at all.
        ShouldNotReachHere();
    }
  %}
  ins_pipe(fp_uop_d);
%}
14096 
// CopySignD: dst gets src1's magnitude with src2's sign.
instruct copySignD_reg(vRegD dst, vRegD src1, vRegD src2, vRegD zero) %{
  match(Set dst (CopySignD src1 (Binary src2 zero)));
  effect(TEMP_DEF dst, USE src1, USE src2, USE zero);
  format %{ "CopySignD  $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg),
                  zero = as_FloatRegister($zero$$reg);
    // Negating +0.0 gives -0.0, i.e. a sign-bit-only mask in dst.
    // (Assumes $zero holds +0.0, supplied by the match rule's zero input
    //  -- TODO confirm against the matcher.)
    __ fnegd(dst, zero);
    // Bit-select: take the sign bit from src2, all other bits from src1.
    __ bsl(dst, __ T8B, src2, src1);
  %}
  ins_pipe(fp_uop_d);
%}
14111 
// CopySignF: dst gets src1's magnitude with src2's sign.
instruct copySignF_reg(vRegF dst, vRegF src1, vRegF src2) %{
  match(Set dst (CopySignF src1 src2));
  effect(TEMP_DEF dst, USE src1, USE src2);
  format %{ "CopySignF  $dst $src1 $src2" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg),
                  src1 = as_FloatRegister($src1$$reg),
                  src2 = as_FloatRegister($src2$$reg);
    // Build the 0x80000000 sign-bit mask directly: 0x80 shifted left 24.
    __ movi(dst, __ T2S, 0x80, 24);
    // Bit-select: take the sign bit from src2, all other bits from src1.
    __ bsl(dst, __ T8B, src2, src1);
  %}
  ins_pipe(fp_uop_d);
%}
14125 
// Math.signum(D)D: passes ±0.0 and NaN through unchanged, otherwise
// produces ±1.0 carrying src's sign ("one" supplies the 1.0 magnitude).
instruct signumD_reg(vRegD dst, vRegD src, vRegD zero, vRegD one) %{
  match(Set dst (SignumD src (Binary zero one)));
  effect(TEMP_DEF dst, USE src, USE zero, USE one);
  format %{ "signumD  $dst, $src" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg),
                  dst = as_FloatRegister($dst$$reg),
                  zero = as_FloatRegister($zero$$reg),
                  one = as_FloatRegister($one$$reg);
    __ facgtd(dst, src, zero); // dst=0 for +-0.0 and NaN. 0xFFF..F otherwise
    __ ushrd(dst, dst, 1);     // dst=0 for +-0.0 and NaN. 0x7FF..F otherwise
    // Bit selection instruction gets bit from "one" for each enabled bit in
    // "dst", otherwise gets a bit from "src". For "src" that contains +-0.0 or
    // NaN the whole "src" will be copied because "dst" is zero. For all other
    // "src" values dst is 0x7FF..F, which means only the sign bit is copied
    // from "src", and all other bits are copied from 1.0.
    __ bsl(dst, __ T8B, one, src);
  %}
  ins_pipe(fp_uop_d);
%}
14146 
// Math.signum(F)F: passes ±0.0 and NaN through unchanged, otherwise
// produces ±1.0 carrying src's sign ("one" supplies the 1.0 magnitude).
instruct signumF_reg(vRegF dst, vRegF src, vRegF zero, vRegF one) %{
  match(Set dst (SignumF src (Binary zero one)));
  effect(TEMP_DEF dst, USE src, USE zero, USE one);
  format %{ "signumF  $dst, $src" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg),
                  dst = as_FloatRegister($dst$$reg),
                  zero = as_FloatRegister($zero$$reg),
                  one = as_FloatRegister($one$$reg);
    __ facgts(dst, src, zero);    // dst=0 for +-0.0 and NaN. 0xFFF..F otherwise
    __ ushr(dst, __ T2S, dst, 1); // dst=0 for +-0.0 and NaN. 0x7FF..F otherwise
    // Bit selection instruction gets bit from "one" for each enabled bit in
    // "dst", otherwise gets a bit from "src". For "src" that contains +-0.0 or
    // NaN the whole "src" will be copied because "dst" is zero. For all other
    // "src" values dst is 0x7FF..F, which means only the sign bit is copied
    // from "src", and all other bits are copied from 1.0.
    __ bsl(dst, __ T8B, one, src);
  %}
  ins_pipe(fp_uop_d);
%}
14167 
// Spin-wait hint (Thread.onSpinWait intrinsic); delegates to the
// MacroAssembler spin_wait() helper to emit the configured hint sequence.
instruct onspinwait() %{
  match(OnSpinWait);
  ins_cost(INSN_COST);

  format %{ "onspinwait" %}

  ins_encode %{
    __ spin_wait();
  %}
  ins_pipe(pipe_class_empty);
%}
14179 
14180 // ============================================================================
14181 // Logical Instructions
14182 
14183 // Integer Logical Instructions
14184 
14185 // And Instructions
14186 
14187 
// 32-bit AND, register form: dst = src1 & src2 (ANDW).
// NOTE(review): the cr operand appears unused (no effect/KILL declared)
// -- confirm whether it can be dropped.
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14202 
// 32-bit AND with a logical immediate: dst = src1 & src2 (ANDW).
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
  match(Set dst (AndI src1 src2));

  // The encoding emits the non-flag-setting ANDW, so print "andw"
  // (previously the format misleadingly said "andsw").
  format %{ "andw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14217 
14218 // Or Instructions
14219 
// 32-bit OR, register form: dst = src1 | src2 (ORRW).
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14234 
// 32-bit OR with a logical immediate: dst = src1 | src2 (ORRW).
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "orrw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orrw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14249 
14250 // Xor Instructions
14251 
// 32-bit XOR, register form: dst = src1 ^ src2 (EORW).
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14266 
// 32-bit XOR with a logical immediate: dst = src1 ^ src2 (EORW).
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "eorw  $dst, $src1, $src2\t# int" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eorw(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14281 
14282 // Long Logical Instructions
14283 // TODO
14284 
// 64-bit AND, register form: dst = src1 & src2 (AND).
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Format tag corrected from "# int" -- this is the long (64-bit) rule.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14299 
// 64-bit AND with a logical immediate: dst = src1 & src2 (AND).
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
  match(Set dst (AndL src1 src2));

  // Format tag corrected from "# int" -- this is the long (64-bit) rule.
  format %{ "and  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ andr(as_Register($dst$$reg),
            as_Register($src1$$reg),
            (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14314 
14315 // Or Instructions
14316 
// 64-bit OR, register form: dst = src1 | src2 (ORR).
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (OrL src1 src2));

  // Format tag corrected from "# int" -- this is the long (64-bit) rule.
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14331 
// 64-bit OR with a logical immediate: dst = src1 | src2 (ORR).
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (OrL src1 src2));

  // Format tag corrected from "# int" -- this is the long (64-bit) rule.
  format %{ "orr  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ orr(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14346 
14347 // Xor Instructions
14348 
// 64-bit XOR, register form: dst = src1 ^ src2 (EOR).
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
  match(Set dst (XorL src1 src2));

  // Format tag corrected from "# int" -- this is the long (64-bit) rule.
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_cost(INSN_COST);
  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           as_Register($src2$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}
14363 
// 64-bit XOR with a logical immediate: dst = src1 ^ src2 (EOR).
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
  match(Set dst (XorL src1 src2));

  ins_cost(INSN_COST);
  // Format tag corrected from "# int" -- this is the long (64-bit) rule.
  format %{ "eor  $dst, $src1, $src2\t# long" %}

  ins_encode %{
    __ eor(as_Register($dst$$reg),
           as_Register($src1$$reg),
           (uint64_t)($src2$$constant));
  %}

  ins_pipe(ialu_reg_imm);
%}
14378 
// Sign-extend int to long: SBFM with bit range 0..31 is the SXTW alias.
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
  match(Set dst (ConvI2L src));

  ins_cost(INSN_COST);
  format %{ "sxtw  $dst, $src\t# i2l" %}
  ins_encode %{
    __ sbfm($dst$$Register, $src$$Register, 0, 31);
  %}
  ins_pipe(ialu_reg_shift);
%}
14390 
// this pattern occurs in bigmath arithmetic
// Zero-extend int to long: UBFM 0..31 clears the upper 32 bits, so the
// AndL with the 32-bit mask is folded into a single instruction.
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  ins_cost(INSN_COST);
  format %{ "ubfm  $dst, $src, 0, 31\t# ui2l" %}
  ins_encode %{
    __ ubfm($dst$$Register, $src$$Register, 0, 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
14404 
// Long to int: a 32-bit register move keeps the low word (the w-form
// write implicitly zeroes the upper 32 bits).
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
  match(Set dst (ConvL2I src));

  ins_cost(INSN_COST);
  format %{ "movw  $dst, $src \t// l2i" %}

  ins_encode %{
    __ movw(as_Register($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(ialu_reg);
%}
14417 
// Narrow double to float via FCVT (double-source form).
instruct convD2F_reg(vRegF dst, vRegD src) %{
  match(Set dst (ConvD2F src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtd  $dst, $src \t// d2f" %}

  ins_encode %{
    __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2f);
%}
14430 
// Widen float to double via FCVT (single-source form).
instruct convF2D_reg(vRegD dst, vRegF src) %{
  match(Set dst (ConvF2D src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvts  $dst, $src \t// f2d" %}

  ins_encode %{
    __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2d);
%}
14443 
// Float to int: FCVTZS (w-register form) -- signed convert with
// round-toward-zero, saturating at int range, as Java (JLS 5.1.3) requires.
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
  match(Set dst (ConvF2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzsw  $dst, $src \t// f2i" %}

  ins_encode %{
    __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);
%}
14456 
// Float to long: FCVTZS (x-register form) -- signed convert with
// round-toward-zero, saturating at long range.
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
  match(Set dst (ConvF2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzs  $dst, $src \t// f2l" %}

  ins_encode %{
    __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2l);
%}
14469 
// Single to half precision; tmp holds the intermediate half-precision
// value before it is moved to the integer result register (see the
// flt_to_flt16 macro for the emitted sequence).
instruct convF2HF_reg_reg(iRegINoSp dst, vRegF src, vRegF tmp) %{
  match(Set dst (ConvF2HF src));
  format %{ "fcvt $tmp, $src\t# convert single to half precision\n\t"
            "smov $dst, $tmp\t# move result from $tmp to $dst"
  %}
  effect(TEMP tmp);
  ins_encode %{
      __ flt_to_flt16($dst$$Register, $src$$FloatRegister, $tmp$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
14481 
// Half to single precision; tmp receives the half-precision bits from the
// integer source before conversion (see the flt16_to_flt macro).
instruct convHF2F_reg_reg(vRegF dst, iRegINoSp src, vRegF tmp) %{
  match(Set dst (ConvHF2F src));
  format %{ "mov $tmp, $src\t# move source from $src to $tmp\n\t"
            "fcvt $dst, $tmp\t# convert half to single precision"
  %}
  effect(TEMP tmp);
  ins_encode %{
      __ flt16_to_flt($dst$$FloatRegister, $src$$Register, $tmp$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
14493 
// int to float: SCVTF (w-register source), signed convert.
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
  match(Set dst (ConvI2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfws  $dst, $src \t// i2f" %}

  ins_encode %{
    __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2f);
%}
14506 
// long to float: SCVTF (x-register source), signed convert.
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
  match(Set dst (ConvL2F src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfs  $dst, $src \t// l2f" %}

  ins_encode %{
    __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2f);
%}
14519 
// double to int: FCVTZS (w-register form), round toward zero, saturating.
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
  match(Set dst (ConvD2I src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzdw  $dst, $src \t// d2i" %}

  ins_encode %{
    __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2i);
%}
14532 
// double to long: FCVTZS (x-register form), round toward zero, saturating.
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
  match(Set dst (ConvD2L src));

  ins_cost(INSN_COST * 5);
  format %{ "fcvtzd  $dst, $src \t// d2l" %}

  ins_encode %{
    __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);
%}
14545 
// int to double: SCVTF (w-register source), signed convert; exact,
// since every int is representable as a double.
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
  match(Set dst (ConvI2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfwd  $dst, $src \t// i2d" %}

  ins_encode %{
    __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_i2d);
%}
14558 
// long to double: SCVTF (x-register source), signed convert.
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
  match(Set dst (ConvL2D src));

  ins_cost(INSN_COST * 5);
  format %{ "scvtfd  $dst, $src \t// l2d" %}

  ins_encode %{
    __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
  %}

  ins_pipe(fp_l2d);
%}
14571 
// Math.round on double (RoundD): delegates to the java_round_double macro
// routine; ftmp is FP scratch and the flags are clobbered (KILL cr).
instruct round_double_reg(iRegLNoSp dst, vRegD src, vRegD ftmp, rFlagsReg cr)
%{
  match(Set dst (RoundD src));
  effect(TEMP_DEF dst, TEMP ftmp, KILL cr);
  format %{ "java_round_double $dst,$src"%}
  ins_encode %{
    __ java_round_double($dst$$Register, as_FloatRegister($src$$reg),
                         as_FloatRegister($ftmp$$reg));
  %}
  ins_pipe(pipe_slow);
%}
14583 
// Math.round on float (RoundF): delegates to the java_round_float macro
// routine; ftmp is FP scratch and the flags are clobbered (KILL cr).
instruct round_float_reg(iRegINoSp dst, vRegF src, vRegF ftmp, rFlagsReg cr)
%{
  match(Set dst (RoundF src));
  effect(TEMP_DEF dst, TEMP ftmp, KILL cr);
  format %{ "java_round_float $dst,$src"%}
  ins_encode %{
    __ java_round_float($dst$$Register, as_FloatRegister($src$$reg),
                        as_FloatRegister($ftmp$$reg));
  %}
  ins_pipe(pipe_slow);
%}
14595 
14596 // stack <-> reg and reg <-> reg shuffles with no conversion
14597 
// Reinterpret float bits on the stack as an int: plain 32-bit integer
// load from the source stack slot; no value conversion is performed.
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}

  ins_encode %{
    __ ldrw($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}
14615 
// Reinterpret int bits on the stack as a float: 32-bit FP load from the
// source stack slot; the bit pattern is preserved unchanged.
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}

  ins_encode %{
    __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14633 
// Reinterpret double bits on the stack as a long: 64-bit integer load
// from the source stack slot; no value conversion is performed.
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}

  ins_encode %{
    __ ldr($dst$$Register, Address(sp, $src$$disp));
  %}

  ins_pipe(iload_reg_reg);

%}
14651 
// Reinterpret long bits on the stack as a double: 64-bit FP load from the
// source stack slot; the bit pattern is preserved unchanged.
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(4 * INSN_COST);

  format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}

  ins_encode %{
    __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14669 
// Spill a float's raw bits to an int stack slot: 32-bit FP store; the
// value is reinterpreted, not converted.
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}

  ins_encode %{
    __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14687 
// Spill an int's raw bits to a float stack slot: 32-bit integer store;
// the value is reinterpreted, not converted.
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}

  ins_encode %{
    __ strw($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
14705 
// Reinterpreting move: store a double register's 64-bit bit pattern into a
// long stack slot (ideal MoveD2L) — no value conversion.
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  // Format operand order fixed: strd stores $src to the $dst slot, matching
  // the encoding below and the sibling MoveF2I_reg_stack / MoveI2F_reg_stack /
  // MoveL2D_reg_stack formats (which all print "$src, $dst").
  format %{ "strd $src, $dst\t# MoveD2L_reg_stack" %}

  ins_encode %{
    __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
  %}

  ins_pipe(pipe_class_memory);

%}
14723 
// Reinterpreting move: store a long register's 64-bit bit pattern into a
// double stack slot (ideal MoveL2D) — no value conversion.
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}

  ins_encode %{
    __ str($src$$Register, Address(sp, $dst$$disp));
  %}

  ins_pipe(istore_reg_reg);

%}
14741 
// Register-to-register reinterpreting moves: FMOV transfers the raw bit
// pattern between FP/SIMD and general registers without conversion.

// float bits -> int register (ideal MoveF2I).
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{

  match(Set dst (MoveF2I src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}

  ins_encode %{
    __ fmovs($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_f2i);

%}

// int bits -> float register (ideal MoveI2F).
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{

  match(Set dst (MoveI2F src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}

  ins_encode %{
    __ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_i2f);

%}

// double bits -> long register (ideal MoveD2L).
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{

  match(Set dst (MoveD2L src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}

  ins_encode %{
    __ fmovd($dst$$Register, as_FloatRegister($src$$reg));
  %}

  ins_pipe(fp_d2l);

%}

// long bits -> double register (ideal MoveL2D).
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{

  match(Set dst (MoveL2D src));

  effect(DEF dst, USE src);

  ins_cost(INSN_COST);

  format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}

  ins_encode %{
    __ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
  %}

  ins_pipe(fp_l2d);

%}
14813 
14814 // ============================================================================
14815 // clearing of an array
14816 
// Zero $cnt words starting at $base, with the count in a register.
// Operands are pinned to r11/r10 and clobbered (USE_KILL) because
// zero_words may call a stub with a fixed calling convention.
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base, KILL cr);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    // zero_words returns nullptr if it failed to emit a stub call
    // because the code cache is full; bail out of the compile.
    address tpc = __ zero_words($base$$Register, $cnt$$Register);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
14835 
// Zero a compile-time-constant number of words starting at $base.
// Only used for small lengths: the predicate restricts the constant to
// below BlockZeroingLowLimit expressed in words.
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, iRegL_R11 temp, Universe dummy, rFlagsReg cr)
%{
  predicate((uint64_t)n->in(2)->get_long()
            < (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
  match(Set dummy (ClearArray cnt base));
  effect(TEMP temp, USE_KILL base, KILL cr);

  ins_cost(4 * INSN_COST);
  format %{ "ClearArray $cnt, $base" %}

  ins_encode %{
    // nullptr return means a required stub could not be emitted
    // (code cache full) — abandon this compilation.
    address tpc = __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
14856 
14857 // ============================================================================
14858 // Overflow Math Instructions
14859 
// Overflow checks for addition: cmn/adds computes op1 + op2 discarding the
// result, so the V flag reflects signed overflow of the add.

instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// As above, with an add/sub-encodable immediate right operand.
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowAddI op1 op2));

  format %{ "cmnw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmnw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// 64-bit variant.
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "cmn   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmn($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// 64-bit variant with immediate; adds to zr keeps only the flags.
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowAddL op1 op2));

  format %{ "adds  zr, $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ adds(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}
14911 
// Overflow checks for subtraction: cmp/subs computes op1 - op2 discarding
// the result, so the V flag reflects signed overflow of the subtract.

instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// As above, with an add/sub-encodable immediate right operand.
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
  match(Set cr (OverflowSubI op1 op2));

  format %{ "cmpw  $op1, $op2\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw($op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// 64-bit variant.
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp($op1$$Register, $op2$$Register);
  %}

  ins_pipe(icmp_reg_reg);
%}

// 64-bit variant with immediate; subs to zr keeps only the flags.
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (OverflowSubL op1 op2));

  format %{ "cmp   $op1, $op2\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ subs(zr, $op1$$Register, $op2$$constant);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Negation overflow check: 0 - op1, i.e. OverflowSubI with a zero left input.
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
  match(Set cr (OverflowSubI zero op1));

  format %{ "cmpw  zr, $op1\t# overflow check int" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmpw(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}

// Long negation overflow check.
// NOTE(review): the zero operand is declared immI0 although OverflowSubL's
// constant input is a long — presumably matches via the shared zero constant;
// confirm against the matcher before changing to immL0.
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
  match(Set cr (OverflowSubL zero op1));

  format %{ "cmp   zr, $op1\t# overflow check long" %}
  ins_cost(INSN_COST);
  ins_encode %{
    __ cmp(zr, $op1$$Register);
  %}

  ins_pipe(icmp_reg_imm);
%}
14989 
// Int multiply overflow check producing a flags result: compute the full
// 64-bit product with smull, then test whether it sign-extends from 32 bits.
// The movw/cselw/cmpw tail converts that NE/EQ outcome into the V flag so
// downstream consumers can test overflow/no_overflow directly.
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
  match(Set cr (OverflowMulI op1 op2));

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(5 * INSN_COST);
  ins_encode %{
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form for If(OverflowMulI): skips the flag materialization and
// branches directly on the sign-extension test. Only valid when the Bool
// tests overflow/no_overflow (enforced by the predicate).
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulI op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
            "cmp   rscratch1, rscratch1, sxtw\n\t"
            "b$cmp   $labl" %}
  ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ smull(rscratch1, $op1$$Register, $op2$$Register);
    __ subs(zr, rscratch1, rscratch1, ext::sxtw);      // NE => overflow
    // Map VS (overflow) to NE, VC (no overflow) to EQ for the branch.
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
15032 
// Long multiply overflow check: mul gives the low 64 bits, smulh the high
// 64. No overflow iff the high half equals the sign extension of the low
// half (low >> 63). The movw/cselw/cmpw tail turns that into the V flag,
// as in overflowMulI_reg.
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (OverflowMulL op1 op2));

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "movw  rscratch1, #0x80000000\n\t"
            "cselw rscratch1, rscratch1, zr, NE\n\t"
            "cmpw  rscratch1, #1" %}
  ins_cost(6 * INSN_COST);
  ins_encode %{
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    __ movw(rscratch1, 0x80000000);                    // Develop 0 (EQ),
    __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
    __ cmpw(rscratch1, 1);                             // 0x80000000 - 1 => VS
  %}

  ins_pipe(pipe_slow);
%}

// Fused form for If(OverflowMulL): branches directly on the high-half test
// instead of materializing the V flag. Predicate restricts to
// overflow/no_overflow Bool tests.
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
  match(If cmp (OverflowMulL op1 op2));
  predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
            || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
  effect(USE labl, KILL cr);

  format %{ "mul   rscratch1, $op1, $op2\t#overflow check long\n\t"
            "smulh rscratch2, $op1, $op2\n\t"
            "cmp   rscratch2, rscratch1, ASR #63\n\t"
            "b$cmp $labl" %}
  ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    __ mul(rscratch1, $op1$$Register, $op2$$Register);   // Result bits 0..63
    __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
    __ cmp(rscratch2, rscratch1, Assembler::ASR, 63);    // Top is pure sign ext
    // Map VS (overflow) to NE, VC (no overflow) to EQ for the branch.
    __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
  %}

  ins_pipe(pipe_serial);
%}
15079 
15080 // ============================================================================
15081 // Compare Instructions
15082 
// Signed int compare, register-register.
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed int compare against zero.
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpI op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, 0" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an add/sub-encodable immediate (one insn).
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed int compare against an arbitrary immediate; costs more because
// the constant may need to be materialized first.
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpI op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15138 
15139 // Unsigned compare Instructions; really, same as signed compare
15140 // except it should only be used to feed an If or a CMovI which takes a
15141 // cmpOpU.
15142 
// Unsigned int compare, register-register; same cmpw encoding as the
// signed form but defines rFlagsRegU so consumers use unsigned conditions.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned int compare against zero.
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
  match(Set cr (CmpU op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw $op1, #0\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an add/sub-encodable immediate.
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned int compare against an arbitrary immediate (may need
// constant materialization, hence the higher cost).
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
  match(Set cr (CmpU op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmpw  $op1, $op2\t# unsigned" %}

  ins_encode(aarch64_enc_cmpw_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15198 
// Signed long compare, register-register.
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Signed long compare against zero.
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an add/sub-encodable immediate.
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Signed long compare against an arbitrary immediate (constant may need
// materialization, hence the higher cost).
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15254 
// Unsigned long compare, register-register; same cmp encoding as signed
// but defines rFlagsRegU so consumers use unsigned conditions.
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Unsigned long compare against zero.
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
  match(Set cr (CmpUL op1 zero));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "tst  $op1" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an add/sub-encodable immediate.
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));

  ins_pipe(icmp_reg_imm);
%}

// Unsigned long compare against an arbitrary immediate.
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
  match(Set cr (CmpUL op1 op2));

  effect(DEF cr, USE op1);

  ins_cost(INSN_COST * 2);
  format %{ "cmp  $op1, $op2" %}

  ins_encode(aarch64_enc_cmp_imm(op1, op2));

  ins_pipe(icmp_reg_imm);
%}
15310 
// Pointer compare, register-register (unsigned flags: addresses compare
// unsigned).
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
  match(Set cr (CmpP op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // ptr" %}

  ins_encode(aarch64_enc_cmpp(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Compressed-oop compare, register-register.
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
  match(Set cr (CmpN op1 op2));

  effect(DEF cr, USE op1, USE op2);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, $op2\t // compressed ptr" %}

  ins_encode(aarch64_enc_cmpn(op1, op2));

  ins_pipe(icmp_reg_reg);
%}

// Pointer null-check compare.
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
  match(Set cr (CmpP op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // ptr" %}

  ins_encode(aarch64_enc_testp(op1));

  ins_pipe(icmp_reg_imm);
%}

// Compressed-oop null-check compare.
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
  match(Set cr (CmpN op1 zero));

  effect(DEF cr, USE op1, USE zero);

  ins_cost(INSN_COST);
  format %{ "cmp  $op1, 0\t // compressed ptr" %}

  ins_encode(aarch64_enc_testn(op1));

  ins_pipe(icmp_reg_imm);
%}
15366 
15367 // FP comparisons
15368 //
15369 // n.b. CmpF/CmpD set a normal flags reg which then gets compared
15370 // using normal cmpOp. See declaration of rFlagsReg for details.
15371 
// Float compare, register-register (fcmps sets the normal flags).
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, $src2" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Float compare against the constant 0.0 (uses the fcmp-with-zero form).
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
  match(Set cr (CmpF src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmps $src1, 0.0" %}

  ins_encode %{
    __ fcmps(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
15399 // FROM HERE
15400 
// Double compare, register-register (fcmpd sets the normal flags).
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, $src2" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
  %}

  ins_pipe(pipe_class_compare);
%}

// Double compare against the constant 0.0 (uses the fcmp-with-zero form).
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
  match(Set cr (CmpD src1 src2));

  ins_cost(3 * INSN_COST);
  format %{ "fcmpd $src1, 0.0" %}

  ins_encode %{
    __ fcmpd(as_FloatRegister($src1$$reg), 0.0);
  %}

  ins_pipe(pipe_class_compare);
%}
15428 
// Three-way FP compares (CmpF3/CmpD3): produce -1/0/+1 in an int register.
// fcmp sets flags, csinv yields 0 (EQ) else -1, csneg then keeps -1 when
// LT (less or unordered) else negates it to +1.
// NOTE(review): the "done" label is bound but never branched to, and the
// format strings have an unbalanced '(' — harmless cosmetic quirks.
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}

// Double variant of the three-way compare above.
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, $src2\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    FloatRegister s2 = as_FloatRegister($src2$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, s2);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}

// Three-way float compare against 0.0.
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpF3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmps $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmps(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}

  ins_pipe(pipe_class_default);

%}

// Three-way double compare against 0.0.
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpD3 src1 zero));
  effect(KILL cr);

  ins_cost(5 * INSN_COST);
  format %{ "fcmpd $src1, 0.0\n\t"
            "csinvw($dst, zr, zr, eq\n\t"
            "csnegw($dst, $dst, $dst, lt)"
  %}

  ins_encode %{
    Label done;
    FloatRegister s1 = as_FloatRegister($src1$$reg);
    Register d = as_Register($dst$$reg);
    __ fcmpd(s1, 0.0);
    // installs 0 if EQ else -1
    __ csinvw(d, zr, zr, Assembler::EQ);
    // keeps -1 if less or unordered else installs 1
    __ csnegw(d, d, d, Assembler::LT);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);

%}
15536 
// CmpLTMask: dst = (p < q) ? -1 : 0. Compare, set dst to 1 when LT, then
// negate so the result is an all-ones/all-zeros mask.
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask p q));
  effect(KILL cr);

  ins_cost(3 * INSN_COST);

  format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
            "csetw $dst, lt\n\t"
            "subw $dst, zr, $dst"
  %}

  ins_encode %{
    __ cmpw(as_Register($p$$reg), as_Register($q$$reg));
    __ csetw(as_Register($dst$$reg), Assembler::LT);
    __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
  %}

  ins_pipe(ialu_reg_reg);
%}

// Special case against zero: an arithmetic shift right by 31 smears the
// sign bit, giving -1 for negative src and 0 otherwise in one insn.
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
  match(Set dst (CmpLTMask src zero));
  effect(KILL cr);

  ins_cost(INSN_COST);

  format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}

  ins_encode %{
    __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
  %}

  ins_pipe(ialu_reg_shift);
%}
15573 
15574 // ============================================================================
15575 // Max and Min
15576 
15577 // Like compI_reg_reg or compI_reg_immI0 but without match rule and second zero parameter.
15578 
// Flag-setting compare against zero with no match rule: exists solely as a
// building block for expand rules below (e.g. a MinI/MaxI expansion).
instruct compI_reg_imm0(rFlagsReg cr, iRegI src)
%{
  effect(DEF cr, USE src);
  ins_cost(INSN_COST);
  format %{ "cmpw $src, 0" %}

  ins_encode %{
    __ cmpw($src$$Register, 0);
  %}
  ins_pipe(icmp_reg_imm);
%}

// MinI: expand into a compare followed by a conditional move taking src1
// when src1 < src2.
instruct minI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2)
%{
  match(Set dst (MinI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_lt(dst, src1, src2, cr);
  %}
%}

// MaxI: same shape as MinI but the conditional move takes src1 when
// src1 > src2.
instruct maxI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2)
%{
  match(Set dst (MaxI src1 src2));
  ins_cost(INSN_COST * 3);

  expand %{
    rFlagsReg cr;
    compI_reg_reg(cr, src1, src2);
    cmovI_reg_reg_gt(dst, src1, src2, cr);
  %}
%}
15614 
15615 
15616 // ============================================================================
15617 // Branch Instructions
15618 
15619 // Direct Branch.
// Unconditional direct branch (ideal Goto).
instruct branch(label lbl)
%{
  match(Goto);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  format %{ "b  $lbl" %}

  ins_encode(aarch64_enc_b(lbl));

  ins_pipe(pipe_branch);
%}

// Conditional near branch on signed flags.
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}

// Conditional near branch on unsigned flags.
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
  // Same match rule as `branchConFar'.
  match(If cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  // ins_short_branch(1);
  format %{ "b$cmp  $lbl\t# unsigned" %}

  ins_encode(aarch64_enc_br_conU(cmp, lbl));

  ins_pipe(pipe_branch_cond);
%}
15675 
15676 // Make use of CBZ and CBNZ.  These instructions, as well as being
15677 // shorter than (cmp; branch), have the additional benefit of not
15678 // killing the flags.
15679 
// Fused compare-with-zero-and-branch for int EQ/NE: emits cbzw/cbnzw and
// leaves the flags untouched (cr is listed but not killed).
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Long EQ/NE variant: cbz/cbnz on the full 64-bit register.
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Pointer null-check-and-branch.
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbz($op1$$Register, *L);
    else
      __ cbnz($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Compressed-oop null-check-and-branch (32-bit cbzw/cbnzw).
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($op1$$Register, *L);
    else
      __ cbnzw($op1$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Null check of a DecodeN'd oop: the compressed form is zero iff the
// decoded pointer is null, so test the narrow register directly.
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
  match(If cmp (CmpP (DecodeN oop) zero));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $oop, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ)
      __ cbzw($oop$$Register, *L);
    else
      __ cbnzw($oop$$Register, *L);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15764 
// Unsigned compare-with-zero-and-branch. For unsigned x: x <= 0 iff x == 0
// and x > 0 iff x != 0, so LS maps onto cbzw and HI onto cbnzw.
instruct cmpUI_imm0_branch(cmpOpUEqNeLeGt cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpU op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cbw$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS) {
      __ cbzw($op1$$Register, *L);
    } else {
      assert(cond == Assembler::NE || cond == Assembler::HI, "unexpected condition");
      __ cbnzw($op1$$Register, *L);
    }
  %}
  ins_pipe(pipe_cmp_branch);
%}

// 64-bit unsigned variant of the rule above.
instruct cmpUL_imm0_branch(cmpOpUEqNeLeGt cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpUL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    if (cond == Assembler::EQ || cond == Assembler::LS) {
      __ cbz($op1$$Register, *L);
    } else {
      assert(cond == Assembler::NE || cond == Assembler::HI, "unexpected condition");
      __ cbnz($op1$$Register, *L);
    }
  %}
  ins_pipe(pipe_cmp_branch);
%}
15802 
15803 // Test bit and Branch
15804 
15805 // Patterns for short (< 32KiB) variants
// Signed compare of a long against zero for LT/GE: "x < 0" is just a
// test of the sign bit, so this becomes a single test-bit-and-branch
// on bit 63.  The condition is remapped (LT -> NE, GE -> EQ) so that
// tbr() emits the tbz/tbnz form testing whether the bit is set.
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Int variant of the sign-bit test branch: tests bit 31.
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
15837 
// Branch on "(op1 & mask) == 0 / != 0" where the long mask is a power
// of two (enforced by the predicate): a single tbz/tbnz on the masked
// bit replaces the and + compare + branch sequence.
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    // The predicate guarantees op2 is a power of two, so exact_log2
    // gives the index of the single set bit.
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}

// Int variant of the single-bit test branch.
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L);
  %}
  ins_pipe(pipe_cmp_branch);
  ins_short_branch(1);
%}
15871 
15872 // And far variants
// Far variant of the long sign-bit test branch: passes far=true so
// tbr() can reach targets beyond the short tb* branch range.
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
  match(If cmp (CmpL op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # long" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of the int sign-bit test branch (bit 31).
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
  match(If cmp (CmpI op1 op2));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "cb$cmp   $op1, $labl # int" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond =
      ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
    __ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15902 
// Far variant of the long single-bit test branch.
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
  match(If cmp (CmpL (AndL op1 op2) op3));
  predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2_long($op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}

// Far variant of the int single-bit test branch.
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
  match(If cmp (CmpI (AndI op1 op2) op3));
  predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
  effect(USE labl);

  ins_cost(BRANCH_COST);
  format %{ "tb$cmp   $op1, $op2, $labl" %}
  ins_encode %{
    Label* L = $labl$$label;
    Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
    int bit = exact_log2((juint)$op2$$constant);
    __ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
  %}
  ins_pipe(pipe_cmp_branch);
%}
15934 
15935 // Test bits
15936 
// Set flags from "op1 & constant" compared against zero with a single
// tst instruction — only when the constant is encodable as a 64-bit
// logical immediate (checked by the predicate).
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/false, n->in(1)->in(2)->get_long()));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
15949 
// Set flags from "op1 & constant" compared against zero with a single
// tstw instruction — only when the constant is encodable as a 32-bit
// logical immediate (checked by the predicate).
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));
  predicate(Assembler::operand_valid_for_logical_immediate
            (/*is_32*/true, n->in(1)->in(2)->get_int()));

  ins_cost(INSN_COST);
  // Print the 32-bit mnemonic to match the emitted instruction
  // (was "tst"; cf. cmpI_and_reg which already prints "tstw").
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$constant);
  %}
  ins_pipe(ialu_reg_reg);
%}
15962 
// Register-register form of the flag-setting AND test (long).
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
  match(Set cr (CmpL (AndL op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tst $op1, $op2 # long" %}
  ins_encode %{
    __ tst($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}

// Register-register form of the flag-setting AND test (int).
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
  match(Set cr (CmpI (AndI op1 op2) op3));

  ins_cost(INSN_COST);
  format %{ "tstw $op1, $op2 # int" %}
  ins_encode %{
    __ tstw($op1$$Register, $op2$$Register);
  %}
  ins_pipe(ialu_reg_reg);
%}
15984 
15985 
15986 // Conditional Far Branch
15987 // Conditional Far Branch Unsigned
15988 // TODO: fixme
15989 
15990 // counted loop end branch near
// Conditional branch at the end of a counted loop, driven by the
// condition flags in cr; encoded via the shared b.cond encoding
// (aarch64_enc_br_con).
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
  match(CountedLoopEnd cmp cr);

  effect(USE lbl);

  ins_cost(BRANCH_COST);
  // short variant.
  // ins_short_branch(1);
  format %{ "b$cmp $lbl \t// counted loop end" %}

  ins_encode(aarch64_enc_br_con(cmp, lbl));

  ins_pipe(pipe_branch);
%}
16006 
16007 // counted loop end branch far
16008 // TODO: fixme
16009 
16010 // ============================================================================
16011 // inlined locking and unlocking
16012 
// Inline fast-path monitor enter for the non-lightweight locking
// modes (the predicate excludes LM_LIGHTWEIGHT); delegates to
// MacroAssembler::fast_lock and clobbers the three temps.
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
  predicate(LockingMode != LM_LIGHTWEIGHT);
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2, TEMP tmp3);

  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2,$tmp3" %}

  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_serial);
%}

// Inline fast-path monitor exit, matching cmpFastLock above.
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
  predicate(LockingMode != LM_LIGHTWEIGHT);
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}

  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register);
  %}

  ins_pipe(pipe_serial);
%}
16044 
// Fast-path monitor enter for LM_LIGHTWEIGHT; same shape as
// cmpFastLock but delegates to fast_lock_lightweight.
instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
  predicate(LockingMode == LM_LIGHTWEIGHT);
  match(Set cr (FastLock object box));
  effect(TEMP tmp, TEMP tmp2, TEMP tmp3);

  ins_cost(5 * INSN_COST);
  format %{ "fastlock $object,$box\t! kills $tmp,$tmp2,$tmp3" %}

  ins_encode %{
    __ fast_lock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_serial);
%}

// Fast-path monitor exit for LM_LIGHTWEIGHT.
instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
  predicate(LockingMode == LM_LIGHTWEIGHT);
  match(Set cr (FastUnlock object box));
  effect(TEMP tmp, TEMP tmp2, TEMP tmp3);

  ins_cost(5 * INSN_COST);
  format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2, $tmp3" %}

  ins_encode %{
    __ fast_unlock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
  %}

  ins_pipe(pipe_serial);
%}
16076 
16077 // ============================================================================
16078 // Safepoint Instructions
16079 
16080 // TODO
16081 // provide a near and far version of this code
16082 
// Safepoint poll: a load from the polling page, marked with a
// relocInfo::poll_type relocation so the VM can recognize the poll
// site.  Kills flags (cr).
instruct safePoint(rFlagsReg cr, iRegP poll)
%{
  match(SafePoint poll);
  effect(KILL cr);

  format %{
    "ldrw zr, [$poll]\t# Safepoint: poll for GC"
  %}
  ins_encode %{
    __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
  %}
  ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
16096 
16097 
16098 // ============================================================================
16099 // Procedure Call/Return Instructions
16100 
16101 // Call Java Static Instruction
16102 
// Direct call to a statically-bound Java method; the encoding also
// emits the call epilog (aarch64_enc_call_epilog).
instruct CallStaticJavaDirect(method meth)
%{
  match(CallStaticJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "call,static $meth \t// ==> " %}

  ins_encode(aarch64_enc_java_static_call(meth),
             aarch64_enc_call_epilog);

  ins_pipe(pipe_class_call);
%}

// TO HERE

// Call Java Dynamic Instruction
// Dynamically-dispatched Java call, encoded via
// aarch64_enc_java_dynamic_call, followed by the call epilog.
instruct CallDynamicJavaDirect(method meth)
%{
  match(CallDynamicJava);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL,dynamic $meth \t// ==> " %}

  ins_encode(aarch64_enc_java_dynamic_call(meth),
             aarch64_enc_call_epilog);

  ins_pipe(pipe_class_call);
%}
16137 
16138 // Call Runtime Instruction
16139 
// Call into the VM runtime (may safepoint); all four runtime-call
// variants below share the aarch64_enc_java_to_runtime encoding and
// differ only in the ideal node they match.
instruct CallRuntimeDirect(method meth)
%{
  match(CallRuntime);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Leaf Instruction (no safepoint)

instruct CallLeafDirect(method meth)
%{
  match(CallLeaf);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction without safepoint and with vector arguments
instruct CallLeafDirectVector(method meth)
%{
  match(CallLeafVector);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf vector $meth" %}

  ins_encode(aarch64_enc_java_to_runtime(meth));

  ins_pipe(pipe_class_call);
%}

// Call Runtime Leaf Instruction that does not use/kill FP registers

instruct CallLeafNoFPDirect(method meth)
%{
  match(CallLeafNoFP);

  effect(USE meth);

  ins_cost(CALL_COST);

  format %{ "CALL, runtime leaf nofp $meth" %}

  ins_encode( aarch64_enc_java_to_runtime(meth) );

  ins_pipe(pipe_class_call);
%}
16204 
16205 // Tail Call; Jump from runtime stub to Java code.
16206 // Also known as an 'interprocedural jump'.
16207 // Target of jump will eventually return to caller.
16208 // TailJump below removes the return address.
16209 // Don't use rfp for 'jump_target' because a MachEpilogNode has already been
16210 // emitted just above the TailCall which has reset rfp to the caller state.
// Indirect tail call: jump (not call) to jump_target; method_ptr
// carries the target method for the callee's benefit.
instruct TailCalljmpInd(iRegPNoSpNoRfp jump_target, inline_cache_RegP method_ptr)
%{
  match(TailCall jump_target method_ptr);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $method_ptr holds method" %}

  ins_encode(aarch64_enc_tail_call(jump_target));

  ins_pipe(pipe_class_call);
%}

// Indirect tail jump used for exception dispatch: the exception oop
// travels in r0 (see iRegP_R0 ex_oop).
instruct TailjmpInd(iRegPNoSpNoRfp jump_target, iRegP_R0 ex_oop)
%{
  match(TailJump jump_target ex_oop);

  ins_cost(CALL_COST);

  format %{ "br $jump_target\t# $ex_oop holds exception oop" %}

  ins_encode(aarch64_enc_tail_jmp(jump_target));

  ins_pipe(pipe_class_call);
%}
16236 
16237 // Forward exception.
16238 instruct ForwardExceptionjmp()
16239 %{
16240   match(ForwardException);
16241   ins_cost(CALL_COST);
16242 
16243   format %{ "b forward_exception_stub" %}
16244   ins_encode %{
16245     __ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
16246   %}
16247   ins_pipe(pipe_class_call);
16248 %}
16249 
16250 // Create exception oop: created by stack-crawling runtime code.
16251 // Created exception is now available to this handler, and is setup
16252 // just prior to jumping to this handler. No code emitted.
16253 // TODO check
16254 // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
// Marker node for the exception oop delivered in r0 by stack-crawling
// runtime code; emits no instructions (size 0).
instruct CreateException(iRegP_R0 ex_oop)
%{
  match(Set ex_oop (CreateEx));

  format %{ " -- \t// exception oop; no code emitted" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}

// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "b rethrow_stub" %}

  ins_encode( aarch64_enc_rethrow() );

  ins_pipe(pipe_class_call);
%}
16280 
16281 
16282 // Return Instruction
16283 // epilog node loads ret address into lr as part of frame pop
// Method return; the epilog has already restored lr, so this is a
// bare ret.
instruct Ret()
%{
  match(Return);

  format %{ "ret\t// return register" %}

  ins_encode( aarch64_enc_ret() );

  ins_pipe(pipe_branch);
%}

// Die now: Halt node.  Emits a stop() with the recorded halt reason,
// but only when the node is reachable; unreachable Halt nodes emit
// nothing.
instruct ShouldNotReachHere() %{
  match(Halt);

  ins_cost(CALL_COST);
  format %{ "ShouldNotReachHere" %}

  ins_encode %{
    if (is_reachable()) {
      __ stop(_halt_reason);
    }
  %}

  ins_pipe(pipe_class_default);
%}
16310 
16311 // ============================================================================
16312 // Partial Subtype Check
16313 //
// Search for the superklass in the secondary supers (superklass)
// array of an instance of the subklass.  Set a hidden
16315 // internal cache on a hit (cache is checked with exposed code in
16316 // gen_subtype_check()).  Return NZ for a miss or zero for a hit.  The
16317 // encoding ALSO sets flags.
16318 
// Legacy linear-scan secondary-supers check, used only when
// UseSecondarySupersTable is off.  Fixed registers: sub in r4,
// super in r0, result in r5; temp (r2) and flags are killed.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super));
  predicate(!UseSecondarySupersTable);
  effect(KILL cr, KILL temp);

  ins_cost(20 * INSN_COST);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));

  opcode(0x1); // Force zero of result reg on hit

  ins_pipe(pipe_class_memory);
%}
16334 
16335 // Two versions of partialSubtypeCheck, both used when we need to
16336 // search for a super class in the secondary supers array. The first
16337 // is used when we don't know _a priori_ the class being searched
16338 // for. The second, far more common, is used when we do know: this is
16339 // used for instanceof, checkcast, and any case where C2 can determine
16340 // it by constant propagation.
16341 
// Hashed secondary-supers lookup for a super class that is NOT a
// compile-time constant (the "var" case described above); delegates
// to MacroAssembler::lookup_secondary_supers_table_var.
instruct partialSubtypeCheckVarSuper(iRegP_R4 sub, iRegP_R0 super, vRegD_V0 vtemp, iRegP_R5 result,
                                     iRegP_R1 tempR1, iRegP_R2 tempR2, iRegP_R3 tempR3,
                                     rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub super))
  predicate(UseSecondarySupersTable);
  effect(KILL cr, TEMP tempR1, TEMP tempR2, TEMP tempR3, TEMP vtemp);

  ins_cost(10 * INSN_COST);  // slightly larger than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super" %}

  ins_encode %{
    // No success label is passed: the caller tests $result instead.
    __ lookup_secondary_supers_table_var($sub$$Register, $super$$Register,
                                         $tempR1$$Register, $tempR2$$Register, $tempR3$$Register,
                                         $vtemp$$FloatRegister,
                                         $result$$Register, /*L_success*/nullptr);
  %}

  ins_pipe(pipe_class_memory);
%}
16362 
// Hashed secondary-supers lookup when the super class is a constant
// (the common case: instanceof/checkcast).  Either inlines the table
// probe or, when InlineSecondarySupersTest is off, calls the
// per-hash-slot stub via a trampoline; bails out compilation if the
// trampoline cannot be allocated.
instruct partialSubtypeCheckConstSuper(iRegP_R4 sub, iRegP_R0 super_reg, immP super_con, vRegD_V0 vtemp, iRegP_R5 result,
                                       iRegP_R1 tempR1, iRegP_R2 tempR2, iRegP_R3 tempR3,
                                       rFlagsReg cr)
%{
  match(Set result (PartialSubtypeCheck sub (Binary super_reg super_con)));
  predicate(UseSecondarySupersTable);
  effect(KILL cr, TEMP tempR1, TEMP tempR2, TEMP tempR3, TEMP vtemp);

  ins_cost(5 * INSN_COST);  // smaller than the next version
  format %{ "partialSubtypeCheck $result, $sub, $super_reg, $super_con" %}

  ins_encode %{
    bool success = false;
    // The constant super's hash slot selects the table bucket / stub.
    u1 super_klass_slot = ((Klass*)$super_con$$constant)->hash_slot();
    if (InlineSecondarySupersTest) {
      success =
        __ lookup_secondary_supers_table_const($sub$$Register, $super_reg$$Register,
                                               $tempR1$$Register, $tempR2$$Register, $tempR3$$Register,
                                               $vtemp$$FloatRegister,
                                               $result$$Register,
                                               super_klass_slot);
    } else {
      address call = __ trampoline_call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_stub(super_klass_slot)));
      success = (call != nullptr);
    }
    if (!success) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}

  ins_pipe(pipe_class_memory);
%}
16396 
// Intrinsics for String.compareTo()
16398 
// String.compareTo intrinsic, both strings UTF-16 (UU); selected only
// when SVE is unavailable (UseSVE == 0).
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  // List both killed temps (tmp2 is killed too, per the effect above;
  // matches the UL/LU variants' format strings).
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, pnoreg, pnoreg, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16416 
// String.compareTo intrinsic, both strings Latin-1 (LL); selected
// only when SVE is unavailable (UseSVE == 0).
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  // List both killed temps (tmp2 is killed too, per the effect above;
  // matches the UL/LU variants' format strings).
  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      fnoreg, fnoreg, fnoreg, pnoreg, pnoreg, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16433 
// Mixed-encoding String.compareTo, str1 UTF-16 vs str2 Latin-1 (UL);
// non-SVE variant, needs three vector temps for widening.
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, pnoreg, pnoreg, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}

// Mixed-encoding String.compareTo, str1 Latin-1 vs str2 UTF-16 (LU);
// non-SVE variant.
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                        iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                        vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
  predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
                      $vtmp3$$FloatRegister, pnoreg, pnoreg, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}
16473 
16474 // Note that Z registers alias the corresponding NEON registers, we declare the vector operands of
16475 // these string_compare variants as NEON register type for convenience so that the prototype of
16476 // string_compare can be shared with all variants.
16477 
// SVE variant of String.compareTo, both strings Latin-1 (LL).  The
// four SVE variants below differ only in the StrIntrinsicNode
// encoding passed to string_compare; all use two governing-predicate
// temps (P0/P1) instead of a third vector temp.
instruct string_compareLL_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}

// SVE variant: str1 Latin-1 vs str2 UTF-16 (LU).
instruct string_compareLU_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_memory);
%}

// SVE variant: str1 UTF-16 vs str2 Latin-1 (UL).
instruct string_compareUL_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}

// SVE variant: both strings UTF-16 (UU).
instruct string_compareUU_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
                              iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
                              vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
                              pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
  predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU));
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
         USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);

  format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result   # USE sve" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register, $result$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
                      as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
                      StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16569 
// String.indexOf intrinsic, both strings UTF-16 (UU); kills all four
// string/count inputs and six int temps plus V0-V1.
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                          iRegINoSp tmp3, iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    // NOTE(review): the -1 appears to mean "substring length is not a
    // compile-time constant" (cnt2 is passed in a register) — confirm
    // against MacroAssembler::string_indexof.
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16593 
// String.indexOf intrinsic: variable-length Latin-1 haystack and Latin-1
// needle (LL). Same shape as the UU variant above; only the encoding
// passed to MacroAssembler::string_indexof differs. -1 = needle length
// not known at compile time.
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
         TEMP vtmp0, TEMP vtmp1, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16617 
// String.indexOf intrinsic: variable-length UTF-16 haystack and Latin-1
// needle (UL). Same shape as the UU/LL variants above; -1 = needle length
// not known at compile time.
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
                          iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
                          iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5,
         TEMP tmp6, TEMP vtmp0, TEMP vtmp1, KILL cr);
  // Fixed: "cnt1" was missing its '$' and printed literally in
  // -XX:+PrintOptoAssembly output; now consistent with the UU/LL variants.
  format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL) "
            "# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}

  ins_encode %{
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register,
                      $tmp5$$Register, $tmp6$$Register,
                      -1, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16641 
// String.indexOf with a compile-time-constant needle length (<= 4 chars),
// UTF-16 haystack / UTF-16 needle. The constant length is passed as the
// immediate icnt2; cnt2 and the last two temp-register slots are unused
// on this path and passed as zr.
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_memory);
%}
16663 
// String.indexOf with a compile-time-constant needle length (<= 4 chars),
// Latin-1 haystack / Latin-1 needle. Mirrors string_indexof_conUU above,
// differing only in the encoding passed to the assembler helper.
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_memory);
%}
16685 
// String.indexOf with a constant needle of exactly one character (immI_1),
// UTF-16 haystack / Latin-1 needle. Note the tighter immediate operand
// compared with the UU/LL constant variants (immI_le_4), i.e. the mixed
// encoding path only handles a single-char needle as a constant.
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
                              immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
                              iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
  format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL) "
            "# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}

  ins_encode %{
    int icnt2 = (int)$int_cnt2$$constant;
    __ string_indexof($str1$$Register, $str2$$Register,
                      $cnt1$$Register, zr,
                      $tmp1$$Register, $tmp2$$Register,
                      $tmp3$$Register, $tmp4$$Register, zr, zr,
                      icnt2, $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_memory);
%}
16707 
// indexOf(char) over a UTF-16 string, NEON path: selected only when SVE
// is not available (UseSVE == 0); the SVE variant below handles the rest.
instruct string_indexof_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                             iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                             iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate((UseSVE == 0) && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "StringUTF16 IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                           $result$$Register, $tmp1$$Register, $tmp2$$Register,
                           $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16726 
// indexOf(char) over a Latin-1 string, NEON path (UseSVE == 0 only);
// mirrors string_indexof_char above but calls the Latin-1 helper.
instruct stringL_indexof_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                              iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
                              iRegINoSp tmp3, rFlagsReg cr)
%{
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  predicate((UseSVE == 0) && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L));
  effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);

  format %{ "StringLatin1 IndexOf char[] $str1,$cnt1,$ch -> $result" %}

  ins_encode %{
    __ stringL_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
                            $result$$Register, $tmp1$$Register, $tmp2$$Register,
                            $tmp3$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16745 
// indexOf(char) over a Latin-1 string using SVE. The shared helper is
// parameterized by isL (true = Latin-1). Unlike the NEON variants, the
// temps here are scalable vector (vecA) and predicate registers.
instruct stringL_indexof_char_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                                  iRegI_R0 result, vecA ztmp1, vecA ztmp2,
                                  pRegGov pgtmp, pReg ptmp, rFlagsReg cr) %{
  predicate(UseSVE > 0 && ((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L);
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(TEMP ztmp1, TEMP ztmp2, TEMP pgtmp, TEMP ptmp, KILL cr);
  format %{ "StringLatin1 IndexOf char[] $str1,$cnt1,$ch -> $result # use sve" %}
  ins_encode %{
    __ string_indexof_char_sve($str1$$Register, $cnt1$$Register, $ch$$Register,
                               $result$$Register, $ztmp1$$FloatRegister,
                               $ztmp2$$FloatRegister, $pgtmp$$PRegister,
                               $ptmp$$PRegister, true /* isL */);
  %}
  ins_pipe(pipe_class_memory);
%}
16761 
// indexOf(char) over a UTF-16 string using SVE; identical to the Latin-1
// SVE rule above except isL is false.
instruct stringU_indexof_char_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
                                  iRegI_R0 result, vecA ztmp1, vecA ztmp2,
                                  pRegGov pgtmp, pReg ptmp, rFlagsReg cr) %{
  predicate(UseSVE > 0 && ((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U);
  match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
  effect(TEMP ztmp1, TEMP ztmp2, TEMP pgtmp, TEMP ptmp, KILL cr);
  format %{ "StringUTF16 IndexOf char[] $str1,$cnt1,$ch -> $result # use sve" %}
  ins_encode %{
    __ string_indexof_char_sve($str1$$Register, $cnt1$$Register, $ch$$Register,
                               $result$$Register, $ztmp1$$FloatRegister,
                               $ztmp2$$FloatRegister, $pgtmp$$PRegister,
                               $ptmp$$PRegister, false /* isL */);
  %}
  ins_pipe(pipe_class_memory);
%}
16777 
// String.equals for two Latin-1 (LL) strings of equal, known byte count.
// result (R0) is a boolean-like int set by MacroAssembler::string_equals.
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
                        iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);

  format %{ "String Equals $str1,$str2,$cnt -> $result" %}
  ins_encode %{
    // Count is in 8-bit bytes; non-Compact chars are 16 bits.
    __ string_equals($str1$$Register, $str2$$Register,
                     $result$$Register, $cnt$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
16793 
// Arrays.equals for byte arrays (encoding LL). Calls
// MacroAssembler::arrays_equals with element size 1; the helper may emit
// a trampoline to a stub, so a null return address means the code cache
// is full and this compilation must bail out.
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                       vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3,
         TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         TEMP vtmp6, TEMP vtmp7, KILL cr);

  // Fixed: "ary2" was missing its '$' and printed literally in
  // -XX:+PrintOptoAssembly output.
  format %{ "Array Equals $ary1,$ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %}
  ins_encode %{
    address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
                                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                                   $result$$Register, $tmp$$Register, 1);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
16818 
// Arrays.equals for char arrays (encoding UU). Identical to array_equalsB
// above except the element size passed to arrays_equals is 2. A null
// return address means the code cache is full — bail out.
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
                       iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
                       vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                       vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                       iRegP_R10 tmp, rFlagsReg cr)
%{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3,
         TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         TEMP vtmp6, TEMP vtmp7, KILL cr);

  // Fixed: "ary2" was missing its '$' and printed literally in
  // -XX:+PrintOptoAssembly output.
  format %{ "Array Equals $ary1,$ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %}
  ins_encode %{
    address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
                                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
                                   $result$$Register, $tmp$$Register, 2);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
16843 
// Vectorized Arrays.hashCode / String.hashCode. result is both an input
// (the initial hash) and the output, per the VectorizedHashCode match
// rule. basic_type selects the element kind at runtime codegen time.
// A null return address from the helper means the code cache is full.
instruct arrays_hashcode(iRegP_R1 ary, iRegI_R2 cnt, iRegI_R0 result, immI basic_type,
                         vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                         vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
                         vRegD_V12 vtmp8, vRegD_V13 vtmp9, rFlagsReg cr)
%{
  match(Set result (VectorizedHashCode (Binary ary cnt) (Binary result basic_type)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5, TEMP vtmp6,
         TEMP vtmp7, TEMP vtmp8, TEMP vtmp9, USE_KILL ary, USE_KILL cnt, USE basic_type, KILL cr);

  format %{ "Array HashCode array[] $ary,$cnt,$result,$basic_type -> $result   // KILL all" %}
  ins_encode %{
    // NOTE: the vector temps are deliberately passed in the order
    // vtmp3..vtmp0 first — this is the order arrays_hashcode expects.
    address tpc = __ arrays_hashcode($ary$$Register, $cnt$$Register, $result$$Register,
                                     $vtmp3$$FloatRegister, $vtmp2$$FloatRegister,
                                     $vtmp1$$FloatRegister, $vtmp0$$FloatRegister,
                                     $vtmp4$$FloatRegister, $vtmp5$$FloatRegister,
                                     $vtmp6$$FloatRegister, $vtmp7$$FloatRegister,
                                     $vtmp8$$FloatRegister, $vtmp9$$FloatRegister,
                                     (BasicType)$basic_type$$constant);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
16869 
// StringCoding.countPositives: count leading non-negative bytes in a
// byte array. May emit a stub call; a null return address means the
// code cache is full and compilation must bail out.
instruct count_positives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (CountPositives ary1 len));
  effect(USE_KILL ary1, USE_KILL len, KILL cr);
  format %{ "count positives byte[] $ary1,$len -> $result" %}
  ins_encode %{
    address tpc = __ count_positives($ary1$$Register, $len$$Register, $result$$Register);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe( pipe_slow );
%}
16884 
16885 // fast char[] to byte[] compression
// fast char[] to byte[] compression
// Compact-strings compression (StrCompressedCopy): copy UTF-16 chars to
// Latin-1 bytes. result is set by char_array_compress (presumably the
// number of chars processed / a failure indicator — see the helper).
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                         vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                         vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                         iRegI_R0 result, rFlagsReg cr)
%{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
         USE_KILL src, USE_KILL dst, USE len, KILL cr);

  format %{ "String Compress $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
                           $result$$Register, $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                           $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                           $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_slow);
%}
16904 
16905 // fast byte[] to char[] inflation
// fast byte[] to char[] inflation
// Compact-strings inflation (StrInflatedCopy): widen Latin-1 bytes to
// UTF-16 chars. Produces no value (Universe dummy). May emit a stub
// call; a null return address means the code cache is full.
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len, iRegP_R3 tmp,
                        vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
                        vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, rFlagsReg cr)
%{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3,
         TEMP vtmp4, TEMP vtmp5, TEMP vtmp6, TEMP tmp,
         USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);

  format %{ "String Inflate $src,$dst # KILL $tmp $src $dst $len V0-V6 cr" %}
  ins_encode %{
    // Only vtmp0..vtmp2 are passed explicitly; vtmp3..vtmp6 are declared
    // TEMP because the helper (or the stub it calls) presumably clobbers
    // them as well — verify against byte_array_inflate if changing.
    address tpc = __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
                                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                                        $vtmp2$$FloatRegister, $tmp$$Register);
    if (tpc == nullptr) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
    }
  %}
  ins_pipe(pipe_class_memory);
%}
16927 
16928 // encode char[] to byte[] in ISO_8859_1
// encode char[] to byte[] in ISO_8859_1
// The 'false' argument to encode_iso_array selects ISO-8859-1 semantics
// (accept chars <= 0xFF); the ASCII variant below passes true.
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                          vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                          vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                          iRegI_R0 result, rFlagsReg cr)
%{
  predicate(!((EncodeISOArrayNode*)n)->is_ascii());
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE len, KILL vtmp0, KILL vtmp1,
         KILL vtmp2, KILL vtmp3, KILL vtmp4, KILL vtmp5, KILL cr);

  format %{ "Encode ISO array $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
                        $result$$Register, false,
                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                        $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                        $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_class_memory);
%}
16949 
// ASCII variant of the rule above: selected when the node's is_ascii()
// flag is set; the 'true' argument restricts accepted chars to < 0x80.
instruct encode_ascii_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
                            vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
                            vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
                            iRegI_R0 result, rFlagsReg cr)
%{
  predicate(((EncodeISOArrayNode*)n)->is_ascii());
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(USE_KILL src, USE_KILL dst, USE len, KILL vtmp0, KILL vtmp1,
         KILL vtmp2, KILL vtmp3, KILL vtmp4, KILL vtmp5, KILL cr);

  format %{ "Encode ASCII array $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
                        $result$$Register, true,
                        $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
                        $vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
                        $vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
  %}
  ins_pipe(pipe_class_memory);
%}
16970 
16971 //----------------------------- CompressBits/ExpandBits ------------------------
16972 
// Integer.compress (CompressBits) via the SVE2 BITPERM BEXT instruction.
// BEXT only operates on vector registers, so the scalars are moved to
// S-sized FP/SIMD lanes, permuted, and moved back.
instruct compressBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ S, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ S, 0, $mask$$Register);
    __ sve_bext($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
16990 
// Integer.compress with the source coming straight from memory and a
// constant mask: the 32-bit load goes directly into an FP/SIMD register
// and the mask is materialized from the constant pool, avoiding two
// GPR->FPR moves compared with compressBitsI_reg.
instruct compressBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits (LoadI mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrs   $tsrc, $mem\n\t"
            "ldrs   $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    loadStore(masm, &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ ldrs($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bext($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17009 
// Long.compress (CompressBits) via SVE2 BEXT, D-sized lanes; 64-bit
// analogue of compressBitsI_reg.
instruct compressBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask,
                           vRegD tdst, vRegD tsrc, vRegD tmask) %{
  match(Set dst (CompressBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ D, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ D, 0, $mask$$Register);
    __ sve_bext($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17027 
// Long.compress with a memory source and constant mask: 64-bit load goes
// directly into an FP/SIMD register, mask comes from the constant pool.
// NOTE(review): temps are declared vRegF but used with D-sized lanes —
// both map to the same physical FP registers; confirm against the other
// *_memcon rules before changing.
instruct compressBitsL_memcon(iRegLNoSp dst, memory8 mem, immL mask,
                           vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (CompressBits (LoadL mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrd   $tsrc, $mem\n\t"
            "ldrd   $tmask, $mask\n\t"
            "bext   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    loadStore(masm, &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ ldrd($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bext($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17046 
// Integer.expand (ExpandBits) via the SVE2 BITPERM BDEP (bit deposit)
// instruction, S-sized lanes; mirror of compressBitsI_reg.
instruct expandBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ S, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ S, 0, $mask$$Register);
    __ sve_bdep($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17064 
// Integer.expand with a memory source and constant mask; mirror of
// compressBitsI_memcon using BDEP instead of BEXT.
instruct expandBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits (LoadI mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrs   $tsrc, $mem\n\t"
            "ldrs   $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    loadStore(masm, &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
    __ ldrs($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bdep($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
  %}
  ins_pipe(pipe_slow);
%}
17083 
// Long.expand (ExpandBits) via SVE2 BDEP, D-sized lanes; 64-bit analogue
// of expandBitsI_reg.
instruct expandBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask,
                         vRegD tdst, vRegD tsrc, vRegD tmask) %{
  match(Set dst (ExpandBits src mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "mov    $tsrc, $src\n\t"
            "mov    $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    __ mov($tsrc$$FloatRegister, __ D, 0, $src$$Register);
    __ mov($tmask$$FloatRegister, __ D, 0, $mask$$Register);
    __ sve_bdep($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17101 
17102 
// Long.expand with a memory source and constant mask; mirror of
// compressBitsL_memcon using BDEP instead of BEXT.
// Fixed: dst was declared iRegINoSp (the int register class) although
// this rule matches (ExpandBits (LoadL mem) mask), which produces a
// long. It must be iRegLNoSp, consistent with compressBitsL_memcon and
// expandBitsL_reg.
instruct expandBitsL_memcon(iRegLNoSp dst, memory8 mem, immL mask,
                         vRegF tdst, vRegF tsrc, vRegF tmask) %{
  match(Set dst (ExpandBits (LoadL mem) mask));
  effect(TEMP tdst, TEMP tsrc, TEMP tmask);
  format %{ "ldrd   $tsrc, $mem\n\t"
            "ldrd   $tmask, $mask\n\t"
            "bdep   $tdst, $tsrc, $tmask\n\t"
            "mov    $dst, $tdst"
          %}
  ins_encode %{
    // 64-bit load directly into an FP/SIMD register, mask from the
    // constant pool, bit-deposit, then move back to the GPR destination.
    loadStore(masm, &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
              as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
    __ ldrd($tmask$$FloatRegister, $constantaddress($mask));
    __ sve_bdep($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
    __ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
  %}
  ins_pipe(pipe_slow);
%}
17121 
17122 // ============================================================================
17123 // This name is KNOWN by the ADLC and cannot be changed.
17124 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
17125 // for this guy.
// Load the current thread pointer. On AArch64 the thread lives in a
// dedicated register (the thread_RegP operand class), so this rule emits
// no code at all: cost 0, size 0, empty encoding.
instruct tlsLoadP(thread_RegP dst)
%{
  match(Set dst (ThreadLocal));

  ins_cost(0);

  format %{ " -- \t// $dst=Thread::current(), empty" %}

  size(0);

  ins_encode( /*empty*/ );

  ins_pipe(pipe_class_empty);
%}
17140 
17141 //----------PEEPHOLE RULES-----------------------------------------------------
17142 // These must follow all instruction definitions as they use the names
17143 // defined in the instructions definitions.
17144 //
17145 // peepmatch ( root_instr_name [preceding_instruction]* );
17146 //
17147 // peepconstraint %{
17148 // (instruction_number.operand_name relational_op instruction_number.operand_name
17149 //  [, ...] );
17150 // // instruction numbers are zero-based using left to right order in peepmatch
17151 //
17152 // peepreplace ( instr_name  ( [instruction_number.operand_name]* ) );
17153 // // provide an instruction_number.operand_name for each operand that appears
17154 // // in the replacement instruction's match rule
17155 //
17156 // ---------VM FLAGS---------------------------------------------------------
17157 //
17158 // All peephole optimizations can be turned off using -XX:-OptoPeephole
17159 //
17160 // Each peephole rule is given an identifying number starting with zero and
17161 // increasing by one in the order seen by the parser.  An individual peephole
17162 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
17163 // on the command-line.
17164 //
17165 // ---------CURRENT LIMITATIONS----------------------------------------------
17166 //
17167 // Only match adjacent instructions in same basic block
17168 // Only equality constraints
17169 // Only constraints between operands, not (0.dest_reg == RAX_enc)
17170 // Only one replacement instruction
17171 //
17172 // ---------EXAMPLE----------------------------------------------------------
17173 //
17174 // // pertinent parts of existing instructions in architecture description
17175 // instruct movI(iRegINoSp dst, iRegI src)
17176 // %{
17177 //   match(Set dst (CopyI src));
17178 // %}
17179 //
17180 // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
17181 // %{
17182 //   match(Set dst (AddI dst src));
17183 //   effect(KILL cr);
17184 // %}
17185 //
17186 // // Change (inc mov) to lea
17187 // peephole %{
17188 //   // increment preceded by register-register move
17189 //   peepmatch ( incI_iReg movI );
17190 //   // require that the destination register of the increment
17191 //   // match the destination register of the move
17192 //   peepconstraint ( 0.dst == 1.dst );
17193 //   // construct a replacement instruction that sets
17194 //   // the destination to ( move's source register + one )
17195 //   peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
17196 // %}
17197 //
17198 
17199 // Implementation no longer uses movX instructions since
17200 // machine-independent system no longer uses CopyX nodes.
17201 //
17202 // peephole
17203 // %{
17204 //   peepmatch (incI_iReg movI);
17205 //   peepconstraint (0.dst == 1.dst);
17206 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17207 // %}
17208 
17209 // peephole
17210 // %{
17211 //   peepmatch (decI_iReg movI);
17212 //   peepconstraint (0.dst == 1.dst);
17213 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17214 // %}
17215 
17216 // peephole
17217 // %{
17218 //   peepmatch (addI_iReg_imm movI);
17219 //   peepconstraint (0.dst == 1.dst);
17220 //   peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
17221 // %}
17222 
17223 // peephole
17224 // %{
17225 //   peepmatch (incL_iReg movL);
17226 //   peepconstraint (0.dst == 1.dst);
17227 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17228 // %}
17229 
17230 // peephole
17231 // %{
17232 //   peepmatch (decL_iReg movL);
17233 //   peepconstraint (0.dst == 1.dst);
17234 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17235 // %}
17236 
17237 // peephole
17238 // %{
17239 //   peepmatch (addL_iReg_imm movL);
17240 //   peepconstraint (0.dst == 1.dst);
17241 //   peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
17242 // %}
17243 
17244 // peephole
17245 // %{
17246 //   peepmatch (addP_iReg_imm movP);
17247 //   peepconstraint (0.dst == 1.dst);
17248 //   peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
17249 // %}
17250 
17251 // // Change load of spilled value to only a spill
17252 // instruct storeI(memory mem, iRegI src)
17253 // %{
17254 //   match(Set mem (StoreI mem src));
17255 // %}
17256 //
17257 // instruct loadI(iRegINoSp dst, memory mem)
17258 // %{
17259 //   match(Set dst (LoadI mem));
17260 // %}
17261 //
17262 
17263 //----------SMARTSPILL RULES---------------------------------------------------
17264 // These must follow all instruction definitions as they use the names
17265 // defined in the instructions definitions.
17266 
17267 // Local Variables:
17268 // mode: c++
17269 // End: