/*
 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.hpp"
#include "opto/ad.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/movenode.hpp"
#include "opto/opcodes.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/type.hpp"
#include "opto/vectornode.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"

OptoReg::Name OptoReg::c_frame_pointer;

const RegMask *Matcher::idealreg2regmask[_last_machine_leaf];
RegMask Matcher::mreg2regmask[_last_Mach_Reg];
RegMask Matcher::caller_save_regmask;
RegMask Matcher::caller_save_regmask_exclude_soe;
RegMask Matcher::mh_caller_save_regmask;
RegMask Matcher::mh_caller_save_regmask_exclude_soe;
RegMask Matcher::STACK_ONLY_mask;
RegMask Matcher::c_frame_ptr_mask;
const uint Matcher::_begin_rematerialize = _BEGIN_REMATERIALIZE;
const uint Matcher::_end_rematerialize   = _END_REMATERIALIZE;

//---------------------------Matcher-------------------------------------------
Matcher::Matcher()
: PhaseTransform( Phase::Ins_Select ),
  _states_arena(Chunk::medium_size, mtCompiler),
  _new_nodes(C->comp_arena()),
  _visited(&_states_arena),
  _shared(&_states_arena),
  _dontcare(&_states_arena),
  _reduceOp(reduceOp), _leftOp(leftOp), _rightOp(rightOp),
  _swallowed(swallowed),
  _begin_inst_chain_rule(_BEGIN_INST_CHAIN_RULE),
  _end_inst_chain_rule(_END_INST_CHAIN_RULE),
  _must_clone(must_clone),
  _shared_nodes(C->comp_arena()),
#ifndef PRODUCT
  _old2new_map(C->comp_arena()),
  _new2old_map(C->comp_arena()),
  _reused(C->comp_arena()),
#endif // !PRODUCT
  _allocation_started(false),
  _ruleName(ruleName),
  _register_save_policy(register_save_policy),
  _c_reg_save_policy(c_reg_save_policy),
  _register_save_type(register_save_type) {
  C->set_matcher(this);

  idealreg2spillmask  [Op_RegI] = nullptr;
  idealreg2spillmask  [Op_RegN] = nullptr;
  idealreg2spillmask  [Op_RegL] = nullptr;
  idealreg2spillmask  [Op_RegF] = nullptr;
  idealreg2spillmask  [Op_RegD] = nullptr;
  idealreg2spillmask  [Op_RegP] = nullptr;
  idealreg2spillmask  [Op_VecA] = nullptr;
  idealreg2spillmask  [Op_VecS] = nullptr;
  idealreg2spillmask  [Op_VecD] = nullptr;
  idealreg2spillmask  [Op_VecX] = nullptr;
  idealreg2spillmask  [Op_VecY] = nullptr;
  idealreg2spillmask  [Op_VecZ] = nullptr;
  idealreg2spillmask  [Op_RegFlags] = nullptr;
  idealreg2spillmask  [Op_RegVectMask] = nullptr;

  idealreg2debugmask  [Op_RegI] = nullptr;
  idealreg2debugmask  [Op_RegN] = nullptr;
  idealreg2debugmask  [Op_RegL] = nullptr;
  idealreg2debugmask  [Op_RegF] = nullptr;
  idealreg2debugmask  [Op_RegD] = nullptr;
  idealreg2debugmask  [Op_RegP] = nullptr;
  idealreg2debugmask  [Op_VecA] = nullptr;
  idealreg2debugmask  [Op_VecS] = nullptr;
  idealreg2debugmask  [Op_VecD] = nullptr;
  idealreg2debugmask  [Op_VecX] = nullptr;
  idealreg2debugmask  [Op_VecY] = nullptr;
  idealreg2debugmask  [Op_VecZ] = nullptr;
  idealreg2debugmask  [Op_RegFlags] = nullptr;
  idealreg2debugmask  [Op_RegVectMask] = nullptr;

  idealreg2mhdebugmask[Op_RegI] = nullptr;
  idealreg2mhdebugmask[Op_RegN] = nullptr;
  idealreg2mhdebugmask[Op_RegL] = nullptr;
  idealreg2mhdebugmask[Op_RegF] = nullptr;
  idealreg2mhdebugmask[Op_RegD] = nullptr;
  idealreg2mhdebugmask[Op_RegP] = nullptr;
  idealreg2mhdebugmask[Op_VecA] = nullptr;
  idealreg2mhdebugmask[Op_VecS] = nullptr;
  idealreg2mhdebugmask[Op_VecD] = nullptr;
  idealreg2mhdebugmask[Op_VecX] = nullptr;
  idealreg2mhdebugmask[Op_VecY] = nullptr;
  idealreg2mhdebugmask[Op_VecZ] = nullptr;
  idealreg2mhdebugmask[Op_RegFlags] = nullptr;
  idealreg2mhdebugmask[Op_RegVectMask] = nullptr;

  debug_only(_mem_node = nullptr;)   // Ideal memory node consumed by mach node
}

//------------------------------warp_incoming_stk_arg------------------------
// This warps a VMReg into an OptoReg::Name
OptoReg::Name Matcher::warp_incoming_stk_arg( VMReg reg ) {
  OptoReg::Name warped;
  if( reg->is_stack() ) {  // Stack slot argument?
    warped = OptoReg::add(_old_SP, reg->reg2stack() );
    warped = OptoReg::add(warped, C->out_preserve_stack_slots());
    if( warped >= _in_arg_limit )
      _in_arg_limit = OptoReg::add(warped, 1); // Bump max stack slot seen
    if (!RegMask::can_represent_arg(warped)) {
      // The compiler cannot represent this method's calling sequence.
      // Bailout: we do not have space to represent all arguments.
      C->record_method_not_compilable("unsupported incoming calling sequence");
      return OptoReg::Bad;
    }
    return warped;
  }
  return OptoReg::as_OptoReg(reg);
}
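
// A worked example with made-up numbers (no particular platform): for a stack
// argument where reg->reg2stack() == 3, with _old_SP == 16 and
// out_preserve_stack_slots() == 2, the warped name is 16 + 3 + 2 == 21, and
// _in_arg_limit is bumped to 22 if it was previously at or below 21.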

//---------------------------compute_old_SP------------------------------------
OptoReg::Name Compile::compute_old_SP() {
  int fixed    = fixed_slots();
  int preserve = in_preserve_stack_slots();
  return OptoReg::stack2reg(align_up(fixed + preserve, (int)Matcher::stack_alignment_in_slots()));
}
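
// A worked example with made-up numbers: fixed_slots() == 5,
// in_preserve_stack_slots() == 2 and stack_alignment_in_slots() == 2 give
// align_up(5 + 2, 2) == 8, so the old SP is biased to stack slot 8
// (i.e. stack2reg(8)).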



#ifdef ASSERT
void Matcher::verify_new_nodes_only(Node* xroot) {
  // Make sure that the new graph only references new nodes
  ResourceMark rm;
  Unique_Node_List worklist;
  VectorSet visited;
  worklist.push(xroot);
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    if (visited.test_set(n->_idx)) {
      continue;
    }
    assert(C->node_arena()->contains(n), "dead node");
    for (uint j = 0; j < n->req(); j++) {
      Node* in = n->in(j);
      if (in != nullptr) {
        worklist.push(in);
      }
    }
    for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
      worklist.push(n->fast_out(j));
    }
  }
}
#endif


//---------------------------match---------------------------------------------
void Matcher::match( ) {
  if( MaxLabelRootDepth < 100 ) { // Too small?
    assert(false, "invalid MaxLabelRootDepth, increase it to 100 minimum");
    MaxLabelRootDepth = 100;
  }
  // One-time initialization of some register masks.
  init_spill_mask( C->root()->in(1) );
  if (C->failing()) {
    return;
  }
  _return_addr_mask = return_addr();
#ifdef _LP64
  // Pointers take 2 slots in 64-bit land
  _return_addr_mask.Insert(OptoReg::add(return_addr(),1));
#endif

  // Map a Java-signature return type into return register-value
  // machine registers for 0, 1 and 2 returned values.
  const TypeTuple *range = C->tf()->range();
  if( range->cnt() > TypeFunc::Parms ) { // If not a void function
    // Get ideal-register return type
    uint ireg = range->field_at(TypeFunc::Parms)->ideal_reg();
    // Get machine return register
    uint sop = C->start()->Opcode();
    OptoRegPair regs = return_value(ireg);

    // And mask for same
    _return_value_mask = RegMask(regs.first());
    if( OptoReg::is_valid(regs.second()) )
      _return_value_mask.Insert(regs.second());
  }

  // ---------------
  // Frame Layout

  // Need the method signature to determine the incoming argument types,
  // because the types determine which registers the incoming arguments are
  // in, and this affects the matched code.
  const TypeTuple *domain = C->tf()->domain();
  uint             argcnt = domain->cnt() - TypeFunc::Parms;
  BasicType *sig_bt        = NEW_RESOURCE_ARRAY( BasicType, argcnt );
  VMRegPair *vm_parm_regs  = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
  _parm_regs               = NEW_RESOURCE_ARRAY( OptoRegPair, argcnt );
  _calling_convention_mask = NEW_RESOURCE_ARRAY( RegMask, argcnt );
  uint i;
  for( i = 0; i<argcnt; i++ ) {
    sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
  }

  // Pass array of ideal registers and length to USER code (from the AD file)
  // that will convert this to an array of register numbers.
  const StartNode *start = C->start();
  start->calling_convention( sig_bt, vm_parm_regs, argcnt );
#ifdef ASSERT
  // Sanity check users' calling convention.  Real handy while trying to
  // get the initial port correct.
  { for (uint i = 0; i<argcnt; i++) {
      if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
        assert(domain->field_at(i+TypeFunc::Parms)==Type::HALF, "only allowed on halves" );
        _parm_regs[i].set_bad();
        continue;
      }
      VMReg parm_reg = vm_parm_regs[i].first();
      assert(parm_reg->is_valid(), "invalid arg?");
      if (parm_reg->is_reg()) {
        OptoReg::Name opto_parm_reg = OptoReg::as_OptoReg(parm_reg);
        assert(can_be_java_arg(opto_parm_reg) ||
               C->stub_function() == CAST_FROM_FN_PTR(address, OptoRuntime::rethrow_C) ||
               opto_parm_reg == inline_cache_reg(),
               "parameters in register must be preserved by runtime stubs");
      }
      for (uint j = 0; j < i; j++) {
        assert(parm_reg != vm_parm_regs[j].first(),
               "calling conv. must produce distinct regs");
      }
    }
  }
#endif

  // Do some initial frame layout.

  // Compute the old incoming SP (may be called FP) as
  //   OptoReg::stack0() + locks + in_preserve_stack_slots + pad2.
  _old_SP = C->compute_old_SP();
  assert( is_even(_old_SP), "must be even" );

  // Compute highest incoming stack argument as
  //   _old_SP + out_preserve_stack_slots + incoming argument size.
  _in_arg_limit = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  assert( is_even(_in_arg_limit), "out_preserve must be even" );
  for( i = 0; i < argcnt; i++ ) {
    // Permit args to have no register
    _calling_convention_mask[i].Clear();
    if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
      _parm_regs[i].set_bad();
      continue;
    }
    // calling_convention returns stack arguments as a count of
    // slots beyond OptoReg::stack0()/VMRegImpl::stack0.  We need to convert this to
    // the allocator's point of view, taking into account all the
    // preserve area, locks & pad2.

    OptoReg::Name reg1 = warp_incoming_stk_arg(vm_parm_regs[i].first());
    if (C->failing()) {
      return;
    }
    if( OptoReg::is_valid(reg1))
      _calling_convention_mask[i].Insert(reg1);

    OptoReg::Name reg2 = warp_incoming_stk_arg(vm_parm_regs[i].second());
    if (C->failing()) {
      return;
    }
    if( OptoReg::is_valid(reg2))
      _calling_convention_mask[i].Insert(reg2);

    // Saved biased stack-slot register number
    _parm_regs[i].set_pair(reg2, reg1);
  }

  // Finally, make sure the incoming arguments take up an even number of
  // words, in case the arguments or locals need to contain doubleword stack
  // slots.  The rest of the system assumes that stack slot pairs (in
  // particular, in the spill area) which look aligned will in fact be
  // aligned relative to the stack pointer in the target machine.  Double
  // stack slots will always be allocated aligned.
  _new_SP = OptoReg::Name(align_up(_in_arg_limit, (int)RegMask::SlotsPerLong));
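
  // A worked example with made-up numbers: with RegMask::SlotsPerLong == 2 and
  // an odd _in_arg_limit of 11, _new_SP is rounded up to slot 12 so that
  // doubleword spill pairs stay aligned.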

  // Compute highest outgoing stack argument as
  //   _new_SP + out_preserve_stack_slots + max(outgoing argument size).
  _out_arg_limit = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
  assert( is_even(_out_arg_limit), "out_preserve must be even" );

  if (!RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1))) {
    // The compiler cannot represent this method's calling sequence.
    // Bailout: we do not have space to represent all arguments.
    C->record_method_not_compilable("must be able to represent all call arguments in reg mask");
  }

  if (C->failing())  return;  // bailed out on incoming arg failure

  // ---------------
  // Collect roots of matcher trees.  Every node for which
  // _shared[_idx] is cleared is guaranteed to not be shared, and thus
  // can be a valid interior of some tree.
  find_shared( C->root() );
  find_shared( C->top() );

  C->print_method(PHASE_BEFORE_MATCHING, 1);

  // Create a new ideal node ConP #null even if it already exists in old space,
  // to avoid false sharing if the corresponding mach node is not used.
  // The corresponding mach node is only used in rare cases for derived
  // pointers.
  Node* new_ideal_null = ConNode::make(TypePtr::NULL_PTR);

  // Swap out to old-space; emptying new-space
  Arena* old = C->swap_old_and_new();

  // Save debug and profile information for nodes in old space:
  _old_node_note_array = C->node_note_array();
  if (_old_node_note_array != nullptr) {
    C->set_node_note_array(new(C->comp_arena()) GrowableArray<Node_Notes*>
                           (C->comp_arena(), _old_node_note_array->length(),
                            0, nullptr));
  }

  // Pre-size the new_node table to avoid the need for range checks.
  grow_new_node_array(C->unique());

  // Reset node counter so MachNodes start with _idx at 0
  int live_nodes = C->live_nodes();
  C->set_unique(0);
  C->reset_dead_node_list();

  // Recursively match trees from old space into new space.
  // Correct leaves of new-space Nodes; they point to old-space.
  _visited.clear();
  Node* const n = xform(C->top(), live_nodes);
  if (C->failing()) return;
  C->set_cached_top_node(n);
  if (!C->failing()) {
    Node* xroot =        xform( C->root(), 1 );
    if (C->failing()) return;
    if (xroot == nullptr) {
      Matcher::soft_match_failure();  // recursive matching process failed
      assert(false, "instruction match failed");
      C->record_method_not_compilable("instruction match failed");
    } else {
      // During matching shared constants were attached to C->root()
      // because xroot wasn't available yet, so transfer the uses to
      // the xroot.
      for( DUIterator_Fast jmax, j = C->root()->fast_outs(jmax); j < jmax; j++ ) {
        Node* n = C->root()->fast_out(j);
        if (C->node_arena()->contains(n)) {
          assert(n->in(0) == C->root(), "should be control user");
          n->set_req(0, xroot);
          --j;
          --jmax;
        }
      }

      // Generate new mach node for ConP #null
      assert(new_ideal_null != nullptr, "sanity");
      _mach_null = match_tree(new_ideal_null);
      // Don't set control, it will confuse GCM since there are no uses.
      // The control will be set when this node is used for the first time
      // in find_base_for_derived().
      assert(_mach_null != nullptr || C->failure_is_artificial(), ""); // bailouts are handled below.

      C->set_root(xroot->is_Root() ? xroot->as_Root() : nullptr);

#ifdef ASSERT
      verify_new_nodes_only(xroot);
#endif
    }
  }
  if (C->top() == nullptr || C->root() == nullptr) {
    // New graph lost. This is due to a compilation failure we encountered earlier.
    stringStream ss;
    if (C->failure_reason() != nullptr) {
      ss.print("graph lost: %s", C->failure_reason());
    } else {
      assert(C->failure_reason() != nullptr, "graph lost: reason unknown");
      ss.print("graph lost: reason unknown");
    }
    C->record_method_not_compilable(ss.as_string() DEBUG_ONLY(COMMA true));
  }
  if (C->failing()) {
    // delete old;
    old->destruct_contents();
    return;
  }
  assert( C->top(), "" );
  assert( C->root(), "" );
  validate_null_checks();

  // Now smoke old-space
  NOT_DEBUG( old->destruct_contents() );

  // ------------------------
  // Set up save-on-entry registers.
  Fixup_Save_On_Entry( );

  { // Cleanup mach IR after selection phase is over.
    Compile::TracePhase tp("postselect_cleanup", &timers[_t_postselect_cleanup]);
    do_postselect_cleanup();
    if (C->failing())  return;
    assert(verify_after_postselect_cleanup(), "");
  }
}

//------------------------------Fixup_Save_On_Entry----------------------------
// The stated purpose of this routine is to take care of save-on-entry
// registers.  However, the overall goal of the Match phase is to convert the
// graph into machine-specific instructions which have RegMasks to guide
// allocation.  So what this procedure really does is put a valid RegMask on
// each input to the machine-specific variations of all Return, TailCall and
// Halt instructions.  It also adds edges to define the save-on-entry values
// (and of course gives them a mask).

static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
  RegMask *rms = NEW_RESOURCE_ARRAY( RegMask, size );
  // Do all the pre-defined register masks
  rms[TypeFunc::Control  ] = RegMask::Empty;
  rms[TypeFunc::I_O      ] = RegMask::Empty;
  rms[TypeFunc::Memory   ] = RegMask::Empty;
  rms[TypeFunc::ReturnAdr] = ret_adr;
  rms[TypeFunc::FramePtr ] = fp;
  return rms;
}

int Matcher::scalable_predicate_reg_slots() {
  assert(Matcher::has_predicated_vectors() && Matcher::supports_scalable_vector(),
        "scalable predicate vector should be supported");
  int vector_reg_bit_size = Matcher::scalable_vector_reg_size(T_BYTE) << LogBitsPerByte;
  // We assume each predicate register is one-eighth the size of the
  // scalable vector register, i.e. one mask bit per vector byte.
  int predicate_reg_bit_size = vector_reg_bit_size >> 3;
  // Compute the number of slots required when a scalable predicate
  // register is spilled. E.g. if the scalable vector register is 640 bits,
  // the predicate register is 80 bits, which is 2.5 slots.
  // We round the slot count up to a power of 2, which is required
  // by find_first_set().
  int slots = predicate_reg_bit_size & (BitsPerInt - 1)
              ? (predicate_reg_bit_size >> LogBitsPerInt) + 1
              : predicate_reg_bit_size >> LogBitsPerInt;
  return round_up_power_of_2(slots);
}
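
// Continuing the 640-bit example above: an 80-bit predicate register needs
// 80 / 32 == 2 full 32-bit slots plus one partial slot, i.e. 3 slots, which
// round_up_power_of_2() bumps to 4 -- the granularity find_first_set() expects.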

#define NOF_STACK_MASKS (3*13)

// Create the initial stack mask used by values spilling to the stack.
// Disallow any debug info in outgoing argument areas by setting the
// initial mask accordingly.
void Matcher::init_first_stack_mask() {

  // Allocate storage for spill masks as masks for the appropriate load type.
  RegMask *rms = (RegMask*)C->comp_arena()->AmallocWords(sizeof(RegMask) * NOF_STACK_MASKS);

  // Initialize empty placeholder masks into the newly allocated arena
  for (int i = 0; i < NOF_STACK_MASKS; i++) {
    new (rms + i) RegMask();
  }

  idealreg2spillmask  [Op_RegN] = &rms[0];
  idealreg2spillmask  [Op_RegI] = &rms[1];
  idealreg2spillmask  [Op_RegL] = &rms[2];
  idealreg2spillmask  [Op_RegF] = &rms[3];
  idealreg2spillmask  [Op_RegD] = &rms[4];
  idealreg2spillmask  [Op_RegP] = &rms[5];

  idealreg2debugmask  [Op_RegN] = &rms[6];
  idealreg2debugmask  [Op_RegI] = &rms[7];
  idealreg2debugmask  [Op_RegL] = &rms[8];
  idealreg2debugmask  [Op_RegF] = &rms[9];
  idealreg2debugmask  [Op_RegD] = &rms[10];
  idealreg2debugmask  [Op_RegP] = &rms[11];

  idealreg2mhdebugmask[Op_RegN] = &rms[12];
  idealreg2mhdebugmask[Op_RegI] = &rms[13];
  idealreg2mhdebugmask[Op_RegL] = &rms[14];
  idealreg2mhdebugmask[Op_RegF] = &rms[15];
  idealreg2mhdebugmask[Op_RegD] = &rms[16];
  idealreg2mhdebugmask[Op_RegP] = &rms[17];

  idealreg2spillmask  [Op_VecA] = &rms[18];
  idealreg2spillmask  [Op_VecS] = &rms[19];
  idealreg2spillmask  [Op_VecD] = &rms[20];
  idealreg2spillmask  [Op_VecX] = &rms[21];
  idealreg2spillmask  [Op_VecY] = &rms[22];
  idealreg2spillmask  [Op_VecZ] = &rms[23];

  idealreg2debugmask  [Op_VecA] = &rms[24];
  idealreg2debugmask  [Op_VecS] = &rms[25];
  idealreg2debugmask  [Op_VecD] = &rms[26];
  idealreg2debugmask  [Op_VecX] = &rms[27];
  idealreg2debugmask  [Op_VecY] = &rms[28];
  idealreg2debugmask  [Op_VecZ] = &rms[29];

  idealreg2mhdebugmask[Op_VecA] = &rms[30];
  idealreg2mhdebugmask[Op_VecS] = &rms[31];
  idealreg2mhdebugmask[Op_VecD] = &rms[32];
  idealreg2mhdebugmask[Op_VecX] = &rms[33];
  idealreg2mhdebugmask[Op_VecY] = &rms[34];
  idealreg2mhdebugmask[Op_VecZ] = &rms[35];

  idealreg2spillmask  [Op_RegVectMask] = &rms[36];
  idealreg2debugmask  [Op_RegVectMask] = &rms[37];
  idealreg2mhdebugmask[Op_RegVectMask] = &rms[38];

  OptoReg::Name i;

  // At first, start with the empty mask
  C->FIRST_STACK_mask().Clear();

  // Add in the incoming argument area
  OptoReg::Name init_in = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  for (i = init_in; i < _in_arg_limit; i = OptoReg::add(i,1)) {
    C->FIRST_STACK_mask().Insert(i);
  }
  // Add in all bits past the outgoing argument area
  guarantee(RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1)),
            "must be able to represent all call arguments in reg mask");
  OptoReg::Name init = _out_arg_limit;
  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1)) {
    C->FIRST_STACK_mask().Insert(i);
  }
  // Finally, set the "infinite stack" bit.
  C->FIRST_STACK_mask().set_AllStack();
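
  // A sketch of the resulting mask, under the frame layout computed above
  // (slot boundaries are platform- and method-dependent):
  //   [stack0         .. init_in)          excluded: locks/preserve areas
  //   [init_in        .. _in_arg_limit)    included: incoming argument slots
  //   [_in_arg_limit  .. _out_arg_limit)   excluded: outgoing args + preserve
  //   [_out_arg_limit .. representable)    included, extended by AllStack bit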

  // Make spill masks.  Registers for their class, plus FIRST_STACK_mask.
  RegMask aligned_stack_mask = C->FIRST_STACK_mask();
  // Keep spill masks aligned.
  aligned_stack_mask.clear_to_pairs();
  assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
  RegMask scalable_stack_mask = aligned_stack_mask;

  *idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP];
#ifdef _LP64
  *idealreg2spillmask[Op_RegN] = *idealreg2regmask[Op_RegN];
   idealreg2spillmask[Op_RegN]->OR(C->FIRST_STACK_mask());
   idealreg2spillmask[Op_RegP]->OR(aligned_stack_mask);
#else
   idealreg2spillmask[Op_RegP]->OR(C->FIRST_STACK_mask());
#endif
  *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI];
   idealreg2spillmask[Op_RegI]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL];
   idealreg2spillmask[Op_RegL]->OR(aligned_stack_mask);
  *idealreg2spillmask[Op_RegF] = *idealreg2regmask[Op_RegF];
   idealreg2spillmask[Op_RegF]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegD] = *idealreg2regmask[Op_RegD];
   idealreg2spillmask[Op_RegD]->OR(aligned_stack_mask);

  if (Matcher::has_predicated_vectors()) {
    *idealreg2spillmask[Op_RegVectMask] = *idealreg2regmask[Op_RegVectMask];
     idealreg2spillmask[Op_RegVectMask]->OR(aligned_stack_mask);
  } else {
    *idealreg2spillmask[Op_RegVectMask] = RegMask::Empty;
  }

  if (Matcher::vector_size_supported(T_BYTE,4)) {
    *idealreg2spillmask[Op_VecS] = *idealreg2regmask[Op_VecS];
     idealreg2spillmask[Op_VecS]->OR(C->FIRST_STACK_mask());
  } else {
    *idealreg2spillmask[Op_VecS] = RegMask::Empty;
  }

  if (Matcher::vector_size_supported(T_FLOAT,2)) {
    // For VecD we need dual alignment and 8 bytes (2 slots) for spills.
    // RA guarantees such alignment since it is needed for Double and Long values.
    *idealreg2spillmask[Op_VecD] = *idealreg2regmask[Op_VecD];
     idealreg2spillmask[Op_VecD]->OR(aligned_stack_mask);
  } else {
    *idealreg2spillmask[Op_VecD] = RegMask::Empty;
  }

  if (Matcher::vector_size_supported(T_FLOAT,4)) {
    // For VecX we need quad alignment and 16 bytes (4 slots) for spills.
    //
    // RA can use input argument stack slots for spills, but until RA runs
    // we don't know the frame size or the offsets of the input arg stack slots.
    //
    // Exclude the last input arg stack slots to avoid spilling vectors there,
    // otherwise vector spills could stomp over stack slots in the caller frame.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecX); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
     aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX);
     assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecX] = *idealreg2regmask[Op_VecX];
     idealreg2spillmask[Op_VecX]->OR(aligned_stack_mask);
  } else {
    *idealreg2spillmask[Op_VecX] = RegMask::Empty;
  }

  if (Matcher::vector_size_supported(T_FLOAT,8)) {
    // For VecY we need octuple alignment and 32 bytes (8 slots) for spills.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecY); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
     aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY);
     assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY];
     idealreg2spillmask[Op_VecY]->OR(aligned_stack_mask);
  } else {
    *idealreg2spillmask[Op_VecY] = RegMask::Empty;
  }

  if (Matcher::vector_size_supported(T_FLOAT,16)) {
    // For VecZ we need 16-slot alignment and 64 bytes (16 slots) for spills.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecZ); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
     aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecZ);
     assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecZ] = *idealreg2regmask[Op_VecZ];
     idealreg2spillmask[Op_VecZ]->OR(aligned_stack_mask);
  } else {
    *idealreg2spillmask[Op_VecZ] = RegMask::Empty;
  }

  if (Matcher::supports_scalable_vector()) {
    int k = 1;
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    if (Matcher::has_predicated_vectors()) {
      // Exclude the last input arg stack slots to avoid spilling the vector register there,
      // otherwise RegVectMask spills could stomp over stack slots in the caller frame.
      for (; (in >= init_in) && (k < scalable_predicate_reg_slots()); k++) {
        scalable_stack_mask.Remove(in);
        in = OptoReg::add(in, -1);
      }

      // For RegVectMask
      scalable_stack_mask.clear_to_sets(scalable_predicate_reg_slots());
      assert(scalable_stack_mask.is_AllStack(), "should be infinite stack");
      *idealreg2spillmask[Op_RegVectMask] = *idealreg2regmask[Op_RegVectMask];
      idealreg2spillmask[Op_RegVectMask]->OR(scalable_stack_mask);
    }

    // Exclude the last input arg stack slots to avoid spilling the vector register there,
    // otherwise vector spills could stomp over stack slots in the caller frame.
    for (; (in >= init_in) && (k < scalable_vector_reg_size(T_FLOAT)); k++) {
      scalable_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }

    // For VecA
     scalable_stack_mask.clear_to_sets(RegMask::SlotsPerVecA);
     assert(scalable_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecA] = *idealreg2regmask[Op_VecA];
     idealreg2spillmask[Op_VecA]->OR(scalable_stack_mask);
  } else {
    *idealreg2spillmask[Op_VecA] = RegMask::Empty;
  }

  if (UseFPUForSpilling) {
    // This mask logic assumes that the spill operations are
    // symmetric and that the registers involved are the same size.
    // On SPARC, for instance, we may have to use 64-bit moves that
    // kill 2 registers when used with F0-F31.
    idealreg2spillmask[Op_RegI]->OR(*idealreg2regmask[Op_RegF]);
    idealreg2spillmask[Op_RegF]->OR(*idealreg2regmask[Op_RegI]);
#ifdef _LP64
    idealreg2spillmask[Op_RegN]->OR(*idealreg2regmask[Op_RegF]);
    idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
    idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
    idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegD]);
#else
    idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegF]);
#ifdef ARM
    // ARM has support for moving 64-bit values between a pair of
    // integer registers and a double register
    idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
    idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
#endif
#endif
  }

  // Make up debug masks.  Any spill slot plus callee-save (SOE) registers.
  // Caller-save (SOC, AS) registers are assumed to be trashable by the various
  // inline-cache fixup routines.
  *idealreg2debugmask  [Op_RegN] = *idealreg2spillmask[Op_RegN];
  *idealreg2debugmask  [Op_RegI] = *idealreg2spillmask[Op_RegI];
  *idealreg2debugmask  [Op_RegL] = *idealreg2spillmask[Op_RegL];
  *idealreg2debugmask  [Op_RegF] = *idealreg2spillmask[Op_RegF];
  *idealreg2debugmask  [Op_RegD] = *idealreg2spillmask[Op_RegD];
  *idealreg2debugmask  [Op_RegP] = *idealreg2spillmask[Op_RegP];
  *idealreg2debugmask  [Op_RegVectMask] = *idealreg2spillmask[Op_RegVectMask];

  *idealreg2debugmask  [Op_VecA] = *idealreg2spillmask[Op_VecA];
  *idealreg2debugmask  [Op_VecS] = *idealreg2spillmask[Op_VecS];
  *idealreg2debugmask  [Op_VecD] = *idealreg2spillmask[Op_VecD];
  *idealreg2debugmask  [Op_VecX] = *idealreg2spillmask[Op_VecX];
  *idealreg2debugmask  [Op_VecY] = *idealreg2spillmask[Op_VecY];
  *idealreg2debugmask  [Op_VecZ] = *idealreg2spillmask[Op_VecZ];

  *idealreg2mhdebugmask[Op_RegN] = *idealreg2spillmask[Op_RegN];
  *idealreg2mhdebugmask[Op_RegI] = *idealreg2spillmask[Op_RegI];
  *idealreg2mhdebugmask[Op_RegL] = *idealreg2spillmask[Op_RegL];
  *idealreg2mhdebugmask[Op_RegF] = *idealreg2spillmask[Op_RegF];
  *idealreg2mhdebugmask[Op_RegD] = *idealreg2spillmask[Op_RegD];
  *idealreg2mhdebugmask[Op_RegP] = *idealreg2spillmask[Op_RegP];
  *idealreg2mhdebugmask[Op_RegVectMask] = *idealreg2spillmask[Op_RegVectMask];

  *idealreg2mhdebugmask[Op_VecA] = *idealreg2spillmask[Op_VecA];
  *idealreg2mhdebugmask[Op_VecS] = *idealreg2spillmask[Op_VecS];
  *idealreg2mhdebugmask[Op_VecD] = *idealreg2spillmask[Op_VecD];
  *idealreg2mhdebugmask[Op_VecX] = *idealreg2spillmask[Op_VecX];
  *idealreg2mhdebugmask[Op_VecY] = *idealreg2spillmask[Op_VecY];
  *idealreg2mhdebugmask[Op_VecZ] = *idealreg2spillmask[Op_VecZ];

  // Prevent stub compilations from attempting to reference
  // callee-saved (SOE) registers from debug info
  bool exclude_soe = !Compile::current()->is_method_compilation();
  RegMask* caller_save_mask = exclude_soe ? &caller_save_regmask_exclude_soe : &caller_save_regmask;
  RegMask* mh_caller_save_mask = exclude_soe ? &mh_caller_save_regmask_exclude_soe : &mh_caller_save_regmask;

  idealreg2debugmask[Op_RegN]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegI]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegL]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegF]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegD]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegP]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_RegVectMask]->SUBTRACT(*caller_save_mask);

  idealreg2debugmask[Op_VecA]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_VecS]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_VecD]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_VecX]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_VecY]->SUBTRACT(*caller_save_mask);
  idealreg2debugmask[Op_VecZ]->SUBTRACT(*caller_save_mask);

  idealreg2mhdebugmask[Op_RegN]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegI]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegL]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegF]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegD]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegP]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_RegVectMask]->SUBTRACT(*mh_caller_save_mask);

  idealreg2mhdebugmask[Op_VecA]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_VecS]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_VecD]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_VecX]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_VecY]->SUBTRACT(*mh_caller_save_mask);
  idealreg2mhdebugmask[Op_VecZ]->SUBTRACT(*mh_caller_save_mask);
}

//---------------------------is_save_on_entry----------------------------------
bool Matcher::is_save_on_entry(int reg) {
  return
    _register_save_policy[reg] == 'E' ||
    _register_save_policy[reg] == 'A'; // Save-on-entry register?
}

//---------------------------Fixup_Save_On_Entry-------------------------------
void Matcher::Fixup_Save_On_Entry( ) {
  init_first_stack_mask();

  Node *root = C->root();       // Short name for root
  // Count number of save-on-entry registers.
  uint soe_cnt = number_of_saved_registers();
  uint i;

  // Find the procedure Start Node
  StartNode *start = C->start();
  assert( start, "Expect a start node" );

  // Input RegMask array shared by all Returns.
  // The type for doubles and longs has a count of 2, but
  // there is only 1 returned value
  uint ret_edge_cnt = TypeFunc::Parms + ((C->tf()->range()->cnt() == TypeFunc::Parms) ? 0 : 1);
  RegMask *ret_rms  = init_input_masks( ret_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  // Returns have 0 or 1 returned values depending on call signature.
  // Return register is specified by return_value in the AD file.
  if (ret_edge_cnt > TypeFunc::Parms)
    ret_rms[TypeFunc::Parms+0] = _return_value_mask;

  // Input RegMask array shared by all ForwardExceptions
  uint forw_exc_edge_cnt = TypeFunc::Parms;
  RegMask* forw_exc_rms  = init_input_masks( forw_exc_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Input RegMask array shared by all Rethrows.
  uint reth_edge_cnt = TypeFunc::Parms+1;
  RegMask *reth_rms  = init_input_masks( reth_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  // Rethrow takes exception oop only, but in the argument 0 slot.
  OptoReg::Name reg = find_receiver();
  if (reg >= 0) {
    reth_rms[TypeFunc::Parms] = mreg2regmask[reg];
#ifdef _LP64
    // Need two slots for ptrs in 64-bit land
    reth_rms[TypeFunc::Parms].Insert(OptoReg::add(OptoReg::Name(reg), 1));
#endif
  }

  // Input RegMask array shared by all TailCalls
  uint tail_call_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_call_rms = init_input_masks( tail_call_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Input RegMask array shared by all TailJumps
  uint tail_jump_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_jump_rms = init_input_masks( tail_jump_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // TailCalls have 2 returned values (target & moop), whose masks come
  // from the usual MachNode/MachOper mechanism.  Find a sample
  // TailCall to extract these masks and put the correct masks into
  // the tail_call_rms array.
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Op_TailCall ) {
      tail_call_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_call_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
    }
  }

  // TailJumps have 2 returned values (target & ex_oop), whose masks come
  // from the usual MachNode/MachOper mechanism.  Find a sample
  // TailJump to extract these masks and put the correct masks into
  // the tail_jump_rms array.
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Op_TailJump ) {
      tail_jump_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_jump_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
    }
  }

  // Input RegMask array shared by all Halts
  uint halt_edge_cnt = TypeFunc::Parms;
  RegMask *halt_rms = init_input_masks( halt_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Capture the return input masks into each exit flavor
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *exit = root->in(i)->as_MachReturn();
    switch( exit->ideal_Opcode() ) {
      case Op_Return   : exit->_in_rms = ret_rms;  break;
      case Op_Rethrow  : exit->_in_rms = reth_rms; break;
      case Op_TailCall : exit->_in_rms = tail_call_rms; break;
      case Op_TailJump : exit->_in_rms = tail_jump_rms; break;
      case Op_ForwardException: exit->_in_rms = forw_exc_rms; break;
      case Op_Halt     : exit->_in_rms = halt_rms; break;
      default          : ShouldNotReachHere();
    }
  }

  // Next unused projection number from Start.
  int proj_cnt = C->tf()->domain()->cnt();

  // Do all the save-on-entry registers.  Make projections from Start for
  // them, and give them a use at the exit points.  To the allocator, they
  // look like incoming register arguments.
  for( i = 0; i < _last_Mach_Reg; i++ ) {
    if( is_save_on_entry(i) ) {

      // Add the save-on-entry to the mask array
      ret_rms      [      ret_edge_cnt] = mreg2regmask[i];
      reth_rms     [     reth_edge_cnt] = mreg2regmask[i];
      tail_call_rms[tail_call_edge_cnt] = mreg2regmask[i];
      tail_jump_rms[tail_jump_edge_cnt] = mreg2regmask[i];
      forw_exc_rms [ forw_exc_edge_cnt] = mreg2regmask[i];
      // Halts need the SOE registers, but only in the stack as debug info.
      // A just-prior uncommon-trap or deoptimization will use the SOE regs.
      halt_rms     [     halt_edge_cnt] = *idealreg2spillmask[_register_save_type[i]];

      Node *mproj;

      // Is this a RegF low half of a RegD?  Double up 2 adjacent RegF's
      // into a single RegD.
      if( (i&1) == 0 &&
          _register_save_type[i  ] == Op_RegF &&
          _register_save_type[i+1] == Op_RegF &&
          is_save_on_entry(i+1) ) {
        // Add other bit for double
        ret_rms      [      ret_edge_cnt].Insert(OptoReg::Name(i+1));
        reth_rms     [     reth_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
        forw_exc_rms [ forw_exc_edge_cnt].Insert(OptoReg::Name(i+1));
        halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
        mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegD );
        proj_cnt += 2;          // Skip 2 for doubles
      }
      else if( (i&1) == 1 &&    // Else check for high half of double
               _register_save_type[i-1] == Op_RegF &&
               _register_save_type[i  ] == Op_RegF &&
               is_save_on_entry(i-1) ) {
        ret_rms      [      ret_edge_cnt] = RegMask::Empty;
        reth_rms     [     reth_edge_cnt] = RegMask::Empty;
        tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
        tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
        forw_exc_rms [ forw_exc_edge_cnt] = RegMask::Empty;
        halt_rms     [     halt_edge_cnt] = RegMask::Empty;
        mproj = C->top();
      }
      // Is this a RegI low half of a RegL?  Double up 2 adjacent RegI's
      // into a single RegL.
      else if( (i&1) == 0 &&
          _register_save_type[i  ] == Op_RegI &&
          _register_save_type[i+1] == Op_RegI &&
        is_save_on_entry(i+1) ) {
        // Add other bit for long
        ret_rms      [      ret_edge_cnt].Insert(OptoReg::Name(i+1));
        reth_rms     [     reth_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
        forw_exc_rms [ forw_exc_edge_cnt].Insert(OptoReg::Name(i+1));
        halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
        mproj = new MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegL );
        proj_cnt += 2;          // Skip 2 for longs
      }
      else if( (i&1) == 1 &&    // Else check for high half of long
               _register_save_type[i-1] == Op_RegI &&
               _register_save_type[i  ] == Op_RegI &&
               is_save_on_entry(i-1) ) {
        ret_rms      [      ret_edge_cnt] = RegMask::Empty;
        reth_rms     [     reth_edge_cnt] = RegMask::Empty;
        tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
        tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
        forw_exc_rms [ forw_exc_edge_cnt] = RegMask::Empty;
        halt_rms     [     halt_edge_cnt] = RegMask::Empty;
        mproj = C->top();
      } else {
        // Make a projection for it off the Start
        mproj = new MachProjNode( start, proj_cnt++, ret_rms[ret_edge_cnt], _register_save_type[i] );
      }

      ret_edge_cnt ++;
      reth_edge_cnt ++;
      tail_call_edge_cnt ++;
      tail_jump_edge_cnt ++;
      forw_exc_edge_cnt++;
      halt_edge_cnt ++;

      // Add a use of the SOE register to all exit paths
      for (uint j=1; j < root->req(); j++) {
        root->in(j)->add_req(mproj);
      }
    } // End of if a save-on-entry register
  } // End of for all machine registers
}

//------------------------------init_spill_mask--------------------------------
void Matcher::init_spill_mask( Node *ret ) {
  if( idealreg2regmask[Op_RegI] ) return; // One time only init

  OptoReg::c_frame_pointer = c_frame_pointer();
  c_frame_ptr_mask = c_frame_pointer();
#ifdef _LP64
  // pointers are twice as big
  c_frame_ptr_mask.Insert(OptoReg::add(c_frame_pointer(),1));
#endif

  // Start at OptoReg::stack0()
  STACK_ONLY_mask.Clear();
  OptoReg::Name init = OptoReg::stack2reg(0);
  // STACK_ONLY_mask is all stack bits
  OptoReg::Name i;
  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1))
    STACK_ONLY_mask.Insert(i);
  // Also set the "infinite stack" bit.
  STACK_ONLY_mask.set_AllStack();

  for (i = OptoReg::Name(0); i < OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i, 1)) {
    // Copy the register names over into the shared world.
    // SharedInfo::regName[i] = regName[i];
    // Handy RegMasks per machine register
    mreg2regmask[i].Insert(i);

    // Set up regmasks used to exclude save-on-call (and always-save) registers from debug masks.
    if (_register_save_policy[i] == 'C' ||
        _register_save_policy[i] == 'A') {
      caller_save_regmask.Insert(i);
      mh_caller_save_regmask.Insert(i);
    }
    // Exclude save-on-entry registers from debug masks for stub compilations.
    if (_register_save_policy[i] == 'C' ||
        _register_save_policy[i] == 'A' ||
        _register_save_policy[i] == 'E') {
      caller_save_regmask_exclude_soe.Insert(i);
      mh_caller_save_regmask_exclude_soe.Insert(i);
    }
  }

  // Also exclude the register we use to save the SP for MethodHandle
  // invokes from the corresponding MH debug masks
  const RegMask sp_save_mask = method_handle_invoke_SP_save_mask();
  mh_caller_save_regmask.OR(sp_save_mask);
  mh_caller_save_regmask_exclude_soe.OR(sp_save_mask);

  // Grab the Frame Pointer
  Node *fp  = ret->in(TypeFunc::FramePtr);
  // Share frame pointer while making spill ops
  set_shared(fp);

// Get the ADLC notion of the right regmask, for each basic type.
#ifdef _LP64
  idealreg2regmask[Op_RegN] = regmask_for_ideal_register(Op_RegN, ret);
#endif
  idealreg2regmask[Op_RegI] = regmask_for_ideal_register(Op_RegI, ret);
  idealreg2regmask[Op_RegP] = regmask_for_ideal_register(Op_RegP, ret);
  idealreg2regmask[Op_RegF] = regmask_for_ideal_register(Op_RegF, ret);
  idealreg2regmask[Op_RegD] = regmask_for_ideal_register(Op_RegD, ret);
  idealreg2regmask[Op_RegL] = regmask_for_ideal_register(Op_RegL, ret);
  idealreg2regmask[Op_VecA] = regmask_for_ideal_register(Op_VecA, ret);
  idealreg2regmask[Op_VecS] = regmask_for_ideal_register(Op_VecS, ret);
  idealreg2regmask[Op_VecD] = regmask_for_ideal_register(Op_VecD, ret);
  idealreg2regmask[Op_VecX] = regmask_for_ideal_register(Op_VecX, ret);
  idealreg2regmask[Op_VecY] = regmask_for_ideal_register(Op_VecY, ret);
  idealreg2regmask[Op_VecZ] = regmask_for_ideal_register(Op_VecZ, ret);
  idealreg2regmask[Op_RegVectMask] = regmask_for_ideal_register(Op_RegVectMask, ret);
}

#ifdef ASSERT
static void match_alias_type(Compile* C, Node* n, Node* m) {
  if (!VerifyAliases)  return;  // do not go looking for trouble by default
  const TypePtr* nat = n->adr_type();
  const TypePtr* mat = m->adr_type();
  int nidx = C->get_alias_index(nat);
  int midx = C->get_alias_index(mat);
  // Detune the assert for cases like (AndI 0xFF (LoadB p)).
  if (nidx == Compile::AliasIdxTop && midx >= Compile::AliasIdxRaw) {
    for (uint i = 1; i < n->req(); i++) {
      Node* n1 = n->in(i);
      const TypePtr* n1at = n1->adr_type();
      if (n1at != nullptr) {
        nat = n1at;
        nidx = C->get_alias_index(n1at);
      }
    }
  }
  // %%% Kludgery.  Instead, fix ideal adr_type methods for all these cases:
  if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxRaw) {
    switch (n->Opcode()) {
    case Op_PrefetchAllocation:
      nidx = Compile::AliasIdxRaw;
      nat = TypeRawPtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxRaw && midx == Compile::AliasIdxTop) {
    switch (n->Opcode()) {
    case Op_ClearArray:
      midx = Compile::AliasIdxRaw;
      mat = TypeRawPtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxBot) {
    switch (n->Opcode()) {
    case Op_Return:
    case Op_Rethrow:
    case Op_Halt:
    case Op_TailCall:
    case Op_TailJump:
    case Op_ForwardException:
      nidx = Compile::AliasIdxBot;
      nat = TypePtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxBot && midx == Compile::AliasIdxTop) {
    switch (n->Opcode()) {
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_StrIndexOfChar:
    case Op_AryEq:
    case Op_VectorizedHashCode:
    case Op_CountPositives:
    case Op_MemBarVolatile:
    case Op_MemBarCPUOrder: // %%% these ideals should have narrower adr_type?
    case Op_StrInflatedCopy:
    case Op_StrCompressedCopy:
    case Op_OnSpinWait:
    case Op_EncodeISOArray:
      nidx = Compile::AliasIdxTop;
      nat = nullptr;
      break;
    }
  }
  if (nidx != midx) {
    if (PrintOpto || (PrintMiscellaneous && (WizardMode || Verbose))) {
      tty->print_cr("==== Matcher alias shift %d => %d", nidx, midx);
      n->dump();
      m->dump();
    }
    assert(C->subsume_loads() && C->must_alias(nat, midx),
           "must not lose alias info when matching");
  }
}
#endif

//------------------------------xform------------------------------------------
// Given a Node in old-space, Match him (Label/Reduce) to produce a machine
// Node in new-space.  Given a new-space Node, recursively walk his children.
Node *Matcher::transform( Node *n ) { ShouldNotCallThis(); return n; }
Node *Matcher::xform( Node *n, int max_stack ) {
  // Use one stack to keep both: child's node/state and parent's node/index
  MStack mstack(max_stack * 2 * 2); // usually: C->live_nodes() * 2 * 2
  mstack.push(n, Visit, nullptr, -1);  // set null as parent to indicate root
  while (mstack.is_nonempty()) {
    C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions");
    if (C->failing()) return nullptr;
    n = mstack.node();          // Leave node on stack
    Node_State nstate = mstack.state();
    if (nstate == Visit) {
      mstack.set_state(Post_Visit);
      Node *oldn = n;
      // Old-space or new-space check
      if (!C->node_arena()->contains(n)) {
        // Old space!
        Node* m;
        if (has_new_node(n)) {  // Not yet Label/Reduced
          m = new_node(n);
        } else {
          if (!is_dontcare(n)) { // Matcher can match this guy
            // Calls match special.  They match alone with no children.
            // Their children, the incoming arguments, match normally.
            m = n->is_SafePoint() ? match_sfpt(n->as_SafePoint()):match_tree(n);
            if (C->failing())  return nullptr;
            if (m == nullptr) { Matcher::soft_match_failure(); return nullptr; }
            if (n->is_MemBar()) {
              m->as_MachMemBar()->set_adr_type(n->adr_type());
            }
          } else {                  // Nothing the matcher cares about
            if (n->is_Proj() && n->in(0) != nullptr && n->in(0)->is_Multi()) {       // Projections?
              // Convert to machine-dependent projection
              m = n->in(0)->as_Multi()->match( n->as_Proj(), this );
              NOT_PRODUCT(record_new2old(m, n);)
              if (m->in(0) != nullptr) // m might be top
                collect_null_checks(m, n);
            } else {                // Else just a regular ol' guy
              m = n->clone();       // So just clone into new-space
              NOT_PRODUCT(record_new2old(m, n);)
              // Def-Use edges will be added incrementally as Uses
              // of this node are matched.
              assert(m->outcnt() == 0, "no Uses of this clone yet");
            }
          }

          set_new_node(n, m);       // Map old to new
          if (_old_node_note_array != nullptr) {
            Node_Notes* nn = C->locate_node_notes(_old_node_note_array,
                                                  n->_idx);
            C->set_node_notes_at(m->_idx, nn);
          }
          debug_only(match_alias_type(C, n, m));
        }
        n = m;    // n is now a new-space node
        mstack.set_node(n);
      }

      // New space!
      if (_visited.test_set(n->_idx)) continue; // while(mstack.is_nonempty())

      int i;
      // Put precedence edges on stack first (match them last).
      for (i = oldn->req(); (uint)i < oldn->len(); i++) {
        Node *m = oldn->in(i);
        if (m == nullptr) break;
        // set -1 to call add_prec() instead of set_req() during Step1
        mstack.push(m, Visit, n, -1);
      }

      // Handle precedence edges for interior nodes
      for (i = n->len()-1; (uint)i >= n->req(); i--) {
        Node *m = n->in(i);
        if (m == nullptr || C->node_arena()->contains(m)) continue;
        n->rm_prec(i);
        // set -1 to call add_prec() instead of set_req() during Step1
        mstack.push(m, Visit, n, -1);
      }

      // For constant debug info, I'd rather have unmatched constants.
      int cnt = n->req();
      JVMState* jvms = n->jvms();
      int debug_cnt = jvms ? jvms->debug_start() : cnt;

      // Now do only debug info.  Clone constants rather than matching.
      // Constants are represented directly in the debug info without
      // the need for executable machine instructions.
      // Monitor boxes are also represented directly.
      for (i = cnt - 1; i >= debug_cnt; --i) { // For all debug inputs do
        Node *m = n->in(i);          // Get input
        int op = m->Opcode();
        assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites");
        if( op == Op_ConI || op == Op_ConP || op == Op_ConN || op == Op_ConNKlass ||
            op == Op_ConF || op == Op_ConD || op == Op_ConL
            // || op == Op_BoxLock  // %%%% enable this and remove (+++) in chaitin.cpp
            ) {
          m = m->clone();
          NOT_PRODUCT(record_new2old(m, n));
          mstack.push(m, Post_Visit, n, i); // Don't need to visit
          mstack.push(m->in(0), Visit, m, 0);
        } else {
          mstack.push(m, Visit, n, i);
        }
      }

      // And now walk his children, and convert his inputs to new-space.
      for( ; i >= 0; --i ) { // For all normal inputs do
        Node *m = n->in(i);  // Get input
        if(m != nullptr)
          mstack.push(m, Visit, n, i);
      }

    }
    else if (nstate == Post_Visit) {
      // Set xformed input
      Node *p = mstack.parent();
      if (p != nullptr) { // root doesn't have parent
        int i = (int)mstack.index();
        if (i >= 0)
          p->set_req(i, n); // required input
        else if (i == -1)
          p->add_prec(n);   // precedence input
        else
          ShouldNotReachHere();
      }
      mstack.pop(); // remove processed node from stack
    }
    else {
      ShouldNotReachHere();
    }
  } // while (mstack.is_nonempty())
  return n; // Return new-space Node
}
1269 
1270 //------------------------------warp_outgoing_stk_arg------------------------
1271 OptoReg::Name Matcher::warp_outgoing_stk_arg( VMReg reg, OptoReg::Name begin_out_arg_area, OptoReg::Name &out_arg_limit_per_call ) {
1272   // Convert outgoing argument location to a pre-biased stack offset
1273   if (reg->is_stack()) {
1274     OptoReg::Name warped = reg->reg2stack();
1275     // Adjust the stack slot offset to be the register number used
1276     // by the allocator.
1277     warped = OptoReg::add(begin_out_arg_area, warped);
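         // Illustrative example (numbers assumed, not from the sources): an
         // argument at stack slot 3 with begin_out_arg_area == 40 becomes
         // allocator register 43.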
1278     // Keep track of the largest numbered stack slot used for an arg.
1279     // Largest used slot per call-site indicates the amount of stack
1280     // that is killed by the call.
1281     if( warped >= out_arg_limit_per_call )
1282       out_arg_limit_per_call = OptoReg::add(warped,1);
1283     if (!RegMask::can_represent_arg(warped)) {
1284       // Bailout: for example, not enough space on the stack for all arguments; happens for methods with too many arguments.
1285       C->record_method_not_compilable("unsupported calling sequence");
1286       return OptoReg::Bad;
1287     }
1288     return warped;
1289   }
1290   return OptoReg::as_OptoReg(reg);
1291 }
1292 
1293 
1294 //------------------------------match_sfpt-------------------------------------
1295 // Helper function to match call instructions.  Calls match special.
1296 // They match alone with no children.  Their children, the incoming
1297 // arguments, match normally.
1298 MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
1299   MachSafePointNode *msfpt = nullptr;
1300   MachCallNode      *mcall = nullptr;
1301   uint               cnt;
1302   // Split out case for SafePoint vs Call
1303   CallNode *call;
1304   const TypeTuple *domain;
1305   ciMethod*        method = nullptr;
1306   bool             is_method_handle_invoke = false;  // for special kill effects
1307   if( sfpt->is_Call() ) {
1308     call = sfpt->as_Call();
1309     domain = call->tf()->domain();
1310     cnt = domain->cnt();
1311 
1312     // Match just the call, nothing else
1313     MachNode *m = match_tree(call);
1314     if (C->failing())  return nullptr;
1315     if( m == nullptr ) { Matcher::soft_match_failure(); return nullptr; }
1316 
1317     // Copy data from the Ideal SafePoint to the machine version
1318     mcall = m->as_MachCall();
1319 
1320     mcall->set_tf(                  call->tf());
1321     mcall->set_entry_point(         call->entry_point());
1322     mcall->set_cnt(                 call->cnt());
1323     mcall->set_guaranteed_safepoint(call->guaranteed_safepoint());
1324 
1325     if( mcall->is_MachCallJava() ) {
1326       MachCallJavaNode *mcall_java  = mcall->as_MachCallJava();
1327       const CallJavaNode *call_java =  call->as_CallJava();
1328       assert(call_java->validate_symbolic_info(), "inconsistent info");
1329       method = call_java->method();
1330       mcall_java->_method = method;
1331       mcall_java->_optimized_virtual = call_java->is_optimized_virtual();
1332       is_method_handle_invoke = call_java->is_method_handle_invoke();
1333       mcall_java->_method_handle_invoke = is_method_handle_invoke;
1334       mcall_java->_override_symbolic_info = call_java->override_symbolic_info();
1335       mcall_java->_arg_escape = call_java->arg_escape();
1336       if (is_method_handle_invoke) {
1337         C->set_has_method_handle_invokes(true);
1338       }
1339       if( mcall_java->is_MachCallStaticJava() )
1340         mcall_java->as_MachCallStaticJava()->_name =
1341          call_java->as_CallStaticJava()->_name;
1342       if( mcall_java->is_MachCallDynamicJava() )
1343         mcall_java->as_MachCallDynamicJava()->_vtable_index =
1344          call_java->as_CallDynamicJava()->_vtable_index;
1345     }
1346     else if( mcall->is_MachCallRuntime() ) {
1347       MachCallRuntimeNode* mach_call_rt = mcall->as_MachCallRuntime();
1348       mach_call_rt->_name = call->as_CallRuntime()->_name;
1349       mach_call_rt->_leaf_no_fp = call->is_CallLeafNoFP();
1350     }
1351     msfpt = mcall;
1352   }
1353   // This is a non-call safepoint
1354   else {
1355     call = nullptr;
1356     domain = nullptr;
1357     MachNode *mn = match_tree(sfpt);
1358     if (C->failing())  return nullptr;
1359     msfpt = mn->as_MachSafePoint();
1360     cnt = TypeFunc::Parms;
1361   }
1362   msfpt->_has_ea_local_in_scope = sfpt->has_ea_local_in_scope();
1363 
1364   // Advertise the correct memory effects (for anti-dependence computation).
1365   msfpt->set_adr_type(sfpt->adr_type());
1366 
1367   // Allocate a private array of RegMasks.  These RegMasks are not shared.
1368   msfpt->_in_rms = NEW_RESOURCE_ARRAY( RegMask, cnt );
1369   // Empty them all.
1370   for (uint i = 0; i < cnt; i++) ::new (&(msfpt->_in_rms[i])) RegMask();
1371 
1372   // Do all the pre-defined non-Empty register masks
1373   msfpt->_in_rms[TypeFunc::ReturnAdr] = _return_addr_mask;
1374   msfpt->_in_rms[TypeFunc::FramePtr ] = c_frame_ptr_mask;
1375 
1376   // Position at which the first outgoing argument can possibly be placed.
1377   OptoReg::Name begin_out_arg_area = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
1378   assert( is_even(begin_out_arg_area), "" );
1379   // Compute max outgoing register number per call site.
1380   OptoReg::Name out_arg_limit_per_call = begin_out_arg_area;
1381   // Calls to C may hammer extra stack slots above and beyond any arguments.
1382   // These are usually backing store for register arguments for varargs.
1383   if( call != nullptr && call->is_CallRuntime() )
1384     out_arg_limit_per_call = OptoReg::add(out_arg_limit_per_call,C->varargs_C_out_slots_killed());
1385 
1386 
1387   // Do the normal argument list (parameters) register masks
1388   int argcnt = cnt - TypeFunc::Parms;
1389   if( argcnt > 0 ) {          // Skip it all if we have no args
1390     BasicType *sig_bt  = NEW_RESOURCE_ARRAY( BasicType, argcnt );
1391     VMRegPair *parm_regs = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
1392     int i;
1393     for( i = 0; i < argcnt; i++ ) {
1394       sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
1395     }
1396     // V-call to pick proper calling convention
1397     call->calling_convention( sig_bt, parm_regs, argcnt );
1398 
1399 #ifdef ASSERT
1400     // Sanity check users' calling convention.  Really handy during
1401     // the initial porting effort.  Fairly expensive otherwise.
1402     { for (int i = 0; i<argcnt; i++) {
1403       if( !parm_regs[i].first()->is_valid() &&
1404           !parm_regs[i].second()->is_valid() ) continue;
1405       VMReg reg1 = parm_regs[i].first();
1406       VMReg reg2 = parm_regs[i].second();
1407       for (int j = 0; j < i; j++) {
1408         if( !parm_regs[j].first()->is_valid() &&
1409             !parm_regs[j].second()->is_valid() ) continue;
1410         VMReg reg3 = parm_regs[j].first();
1411         VMReg reg4 = parm_regs[j].second();
1412         if( !reg1->is_valid() ) {
1413           assert( !reg2->is_valid(), "valid halvsies" );
1414         } else if( !reg3->is_valid() ) {
1415           assert( !reg4->is_valid(), "valid halvsies" );
1416         } else {
1417           assert( reg1 != reg2, "calling conv. must produce distinct regs");
1418           assert( reg1 != reg3, "calling conv. must produce distinct regs");
1419           assert( reg1 != reg4, "calling conv. must produce distinct regs");
1420           assert( reg2 != reg3, "calling conv. must produce distinct regs");
1421           assert( reg2 != reg4 || !reg2->is_valid(), "calling conv. must produce distinct regs");
1422           assert( reg3 != reg4, "calling conv. must produce distinct regs");
1423         }
1424       }
1425     }
1426     }
1427 #endif
1428 
1429     // Visit each argument.  Compute its outgoing register mask.
1430     // A single value may occupy a register pair, setting two bits in the mask.
1431     // Compute max over all outgoing arguments both per call-site
1432     // and over the entire method.
1433     for( i = 0; i < argcnt; i++ ) {
1434       // Address of incoming argument mask to fill in
1435       RegMask *rm = &mcall->_in_rms[i+TypeFunc::Parms];
1436       VMReg first = parm_regs[i].first();
1437       VMReg second = parm_regs[i].second();
1438       if(!first->is_valid() &&
1439          !second->is_valid()) {
1440         continue;               // Avoid Halves
1441       }
1442       // Handle case where arguments are in vector registers.
1443       if(call->in(TypeFunc::Parms + i)->bottom_type()->isa_vect()) {
1444         OptoReg::Name reg_fst = OptoReg::as_OptoReg(first);
1445         OptoReg::Name reg_snd = OptoReg::as_OptoReg(second);
1446         assert (reg_fst <= reg_snd, "fst=%d snd=%d", reg_fst, reg_snd);
1447         for (OptoReg::Name r = reg_fst; r <= reg_snd; r++) {
1448           rm->Insert(r);
1449         }
1450       }
1451       // Grab first register, adjust stack slots and insert in mask.
1452       OptoReg::Name reg1 = warp_outgoing_stk_arg(first, begin_out_arg_area, out_arg_limit_per_call );
1453       if (C->failing()) {
1454         return nullptr;
1455       }
1456       if (OptoReg::is_valid(reg1))
1457         rm->Insert( reg1 );
1458       // Grab second register (if any), adjust stack slots and insert in mask.
1459       OptoReg::Name reg2 = warp_outgoing_stk_arg(second, begin_out_arg_area, out_arg_limit_per_call );
1460       if (C->failing()) {
1461         return nullptr;
1462       }
1463       if (OptoReg::is_valid(reg2))
1464         rm->Insert( reg2 );
1465     } // End of for all arguments
1466   }
1467 
1468   // Compute the max stack slot killed by any call.  These will not be
1469   // available for debug info, and will be used to adjust FIRST_STACK_mask
1470   // after all call sites have been visited.
1471   if( _out_arg_limit < out_arg_limit_per_call)
1472     _out_arg_limit = out_arg_limit_per_call;
1473 
1474   if (mcall) {
1475     // Kill the outgoing argument area, including any non-argument holes and
1476     // any legacy C-killed slots.  Use Fat-Projections to do the killing.
1477     // Since the max-per-method covers the max-per-call-site and debug info
1478     // is excluded on the max-per-method basis, debug info cannot land in
1479     // this killed area.
1480     uint r_cnt = mcall->tf()->range()->cnt();
1481     MachProjNode *proj = new MachProjNode( mcall, r_cnt+10000, RegMask::Empty, MachProjNode::fat_proj );
1482     if (!RegMask::can_represent_arg(OptoReg::Name(out_arg_limit_per_call-1))) {
1483       // Bailout. We do not have space to represent all arguments.
1484       C->record_method_not_compilable("unsupported outgoing calling sequence");
1485     } else {
1486       for (int i = begin_out_arg_area; i < out_arg_limit_per_call; i++)
1487         proj->_rout.Insert(OptoReg::Name(i));
1488     }
1489     if (proj->_rout.is_NotEmpty()) {
1490       push_projection(proj);
1491     }
1492   }
1493   // Transfer the safepoint information from the call to the mcall
1494   // Move the JVMState list
1495   msfpt->set_jvms(sfpt->jvms());
1496   for (JVMState* jvms = msfpt->jvms(); jvms; jvms = jvms->caller()) {
1497     jvms->set_map(sfpt);
1498   }
1499 
1500   // Debug inputs begin just after the last incoming parameter
1501   assert((mcall == nullptr) || (mcall->jvms() == nullptr) ||
1502          (mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain()->cnt()), "");
1503 
1504   // Add additional edges.
1505   if (msfpt->mach_constant_base_node_input() != (uint)-1 && !msfpt->is_MachCallLeaf()) {
1506     // For these calls we cannot add MachConstantBase in expand(), as the
1507     // inputs are not complete at that point.
1508     msfpt->ins_req(msfpt->mach_constant_base_node_input(), C->mach_constant_base_node());
1509     if (msfpt->jvms() &&
1510         msfpt->mach_constant_base_node_input() <= msfpt->jvms()->debug_start() + msfpt->_jvmadj) {
1511       // We added an edge before jvms, so we must adapt the position of the ins.
1512       msfpt->jvms()->adapt_position(+1);
1513     }
1514   }
1515 
1516   // Registers killed by the call are set in the local scheduling pass
1517   // of Global Code Motion.
1518   return msfpt;
1519 }
1520 
1521 //---------------------------match_tree----------------------------------------
1522 // Match an Ideal Node DAG - turn it into a tree; Label & Reduce.  Used as part
1523 // of the wholesale conversion from Ideal to Mach Nodes.  Also used for
1524 // making GotoNodes while building the CFG and in init_spill_mask() to identify
1525 // a Load's result RegMask for memoization in idealreg2regmask[].
1526 MachNode *Matcher::match_tree( const Node *n ) {
1527   assert( n->Opcode() != Op_Phi, "cannot match" );
1528   assert( !n->is_block_start(), "cannot match" );
1529   // Set the mark for all locally allocated State objects.
1530   // When this call returns, the _states_arena arena will be reset
1531   // freeing all State objects.
1532   ResourceMark rm( &_states_arena );
1533 
1534   LabelRootDepth = 0;
1535 
1536   // StoreNodes require their Memory input to match any LoadNodes
1537   Node *mem = n->is_Store() ? n->in(MemNode::Memory) : (Node*)1 ;
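       // Note: (Node*)1 is a sentinel meaning "no required memory state yet";
       // it is threaded through Label_Root/ReduceInst and replaced by the
       // first Load's memory once a Load is subsumed into this match tree.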
1538 #ifdef ASSERT
1539   Node* save_mem_node = _mem_node;
1540   _mem_node = n->is_Store() ? (Node*)n : nullptr;
1541 #endif
1542   // State object for root node of match tree
1543   // Allocate it on _states_arena - stack allocation can cause stack overflow.
1544   State *s = new (&_states_arena) State;
1545   s->_kids[0] = nullptr;
1546   s->_kids[1] = nullptr;
1547   s->_leaf = (Node*)n;
1548   // Label the input tree, allocating labels from top-level arena
1549   Node* root_mem = mem;
1550   Label_Root(n, s, n->in(0), root_mem);
1551   if (C->failing())  return nullptr;
1552 
1553   // The minimum cost match for the whole tree is found at the root State
1554   uint mincost = max_juint;
1555   uint cost = max_juint;
1556   uint i;
1557   for (i = 0; i < NUM_OPERANDS; i++) {
1558     if (s->valid(i) &&               // valid entry and
1559         s->cost(i) < cost &&         // low cost and
1560         s->rule(i) >= NUM_OPERANDS) {// not an operand
1561       mincost = i;
1562       cost = s->cost(i);
1563     }
1564   }
1565   if (mincost == max_juint) {
1566 #ifndef PRODUCT
1567     tty->print("No matching rule for:");
1568     s->dump();
1569 #endif
1570     Matcher::soft_match_failure();
1571     return nullptr;
1572   }
1573   // Reduce input tree based upon the state labels to machine Nodes
1574   MachNode *m = ReduceInst(s, s->rule(mincost), mem);
1575   // New-to-old mapping is done in ReduceInst, to cover complex instructions.
1576   NOT_PRODUCT(_old2new_map.map(n->_idx, m);)
1577 
1578   // Add any Matcher-ignored edges
1579   uint cnt = n->req();
1580   uint start = 1;
1581   if( mem != (Node*)1 ) start = MemNode::Memory+1;
1582   if( n->is_AddP() ) {
1583     assert( mem == (Node*)1, "" );
1584     start = AddPNode::Base+1;
1585   }
1586   for( i = start; i < cnt; i++ ) {
1587     if( !n->match_edge(i) ) {
1588       if( i < m->req() )
1589         m->ins_req( i, n->in(i) );
1590       else
1591         m->add_req( n->in(i) );
1592     }
1593   }
1594 
1595   debug_only( _mem_node = save_mem_node; )
1596   return m;
1597 }
1598 
1599 
1600 //------------------------------match_into_reg---------------------------------
1601 // Choose to either match this Node in a register or part of the current
1602 // match tree.  Return true for requiring a register and false for matching
1603 // as part of the current match tree.
1604 static bool match_into_reg( const Node *n, Node *m, Node *control, int i, bool shared ) {
1605 
1606   const Type *t = m->bottom_type();
1607 
1608   if (t->singleton()) {
1609     // Never force constants into registers.  Allow them to match as
1610     // constants or registers.  Copies of the same value will share
1611     // the same register.  See find_shared_node.
1612     return false;
1613   } else {                      // Not a constant
1614     if (!shared && Matcher::is_encode_and_store_pattern(n, m)) {
1615       // Make it possible to match "encode and store" patterns with non-shared
1616       // encode operations that are pinned to a control node (e.g. by CastPP
1617       // node removal in final graph reshaping). The matched instruction cannot
1618       // float above the encode's control node because it is pinned to the
1619       // store's control node.
1620       return false;
1621     }
1622     // Stop recursion if they have different Controls.
1623     Node* m_control = m->in(0);
1624     // The control of the load's memory can post-dominate the load's control.
1625     // If so, use it, since the load can't float above its memory.
1626     Node* mem_control = (m->is_Load()) ? m->in(MemNode::Memory)->in(0) : nullptr;
1627     if (control && m_control && control != m_control && control != mem_control) {
1628 
1629       // Actually, we can live with the most conservative control we
1630       // find, if it post-dominates the others.  This allows us to
1631       // pick up load/op/store trees where the load can float a little
1632       // above the store.
1633       Node *x = control;
1634       const uint max_scan = 6;  // Arbitrary scan cutoff
1635       uint j;
1636       for (j=0; j<max_scan; j++) {
1637         if (x->is_Region())     // Bail out at merge points
1638           return true;
1639         x = x->in(0);
1640         if (x == m_control)     // Does 'control' post-dominate
1641           break;                // m->in(0)?  If so, we can use it
1642         if (x == mem_control)   // Does 'control' post-dominate
1643           break;                // mem_control?  If so, we can use it
1644       }
1645       if (j == max_scan)        // No post-domination before scan end?
1646         return true;            // Then break the match tree up
1647     }
1648     if ((m->is_DecodeN() && Matcher::narrow_oop_use_complex_address()) ||
1649         (m->is_DecodeNKlass() && Matcher::narrow_klass_use_complex_address())) {
1650       // These are commonly used in address expressions and can
1651       // efficiently fold into them on X64 in some cases.
1652       return false;
1653     }
1654   }
1655 
1656   // Not forceable cloning.  If shared, put it into a register.
1657   return shared;
1658 }
1659 
1660 
1661 //------------------------------Instruction Selection--------------------------
1662 // Label method walks a "tree" of nodes, using the ADLC generated DFA to match
1663 // ideal nodes to machine instructions.  Trees are delimited by shared Nodes,
1664 // things the Matcher does not match (e.g., Memory), and things with different
1665 // Controls (hence forced into different blocks).  We pass in the Control
1666 // selected for this entire State tree.
1667 
1668 // The Matcher works on Trees, but an Intel add-to-memory requires a DAG: the
1669 // Store and the Load must have identical Memories (as well as identical
1670 // pointers).  Since the Matcher does not have anything for Memory (and
1671 // does not handle DAGs), I have to match the Memory input myself.  If the
1672 // Tree root is a Store or if there are multiple Loads in the tree, I require
1673 // all Loads to have the identical memory.
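     // A classic example (illustrative sketch): an add-to-memory instruction
     // matches the DAG
     //   (StoreI mem ptr (AddI (LoadI mem ptr) v))
     // where the Load and the Store must share the same 'mem' (and pointer)
     // for the whole pattern to reduce to a single instruction.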
1674 Node* Matcher::Label_Root(const Node* n, State* svec, Node* control, Node*& mem) {
1675   // Since Label_Root is a recursive function, it's possible that we might run
1676   // out of stack space.  See bugs 6272980 & 6227033 for more info.
1677   LabelRootDepth++;
1678   if (LabelRootDepth > MaxLabelRootDepth) {
1679     // Bailout: can be hit, for example, with a deep chain of operations.
1680     C->record_method_not_compilable("Out of stack space, increase MaxLabelRootDepth");
1681     return nullptr;
1682   }
1683   uint care = 0;                // Edges matcher cares about
1684   uint cnt = n->req();
1685   uint i = 0;
1686 
1687   // Examine children for memory state
1688   // Can only subsume a child into your match-tree if that child's memory state
1689   // is not modified along the path to another input.
1690   // It is unsafe even if the other inputs are separate roots.
1691   Node *input_mem = nullptr;
1692   for( i = 1; i < cnt; i++ ) {
1693     if( !n->match_edge(i) ) continue;
1694     Node *m = n->in(i);         // Get ith input
1695     assert( m, "expect non-null children" );
1696     if( m->is_Load() ) {
1697       if( input_mem == nullptr ) {
1698         input_mem = m->in(MemNode::Memory);
1699         if (mem == (Node*)1) {
1700           // Save this memory to bail out if there's another memory access
1701           // to a different memory location in the same tree.
1702           mem = input_mem;
1703         }
1704       } else if( input_mem != m->in(MemNode::Memory) ) {
1705         input_mem = NodeSentinel;
1706       }
1707     }
1708   }
1709 
1710   for( i = 1; i < cnt; i++ ){// For my children
1711     if( !n->match_edge(i) ) continue;
1712     Node *m = n->in(i);         // Get ith input
1713     // Allocate states out of a private arena
1714     State *s = new (&_states_arena) State;
1715     svec->_kids[care++] = s;
1716     assert( care <= 2, "binary only for now" );
1717 
1718     // Recursively label the State tree.
1719     s->_kids[0] = nullptr;
1720     s->_kids[1] = nullptr;
1721     s->_leaf = m;
1722 
1723     // Check for leaves of the State Tree; things that cannot be a part of
1724     // the current tree.  If it finds any, that value is matched as a
1725     // register operand.  If not, then the normal matching is used.
1726     if( match_into_reg(n, m, control, i, is_shared(m)) ||
1727         // Stop recursion if this is a LoadNode and there is another memory access
1728         // to a different memory location in the same tree (for example, a StoreNode
1729         // at the root of this tree or another LoadNode in one of the children).
1730         ((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem) ||
1731         // Can NOT include the match of a subtree when its memory state
1732         // is used by any of the other subtrees
1733         (input_mem == NodeSentinel) ) {
1734       // Print when we exclude matching due to different memory states at input-loads
1735       if (PrintOpto && (Verbose && WizardMode) && (input_mem == NodeSentinel)
1736           && !((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem)) {
1737         tty->print_cr("invalid input_mem");
1738       }
1739       // Switch to a register-only opcode; this value must be in a register
1740       // and cannot be subsumed as part of a larger instruction.
1741       s->DFA( m->ideal_reg(), m );
1742 
1743     } else {
1744       // If match tree has no control and we do, adopt it for entire tree
1745       if( control == nullptr && m->in(0) != nullptr && m->req() > 1 )
1746         control = m->in(0);         // Pick up control
1747       // Else match as a normal part of the match tree.
1748       control = Label_Root(m, s, control, mem);
1749       if (C->failing()) return nullptr;
1750     }
1751   }
1752 
1753   // Call DFA to match this node, and return
1754   svec->DFA( n->Opcode(), n );
1755 
1756 #ifdef ASSERT
1757   uint x;
1758   for( x = 0; x < _LAST_MACH_OPER; x++ )
1759     if( svec->valid(x) )
1760       break;
1761 
1762   if (x >= _LAST_MACH_OPER) {
1763     n->dump();
1764     svec->dump();
1765     assert( false, "bad AD file" );
1766   }
1767 #endif
1768   return control;
1769 }
1770 
1771 
1772 // Con nodes reduced using the same rule can share their MachNode
1773 // which reduces the number of copies of a constant in the final
1774 // program.  The register allocator is free to split uses later to
1775 // split live ranges.
1776 MachNode* Matcher::find_shared_node(Node* leaf, uint rule) {
1777   if (!leaf->is_Con() && !leaf->is_DecodeNarrowPtr()) return nullptr;
1778 
1779   // See if this Con has already been reduced using this rule.
1780   if (_shared_nodes.max() <= leaf->_idx) return nullptr;
1781   MachNode* last = (MachNode*)_shared_nodes.at(leaf->_idx);
1782   if (last != nullptr && rule == last->rule()) {
1783     // Don't expect control change for DecodeN
1784     if (leaf->is_DecodeNarrowPtr())
1785       return last;
1786     // Get the new space root.
1787     Node* xroot = new_node(C->root());
1788     if (xroot == nullptr) {
1789       // This shouldn't happen given the order of matching.
1790       return nullptr;
1791     }
1792 
1793     // Shared constants need to have their control be root so they
1794     // can be scheduled properly.
1795     Node* control = last->in(0);
1796     if (control != xroot) {
1797       if (control == nullptr || control == C->root()) {
1798         last->set_req(0, xroot);
1799       } else {
1800         assert(false, "unexpected control");
1801         return nullptr;
1802       }
1803     }
1804     return last;
1805   }
1806   return nullptr;
1807 }
1808 
1809 
1810 //------------------------------ReduceInst-------------------------------------
1811 // Reduce a State tree (with given Control) into a tree of MachNodes.
1812 // This routine (and its cohort ReduceOper) converts Ideal Nodes into
1813 // complicated machine Nodes.  Each MachNode covers some tree of Ideal Nodes.
1814 // Each MachNode has a number of complicated MachOper operands; each
1815 // MachOper also covers a further tree of Ideal Nodes.
1816 
1817 // The root of the Ideal match tree is always an instruction, so we enter
1818 // the recursion here.  After building the MachNode, we need to recurse
1819 // the tree checking for these cases:
1820 // (1) Child is an instruction -
1821 //     Build the instruction (recursively), add it as an edge.
1822 //     Build a simple operand (register) to hold the result of the instruction.
1823 // (2) Child is an interior part of an instruction -
1824 //     Skip over it (do nothing)
1825 // (3) Child is the start of an operand -
1826 //     Build the operand, place it inside the instruction
1827 //     Call ReduceOper.
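     //
     // Illustrative sketch (assumed target with memory operands): matching
     // (AddI (LoadI mem addr) con) may reduce to a single MachNode whose
     // operands cover the Load subtree and the constant; the Memory edge is
     // then inserted afterwards, as done below when mem != (Node*)1.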
1828 MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
1829   assert( rule >= NUM_OPERANDS, "called with operand rule" );
1830 
1831   MachNode* shared_node = find_shared_node(s->_leaf, rule);
1832   if (shared_node != nullptr) {
1833     return shared_node;
1834   }
1835 
1836   // Build the object to represent this state & prepare for recursive calls
1837   MachNode *mach = s->MachNodeGenerator(rule);
1838   guarantee(mach != nullptr, "Missing MachNode");
1839   mach->_opnds[0] = s->MachOperGenerator(_reduceOp[rule]);
1840   assert( mach->_opnds[0] != nullptr, "Missing result operand" );
1841   Node *leaf = s->_leaf;
1842   NOT_PRODUCT(record_new2old(mach, leaf);)
1843   // Check for instruction or instruction chain rule
1844   if( rule >= _END_INST_CHAIN_RULE || rule < _BEGIN_INST_CHAIN_RULE ) {
1845     assert(C->node_arena()->contains(s->_leaf) || !has_new_node(s->_leaf),
1846            "duplicating node that's already been matched");
1847     // Instruction
1848     mach->add_req( leaf->in(0) ); // Set initial control
1849     // Reduce interior of complex instruction
1850     ReduceInst_Interior( s, rule, mem, mach, 1 );
1851   } else {
1852     // Instruction chain rules are data-dependent on their inputs
1853     mach->add_req(nullptr);     // Set initial control to none
1854     ReduceInst_Chain_Rule( s, rule, mem, mach );
1855   }
1856 
1857   // If a Memory was used, insert a Memory edge
1858   if( mem != (Node*)1 ) {
1859     mach->ins_req(MemNode::Memory,mem);
1860 #ifdef ASSERT
1861     // Verify adr type after matching memory operation
1862     const MachOper* oper = mach->memory_operand();
1863     if (oper != nullptr && oper != (MachOper*)-1) {
1864       // It has a unique memory operand.  Find corresponding ideal mem node.
1865       Node* m = nullptr;
1866       if (leaf->is_Mem()) {
1867         m = leaf;
1868       } else {
1869         m = _mem_node;
1870         assert(m != nullptr && m->is_Mem(), "expecting memory node");
1871       }
1872       const Type* mach_at = mach->adr_type();
1873       // A DecodeN node consumed by an address may have a different type
1874       // than its input. Don't compare types in that case.
1875       if (m->adr_type() != mach_at &&
1876           (m->in(MemNode::Address)->is_DecodeNarrowPtr() ||
1877            (m->in(MemNode::Address)->is_AddP() &&
1878             m->in(MemNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr()) ||
1879            (m->in(MemNode::Address)->is_AddP() &&
1880             m->in(MemNode::Address)->in(AddPNode::Address)->is_AddP() &&
1881             m->in(MemNode::Address)->in(AddPNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr()))) {
1882         mach_at = m->adr_type();
1883       }
1884       if (m->adr_type() != mach_at) {
1885         m->dump();
1886         tty->print_cr("mach:");
1887         mach->dump(1);
1888       }
1889       assert(m->adr_type() == mach_at, "matcher should not change adr type");
1890     }
1891 #endif
1892   }
1893 
1894   // If the _leaf is an AddP, insert the base edge
1895   if (leaf->is_AddP()) {
1896     mach->ins_req(AddPNode::Base,leaf->in(AddPNode::Base));
1897   }
1898 
1899   uint number_of_projections_prior = number_of_projections();
1900 
1901   // Perform any 1-to-many expansions required
1902   MachNode *ex = mach->Expand(s, _projection_list, mem);
1903   if (ex != mach) {
1904     assert(ex->ideal_reg() == mach->ideal_reg(), "ideal types should match");
1905     if( ex->in(1)->is_Con() )
1906       ex->in(1)->set_req(0, C->root());
1907     // Remove old node from the graph
1908     for( uint i=0; i<mach->req(); i++ ) {
1909       mach->set_req(i,nullptr);
1910     }
1911     NOT_PRODUCT(record_new2old(ex, s->_leaf);)
1912   }
1913 
1914   // PhaseChaitin::fixup_spills will sometimes generate spill code
1915   // via the matcher.  By that time, nodes have been wired into the CFG,
1916   // and any further nodes generated by expand rules will be left hanging
1917   // in space, and will not get emitted as output code.  Catch this.
1918   // Also, catch any new register allocation constraints ("projections")
1919   // generated belatedly during spill code generation.
1920   if (_allocation_started) {
1921     guarantee(ex == mach, "no expand rules during spill generation");
1922     guarantee(number_of_projections_prior == number_of_projections(), "no allocation during spill generation");
1923   }
1924 
1925   if (leaf->is_Con() || leaf->is_DecodeNarrowPtr()) {
1926     // Record the con for sharing
1927     _shared_nodes.map(leaf->_idx, ex);
1928   }
1929 
1930   // Have mach nodes inherit GC barrier data
1931   mach->set_barrier_data(MemNode::barrier_data(leaf));
1932 
1933   return ex;
1934 }
1935 
1936 void Matcher::handle_precedence_edges(Node* n, MachNode *mach) {
1937   for (uint i = n->req(); i < n->len(); i++) {
1938     if (n->in(i) != nullptr) {
1939       mach->add_prec(n->in(i));
1940     }
1941   }
1942 }
1943 
1944 void Matcher::ReduceInst_Chain_Rule(State* s, int rule, Node* &mem, MachNode* mach) {
1945   // 'op' is what I am expecting to receive
1946   int op = _leftOp[rule];
1947   // Operand type to catch the child's result
1948   // This is what my child will give me.
1949   unsigned int opnd_class_instance = s->rule(op);
1950   // Choose between operand class or not.
1951   // This is what I will receive.
1952   int catch_op = (FIRST_OPERAND_CLASS <= op && op < NUM_OPERANDS) ? opnd_class_instance : op;
1953   // New rule for child.  Chase operand classes to get the actual rule.
1954   unsigned int newrule = s->rule(catch_op);
1955 
1956   if (newrule < NUM_OPERANDS) {
1957     // Chain from operand or operand class, may be output of shared node
1958     assert(opnd_class_instance < NUM_OPERANDS, "Bad AD file: Instruction chain rule must chain from operand");
1959     // Insert operand into array of operands for this instruction
1960     mach->_opnds[1] = s->MachOperGenerator(opnd_class_instance);
1961 
1962     ReduceOper(s, newrule, mem, mach);
1963   } else {
1964     // Chain from the result of an instruction
1965     assert(newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand");
1966     mach->_opnds[1] = s->MachOperGenerator(_reduceOp[catch_op]);
1967     Node *mem1 = (Node*)1;
1968     debug_only(Node *save_mem_node = _mem_node;)
1969     mach->add_req( ReduceInst(s, newrule, mem1) );
1970     debug_only(_mem_node = save_mem_node;)
1971   }
1972   return;
1973 }
1974 
1975 
1976 uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds ) {
1977   handle_precedence_edges(s->_leaf, mach);
1978 
1979   if( s->_leaf->is_Load() ) {
1980     Node *mem2 = s->_leaf->in(MemNode::Memory);
1981     assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" );
1982     debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;)
1983     mem = mem2;
1984   }
1985   if( s->_leaf->in(0) != nullptr && s->_leaf->req() > 1) {
1986     if( mach->in(0) == nullptr )
1987       mach->set_req(0, s->_leaf->in(0));
1988   }
1989 
1990   // Now recursively walk the state tree & add operand list.
1991   for( uint i=0; i<2; i++ ) {   // binary tree
1992     State *newstate = s->_kids[i];
1993     if( newstate == nullptr ) break;      // Might only have 1 child
1994     // 'op' is what I am expecting to receive
1995     int op;
1996     if( i == 0 ) {
1997       op = _leftOp[rule];
1998     } else {
1999       op = _rightOp[rule];
2000     }
2001     // Operand type to catch the child's result
2002     // This is what my child will give me.
2003     int opnd_class_instance = newstate->rule(op);
2004     // Choose between operand class or not.
2005     // This is what I will receive.
2006     int catch_op = (op >= FIRST_OPERAND_CLASS && op < NUM_OPERANDS) ? opnd_class_instance : op;
2007     // New rule for child.  Chase operand classes to get the actual rule.
2008     int newrule = newstate->rule(catch_op);
2009 
2010     if (newrule < NUM_OPERANDS) { // Operand/operandClass or internalOp/instruction?
2011       // Operand/operandClass
2012       // Insert operand into array of operands for this instruction
2013       mach->_opnds[num_opnds++] = newstate->MachOperGenerator(opnd_class_instance);
2014       ReduceOper(newstate, newrule, mem, mach);
2015 
2016     } else {                    // Child is internal operand or new instruction
2017       if (newrule < _LAST_MACH_OPER) { // internal operand or instruction?
2018         // internal operand --> call ReduceInst_Interior
2019         // Interior of complex instruction.  Do nothing but recurse.
2020         num_opnds = ReduceInst_Interior(newstate, newrule, mem, mach, num_opnds);
2021       } else {
2022         // instruction --> call build operand(  ) to catch result
2023         //             --> ReduceInst( newrule )
2024         mach->_opnds[num_opnds++] = s->MachOperGenerator(_reduceOp[catch_op]);
2025         Node *mem1 = (Node*)1;
2026         debug_only(Node *save_mem_node = _mem_node;)
2027         mach->add_req( ReduceInst( newstate, newrule, mem1 ) );
2028         debug_only(_mem_node = save_mem_node;)
2029       }
2030     }
2031     assert( mach->_opnds[num_opnds-1], "" );
2032   }
2033   return num_opnds;
2034 }
2035 
2036 // This routine walks the interior of possible complex operands.
2037 // At each point we check our children in the match tree:
2038 // (1) No children -
2039 //     We are a leaf; add _leaf field as an input to the MachNode
2040 // (2) Child is an internal operand -
2041 //     Skip over it ( do nothing )
2042 // (3) Child is an instruction -
2043 //     Call ReduceInst recursively and add the reduced
2044 //     instruction as an input to the MachNode
2045 void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
2046   assert( rule < _LAST_MACH_OPER, "called with operand rule" );
2047   State *kid = s->_kids[0];
2048   assert( kid == nullptr || s->_leaf->in(0) == nullptr, "internal operands have no control" );
2049 
2050   // Leaf?  And not subsumed?
2051   if( kid == nullptr && !_swallowed[rule] ) {
2052     mach->add_req( s->_leaf );  // Add leaf pointer
2053     return;                     // Bail out
2054   }
2055 
2056   if( s->_leaf->is_Load() ) {
2057     assert( mem == (Node*)1, "multiple Memories being matched at once?" );
2058     mem = s->_leaf->in(MemNode::Memory);
2059     debug_only(_mem_node = s->_leaf;)
2060   }
2061 
2062   handle_precedence_edges(s->_leaf, mach);
2063 
2064   if( s->_leaf->in(0) && s->_leaf->req() > 1) {
2065     if( !mach->in(0) )
2066       mach->set_req(0,s->_leaf->in(0));
2067     else {
2068       assert( s->_leaf->in(0) == mach->in(0), "same instruction, differing controls?" );
2069     }
2070   }
2071 
2072   for (uint i = 0; kid != nullptr && i < 2; kid = s->_kids[1], i++) {   // binary tree
2073     int newrule;
2074     if( i == 0) {
2075       newrule = kid->rule(_leftOp[rule]);
2076     } else {
2077       newrule = kid->rule(_rightOp[rule]);
2078     }
2079 
2080     if (newrule < _LAST_MACH_OPER) { // Operand or instruction?
2081       // Internal operand; recurse but do nothing else
2082       ReduceOper(kid, newrule, mem, mach);
2083 
2084     } else {                    // Child is a new instruction
2085       // Reduce the instruction, and add a direct pointer from this
2086       // machine instruction to the newly reduced one.
2087       Node *mem1 = (Node*)1;
2088       debug_only(Node *save_mem_node = _mem_node;)
2089       mach->add_req( ReduceInst( kid, newrule, mem1 ) );
2090       debug_only(_mem_node = save_mem_node;)
2091     }
2092   }
2093 }
2094 
2095 
2096 // -------------------------------------------------------------------------
2097 // Java-Java calling convention
2098 // (what you use when Java calls Java)
2099 
2100 //------------------------------find_receiver----------------------------------
2101 // For a given signature, return the OptoReg for parameter 0.
2102 OptoReg::Name Matcher::find_receiver() {
2103   VMRegPair regs;
2104   BasicType sig_bt = T_OBJECT;
2105   SharedRuntime::java_calling_convention(&sig_bt, &regs, 1);
2106   // Return argument 0 register.  In the LP64 build pointers
2107   // take 2 registers, but the VM wants only the 'main' name.
2108   return OptoReg::as_OptoReg(regs.first());
2109 }
2110 
2111 bool Matcher::is_vshift_con_pattern(Node* n, Node* m) {
2112   if (n != nullptr && m != nullptr) {
2113     return VectorNode::is_vector_shift(n) &&
2114            VectorNode::is_vector_shift_count(m) && m->in(1)->is_Con();
2115   }
2116   return false;
2117 }
2118 
2119 bool Matcher::clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
2120   // Must clone all producers of flags, or we will not match correctly.
2121   // Suppose a compare setting int-flags is shared (e.g., a switch-tree)
2122   // then it will match into an ideal Op_RegFlags.  Alas, the fp-flags
2123   // are also there, so we may match a float-branch to int-flags and
2124   // expect the allocator to haul the flags from the int-side to the
2125   // fp-side.  No can do.
2126   if (_must_clone[m->Opcode()]) {
2127     mstack.push(m, Visit);
2128     return true;
2129   }
2130   return pd_clone_node(n, m, mstack);
2131 }
2132 
2133 bool Matcher::clone_base_plus_offset_address(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
2134   Node *off = m->in(AddPNode::Offset);
2135   if (off->is_Con()) {
2136     address_visited.test_set(m->_idx); // Flag as address_visited
2137     mstack.push(m->in(AddPNode::Address), Pre_Visit);
2138     // Clone X+offset as it also folds into most addressing expressions
2139     mstack.push(off, Visit);
2140     mstack.push(m->in(AddPNode::Base), Pre_Visit);
2141     return true;
2142   }
2143   return false;
2144 }
2145 
2146 // A method-klass-holder may be passed in the inline_cache_reg
2147 // and then expanded into the inline_cache_reg and a method_ptr register
2148 //   defined in ad_<arch>.cpp
2149 
2150 //------------------------------find_shared------------------------------------
2151 // Set bits if Node is shared or otherwise a root
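     // The walk is iterative (explicit MStack) to avoid native-stack overflow
     // on deep graphs: Pre_Visit detects nodes reached twice and flags them
     // shared, Visit pushes children, and Post_Visit (plus Alt_Post_Visit)
     // restructures wide nodes into binary trees for the matcher DFA.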
2152 void Matcher::find_shared(Node* n) {
2153   // Allocate stack of size C->live_nodes() * 2 to avoid frequent realloc
2154   MStack mstack(C->live_nodes() * 2);
2155   // Mark nodes as address_visited if they are inputs to an address expression
2156   VectorSet address_visited;
2157   mstack.push(n, Visit);     // Don't need to pre-visit root node
2158   while (mstack.is_nonempty()) {
2159     n = mstack.node();       // Leave node on stack
2160     Node_State nstate = mstack.state();
2161     uint nop = n->Opcode();
2162     if (nstate == Pre_Visit) {
2163       if (address_visited.test(n->_idx)) { // Visited in address already?
2164         // Flag as visited and shared now.
2165         set_visited(n);
2166       }
2167       if (is_visited(n)) {   // Visited already?
2168         // Node is shared and has no reason to clone.  Flag it as shared.
2169         // This causes it to match into a register for the sharing.
2170         set_shared(n);       // Flag as shared and
2171         if (n->is_DecodeNarrowPtr()) {
2172           // Oop field/array element loads must be shared but since
2173           // they are shared through a DecodeN they may appear to have
2174           // a single use so force sharing here.
2175           set_shared(n->in(1));
2176         }
2177         mstack.pop();        // remove node from stack
2178         continue;
2179       }
2180       nstate = Visit; // Not already visited; so visit now
2181     }
2182     if (nstate == Visit) {
2183       mstack.set_state(Post_Visit);
2184       set_visited(n);   // Flag as visited now
2185       bool mem_op = false;
2186       int mem_addr_idx = MemNode::Address;
2187       if (find_shared_visit(mstack, n, nop, mem_op, mem_addr_idx)) {
2188         continue;
2189       }
2190       for (int i = n->req() - 1; i >= 0; --i) { // For my children
2191         Node* m = n->in(i); // Get ith input
2192         if (m == nullptr) {
2193           continue;  // Ignore nulls
2194         }
2195         if (clone_node(n, m, mstack)) {
2196           continue;
2197         }
2198 
2199         // Clone addressing expressions as they are "free" in memory access instructions
2200         if (mem_op && i == mem_addr_idx && m->is_AddP() &&
2201             // When there are other uses besides address expressions
2202             // put it on stack and mark as shared.
2203             !is_visited(m)) {
2204           // Some inputs for address expression are not put on stack
2205           // to avoid marking them as shared and forcing them into register
2206           // if they are used only in address expressions.
2207           // But they should be marked as shared if there are other uses
2208           // besides address expressions.
2209 
2210           if (pd_clone_address_expressions(m->as_AddP(), mstack, address_visited)) {
2211             continue;
2212           }
2213         }   // if( mem_op &&
2214         mstack.push(m, Pre_Visit);
2215       }     // for(int i = ...)
2216     }
2217     else if (nstate == Alt_Post_Visit) {
2218       mstack.pop(); // Remove node from stack
2219       // We cannot remove the Cmp input from the Bool here, as the Bool may be
2220       // shared and all users of the Bool need to move the Cmp in parallel.
2221       // This leaves both the Bool and the If pointing at the Cmp.  To
2222       // prevent the Matcher from trying to Match the Cmp along both paths
2223       // BoolNode::match_edge always returns a zero.
2224 
2225       // We reorder the Op_If in a pre-order manner, so we can visit without
2226       // accidentally sharing the Cmp (the Bool and the If make 2 users).
2227       n->add_req( n->in(1)->in(1) ); // Add the Cmp next to the Bool
2228     }
2229     else if (nstate == Post_Visit) {
2230       mstack.pop(); // Remove node from stack
2231 
2232       // Now hack a few special opcodes
2233       uint opcode = n->Opcode();
2234       bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->matcher_find_shared_post_visit(this, n, opcode);
2235       if (!gc_handled) {
2236         find_shared_post_visit(n, opcode);
2237       }
2238     }
2239     else {
2240       ShouldNotReachHere();
2241     }
2242   } // end of while (mstack.is_nonempty())
2243 }
2244 
2245 bool Matcher::find_shared_visit(MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) {
2246   switch(opcode) {  // Handle some opcodes special
2247     case Op_Phi:             // Treat Phis as shared roots
2248     case Op_Parm:
2249     case Op_Proj:            // All handled specially during matching
2250     case Op_SafePointScalarObject:
2251       set_shared(n);
2252       set_dontcare(n);
2253       break;
2254     case Op_If:
2255     case Op_CountedLoopEnd:
2256       mstack.set_state(Alt_Post_Visit); // Alternative way
2257       // Convert (If (Bool (CmpX A B))) into (If (Bool) (CmpX A B)).  Helps
2258       // with matching cmp/branch in 1 instruction.  The Matcher needs the
2259       // Bool and CmpX side-by-side, because it can only get at constants
2260       // that are at the leaves of Match trees, and the Bool's condition acts
2261       // as a constant here.
2262       mstack.push(n->in(1), Visit);         // Clone the Bool
2263       mstack.push(n->in(0), Pre_Visit);     // Visit control input
2264       return true; // while (mstack.is_nonempty())
2265     case Op_ConvI2D:         // These forms efficiently match with a prior
2266     case Op_ConvI2F:         //   Load but not a following Store
2267       if( n->in(1)->is_Load() &&        // Prior load
2268           n->outcnt() == 1 &&           // Not already shared
2269           n->unique_out()->is_Store() ) // Following store
2270         set_shared(n);       // Force it to be a root
2271       break;
2272     case Op_ReverseBytesI:
2273     case Op_ReverseBytesL:
2274       if( n->in(1)->is_Load() &&        // Prior load
2275           n->outcnt() == 1 )            // Not already shared
2276         set_shared(n);                  // Force it to be a root
2277       break;
2278     case Op_BoxLock:         // Can't match until we get stack-regs in ADLC
2279     case Op_IfFalse:
2280     case Op_IfTrue:
2281     case Op_MachProj:
2282     case Op_MergeMem:
2283     case Op_Catch:
2284     case Op_CatchProj:
2285     case Op_CProj:
2286     case Op_JumpProj:
2287     case Op_JProj:
2288     case Op_NeverBranch:
2289       set_dontcare(n);
2290       break;
2291     case Op_Jump:
2292       mstack.push(n->in(1), Pre_Visit);     // Switch Value (could be shared)
2293       mstack.push(n->in(0), Pre_Visit);     // Visit Control input
2294       return true;                             // while (mstack.is_nonempty())
2295     case Op_StrComp:
2296     case Op_StrEquals:
2297     case Op_StrIndexOf:
2298     case Op_StrIndexOfChar:
2299     case Op_AryEq:
2300     case Op_VectorizedHashCode:
2301     case Op_CountPositives:
2302     case Op_StrInflatedCopy:
2303     case Op_StrCompressedCopy:
2304     case Op_EncodeISOArray:
2305     case Op_FmaD:
2306     case Op_FmaF:
2307     case Op_FmaVD:
2308     case Op_FmaVF:
2309     case Op_MacroLogicV:
2310     case Op_VectorCmpMasked:
2311     case Op_CompressV:
2312     case Op_CompressM:
2313     case Op_ExpandV:
2314     case Op_VectorLoadMask:
2315       set_shared(n); // Force result into register (it will be anyways)
2316       break;
2317     case Op_ConP: {  // Convert pointers above the centerline to NULL
2318       TypeNode *tn = n->as_Type(); // Constants derive from type nodes
2319       const TypePtr* tp = tn->type()->is_ptr();
2320       if (tp->_ptr == TypePtr::AnyNull) {
2321         tn->set_type(TypePtr::NULL_PTR);
2322       }
2323       break;
2324     }
2325     case Op_ConN: {  // Convert narrow pointers above the centerline to NULL
2326       TypeNode *tn = n->as_Type(); // Constants derive from type nodes
2327       const TypePtr* tp = tn->type()->make_ptr();
2328       if (tp && tp->_ptr == TypePtr::AnyNull) {
2329         tn->set_type(TypeNarrowOop::NULL_PTR);
2330       }
2331       break;
2332     }
2333     case Op_Binary:         // These are introduced in the Post_Visit state.
2334       ShouldNotReachHere();
2335       break;
2336     case Op_ClearArray:
2337     case Op_SafePoint:
2338       mem_op = true;
2339       break;
2340     default:
2341       if( n->is_Store() ) {
2342         // Do match stores, despite no ideal reg
2343         mem_op = true;
2344         break;
2345       }
2346       if( n->is_Mem() ) { // Loads and LoadStores
2347         mem_op = true;
2348         // Loads must be root of match tree due to prior load conflict
2349         if( C->subsume_loads() == false )
2350           set_shared(n);
2351       }
2352       // Fall into default case
2353       if( !n->ideal_reg() )
2354         set_dontcare(n);  // Unmatchable Nodes
2355   } // end_switch
2356   return false;
2357 }
2358 
2359 void Matcher::find_shared_post_visit(Node* n, uint opcode) {
2360   if (n->is_predicated_vector()) {
2361     // Restructure into binary trees for Matching.
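         // e.g. with req() == 4: (op a b c) ==> (op (Binary a b) c)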
2362     if (n->req() == 4) {
2363       n->set_req(1, new BinaryNode(n->in(1), n->in(2)));
2364       n->set_req(2, n->in(3));
2365       n->del_req(3);
2366     } else if (n->req() == 5) {
2367       n->set_req(1, new BinaryNode(n->in(1), n->in(2)));
2368       n->set_req(2, new BinaryNode(n->in(3), n->in(4)));
2369       n->del_req(4);
2370       n->del_req(3);
2371     } else if (n->req() == 6) {
2372       Node* b3 = new BinaryNode(n->in(4), n->in(5));
2373       Node* b2 = new BinaryNode(n->in(3), b3);
2374       Node* b1 = new BinaryNode(n->in(2), b2);
2375       n->set_req(2, b1);
2376       n->del_req(5);
2377       n->del_req(4);
2378       n->del_req(3);
2379     }
2380     return;
2381   }
2382 
2383   switch(opcode) {       // Handle some opcodes special
2384     case Op_CompareAndExchangeB:
2385     case Op_CompareAndExchangeS:
2386     case Op_CompareAndExchangeI:
2387     case Op_CompareAndExchangeL:
2388     case Op_CompareAndExchangeP:
2389     case Op_CompareAndExchangeN:
2390     case Op_WeakCompareAndSwapB:
2391     case Op_WeakCompareAndSwapS:
2392     case Op_WeakCompareAndSwapI:
2393     case Op_WeakCompareAndSwapL:
2394     case Op_WeakCompareAndSwapP:
2395     case Op_WeakCompareAndSwapN:
2396     case Op_CompareAndSwapB:
2397     case Op_CompareAndSwapS:
2398     case Op_CompareAndSwapI:
2399     case Op_CompareAndSwapL:
2400     case Op_CompareAndSwapP:
2401     case Op_CompareAndSwapN: {   // Convert trinary to binary-tree
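           // Shape change (sketch): the expected and new values are folded into
           // a single (Binary oldval newval) input, so the matcher sees a binary tree.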
2402       Node* newval = n->in(MemNode::ValueIn);
2403       Node* oldval = n->in(LoadStoreConditionalNode::ExpectedIn);
2404       Node* pair = new BinaryNode(oldval, newval);
2405       n->set_req(MemNode::ValueIn, pair);
2406       n->del_req(LoadStoreConditionalNode::ExpectedIn);
2407       break;
2408     }
2409     case Op_CMoveD:              // Convert trinary to binary-tree
2410     case Op_CMoveF:
2411     case Op_CMoveI:
2412     case Op_CMoveL:
2413     case Op_CMoveN:
2414     case Op_CMoveP: {
2415       // Restructure into a binary tree for Matching.  It's possible that
2416       // we could move this code up next to the graph reshaping for IfNodes
2417       // or vice-versa, but I do not want to debug this for Ladybird.
2418       // 10/2/2000 CNC.
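           // Shape change (sketch): (CMoveX (Bool (CmpX a b)) src1 src2)
           //                    ==> (CMoveX (Binary Bool CmpX) (Binary src1 src2))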
2419       Node* pair1 = new BinaryNode(n->in(1), n->in(1)->in(1));
2420       n->set_req(1, pair1);
2421       Node* pair2 = new BinaryNode(n->in(2), n->in(3));
2422       n->set_req(2, pair2);
2423       n->del_req(3);
2424       break;
2425     }
2426     case Op_MacroLogicV: {
2427       Node* pair1 = new BinaryNode(n->in(1), n->in(2));
2428       Node* pair2 = new BinaryNode(n->in(3), n->in(4));
2429       n->set_req(1, pair1);
2430       n->set_req(2, pair2);
2431       n->del_req(4);
2432       n->del_req(3);
2433       break;
2434     }
2435     case Op_StoreVectorMasked: {
2436       Node* pair = new BinaryNode(n->in(3), n->in(4));
2437       n->set_req(3, pair);
2438       n->del_req(4);
2439       break;
2440     }
2441     case Op_SelectFromTwoVector:
2442     case Op_LoopLimit: {
2443       Node* pair1 = new BinaryNode(n->in(1), n->in(2));
2444       n->set_req(1, pair1);
2445       n->set_req(2, n->in(3));
2446       n->del_req(3);
2447       break;
2448     }
2449     case Op_StrEquals:
2450     case Op_StrIndexOfChar: {
2451       Node* pair1 = new BinaryNode(n->in(2), n->in(3));
2452       n->set_req(2, pair1);
2453       n->set_req(3, n->in(4));
2454       n->del_req(4);
2455       break;
2456     }
2457     case Op_StrComp:
2458     case Op_StrIndexOf:
2459     case Op_VectorizedHashCode: {
2460       Node* pair1 = new BinaryNode(n->in(2), n->in(3));
2461       n->set_req(2, pair1);
2462       Node* pair2 = new BinaryNode(n->in(4),n->in(5));
2463       n->set_req(3, pair2);
2464       n->del_req(5);
2465       n->del_req(4);
2466       break;
2467     }
2468     case Op_EncodeISOArray:
2469     case Op_StrCompressedCopy:
2470     case Op_StrInflatedCopy: {
2471       // Restructure into a binary tree for Matching.
2472       Node* pair = new BinaryNode(n->in(3), n->in(4));
2473       n->set_req(3, pair);
2474       n->del_req(4);
2475       break;
2476     }
2477     case Op_FmaD:
2478     case Op_FmaF:
2479     case Op_FmaVD:
2480     case Op_FmaVF: {
2481       // Restructure into a binary tree for Matching.
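           // Shape change: (FmaX a b c) ==> (FmaX c (Binary a b))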
2482       Node* pair = new BinaryNode(n->in(1), n->in(2));
2483       n->set_req(2, pair);
2484       n->set_req(1, n->in(3));
2485       n->del_req(3);
2486       break;
2487     }
2488     case Op_MulAddS2I: {
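           // Shape change: (MulAddS2I a b c d) ==> (MulAddS2I (Binary a b) (Binary c d))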
2489       Node* pair1 = new BinaryNode(n->in(1), n->in(2));
2490       Node* pair2 = new BinaryNode(n->in(3), n->in(4));
2491       n->set_req(1, pair1);
2492       n->set_req(2, pair2);
2493       n->del_req(4);
2494       n->del_req(3);
2495       break;
2496     }
2497     case Op_VectorCmpMasked:
2498     case Op_CopySignD:
2499     case Op_SignumVF:
2500     case Op_SignumVD:
2501     case Op_SignumF:
2502     case Op_SignumD: {
2503       Node* pair = new BinaryNode(n->in(2), n->in(3));
2504       n->set_req(2, pair);
2505       n->del_req(3);
2506       break;
2507     }
2508     case Op_VectorBlend:
2509     case Op_VectorInsert: {
2510       Node* pair = new BinaryNode(n->in(1), n->in(2));
2511       n->set_req(1, pair);
2512       n->set_req(2, n->in(3));
2513       n->del_req(3);
2514       break;
2515     }
2516     case Op_LoadVectorGather:
2517       if (is_subword_type(n->bottom_type()->is_vect()->element_basic_type())) {
2518         Node* pair = new BinaryNode(n->in(MemNode::ValueIn), n->in(MemNode::ValueIn+1));
2519         n->set_req(MemNode::ValueIn, pair);
2520         n->del_req(MemNode::ValueIn+1);
2521       }
2522       break;
2523     case Op_LoadVectorGatherMasked:
2524       if (is_subword_type(n->bottom_type()->is_vect()->element_basic_type())) {
2525         Node* pair2 = new BinaryNode(n->in(MemNode::ValueIn + 1), n->in(MemNode::ValueIn + 2));
2526         Node* pair1 = new BinaryNode(n->in(MemNode::ValueIn), pair2);
2527         n->set_req(MemNode::ValueIn, pair1);
2528         n->del_req(MemNode::ValueIn+2);
2529         n->del_req(MemNode::ValueIn+1);
2530         break;
2531       } // Else fall through: pair the remaining (ValueIn, ValueIn+1) inputs below.
2532     case Op_StoreVectorScatter: {
2533       Node* pair = new BinaryNode(n->in(MemNode::ValueIn), n->in(MemNode::ValueIn+1));
2534       n->set_req(MemNode::ValueIn, pair);
2535       n->del_req(MemNode::ValueIn+1);
2536       break;
2537     }
2538     case Op_StoreVectorScatterMasked: {
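           // Nest the three extra inputs as
           // Binary(in(ValueIn), Binary(in(ValueIn+1), in(ValueIn+2))).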
2539       Node* pair = new BinaryNode(n->in(MemNode::ValueIn+1), n->in(MemNode::ValueIn+2));
2540       n->set_req(MemNode::ValueIn+1, pair);
2541       n->del_req(MemNode::ValueIn+2);
2542       pair = new BinaryNode(n->in(MemNode::ValueIn), n->in(MemNode::ValueIn+1));
2543       n->set_req(MemNode::ValueIn, pair);
2544       n->del_req(MemNode::ValueIn+1);
2545       break;
2546     }
2547     case Op_VectorMaskCmp: {
2548       n->set_req(1, new BinaryNode(n->in(1), n->in(2)));
2549       n->set_req(2, n->in(3));
2550       n->del_req(3);
2551       break;
2552     }
2553     case Op_PartialSubtypeCheck: {
2554       if (UseSecondarySupersTable && n->in(2)->is_Con()) {
2555         // PartialSubtypeCheck uses both constant and register operands for the superclass input.
2556         n->set_req(2, new BinaryNode(n->in(2), n->in(2)));
2557         break;
2558       }
2559       break;
2560     }
2561     default:
2562       break;
2563   }
2564 }
2565 
2566 #ifndef PRODUCT
2567 void Matcher::record_new2old(Node* newn, Node* old) {
2568   _new2old_map.map(newn->_idx, old);
2569   if (!_reused.test_set(old->_igv_idx)) {
2570     // Reuse the Ideal-level IGV identifier so that the node can be tracked
2571     // across matching. If there are multiple machine nodes expanded from the
2572     // same Ideal node, only one will reuse its IGV identifier.
2573     newn->_igv_idx = old->_igv_idx;
2574   }
2575 }
2576 
2577 // Dump the map from each machine-independent root to its machine-dependent root.
2578 void Matcher::dump_old2new_map() {
2579   _old2new_map.dump();
2580 }
2581 #endif // !PRODUCT
2582 
2583 //---------------------------collect_null_checks-------------------------------
2584 // Find null checks in the ideal graph and record them for later
2585 // implicit-null-check handling.  Collects either the IfTrue or IfFalse
2586 // projection for the common NOT-null path, together with the ideal
2587 // value being tested.
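     // For example, a test "if (p != null)" appears here as
     // IfTrue(If(Bool[ne](CmpP(p, null)))); we push the IfTrue projection
     // and then p itself.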
2588 void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) {
2589   Node *iff = proj->in(0);
2590   if( iff->Opcode() == Op_If ) {
2591     // During matching If's have Bool & Cmp side-by-side
2592     BoolNode *b = iff->in(1)->as_Bool();
2593     Node *cmp = iff->in(2);
2594     int opc = cmp->Opcode();
2595     if (opc != Op_CmpP && opc != Op_CmpN) return;
2596 
2597     const Type* ct = cmp->in(2)->bottom_type();
2598     if (ct == TypePtr::NULL_PTR ||
2599         (opc == Op_CmpN && ct == TypeNarrowOop::NULL_PTR)) {
2600 
2601       bool push_it = false;
2602       if( proj->Opcode() == Op_IfTrue ) {
2603 #ifndef PRODUCT
2604         extern uint all_null_checks_found;
2605         all_null_checks_found++;
2606 #endif
2607         if( b->_test._test == BoolTest::ne ) {
2608           push_it = true;
2609         }
2610       } else {
2611         assert( proj->Opcode() == Op_IfFalse, "" );
2612         if( b->_test._test == BoolTest::eq ) {
2613           push_it = true;
2614         }
2615       }
2616       if( push_it ) {
2617         _null_check_tests.push(proj);
2618         Node* val = cmp->in(1);
2619 #ifdef _LP64
2620         if (val->bottom_type()->isa_narrowoop() &&
2621             !Matcher::narrow_oop_use_complex_address()) {
2622           //
2623           // Look for a DecodeN node which should be pinned to orig_proj.
2624           // On platforms (e.g. SPARC) which cannot handle two adds in an
2625           // addressing mode, we have to keep the DecodeN node and use it
2626           // to do the implicit null check on the address.
2627           //
2628           // The DecodeN node was pinned to the non-null path (orig_proj)
2629           // during the CastPP transformation in final_graph_reshaping_impl().
2630           //
2631           uint cnt = orig_proj->outcnt();
2632           for (uint i = 0; i < cnt; i++) {
2633             Node* d = orig_proj->raw_out(i);
2634             if (d->is_DecodeN() && d->in(1) == val) {
2635               val = d;
2636               val->set_req(0, nullptr); // Unpin now.
2637               // Mark this as special case to distinguish from
2638               // a regular case: CmpP(DecodeN, null).
2639               val = (Node*)(((intptr_t)val) | 1);
2640               break;
2641             }
2642           }
2643         }
2644 #endif
2645         _null_check_tests.push(val);
2646       }
2647     }
2648   }
2649 }
2650 
2651 //---------------------------validate_null_checks------------------------------
2652 // It's possible that the value being null-checked is not the root of a match
2653 // tree.  If so, we cannot use the value in an implicit null check.
2654 void Matcher::validate_null_checks( ) {
2655   uint cnt = _null_check_tests.size();
2656   for( uint i=0; i < cnt; i+=2 ) {
2657     Node *test = _null_check_tests[i];
2658     Node *val = _null_check_tests[i+1];
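         // The low bit of the stashed pointer records whether val was
         // replaced by a DecodeN in collect_null_checks().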
2659     bool is_decoden = ((intptr_t)val) & 1;
2660     val = (Node*)(((intptr_t)val) & ~1);
2661     if (has_new_node(val)) {
2662       Node* new_val = new_node(val);
2663       if (is_decoden) {
2664         assert(val->is_DecodeNarrowPtr() && val->in(0) == nullptr, "sanity");
2665         // Note: new_val may have a control edge if
2666         // the original ideal node DecodeN was matched before
2667         // it was unpinned in Matcher::collect_null_checks().
2668         // Unpin the mach node and mark it.
2669         new_val->set_req(0, nullptr);
2670         new_val = (Node*)(((intptr_t)new_val) | 1);
2671       }
2672       // Is a match-tree root, so replace with the matched value
2673       _null_check_tests.map(i+1, new_val);
2674     } else {
2675       // Yank from candidate list
2676       _null_check_tests.map(i+1, _null_check_tests[--cnt]);
2677       _null_check_tests.map(i, _null_check_tests[--cnt]);
2678       _null_check_tests.pop();
2679       _null_check_tests.pop();
2680       i -= 2;
2681     }
2682   }
2683 }
2684 
2685 bool Matcher::gen_narrow_oop_implicit_null_checks() {
2686   // Advise the matcher to perform null checks on the narrow oop side.
2687   // Implicit checks are not possible on the uncompressed oop side anyway
2688   // (at least not for read accesses).
2689   // This performs significantly better (especially on Power 6).
2690   if (!os::zero_page_read_protected()) {
2691     return true;
2692   }
2693   return CompressedOops::use_implicit_null_checks() &&
2694          (narrow_oop_use_complex_address() ||
2695           CompressedOops::base() != nullptr);
2696 }
2697 
2698 // Compute RegMask for an ideal register.
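     // This is done by synthesizing a dummy spill load of the matching type,
     // running it through the matcher, and taking the register mask of the
     // resulting machine node.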
2699 const RegMask* Matcher::regmask_for_ideal_register(uint ideal_reg, Node* ret) {
2700   assert(!C->failing_internal() || C->failure_is_artificial(), "already failing.");
2701   if (C->failing()) {
2702     return nullptr;
2703   }
2704   const Type* t = Type::mreg2type[ideal_reg];
2705   if (t == nullptr) {
2706     assert(ideal_reg >= Op_VecA && ideal_reg <= Op_VecZ, "not a vector: %d", ideal_reg);
2707     return nullptr; // not supported
2708   }
2709   Node* fp  = ret->in(TypeFunc::FramePtr);
2710   Node* mem = ret->in(TypeFunc::Memory);
2711   const TypePtr* atp = TypePtr::BOTTOM;
2712   MemNode::MemOrd mo = MemNode::unordered;
2713 
2714   Node* spill;
2715   switch (ideal_reg) {
2716     case Op_RegN: spill = new LoadNNode(nullptr, mem, fp, atp, t->is_narrowoop(), mo); break;
2717     case Op_RegI: spill = new LoadINode(nullptr, mem, fp, atp, t->is_int(),       mo); break;
2718     case Op_RegP: spill = new LoadPNode(nullptr, mem, fp, atp, t->is_ptr(),       mo); break;
2719     case Op_RegF: spill = new LoadFNode(nullptr, mem, fp, atp, t,                 mo); break;
2720     case Op_RegD: spill = new LoadDNode(nullptr, mem, fp, atp, t,                 mo); break;
2721     case Op_RegL: spill = new LoadLNode(nullptr, mem, fp, atp, t->is_long(),      mo); break;
2722 
2723     case Op_VecA: // fall-through
2724     case Op_VecS: // fall-through
2725     case Op_VecD: // fall-through
2726     case Op_VecX: // fall-through
2727     case Op_VecY: // fall-through
2728     case Op_VecZ: spill = new LoadVectorNode(nullptr, mem, fp, atp, t->is_vect()); break;
2729     case Op_RegVectMask: return Matcher::predicate_reg_mask();
2730 
2731     default: ShouldNotReachHere();
2732   }
2733   MachNode* mspill = match_tree(spill);
2734   assert(mspill != nullptr || C->failure_is_artificial(), "matching failed: %d", ideal_reg);
2735   if (C->failing()) {
2736     return nullptr;
2737   }
2738   // Handle generic vector operand case
2739   if (Matcher::supports_generic_vector_operands && t->isa_vect()) {
2740     specialize_mach_node(mspill);
2741   }
2742   return &mspill->out_RegMask();
2743 }
2744 
2745 // Process Mach IR right after selection phase is over.
2746 void Matcher::do_postselect_cleanup() {
2747   if (supports_generic_vector_operands) {
2748     specialize_generic_vector_operands();
2749     if (C->failing())  return;
2750   }
2751 }
2752 
2753 //----------------------------------------------------------------------
2754 // Generic machine operand elision.
2755 //----------------------------------------------------------------------
2756 
2757 // Compute concrete vector operand for a generic TEMP vector mach node based on its user info.
2758 void Matcher::specialize_temp_node(MachTempNode* tmp, MachNode* use, uint idx) {
2759   assert(use->in(idx) == tmp, "not a user");
2760   assert(!Matcher::is_generic_vector(use->_opnds[0]), "use not processed yet");
2761 
2762   if ((uint)idx == use->two_adr()) { // DEF_TEMP case
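         // The temp is tied to the DEF (two-address form), so it must use
         // the same operand as the DEF; clone it.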
2763     tmp->_opnds[0] = use->_opnds[0]->clone();
2764   } else {
2765     uint ideal_vreg = vector_ideal_reg(C->max_vector_size());
2766     tmp->_opnds[0] = Matcher::pd_specialize_generic_vector_operand(tmp->_opnds[0], ideal_vreg, true /*is_temp*/);
2767   }
2768 }
2769 
2770 // Compute concrete vector operand for a generic DEF/USE vector operand (of mach node m at index idx).
2771 MachOper* Matcher::specialize_vector_operand(MachNode* m, uint opnd_idx) {
2772   assert(Matcher::is_generic_vector(m->_opnds[opnd_idx]), "repeated updates");
2773   Node* def = nullptr;
2774   if (opnd_idx == 0) { // DEF
2775     def = m; // use mach node itself to compute vector operand type
2776   } else {
2777     int base_idx = m->operand_index(opnd_idx);
2778     def = m->in(base_idx);
2779     if (def->is_Mach()) {
2780       if (def->is_MachTemp() && Matcher::is_generic_vector(def->as_Mach()->_opnds[0])) {
2781         specialize_temp_node(def->as_MachTemp(), m, base_idx); // MachTemp node use site
2782       } else if (is_reg2reg_move(def->as_Mach())) {
2783         def = def->in(1); // skip over generic reg-to-reg moves
2784       }
2785     }
2786   }
2787   assert(def->bottom_type()->isa_vect(), "not a vector");
2788   uint ideal_vreg = def->bottom_type()->ideal_reg();
2789   return Matcher::pd_specialize_generic_vector_operand(m->_opnds[opnd_idx], ideal_vreg, false /*is_temp*/);
2790 }
2791 
2792 void Matcher::specialize_mach_node(MachNode* m) {
2793   assert(!m->is_MachTemp(), "processed along with its user");
2794   // For generic USE operands, pull the specific register-class operand
2795   // from the def instruction's output operand (the DEF operand).
2796   for (uint i = 0; i < m->num_opnds(); i++) {
2797     if (Matcher::is_generic_vector(m->_opnds[i])) {
2798       m->_opnds[i] = specialize_vector_operand(m, i);
2799     }
2800   }
2801 }
2802 
2803 // Replace generic vector operands with concrete vector operands and eliminate generic reg-to-reg moves from the graph.
2804 void Matcher::specialize_generic_vector_operands() {
2805   assert(supports_generic_vector_operands, "sanity");
2806   ResourceMark rm;
2807 
2808   // Replace generic vector operands (vec/legVec) with concrete ones (vec[SDXYZ]/legVec[SDXYZ])
2809   // and remove reg-to-reg vector moves (MoveVec2Leg and MoveLeg2Vec).
2810   Unique_Node_List live_nodes;
2811   C->identify_useful_nodes(live_nodes);
2812 
2813   while (live_nodes.size() > 0) {
2814     MachNode* m = live_nodes.pop()->isa_Mach();
2815     if (m != nullptr) {
2816       if (Matcher::is_reg2reg_move(m)) {
2817         // Register allocator properly handles vec <=> leg moves using register masks.
2818         int opnd_idx = m->operand_index(1);
2819         Node* def = m->in(opnd_idx);
2820         m->subsume_by(def, C);
2821       } else if (m->is_MachTemp()) {
2822         // process MachTemp nodes at use site (see Matcher::specialize_vector_operand)
2823       } else {
2824         specialize_mach_node(m);
2825       }
2826     }
2827   }
2828 }
2829 
2830 uint Matcher::vector_length(const Node* n) {
2831   const TypeVect* vt = n->bottom_type()->is_vect();
2832   return vt->length();
2833 }
2834 
2835 uint Matcher::vector_length(const MachNode* use, const MachOper* opnd) {
2836   int def_idx = use->operand_index(opnd);
2837   Node* def = use->in(def_idx);
2838   return def->bottom_type()->is_vect()->length();
2839 }
2840 
2841 uint Matcher::vector_length_in_bytes(const Node* n) {
2842   const TypeVect* vt = n->bottom_type()->is_vect();
2843   return vt->length_in_bytes();
2844 }
2845 
2846 uint Matcher::vector_length_in_bytes(const MachNode* use, const MachOper* opnd) {
2847   uint def_idx = use->operand_index(opnd);
2848   Node* def = use->in(def_idx);
2849   return def->bottom_type()->is_vect()->length_in_bytes();
2850 }
2851 
2852 BasicType Matcher::vector_element_basic_type(const Node* n) {
2853   const TypeVect* vt = n->bottom_type()->is_vect();
2854   return vt->element_basic_type();
2855 }
2856 
2857 BasicType Matcher::vector_element_basic_type(const MachNode* use, const MachOper* opnd) {
2858   int def_idx = use->operand_index(opnd);
2859   Node* def = use->in(def_idx);
2860   return def->bottom_type()->is_vect()->element_basic_type();
2861 }
2862 
2863 bool Matcher::is_non_long_integral_vector(const Node* n) {
2864   BasicType bt = vector_element_basic_type(n);
2865   assert(bt != T_CHAR, "char is not allowed in vector");
2866   return is_subword_type(bt) || bt == T_INT;
2867 }
2868 
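     // Recognize a StoreN carrying GC barrier data whose value input is an
     // EncodeP node; such a pair is a candidate for matching as a single
     // encode-and-store operation.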
2869 bool Matcher::is_encode_and_store_pattern(const Node* n, const Node* m) {
2870   if (n == nullptr ||
2871       m == nullptr ||
2872       n->Opcode() != Op_StoreN ||
2873       !m->is_EncodeP() ||
2874       n->as_Store()->barrier_data() == 0) {
2875     return false;
2876   }
2877   assert(m == n->in(MemNode::ValueIn), "m should be input to n");
2878   return true;
2879 }
2880 
2881 #ifdef ASSERT
2882 bool Matcher::verify_after_postselect_cleanup() {
2883   assert(!C->failing_internal() || C->failure_is_artificial(), "sanity");
2884   if (supports_generic_vector_operands) {
2885     Unique_Node_List useful;
2886     C->identify_useful_nodes(useful);
2887     for (uint i = 0; i < useful.size(); i++) {
2888       MachNode* m = useful.at(i)->isa_Mach();
2889       if (m != nullptr) {
2890         assert(!Matcher::is_reg2reg_move(m), "no MoveVec nodes allowed");
2891         for (uint j = 0; j < m->num_opnds(); j++) {
2892           assert(!Matcher::is_generic_vector(m->_opnds[j]), "no generic vector operands allowed");
2893         }
2894       }
2895     }
2896   }
2897   return true;
2898 }
2899 #endif // ASSERT
2900 
2901 // Used by the DFA in dfa_xxx.cpp.  Check for a following barrier or
2902 // atomic instruction acting as a store-load barrier with no intervening
2903 // volatile load; if one is found, we don't need a barrier here.
2904 // We retain the Node to act as a compiler ordering barrier.
2905 bool Matcher::post_store_load_barrier(const Node* vmb) {
2906   Compile* C = Compile::current();
2907   assert(vmb->is_MemBar(), "");
2908   assert(vmb->Opcode() != Op_MemBarAcquire && vmb->Opcode() != Op_LoadFence, "");
2909   const MemBarNode* membar = vmb->as_MemBar();
2910 
2911   // Get the Ideal Proj node, ctrl, that can be used to iterate forward
2912   Node* ctrl = nullptr;
2913   for (DUIterator_Fast imax, i = membar->fast_outs(imax); i < imax; i++) {
2914     Node* p = membar->fast_out(i);
2915     assert(p->is_Proj(), "only projections here");
2916     if ((p->as_Proj()->_con == TypeFunc::Control) &&
2917         !C->node_arena()->contains(p)) { // Unmatched old-space only
2918       ctrl = p;
2919       break;
2920     }
2921   }
2922   assert((ctrl != nullptr), "missing control projection");
2923 
2924   for (DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++) {
2925     Node *x = ctrl->fast_out(j);
2926     int xop = x->Opcode();
2927 
2928     // We don't need the current barrier if we see another barrier or a
2929     // lock before seeing a volatile load.
2930     //
2931     // Op_FastUnlock previously appeared in the Op_* list below.
2932     // With the advent of 1-0 lock operations we're no longer guaranteed
2933     // that a monitor exit operation contains a serializing instruction.
2934 
2935     if (xop == Op_MemBarVolatile ||
2936         xop == Op_CompareAndExchangeB ||
2937         xop == Op_CompareAndExchangeS ||
2938         xop == Op_CompareAndExchangeI ||
2939         xop == Op_CompareAndExchangeL ||
2940         xop == Op_CompareAndExchangeP ||
2941         xop == Op_CompareAndExchangeN ||
2942         xop == Op_WeakCompareAndSwapB ||
2943         xop == Op_WeakCompareAndSwapS ||
2944         xop == Op_WeakCompareAndSwapL ||
2945         xop == Op_WeakCompareAndSwapP ||
2946         xop == Op_WeakCompareAndSwapN ||
2947         xop == Op_WeakCompareAndSwapI ||
2948         xop == Op_CompareAndSwapB ||
2949         xop == Op_CompareAndSwapS ||
2950         xop == Op_CompareAndSwapL ||
2951         xop == Op_CompareAndSwapP ||
2952         xop == Op_CompareAndSwapN ||
2953         xop == Op_CompareAndSwapI ||
2954         BarrierSet::barrier_set()->barrier_set_c2()->matcher_is_store_load_barrier(x, xop)) {
2955       return true;
2956     }
2957 
2958     // Op_FastLock previously appeared in the Op_* list above.
2959     if (xop == Op_FastLock) {
2960       return true;
2961     }
2962 
2963     if (x->is_MemBar()) {
2964       // We must retain this membar if there is an upcoming volatile
2965       // load, which will be followed by an acquire membar.
2966       if (xop == Op_MemBarAcquire || xop == Op_LoadFence) {
2967         return false;
2968       } else {
2969         // For other kinds of barriers, check by pretending we
2970         // are them, and seeing if we can be removed.
2971         return post_store_load_barrier(x->as_MemBar());
2972       }
2973     }
2974 
2975     // probably not necessary to check for these
2976     if (x->is_Call() || x->is_SafePoint() || x->is_block_proj()) {
2977       return false;
2978     }
2979   }
2980   return false;
2981 }
2982 
2983 // Check whether node n is a branch to an uncommon trap that we could
2984 // optimize as a test with very high branch cost for the path going to
2985 // the uncommon trap.  The code must be able to be recompiled to use
2986 // a cheaper test.
2987 bool Matcher::branches_to_uncommon_trap(const Node *n) {
2988   // Don't do it for natives, adapters, or runtime stubs
2989   Compile *C = Compile::current();
2990   if (!C->is_method_compilation()) return false;
2991 
2992   assert(n->is_If(), "You should only call this on if nodes.");
2993   IfNode *ifn = n->as_If();
2994 
2995   Node *ifFalse = nullptr;
2996   for (DUIterator_Fast imax, i = ifn->fast_outs(imax); i < imax; i++) {
2997     if (ifn->fast_out(i)->is_IfFalse()) {
2998       ifFalse = ifn->fast_out(i);
2999       break;
3000     }
3001   }
3002   assert(ifFalse, "An If should have an ifFalse. Graph is broken.");
3003 
3004   Node *reg = ifFalse;
3005   int cnt = 4; // We must protect against cycles.  Limit to 4 iterations.
3006                // Alternatively, use a visited set?  Seems too expensive.
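       // Walk forward from the IfFalse projection through at most four
       // Region nodes, looking for a call into the uncommon trap blob.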
3007   while (reg != nullptr && cnt > 0) {
3008     CallNode *call = nullptr;
3009     RegionNode *nxt_reg = nullptr;
3010     for (DUIterator_Fast imax, i = reg->fast_outs(imax); i < imax; i++) {
3011       Node *o = reg->fast_out(i);
3012       if (o->is_Call()) {
3013         call = o->as_Call();
3014       }
3015       if (o->is_Region()) {
3016         nxt_reg = o->as_Region();
3017       }
3018     }
3019 
3020     if (call &&
3021         call->entry_point() == OptoRuntime::uncommon_trap_blob()->entry_point()) {
3022       const Type* trtype = call->in(TypeFunc::Parms)->bottom_type();
3023       if (trtype->isa_int() && trtype->is_int()->is_con()) {
3024         jint tr_con = trtype->is_int()->get_con();
3025         Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(tr_con);
3026         Deoptimization::DeoptAction action = Deoptimization::trap_request_action(tr_con);
3027         assert((int)reason < (int)BitsPerInt, "recode bit map");
3028 
3029         if (is_set_nth_bit(C->allowed_deopt_reasons(), (int)reason)
3030             && action != Deoptimization::Action_none) {
3031           // This uncommon trap is sure to recompile, eventually.
3032           // When that happens, C->too_many_traps will prevent
3033           // this transformation from happening again.
3034           return true;
3035         }
3036       }
3037     }
3038 
3039     reg = nxt_reg;
3040     cnt--;
3041   }
3042 
3043   return false;
3044 }
3045 
3046 //=============================================================================
3047 //---------------------------State---------------------------------------------
3048 State::State(void) : _rule() {
3049 #ifdef ASSERT
3050   _id = 0;
3051   _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
3052   _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
3053 #endif
3054 }
3055 
3056 #ifdef ASSERT
3057 State::~State() {
3058   _id = 99;
3059   _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
3060   _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
3061   memset(_cost, -3, sizeof(_cost));
3062   memset(_rule, -3, sizeof(_rule));
3063 }
3064 #endif
3065 
3066 #ifndef PRODUCT
3067 //---------------------------dump----------------------------------------------
3068 void State::dump() {
3069   tty->print("\n");
3070   dump(0);
3071 }
3072 
3073 void State::dump(int depth) {
3074   for (int j = 0; j < depth; j++) {
3075     tty->print("   ");
3076   }
3077   tty->print("--N: ");
3078   _leaf->dump();
3079   uint i;
3080   for (i = 0; i < _LAST_MACH_OPER; i++) {
3081     // Check for valid entry
3082     if (valid(i)) {
3083       for (int j = 0; j < depth; j++) {
3084         tty->print("   ");
3085       }
3086       assert(cost(i) != max_juint, "cost must be a valid value");
3087       assert(rule(i) < _last_Mach_Node, "rule[i] must be valid rule");
3088       tty->print_cr("%s  %d  %s",
3089                     ruleName[i], cost(i), ruleName[rule(i)] );
3090     }
3091   }
3092   tty->cr();
3093 
3094   for (i = 0; i < 2; i++) {
3095     if (_kids[i]) {
3096       _kids[i]->dump(depth + 1);
3097     }
3098   }
3099 }
3100 #endif