/*
 * Copyright (c) 2015, 2021, Red Hat, Inc. All rights reserved.
 * Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "classfile/javaClasses.hpp"
#include "gc/shenandoah/c2/shenandoahSupport.hpp"
#include "gc/shenandoah/c2/shenandoahBarrierSetC2.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/block.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
#include "opto/movenode.hpp"
#include "opto/phaseX.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"

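// Expand the load reference barriers recorded in the barrier set C2 state.
// This temporarily re-opens loop optimizations so the Shenandoah-specific
// passes (LoopOptsShenandoahExpand, LoopOptsShenandoahPostExpand) can run.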
bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) {
  ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();
  if (state->load_reference_barriers_count() > 0) {
    assert(C->post_loop_opts_phase(), "no loop opts allowed");
    C->reset_post_loop_opts_phase(); // ... but we know what we are doing
    C->clear_major_progress();
    PhaseIdealLoop::optimize(igvn, LoopOptsShenandoahExpand);
    if (C->failing()) return false;

    C->set_major_progress();
    if (!C->optimize_loops(igvn, LoopOptsShenandoahPostExpand)) {
      return false;
    }
    C->clear_major_progress();
    C->process_for_post_loop_opts_igvn(igvn);
    if (C->failing()) return false;

    C->set_post_loop_opts_phase(); // now for real!
  }
  return true;
}

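// Recognize the If shape produced by test_gc_state() below: a BoolTest::ne
// comparison of (gc_state & mask) against zero, where gc_state is the
// thread-local byte recognized by is_gc_state_load().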
bool ShenandoahBarrierC2Support::is_gc_state_test(Node* iff, int mask) {
  if (!UseShenandoahGC) {
    return false;
  }
  assert(iff->is_If(), "bad input");
  if (iff->Opcode() != Op_If) {
    return false;
  }
  Node* bol = iff->in(1);
  if (!bol->is_Bool() || bol->as_Bool()->_test._test != BoolTest::ne) {
    return false;
  }
  Node* cmp = bol->in(1);
  if (cmp->Opcode() != Op_CmpI) {
    return false;
  }
  Node* in1 = cmp->in(1);
  Node* in2 = cmp->in(2);
  if (in2->find_int_con(-1) != 0) {
    return false;
  }
  if (in1->Opcode() != Op_AndI) {
    return false;
  }
  in2 = in1->in(2);
  if (in2->find_int_con(-1) != mask) {
    return false;
  }
  in1 = in1->in(1);

  return is_gc_state_load(in1);
}

bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) {
  return is_gc_state_test(iff, ShenandoahHeap::HAS_FORWARDED);
}

bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) {
  if (!UseShenandoahGC) {
    return false;
  }
  if (n->Opcode() != Op_LoadB && n->Opcode() != Op_LoadUB) {
    return false;
  }
  Node* addp = n->in(MemNode::Address);
  if (!addp->is_AddP()) {
    return false;
  }
  Node* base = addp->in(AddPNode::Address);
  Node* off = addp->in(AddPNode::Offset);
  if (base->Opcode() != Op_ThreadLocal) {
    return false;
  }
  if (off->find_intptr_t_con(-1) != in_bytes(ShenandoahThreadLocalData::gc_state_offset())) {
    return false;
  }
  return true;
}

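// Walk control flow backwards from start towards stop and report whether a
// safepoint (other than a leaf call) may be encountered on the way.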
bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) {
  assert(phase->is_dominator(stop, start), "bad inputs");
  ResourceMark rm;
  Unique_Node_List wq;
  wq.push(start);
  for (uint next = 0; next < wq.size(); next++) {
    Node *m = wq.at(next);
    if (m == stop) {
      continue;
    }
    if (m->is_SafePoint() && !m->is_CallLeaf()) {
      return true;
    }
    if (m->is_Region()) {
      for (uint i = 1; i < m->req(); i++) {
        wq.push(m->in(i));
      }
    } else {
      wq.push(m->in(0));
    }
  }
  return false;
}

#ifdef ASSERT
bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) {
  assert(phis.size() == 0, "");

  while (true) {
    if (in->bottom_type() == TypePtr::NULL_PTR) {
      if (trace) {tty->print_cr("null");}
    } else if (!in->bottom_type()->make_ptr()->make_oopptr()) {
      if (trace) {tty->print_cr("Non oop");}
    } else {
      if (in->is_ConstraintCast()) {
        in = in->in(1);
        continue;
      } else if (in->is_AddP()) {
        assert(!in->in(AddPNode::Address)->is_top(), "no raw memory access");
        in = in->in(AddPNode::Address);
        continue;
      } else if (in->is_Con()) {
        if (trace) {
          tty->print("Found constant");
          in->dump();
        }
      } else if (in->Opcode() == Op_Parm) {
        if (trace) {
          tty->print("Found argument");
        }
      } else if (in->Opcode() == Op_CreateEx) {
        if (trace) {
          tty->print("Found create-exception");
        }
      } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) {
        if (trace) {
          tty->print("Found raw LoadP (OSR argument?)");
        }
      } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
        if (t == ShenandoahOopStore) {
          return false;
        }
        barriers_used.push(in);
        if (trace) {tty->print("Found barrier"); in->dump();}
      } else if (in->is_Proj() && in->in(0)->is_Allocate()) {
        if (trace) {
          tty->print("Found alloc");
          in->in(0)->dump();
        }
      } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) {
        if (trace) {
          tty->print("Found Java call");
        }
      } else if (in->is_Phi()) {
        if (!visited.test_set(in->_idx)) {
          if (trace) {tty->print("Pushed phi:"); in->dump();}
          phis.push(in, 2);
          in = in->in(1);
          continue;
        }
        if (trace) {tty->print("Already seen phi:"); in->dump();}
      } else if (in->Opcode() == Op_CMoveP || in->Opcode() == Op_CMoveN) {
        if (!visited.test_set(in->_idx)) {
          if (trace) {tty->print("Pushed cmovep:"); in->dump();}
          phis.push(in, CMoveNode::IfTrue);
          in = in->in(CMoveNode::IfFalse);
          continue;
        }
        if (trace) {tty->print("Already seen cmovep:"); in->dump();}
      } else if (in->Opcode() == Op_EncodeP || in->Opcode() == Op_DecodeN) {
        in = in->in(1);
        continue;
      } else {
        return false;
      }
    }
    bool cont = false;
    while (phis.is_nonempty()) {
      uint idx = phis.index();
      Node* phi = phis.node();
      if (idx >= phi->req()) {
        if (trace) {tty->print("Popped phi:"); phi->dump();}
        phis.pop();
        continue;
      }
      if (trace) {tty->print("Next entry(%d) for phi:", idx); phi->dump();}
      in = phi->in(idx);
      phis.set_index(idx+1);
      cont = true;
      break;
    }
    if (!cont) {
      break;
    }
  }
  return true;
}

void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) {
  if (n1 != nullptr) {
    n1->dump(+10);
  }
  if (n2 != nullptr) {
    n2->dump(+10);
  }
  fatal("%s", msg);
}

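// Debug-only graph verification: walk the graph from the root and check that
// oop loads, stores, compares, atomics and known runtime/intrinsic calls see
// their oop inputs through the expected load reference barriers.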
void ShenandoahBarrierC2Support::verify(RootNode* root) {
  ResourceMark rm;
  Unique_Node_List wq;
  GrowableArray<Node*> barriers;
  Unique_Node_List barriers_used;
  Node_Stack phis(0);
  VectorSet visited;
  const bool trace = false;
  const bool verify_no_useless_barrier = false;

  wq.push(root);
  for (uint next = 0; next < wq.size(); next++) {
    Node *n = wq.at(next);
    if (n->is_Load()) {
      const bool trace = false;
      if (trace) {tty->print("Verifying"); n->dump();}
      if (n->Opcode() == Op_LoadRange || n->Opcode() == Op_LoadKlass || n->Opcode() == Op_LoadNKlass) {
        if (trace) {tty->print_cr("Load range/klass");}
      } else {
        const TypePtr* adr_type = n->as_Load()->adr_type();

        if (adr_type->isa_oopptr() && adr_type->is_oopptr()->offset() == oopDesc::mark_offset_in_bytes()) {
          if (trace) {tty->print_cr("Mark load");}
        } else if (adr_type->isa_instptr() &&
                   adr_type->is_instptr()->instance_klass()->is_subtype_of(Compile::current()->env()->Reference_klass()) &&
                   adr_type->is_instptr()->offset() == java_lang_ref_Reference::referent_offset()) {
          if (trace) {tty->print_cr("Reference.get()");}
        } else if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: Load should have barriers", n);
        }
      }
    } else if (n->is_Store()) {
      const bool trace = false;

      if (trace) {tty->print("Verifying"); n->dump();}
      if (n->in(MemNode::ValueIn)->bottom_type()->make_oopptr()) {
        Node* adr = n->in(MemNode::Address);
        bool verify = true;

        if (adr->is_AddP() && adr->in(AddPNode::Base)->is_top()) {
          adr = adr->in(AddPNode::Address);
          if (adr->is_AddP()) {
            assert(adr->in(AddPNode::Base)->is_top(), "");
            adr = adr->in(AddPNode::Address);
            if (adr->Opcode() == Op_LoadP &&
                adr->in(MemNode::Address)->in(AddPNode::Base)->is_top() &&
                adr->in(MemNode::Address)->in(AddPNode::Address)->Opcode() == Op_ThreadLocal &&
                adr->in(MemNode::Address)->in(AddPNode::Offset)->find_intptr_t_con(-1) == in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset())) {
              if (trace) {tty->print_cr("SATB prebarrier");}
              verify = false;
            }
          }
        }

        if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahValue, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: Store should have barriers", n);
        }
      }
      if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: Store (address) should have barriers", n);
      }
    } else if (n->Opcode() == Op_CmpP) {
      const bool trace = false;

      Node* in1 = n->in(1);
      Node* in2 = n->in(2);
      if (in1->bottom_type()->isa_oopptr()) {
        if (trace) {tty->print("Verifying"); n->dump();}

        bool mark_inputs = false;
        if (in1->bottom_type() == TypePtr::NULL_PTR || in2->bottom_type() == TypePtr::NULL_PTR ||
            (in1->is_Con() || in2->is_Con())) {
          if (trace) {tty->print_cr("Comparison against a constant");}
          mark_inputs = true;
        } else if ((in1->is_CheckCastPP() && in1->in(1)->is_Proj() && in1->in(1)->in(0)->is_Allocate()) ||
                   (in2->is_CheckCastPP() && in2->in(1)->is_Proj() && in2->in(1)->in(0)->is_Allocate())) {
          if (trace) {tty->print_cr("Comparison with newly alloc'ed object");}
          mark_inputs = true;
        } else {
          assert(in2->bottom_type()->isa_oopptr(), "");

          if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) ||
              !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) {
            report_verify_failure("Shenandoah verification: Cmp should have barriers", n);
          }
        }
        if (verify_no_useless_barrier &&
            mark_inputs &&
            (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) ||
             !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) {
          phis.clear();
          visited.reset();
        }
      }
    } else if (n->is_LoadStore()) {
      if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() &&
          !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahValue, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n);
      }

      if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) {
        report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n);
      }
    } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) {
      CallNode* call = n->as_Call();

      static struct {
        const char* name;
        struct {
          int pos;
          verify_type t;
        } args[6];
      } calls[] = {
        "array_partition_stub",
        { { TypeFunc::Parms, ShenandoahStore }, { TypeFunc::Parms+4, ShenandoahStore },   { -1, ShenandoahNone },
          { -1, ShenandoahNone },                { -1, ShenandoahNone },                  { -1, ShenandoahNone } },
        "arraysort_stub",
        { { TypeFunc::Parms, ShenandoahStore },  { -1, ShenandoahNone },                  { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "aescrypt_encryptBlock",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "aescrypt_decryptBlock",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "multiplyToLen",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { TypeFunc::Parms+4, ShenandoahStore },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "squareToLen",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },   { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "montgomery_multiply",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+6, ShenandoahStore }, { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "montgomery_square",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+5, ShenandoahStore },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "mulAdd",
        { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "vectorizedMismatch",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahLoad },   { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "updateBytesCRC32",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "updateBytesAdler32",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "updateBytesCRC32C",
        { { TypeFunc::Parms+1, ShenandoahLoad }, { TypeFunc::Parms+3, ShenandoahLoad},    { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "counterMode_AESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahStore }, { TypeFunc::Parms+5, ShenandoahStore }, { TypeFunc::Parms+6, ShenandoahStore } },
        "cipherBlockChaining_encryptAESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "cipherBlockChaining_decryptAESCrypt",
        { { TypeFunc::Parms, ShenandoahLoad },   { TypeFunc::Parms+1, ShenandoahStore },  { TypeFunc::Parms+2, ShenandoahLoad },
          { TypeFunc::Parms+3, ShenandoahLoad },  { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "shenandoah_clone",
        { { TypeFunc::Parms, ShenandoahLoad },   { -1,  ShenandoahNone},                  { -1,  ShenandoahNone},
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "ghash_processBlocks",
        { { TypeFunc::Parms, ShenandoahStore },  { TypeFunc::Parms+1, ShenandoahLoad },   { TypeFunc::Parms+2, ShenandoahLoad },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha1_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha256_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha512_implCompress",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha1_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha256_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "sha512_implCompressMB",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+1, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "encodeBlock",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+3, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
        "decodeBlock",
        { { TypeFunc::Parms, ShenandoahLoad },  { TypeFunc::Parms+3, ShenandoahStore },   { -1, ShenandoahNone },
          { -1,  ShenandoahNone},                 { -1,  ShenandoahNone},                 { -1,  ShenandoahNone} },
      };

      if (call->is_call_to_arraycopystub()) {
        Node* dest = nullptr;
        const TypeTuple* args = n->as_Call()->_tf->domain();
        for (uint i = TypeFunc::Parms, j = 0; i < args->cnt(); i++) {
          if (args->field_at(i)->isa_ptr()) {
            j++;
            if (j == 2) {
              dest = n->in(i);
              break;
            }
          }
        }
        if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) ||
            !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n);
        }
      } else if (strlen(call->_name) > 5 &&
                 !strcmp(call->_name + strlen(call->_name) - 5, "_fill")) {
        if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) {
          report_verify_failure("Shenandoah verification: _fill should have barriers", n);
        }
      } else if (!strcmp(call->_name, "shenandoah_wb_pre")) {
        // skip
      } else {
        const int calls_len = sizeof(calls) / sizeof(calls[0]);
        int i = 0;
        for (; i < calls_len; i++) {
          if (!strcmp(calls[i].name, call->_name)) {
            break;
          }
        }
        if (i != calls_len) {
          const uint args_len = sizeof(calls[0].args) / sizeof(calls[0].args[0]);
          for (uint j = 0; j < args_len; j++) {
            int pos = calls[i].args[j].pos;
            if (pos == -1) {
              break;
            }
            if (!verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) {
              report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
            }
          }
          for (uint j = TypeFunc::Parms; j < call->req(); j++) {
            if (call->in(j)->bottom_type()->make_ptr() &&
                call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
              uint k = 0;
              for (; k < args_len && calls[i].args[k].pos != (int)j; k++);
              if (k == args_len) {
                fatal("arg %d for call %s not covered", j, call->_name);
              }
            }
          }
        } else {
          for (uint j = TypeFunc::Parms; j < call->req(); j++) {
            if (call->in(j)->bottom_type()->make_ptr() &&
                call->in(j)->bottom_type()->make_ptr()->isa_oopptr()) {
              fatal("%s not covered", call->_name);
            }
          }
        }
      }
    } else if (n->Opcode() == Op_ShenandoahLoadReferenceBarrier) {
      // skip
    } else if (n->is_AddP()
               || n->is_Phi()
               || n->is_ConstraintCast()
               || n->Opcode() == Op_Return
               || n->Opcode() == Op_CMoveP
               || n->Opcode() == Op_CMoveN
               || n->Opcode() == Op_Rethrow
               || n->is_MemBar()
               || n->Opcode() == Op_Conv2B
               || n->Opcode() == Op_SafePoint
               || n->is_CallJava()
               || n->Opcode() == Op_Unlock
               || n->Opcode() == Op_EncodeP
               || n->Opcode() == Op_DecodeN) {
      // nothing to do
    } else {
      static struct {
        int opcode;
        struct {
          int pos;
          verify_type t;
        } inputs[2];
      } others[] = {
        Op_FastLock,
        { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
        Op_Lock,
        { { TypeFunc::Parms, ShenandoahLoad },    { -1, ShenandoahNone} },
        Op_ArrayCopy,
        { { ArrayCopyNode::Src, ShenandoahLoad }, { ArrayCopyNode::Dest, ShenandoahStore } },
        Op_StrCompressedCopy,
        { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
        Op_StrInflatedCopy,
        { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
        Op_AryEq,
        { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
        Op_StrIndexOf,
        { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
        Op_StrComp,
        { { 2, ShenandoahLoad },                  { 4, ShenandoahLoad } },
        Op_StrEquals,
        { { 2, ShenandoahLoad },                  { 3, ShenandoahLoad } },
        Op_VectorizedHashCode,
        { { 2, ShenandoahLoad },                  { -1, ShenandoahNone } },
        Op_EncodeISOArray,
        { { 2, ShenandoahLoad },                  { 3, ShenandoahStore } },
        Op_CountPositives,
        { { 2, ShenandoahLoad },                  { -1, ShenandoahNone} },
        Op_CastP2X,
        { { 1, ShenandoahLoad },                  { -1, ShenandoahNone} },
        Op_StrIndexOfChar,
        { { 2, ShenandoahLoad },                  { -1, ShenandoahNone } },
      };

      const int others_len = sizeof(others) / sizeof(others[0]);
      int i = 0;
      for (; i < others_len; i++) {
        if (others[i].opcode == n->Opcode()) {
          break;
        }
      }
      uint stop = n->is_Call() ? n->as_Call()->tf()->domain()->cnt() : n->req();
      if (i != others_len) {
        const uint inputs_len = sizeof(others[0].inputs) / sizeof(others[0].inputs[0]);
        for (uint j = 0; j < inputs_len; j++) {
          int pos = others[i].inputs[j].pos;
          if (pos == -1) {
            break;
          }
          if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) {
            report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n);
          }
        }
        for (uint j = 1; j < stop; j++) {
          if (n->in(j) != nullptr && n->in(j)->bottom_type()->make_ptr() &&
              n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
            uint k = 0;
            for (; k < inputs_len && others[i].inputs[k].pos != (int)j; k++);
            if (k == inputs_len) {
              fatal("arg %d for node %s not covered", j, n->Name());
            }
          }
        }
      } else {
        for (uint j = 1; j < stop; j++) {
          if (n->in(j) != nullptr && n->in(j)->bottom_type()->make_ptr() &&
              n->in(j)->bottom_type()->make_ptr()->make_oopptr()) {
            fatal("%s not covered", n->Name());
          }
        }
      }
    }

    if (n->is_SafePoint()) {
      SafePointNode* sfpt = n->as_SafePoint();
      if (verify_no_useless_barrier && sfpt->jvms() != nullptr) {
        for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) {
          if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) {
            phis.clear();
            visited.reset();
          }
        }
      }
    }
  }

  if (verify_no_useless_barrier) {
    for (int i = 0; i < barriers.length(); i++) {
      Node* n = barriers.at(i);
      if (!barriers_used.member(n)) {
        tty->print("XXX useless barrier"); n->dump(-2);
        ShouldNotReachHere();
      }
    }
  }
}
#endif

bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) {
  // That both nodes have the same control is not sufficient to prove
  // domination, verify that there's no path from d to n
  ResourceMark rm;
  Unique_Node_List wq;
  wq.push(d);
  for (uint next = 0; next < wq.size(); next++) {
    Node *m = wq.at(next);
    if (m == n) {
      return false;
    }
    if (m->is_Phi() && m->in(0)->is_Loop()) {
      assert(phase->ctrl_or_self(m->in(LoopNode::EntryControl)) != c, "following loop entry should lead to new control");
    } else {
      if (m->is_Store() || m->is_LoadStore()) {
        // Take anti-dependencies into account
        Node* mem = m->in(MemNode::Memory);
        for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
          Node* u = mem->fast_out(i);
          if (u->is_Load() && phase->C->can_alias(m->adr_type(), phase->C->get_alias_index(u->adr_type())) &&
              phase->ctrl_or_self(u) == c) {
            wq.push(u);
          }
        }
      }
      for (uint i = 0; i < m->req(); i++) {
        if (m->in(i) != nullptr && phase->ctrl_or_self(m->in(i)) == c) {
          wq.push(m->in(i));
        }
      }
    }
  }
  return true;
}

bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) {
  if (d_c != n_c) {
    return phase->is_dominator(d_c, n_c);
  }
  return is_dominator_same_ctrl(d_c, d, n, phase);
}

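// Step from a memory state node to the memory state it consumes, along the
// given alias slice.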
Node* next_mem(Node* mem, int alias) {
  Node* res = nullptr;
  if (mem->is_Proj()) {
    res = mem->in(0);
  } else if (mem->is_SafePoint() || mem->is_MemBar()) {
    res = mem->in(TypeFunc::Memory);
  } else if (mem->is_Phi()) {
    res = mem->in(1);
  } else if (mem->is_MergeMem()) {
    res = mem->as_MergeMem()->memory_at(alias);
  } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
    assert(alias == Compile::AliasIdxRaw, "following raw memory can't lead to a barrier");
    res = mem->in(MemNode::Memory);
  } else {
#ifdef ASSERT
    mem->dump();
#endif
    ShouldNotReachHere();
  }
  return res;
}

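// Check the control flow between c and its dominator dom: returns nullptr if
// there is no branching, the single If projection on the path when exactly
// one is allowed and found, and NodeSentinel for unsupported shapes.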
Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) {
  Node* iffproj = nullptr;
  while (c != dom) {
    Node* next = phase->idom(c);
    assert(next->unique_ctrl_out_or_null() == c || c->is_Proj() || c->is_Region(), "multiple control flow out but no proj or region?");
    if (c->is_Region()) {
      ResourceMark rm;
      Unique_Node_List wq;
      wq.push(c);
      for (uint i = 0; i < wq.size(); i++) {
        Node *n = wq.at(i);
        if (n == next) {
          continue;
        }
        if (n->is_Region()) {
          for (uint j = 1; j < n->req(); j++) {
            wq.push(n->in(j));
          }
        } else {
          wq.push(n->in(0));
        }
      }
      for (uint i = 0; i < wq.size(); i++) {
        Node *n = wq.at(i);
        assert(n->is_CFG(), "");
        if (n->is_Multi()) {
          for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
            Node* u = n->fast_out(j);
            if (u->is_CFG()) {
              if (!wq.member(u) && !u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) {
                return NodeSentinel;
              }
            }
          }
        }
      }
    } else  if (c->is_Proj()) {
      if (c->is_IfProj()) {
        if (c->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) != nullptr) {
          // continue;
        } else {
          if (!allow_one_proj) {
            return NodeSentinel;
          }
          if (iffproj == nullptr) {
            iffproj = c;
          } else {
            return NodeSentinel;
          }
        }
      } else if (c->Opcode() == Op_JumpProj) {
        return NodeSentinel; // unsupported
      } else if (c->Opcode() == Op_CatchProj) {
        return NodeSentinel; // unsupported
      } else if (c->Opcode() == Op_CProj && next->is_NeverBranch()) {
        return NodeSentinel; // unsupported
      } else {
        assert(next->unique_ctrl_out() == c, "unsupported branch pattern");
      }
    }
    c = next;
  }
  return iffproj;
}

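// Walk the memory graph from mem towards a memory state whose control
// strictly dominates ctrl, following the given alias slice. Returns nullptr
// if a cycle is hit; mem_ctrl is set to the control of the returned state.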
Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) {
  ResourceMark rm;
  VectorSet wq;
  wq.set(mem->_idx);
  mem_ctrl = phase->ctrl_or_self(mem);
  while (!phase->is_dominator(mem_ctrl, ctrl) || mem_ctrl == ctrl) {
    mem = next_mem(mem, alias);
    if (wq.test_set(mem->_idx)) {
      return nullptr;
    }
    mem_ctrl = phase->ctrl_or_self(mem);
  }
  if (mem->is_MergeMem()) {
    mem = mem->as_MergeMem()->memory_at(alias);
    mem_ctrl = phase->ctrl_or_self(mem);
  }
  return mem;
}

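// Find the bottom (TypePtr::BOTTOM) memory state that is live at ctrl by
// walking up the dominator tree until a memory Phi, a call memory projection
// or another memory projection for the bottom slice is found.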
Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) {
  Node* mem = nullptr;
  Node* c = ctrl;
  do {
    if (c->is_Region()) {
      for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax && mem == nullptr; i++) {
        Node* u = c->fast_out(i);
        if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
          if (u->adr_type() == TypePtr::BOTTOM) {
            mem = u;
          }
        }
      }
    } else {
      if (c->is_Call() && c->as_Call()->adr_type() != nullptr) {
        CallProjections projs;
        c->as_Call()->extract_projections(&projs, true, false);
        if (projs.fallthrough_memproj != nullptr) {
          if (projs.fallthrough_memproj->adr_type() == TypePtr::BOTTOM) {
            if (projs.catchall_memproj == nullptr) {
              mem = projs.fallthrough_memproj;
            } else {
              if (phase->is_dominator(projs.fallthrough_catchproj, ctrl)) {
                mem = projs.fallthrough_memproj;
              } else {
                assert(phase->is_dominator(projs.catchall_catchproj, ctrl), "one proj must dominate barrier");
                mem = projs.catchall_memproj;
              }
            }
          }
        } else {
          Node* proj = c->as_Call()->proj_out(TypeFunc::Memory);
          if (proj != nullptr &&
              proj->adr_type() == TypePtr::BOTTOM) {
            mem = proj;
          }
        }
      } else {
        for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
          Node* u = c->fast_out(i);
          if (u->is_Proj() &&
              u->bottom_type() == Type::MEMORY &&
              u->adr_type() == TypePtr::BOTTOM) {
              assert(c->is_SafePoint() || c->is_MemBar() || c->is_Start(), "");
              assert(mem == nullptr, "only one proj");
              mem = u;
          }
        }
        assert(!c->is_Call() || c->as_Call()->adr_type() != nullptr || mem == nullptr, "no mem projection expected");
      }
    }
    c = phase->idom(c);
  } while (mem == nullptr);
  return mem;
}

void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* u = n->fast_out(i);
    if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) {
      uses.push(u);
    }
  }
}

static void hide_strip_mined_loop(OuterStripMinedLoopNode* outer, CountedLoopNode* inner, PhaseIdealLoop* phase) {
  OuterStripMinedLoopEndNode* le = inner->outer_loop_end();
  Node* new_outer = new LoopNode(outer->in(LoopNode::EntryControl), outer->in(LoopNode::LoopBackControl));
  phase->register_control(new_outer, phase->get_loop(outer), outer->in(LoopNode::EntryControl));
  Node* new_le = new IfNode(le->in(0), le->in(1), le->_prob, le->_fcnt);
  phase->register_control(new_le, phase->get_loop(le), le->in(0));
  phase->lazy_replace(outer, new_outer);
  phase->lazy_replace(le, new_le);
  inner->clear_strip_mined();
}

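// Emit the gc-state test: load the thread-local gc_state byte, mask it with
// flags and branch on the result. ctrl is updated to the projection taken
// when a flag is set; test_fail_ctrl receives the projection for a clear state.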
void ShenandoahBarrierC2Support::test_gc_state(Node*& ctrl, Node* raw_mem, Node*& test_fail_ctrl,
                                               PhaseIdealLoop* phase, int flags) {
  PhaseIterGVN& igvn = phase->igvn();
  Node* old_ctrl = ctrl;

  Node* thread          = new ThreadLocalNode();
  Node* gc_state_offset = igvn.MakeConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  Node* gc_state_addr   = new AddPNode(phase->C->top(), thread, gc_state_offset);
  Node* gc_state        = new LoadBNode(old_ctrl, raw_mem, gc_state_addr,
                                        DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(nullptr),
                                        TypeInt::BYTE, MemNode::unordered);
  Node* gc_state_and    = new AndINode(gc_state, igvn.intcon(flags));
  Node* gc_state_cmp    = new CmpINode(gc_state_and, igvn.zerocon(T_INT));
  Node* gc_state_bool   = new BoolNode(gc_state_cmp, BoolTest::ne);

  IfNode* gc_state_iff  = new IfNode(old_ctrl, gc_state_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
  ctrl                  = new IfTrueNode(gc_state_iff);
  test_fail_ctrl        = new IfFalseNode(gc_state_iff);

  IdealLoopTree* loop = phase->get_loop(old_ctrl);
  phase->register_control(gc_state_iff,   loop, old_ctrl);
  phase->register_control(ctrl,           loop, gc_state_iff);
  phase->register_control(test_fail_ctrl, loop, gc_state_iff);

  phase->register_new_node(thread,        old_ctrl);
  phase->register_new_node(gc_state_addr, old_ctrl);
  phase->register_new_node(gc_state,      old_ctrl);
  phase->register_new_node(gc_state_and,  old_ctrl);
  phase->register_new_node(gc_state_cmp,  old_ctrl);
  phase->register_new_node(gc_state_bool, old_ctrl);

  phase->set_ctrl(gc_state_offset, phase->C->root());

  assert(is_gc_state_test(gc_state_iff, flags), "Should match the shape");
}

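// Emit a null check for val if its type may include null: ctrl becomes the
// not-null path and null_ctrl the null path. Nothing is emitted when the
// type already excludes null.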
void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) {
  Node* old_ctrl = ctrl;
  PhaseIterGVN& igvn = phase->igvn();

  const Type* val_t = igvn.type(val);
  if (val_t->meet(TypePtr::NULL_PTR) == val_t) {
    Node* null_cmp   = new CmpPNode(val, igvn.zerocon(T_OBJECT));
    Node* null_test  = new BoolNode(null_cmp, BoolTest::ne);

    IfNode* null_iff = new IfNode(old_ctrl, null_test, PROB_LIKELY(0.999), COUNT_UNKNOWN);
    ctrl             = new IfTrueNode(null_iff);
    null_ctrl        = new IfFalseNode(null_iff);

    IdealLoopTree* loop = phase->get_loop(old_ctrl);
    phase->register_control(null_iff,  loop, old_ctrl);
    phase->register_control(ctrl,      loop, null_iff);
    phase->register_control(null_ctrl, loop, null_iff);

    phase->register_new_node(null_cmp,  old_ctrl);
    phase->register_new_node(null_test, old_ctrl);
  }
}

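// Emit the collection set test: index the in-cset fast-test table with the
// region index of val (its address shifted by the region size shift) and
// branch on the loaded byte. ctrl takes the in-cset path, not_cset_ctrl the other.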
void ShenandoahBarrierC2Support::test_in_cset(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) {
  Node* old_ctrl = ctrl;
  PhaseIterGVN& igvn = phase->igvn();

  Node* raw_val        = new CastP2XNode(old_ctrl, val);
  Node* cset_idx       = new URShiftXNode(raw_val, igvn.intcon(ShenandoahHeapRegion::region_size_bytes_shift_jint()));

  // Figure out the target cset address with raw pointer math.
  // This avoids matching AddP+LoadB that would emit inefficient code.
  // See JDK-8245465.
  Node* cset_addr_ptr  = igvn.makecon(TypeRawPtr::make(ShenandoahHeap::in_cset_fast_test_addr()));
  Node* cset_addr      = new CastP2XNode(old_ctrl, cset_addr_ptr);
  Node* cset_load_addr = new AddXNode(cset_addr, cset_idx);
  Node* cset_load_ptr  = new CastX2PNode(cset_load_addr);

  Node* cset_load      = new LoadBNode(old_ctrl, raw_mem, cset_load_ptr,
                                       DEBUG_ONLY(phase->C->get_adr_type(Compile::AliasIdxRaw)) NOT_DEBUG(nullptr),
                                       TypeInt::BYTE, MemNode::unordered);
  Node* cset_cmp       = new CmpINode(cset_load, igvn.zerocon(T_INT));
  Node* cset_bool      = new BoolNode(cset_cmp, BoolTest::ne);

  IfNode* cset_iff     = new IfNode(old_ctrl, cset_bool, PROB_UNLIKELY(0.999), COUNT_UNKNOWN);
  ctrl                 = new IfTrueNode(cset_iff);
  not_cset_ctrl        = new IfFalseNode(cset_iff);

  IdealLoopTree *loop = phase->get_loop(old_ctrl);
  phase->register_control(cset_iff,      loop, old_ctrl);
  phase->register_control(ctrl,          loop, cset_iff);
  phase->register_control(not_cset_ctrl, loop, cset_iff);

  phase->set_ctrl(cset_addr_ptr, phase->C->root());

  phase->register_new_node(raw_val,        old_ctrl);
  phase->register_new_node(cset_idx,       old_ctrl);
  phase->register_new_node(cset_addr,      old_ctrl);
  phase->register_new_node(cset_load_addr, old_ctrl);
  phase->register_new_node(cset_load_ptr,  old_ctrl);
  phase->register_new_node(cset_load,      old_ctrl);
  phase->register_new_node(cset_cmp,       old_ctrl);
  phase->register_new_node(cset_bool,      old_ctrl);
}

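// Emit the runtime call to the load reference barrier stub that matches the
// access strength (strong/weak/phantom) and narrowness, and replace val with
// the call result cast back to the original oop type.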
void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node* load_addr,
                                               DecoratorSet decorators, PhaseIdealLoop* phase) {
  IdealLoopTree*loop = phase->get_loop(ctrl);
  const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr();

  address calladdr = nullptr;
  const char* name = nullptr;
  bool is_strong  = ShenandoahBarrierSet::is_strong_access(decorators);
  bool is_weak    = ShenandoahBarrierSet::is_weak_access(decorators);
  bool is_phantom = ShenandoahBarrierSet::is_phantom_access(decorators);
  bool is_native  = ShenandoahBarrierSet::is_native_access(decorators);
  bool is_narrow  = UseCompressedOops && !is_native;
  if (is_strong) {
    if (is_narrow) {
      calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong_narrow);
      name = "load_reference_barrier_strong_narrow";
    } else {
      calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_strong);
      name = "load_reference_barrier_strong";
    }
  } else if (is_weak) {
    if (is_narrow) {
      calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak_narrow);
      name = "load_reference_barrier_weak_narrow";
    } else {
      calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_weak);
      name = "load_reference_barrier_weak";
    }
  } else {
    assert(is_phantom, "only remaining strength");
    if (is_narrow) {
      calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom_narrow);
      name = "load_reference_barrier_phantom_narrow";
    } else {
      calladdr = CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_phantom);
      name = "load_reference_barrier_phantom";
    }
  }
  Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_load_reference_barrier_Type(), calladdr, name, TypeRawPtr::BOTTOM);

  call->init_req(TypeFunc::Control, ctrl);
  call->init_req(TypeFunc::I_O, phase->C->top());
  call->init_req(TypeFunc::Memory, phase->C->top());
  call->init_req(TypeFunc::FramePtr, phase->C->top());
  call->init_req(TypeFunc::ReturnAdr, phase->C->top());
  call->init_req(TypeFunc::Parms, val);
  call->init_req(TypeFunc::Parms+1, load_addr);
  phase->register_control(call, loop, ctrl);
  ctrl = new ProjNode(call, TypeFunc::Control);
  phase->register_control(ctrl, loop, call);
  val = new ProjNode(call, TypeFunc::Parms);
  phase->register_new_node(val, call);
  val = new CheckCastPPNode(ctrl, val, obj_type);
  phase->register_new_node(val, ctrl);
}

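// After expansion, rewire nodes that were control dependent on the barrier's
// original control so that they hang off the new region instead, while the
// incoming raw memory state is left above the barrier.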
void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) {
  Node* ctrl = phase->get_ctrl(barrier);
  Node* init_raw_mem = fixer.find_mem(ctrl, barrier);

  // Update the control of all nodes that should be after the
  // barrier control flow
  uses.clear();
  // Every node that is control dependent on the barrier's input
  // control will be after the expanded barrier. The raw memory (if
  // its memory is control dependent on the barrier's input control)
  // must stay above the barrier.
  uses_to_ignore.clear();
  if (phase->has_ctrl(init_raw_mem) && phase->get_ctrl(init_raw_mem) == ctrl && !init_raw_mem->is_Phi()) {
    uses_to_ignore.push(init_raw_mem);
  }
  for (uint next = 0; next < uses_to_ignore.size(); next++) {
    Node *n = uses_to_ignore.at(next);
    for (uint i = 0; i < n->req(); i++) {
      Node* in = n->in(i);
      if (in != nullptr && phase->has_ctrl(in) && phase->get_ctrl(in) == ctrl) {
        uses_to_ignore.push(in);
      }
    }
  }
  for (DUIterator_Fast imax, i = ctrl->fast_outs(imax); i < imax; i++) {
    Node* u = ctrl->fast_out(i);
    if (u->_idx < last &&
        u != barrier &&
        !u->depends_only_on_test() && // preserve dependency on test
        !uses_to_ignore.member(u) &&
        (u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
        (ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {
      Node* old_c = phase->ctrl_or_self(u);
      Node* c = old_c;
      if (c != ctrl ||
          is_dominator_same_ctrl(old_c, barrier, u, phase) ||
          ShenandoahBarrierSetC2::is_shenandoah_state_load(u)) {
        phase->igvn().rehash_node_delayed(u);
        int nb = u->replace_edge(ctrl, region, &phase->igvn());
        if (u->is_CFG()) {
          if (phase->idom(u) == ctrl) {
            phase->set_idom(u, region, phase->dom_depth(region));
          }
        } else if (phase->get_ctrl(u) == ctrl) {
          assert(u != init_raw_mem, "should leave input raw mem above the barrier");
          uses.push(u);
        }
        assert(nb == 1, "more than 1 ctrl input?");
        --i, imax -= nb;
      }
    }
  }
}

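// Build Phis that merge n (on paths reached through the fallthrough catch
// projection) with its clone n_clone (on paths reached through the catchall
// projection) at the regions found between c and ctrl.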
static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) {
  Node* region = nullptr;
  while (c != ctrl) {
    if (c->is_Region()) {
      region = c;
    }
    c = phase->idom(c);
  }
  assert(region != nullptr, "");
  Node* phi = new PhiNode(region, n->bottom_type());
  for (uint j = 1; j < region->req(); j++) {
    Node* in = region->in(j);
    if (phase->is_dominator(projs.fallthrough_catchproj, in)) {
      phi->init_req(j, n);
    } else if (phase->is_dominator(projs.catchall_catchproj, in)) {
      phi->init_req(j, n_clone);
    } else {
      phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase));
    }
  }
  phase->register_new_node(phi, region);
  return phi;
}

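// Pin each load reference barrier to a control point where it can be expanded:
// barriers control dependent on a rethrow call are moved above the call, and
// nodes live on both the normal and exceptional paths of a Java call are cloned.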
void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) {
  ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state();

  Unique_Node_List uses;
  Node_Stack stack(0);
  Node_List clones;
  for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
    ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);

    Node* ctrl = phase->get_ctrl(lrb);
    Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);

    CallStaticJavaNode* unc = nullptr;
    Node* unc_ctrl = nullptr;
    Node* uncasted_val = val;

    for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) {
      Node* u = lrb->fast_out(i);
      if (u->Opcode() == Op_CastPP &&
          u->in(0) != nullptr &&
          phase->is_dominator(u->in(0), ctrl)) {
        const Type* u_t = phase->igvn().type(u);

        if (u_t->meet(TypePtr::NULL_PTR) != u_t &&
            u->in(0)->Opcode() == Op_IfTrue &&
            u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) &&
            u->in(0)->in(0)->is_If() &&
            u->in(0)->in(0)->in(1)->Opcode() == Op_Bool &&
            u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne &&
            u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP &&
            u->in(0)->in(0)->in(1)->in(1)->in(1) == val &&
            u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) {
          IdealLoopTree* loop = phase->get_loop(ctrl);
          IdealLoopTree* unc_loop = phase->get_loop(u->in(0));

          if (!unc_loop->is_member(loop)) {
            continue;
          }

          Node* branch = no_branches(ctrl, u->in(0), false, phase);
          assert(branch == nullptr || branch == NodeSentinel, "was not looking for a branch");
          if (branch == NodeSentinel) {
            continue;
          }

          Node* iff = u->in(0)->in(0);
          Node* bol = iff->in(1)->clone();
          Node* cmp = bol->in(1)->clone();
          cmp->set_req(1, lrb);
          bol->set_req(1, cmp);
          phase->igvn().replace_input_of(iff, 1, bol);
          phase->set_ctrl(lrb, iff->in(0));
          phase->register_new_node(cmp, iff->in(0));
          phase->register_new_node(bol, iff->in(0));
          break;
        }
      }
    }
    if ((ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) || ctrl->is_CallJava()) {
      CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_CallJava() : ctrl->as_CallJava();
      if (call->entry_point() == OptoRuntime::rethrow_stub()) {
        // The rethrow call may have too many projections to be
        // properly handled here. Given there's no reason for a
        // barrier to depend on the call, move it above the call
        stack.push(lrb, 0);
        do {
          Node* n = stack.node();
          uint idx = stack.index();
          if (idx < n->req()) {
            Node* in = n->in(idx);
            stack.set_index(idx+1);
            if (in != nullptr) {
              if (phase->has_ctrl(in)) {
                if (phase->is_dominator(call, phase->get_ctrl(in))) {
#ifdef ASSERT
                  for (uint i = 0; i < stack.size(); i++) {
                    assert(stack.node_at(i) != in, "node shouldn't have been seen yet");
                  }
#endif
                  stack.push(in, 0);
                }
              } else {
                assert(phase->is_dominator(in, call->in(0)), "no dependency on the call");
              }
            }
          } else {
            phase->set_ctrl(n, call->in(0));
            stack.pop();
          }
        } while(stack.size() > 0);
        continue;
      }
      CallProjections projs;
      call->extract_projections(&projs, false, false);

#ifdef ASSERT
      VectorSet cloned;
#endif
      Node* lrb_clone = lrb->clone();
      phase->register_new_node(lrb_clone, projs.catchall_catchproj);
      phase->set_ctrl(lrb, projs.fallthrough_catchproj);

      stack.push(lrb, 0);
      clones.push(lrb_clone);

      do {
        assert(stack.size() == clones.size(), "");
        Node* n = stack.node();
#ifdef ASSERT
        if (n->is_Load()) {
          Node* mem = n->in(MemNode::Memory);
          for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) {
            Node* u = mem->fast_out(j);
            assert(!u->is_Store() || !u->is_LoadStore() || phase->get_ctrl(u) != ctrl, "anti dependent store?");
          }
        }
#endif
        uint idx = stack.index();
        Node* n_clone = clones.at(clones.size()-1);
        if (idx < n->outcnt()) {
          Node* u = n->raw_out(idx);
          Node* c = phase->ctrl_or_self(u);
          if (phase->is_dominator(call, c) && phase->is_dominator(c, projs.fallthrough_proj)) {
            stack.set_index(idx+1);
            assert(!u->is_CFG(), "");
            stack.push(u, 0);
            assert(!cloned.test_set(u->_idx), "only one clone");
            Node* u_clone = u->clone();
            int nb = u_clone->replace_edge(n, n_clone, &phase->igvn());
            assert(nb > 0, "should have replaced some uses");
            phase->register_new_node(u_clone, projs.catchall_catchproj);
            clones.push(u_clone);
            phase->set_ctrl(u, projs.fallthrough_catchproj);
          } else {
            bool replaced = false;
            if (u->is_Phi()) {
              for (uint k = 1; k < u->req(); k++) {
                if (u->in(k) == n) {
                  if (phase->is_dominator(projs.catchall_catchproj, u->in(0)->in(k))) {
                    phase->igvn().replace_input_of(u, k, n_clone);
                    replaced = true;
                  } else if (!phase->is_dominator(projs.fallthrough_catchproj, u->in(0)->in(k))) {
                    phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase));
                    replaced = true;
                  }
                }
              }
            } else {
              if (phase->is_dominator(projs.catchall_catchproj, c)) {
                phase->igvn().rehash_node_delayed(u);
                int nb = u->replace_edge(n, n_clone, &phase->igvn());
                assert(nb > 0, "should have replaced some uses");
                replaced = true;
              } else if (!phase->is_dominator(projs.fallthrough_catchproj, c)) {
                if (u->is_If()) {
                  // Can't break If/Bool/Cmp chain
                  assert(n->is_Bool(), "unexpected If shape");
                  assert(stack.node_at(stack.size()-2)->is_Cmp(), "unexpected If shape");
                  assert(n_clone->is_Bool(), "unexpected clone");
                  assert(clones.at(clones.size()-2)->is_Cmp(), "unexpected clone");
                  Node* bol_clone = n->clone();
                  Node* cmp_clone = stack.node_at(stack.size()-2)->clone();
                  bol_clone->set_req(1, cmp_clone);

                  Node* nn = stack.node_at(stack.size()-3);
                  Node* nn_clone = clones.at(clones.size()-3);
                  assert(nn->Opcode() == nn_clone->Opcode(), "mismatch");

                  int nb = cmp_clone->replace_edge(nn, create_phis_on_call_return(ctrl, c, nn, nn_clone, projs, phase),
                                                   &phase->igvn());
                  assert(nb > 0, "should have replaced some uses");

                  phase->register_new_node(bol_clone, u->in(0));
                  phase->register_new_node(cmp_clone, u->in(0));

                  phase->igvn().replace_input_of(u, 1, bol_clone);

                } else {
                  phase->igvn().rehash_node_delayed(u);
                  int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase), &phase->igvn());
                  assert(nb > 0, "should have replaced some uses");
                }
                replaced = true;
              }
            }
            if (!replaced) {
              stack.set_index(idx+1);
            }
          }
        } else {
          stack.pop();
          clones.pop();
        }
      } while (stack.size() > 0);
      assert(stack.size() == 0 && clones.size() == 0, "");
    }
  }

  for (int i = 0; i < state->load_reference_barriers_count(); i++) {
    ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
    Node* ctrl = phase->get_ctrl(lrb);
    IdealLoopTree* loop = phase->get_loop(ctrl);
    Node* head = loop->head();
    if (head->is_OuterStripMinedLoop()) {
1292       // Expanding a barrier here will break loop strip mining
1293       // verification. Transform the loop so the loop nest doesn't
1294       // appear as strip mined.
1295       OuterStripMinedLoopNode* outer = head->as_OuterStripMinedLoop();
1296       hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase);
1297     }
1298   }
1299 
1300   // Expand load-reference-barriers
1301   MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase);
1302   Unique_Node_List uses_to_ignore;
1303   for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) {
1304     ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i);
1305     uint last = phase->C->unique();
1306     Node* ctrl = phase->get_ctrl(lrb);
1307     Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn);
1308 
1309     Node* orig_ctrl = ctrl;
1310 
1311     Node* raw_mem = fixer.find_mem(ctrl, lrb);
1312     Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, nullptr);
1313 
1314     IdealLoopTree *loop = phase->get_loop(ctrl);
1315 
1316     Node* heap_stable_ctrl = nullptr;
1317     Node* null_ctrl = nullptr;
1318 
1319     assert(val->bottom_type()->make_oopptr(), "need oop");
1320     assert(val->bottom_type()->make_oopptr()->const_oop() == nullptr, "expect non-constant");
1321 
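         // The expanded barrier is a multi-way diamond: a fast path when the heap
         // has no forwarded objects (gc-state test), a path for values not in the
         // collection set, and a slow path that calls the LRB runtime stub. All
         // paths merge at region/val_phi below.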
1322     enum { _heap_stable = 1, _evac_path, _not_cset, PATH_LIMIT };
1323     Node* region = new RegionNode(PATH_LIMIT);
1324     Node* val_phi = new PhiNode(region, val->bottom_type()->is_oopptr());
1325 
1326     // Stable path.
1327     int flags = ShenandoahHeap::HAS_FORWARDED;
1328     if (!ShenandoahBarrierSet::is_strong_access(lrb->decorators())) {
1329       flags |= ShenandoahHeap::WEAK_ROOTS;
1330     }
1331     test_gc_state(ctrl, raw_mem, heap_stable_ctrl, phase, flags);
1332     IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If();
1333 
1334     // Heap stable case
1335     region->init_req(_heap_stable, heap_stable_ctrl);
1336     val_phi->init_req(_heap_stable, val);
1337 
1338     // Test for in-cset, unless it's a native-LRB. Native LRBs need to return null
1339     // even for non-cset objects to prevent resurrection of such objects.
1340     // Wires !in_cset(obj) to the _not_cset slot of the region and phis.
1341     Node* not_cset_ctrl = nullptr;
1342     if (ShenandoahBarrierSet::is_strong_access(lrb->decorators())) {
1343       test_in_cset(ctrl, not_cset_ctrl, val, raw_mem, phase);
1344     }
1345     if (not_cset_ctrl != nullptr) {
1346       region->init_req(_not_cset, not_cset_ctrl);
1347       val_phi->init_req(_not_cset, val);
1348     } else {
1349       region->del_req(_not_cset);
1350       val_phi->del_req(_not_cset);
1351     }
1352 
1353     // Resolve object when orig-value is in cset.
1354     // Make the unconditional resolve for fwdptr.
1355 
1356     // Call the lrb-stub and wire that path into the _evac_path slot.
1357     Node* result_mem = nullptr;
1358 
1359     Node* addr;
1360     {
1361       VectorSet visited;
1362       addr = get_load_addr(phase, visited, lrb);
1363     }
1364     if (addr->Opcode() == Op_AddP) {
1365       Node* orig_base = addr->in(AddPNode::Base);
1366       Node* base = new CheckCastPPNode(ctrl, orig_base, orig_base->bottom_type(), ConstraintCastNode::StrongDependency);
1367       phase->register_new_node(base, ctrl);
1368       if (addr->in(AddPNode::Base) == addr->in(AddPNode::Address)) {
1369         // Field access
1370         addr = addr->clone();
1371         addr->set_req(AddPNode::Base, base);
1372         addr->set_req(AddPNode::Address, base);
1373         phase->register_new_node(addr, ctrl);
1374       } else {
1375         Node* addr2 = addr->in(AddPNode::Address);
1376         if (addr2->Opcode() == Op_AddP && addr2->in(AddPNode::Base) == addr2->in(AddPNode::Address) &&
1377               addr2->in(AddPNode::Base) == orig_base) {
1378           addr2 = addr2->clone();
1379           addr2->set_req(AddPNode::Base, base);
1380           addr2->set_req(AddPNode::Address, base);
1381           phase->register_new_node(addr2, ctrl);
1382           addr = addr->clone();
1383           addr->set_req(AddPNode::Base, base);
1384           addr->set_req(AddPNode::Address, addr2);
1385           phase->register_new_node(addr, ctrl);
1386         }
1387       }
1388     }
1389     call_lrb_stub(ctrl, val, addr, lrb->decorators(), phase);
1390     region->init_req(_evac_path, ctrl);
1391     val_phi->init_req(_evac_path, val);
1392 
1393     phase->register_control(region, loop, heap_stable_iff);
1394     Node* out_val = val_phi;
1395     phase->register_new_node(val_phi, region);
1396 
1397     fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase);
1398 
1399     ctrl = orig_ctrl;
1400 
1401     phase->igvn().replace_node(lrb, out_val);
1402 
1403     follow_barrier_uses(out_val, ctrl, uses, phase);
1404 
1405     for (uint next = 0; next < uses.size(); next++) {
1406       Node *n = uses.at(next);
1407       assert(phase->get_ctrl(n) == ctrl, "bad control");
1408       assert(n != raw_mem, "should leave input raw mem above the barrier");
1409       phase->set_ctrl(n, region);
1410       follow_barrier_uses(n, ctrl, uses, phase);
1411     }
1412     fixer.record_new_ctrl(ctrl, region, raw_mem, raw_mem_for_ctrl);
1413   }
1414   // Done expanding load-reference-barriers.
1415   assert(ShenandoahBarrierSetC2::bsc2()->state()->load_reference_barriers_count() == 0, "all load reference barrier nodes should have been replaced");
1416 }
1417 
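     // Walk back from the value input of a load reference barrier to the address
     // the value was loaded from. Returns nullptr for nodes that were already
     // visited, and a null constant (zerocon) when no single address can be
     // recovered.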
1418 Node* ShenandoahBarrierC2Support::get_load_addr(PhaseIdealLoop* phase, VectorSet& visited, Node* in) {
1419   if (visited.test_set(in->_idx)) {
1420     return nullptr;
1421   }
1422   switch (in->Opcode()) {
1423     case Op_Proj:
1424       return get_load_addr(phase, visited, in->in(0));
1425     case Op_CastPP:
1426     case Op_CheckCastPP:
1427     case Op_DecodeN:
1428     case Op_EncodeP:
1429       return get_load_addr(phase, visited, in->in(1));
1430     case Op_LoadN:
1431     case Op_LoadP:
1432       return in->in(MemNode::Address);
1433     case Op_CompareAndExchangeN:
1434     case Op_CompareAndExchangeP:
1435     case Op_GetAndSetN:
1436     case Op_GetAndSetP:
1437     case Op_ShenandoahCompareAndExchangeP:
1438     case Op_ShenandoahCompareAndExchangeN:
1439       // Those instructions would just have stored a different
1440       // value into the field. There is no point in trying to fix it up here.
1441       return phase->igvn().zerocon(T_OBJECT);
1442     case Op_CMoveP:
1443     case Op_CMoveN: {
1444       Node* t = get_load_addr(phase, visited, in->in(CMoveNode::IfTrue));
1445       Node* f = get_load_addr(phase, visited, in->in(CMoveNode::IfFalse));
1446       // Handle unambiguous cases: single address reported on both branches.
1447       if (t != nullptr && f == nullptr) return t;
1448       if (t == nullptr && f != nullptr) return f;
1449       if (t != nullptr && t == f)    return t;
1450       // Ambiguity.
1451       return phase->igvn().zerocon(T_OBJECT);
1452     }
1453     case Op_Phi: {
1454       Node* addr = nullptr;
1455       for (uint i = 1; i < in->req(); i++) {
1456         Node* addr1 = get_load_addr(phase, visited, in->in(i));
1457         if (addr == nullptr) {
1458           addr = addr1;
1459         }
1460         if (addr != addr1) {
1461           return phase->igvn().zerocon(T_OBJECT);
1462         }
1463       }
1464       return addr;
1465     }
1466     case Op_ShenandoahLoadReferenceBarrier:
1467       return get_load_addr(phase, visited, in->in(ShenandoahLoadReferenceBarrierNode::ValueIn));
1468     case Op_CallDynamicJava:
1469     case Op_CallLeaf:
1470     case Op_CallStaticJava:
1471     case Op_ConN:
1472     case Op_ConP:
1473     case Op_Parm:
1474     case Op_CreateEx:
1475       return phase->igvn().zerocon(T_OBJECT);
1476     default:
1477 #ifdef ASSERT
1478       fatal("Unknown node in get_load_addr: %s", NodeClassNames[in->Opcode()]);
1479 #endif
1480       return phase->igvn().zerocon(T_OBJECT);
1481   }
1482 
1483 }
1484 
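     // If the gc-state load feeding this test is not already above the loop, clone
     // the load/and/cmp/bool chain at the loop entry so the test becomes loop
     // invariant and the loop can be unswitched on it.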
1485 void ShenandoahBarrierC2Support::move_gc_state_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
1486   IdealLoopTree *loop = phase->get_loop(iff);
1487   Node* loop_head = loop->_head;
1488   Node* entry_c = loop_head->in(LoopNode::EntryControl);
1489 
1490   Node* bol = iff->in(1);
1491   Node* cmp = bol->in(1);
1492   Node* andi = cmp->in(1);
1493   Node* load = andi->in(1);
1494 
1495   assert(is_gc_state_load(load), "broken");
1496   if (!phase->is_dominator(load->in(0), entry_c)) {
1497     Node* mem_ctrl = nullptr;
1498     Node* mem = dom_mem(load->in(MemNode::Memory), loop_head, Compile::AliasIdxRaw, mem_ctrl, phase);
1499     load = load->clone();
1500     load->set_req(MemNode::Memory, mem);
1501     load->set_req(0, entry_c);
1502     phase->register_new_node(load, entry_c);
1503     andi = andi->clone();
1504     andi->set_req(1, load);
1505     phase->register_new_node(andi, entry_c);
1506     cmp = cmp->clone();
1507     cmp->set_req(1, andi);
1508     phase->register_new_node(cmp, entry_c);
1509     bol = bol->clone();
1510     bol->set_req(1, cmp);
1511     phase->register_new_node(bol, entry_c);
1512 
1513     phase->igvn().replace_input_of(iff, 1, bol);
1514   }
1515 }
1516 
1517 bool ShenandoahBarrierC2Support::identical_backtoback_ifs(Node* n, PhaseIdealLoop* phase) {
1518   if (!n->is_If() || n->is_CountedLoopEnd()) {
1519     return false;
1520   }
1521   Node* region = n->in(0);
1522 
1523   if (!region->is_Region()) {
1524     return false;
1525   }
1526   Node* dom = phase->idom(region);
1527   if (!dom->is_If()) {
1528     return false;
1529   }
1530 
1531   if (!is_heap_stable_test(n) || !is_heap_stable_test(dom)) {
1532     return false;
1533   }
1534 
1535   IfNode* dom_if = dom->as_If();
1536   Node* proj_true = dom_if->proj_out(1);
1537   Node* proj_false = dom_if->proj_out(0);
1538 
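       // Every path into the region must come through the dominating If: each
       // region input has to be dominated by one of its projections.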
1539   for (uint i = 1; i < region->req(); i++) {
1540     if (phase->is_dominator(proj_true, region->in(i))) {
1541       continue;
1542     }
1543     if (phase->is_dominator(proj_false, region->in(i))) {
1544       continue;
1545     }
1546     return false;
1547   }
1548 
1549   return true;
1550 }
1551 
1552 bool ShenandoahBarrierC2Support::merge_point_safe(Node* region) {
1553   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
1554     Node* n = region->fast_out(i);
1555     if (n->is_LoadStore()) {
1556       // Splitting a LoadStore node through a phi causes it to lose its SCMemProj: the split-if code doesn't support
1557       // a LoadStore at the region the If is split through, because that's not expected to happen (LoadStore nodes
1558       // should be between barrier nodes). It does happen with Shenandoah though, because barriers can get
1559       // expanded around a LoadStore node.
1560       return false;
1561     }
1562   }
1563   return true;
1564 }
1565 
1566 
1567 void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
1568   assert(is_heap_stable_test(n), "no other tests");
1569   if (identical_backtoback_ifs(n, phase)) {
1570     Node* n_ctrl = n->in(0);
1571     if (phase->can_split_if(n_ctrl) && merge_point_safe(n_ctrl)) {
1572       IfNode* dom_if = phase->idom(n_ctrl)->as_If();
1573       if (is_heap_stable_test(n)) {
1574         Node* gc_state_load = n->in(1)->in(1)->in(1)->in(1);
1575         assert(is_gc_state_load(gc_state_load), "broken");
1576         Node* dom_gc_state_load = dom_if->in(1)->in(1)->in(1)->in(1);
1577         assert(is_gc_state_load(dom_gc_state_load), "broken");
1578         if (gc_state_load != dom_gc_state_load) {
1579           phase->igvn().replace_node(gc_state_load, dom_gc_state_load);
1580         }
1581       }
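           // Record, per region input, which projection of the dominating test it
           // comes from, as a phi of constant bools. Feeding that phi to this If
           // and splitting the If through the region lets the test constant-fold
           // on every incoming path.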
1582       PhiNode* bolphi = PhiNode::make_blank(n_ctrl, n->in(1));
1583       Node* proj_true = dom_if->proj_out(1);
1584       Node* proj_false = dom_if->proj_out(0);
1585       Node* con_true = phase->igvn().makecon(TypeInt::ONE);
1586       Node* con_false = phase->igvn().makecon(TypeInt::ZERO);
1587 
1588       for (uint i = 1; i < n_ctrl->req(); i++) {
1589         if (phase->is_dominator(proj_true, n_ctrl->in(i))) {
1590           bolphi->init_req(i, con_true);
1591         } else {
1592           assert(phase->is_dominator(proj_false, n_ctrl->in(i)), "bad if");
1593           bolphi->init_req(i, con_false);
1594         }
1595       }
1596       phase->register_new_node(bolphi, n_ctrl);
1597       phase->igvn().replace_input_of(n, 1, bolphi);
1598       phase->do_split_if(n);
1599     }
1600   }
1601 }
1602 
1603 IfNode* ShenandoahBarrierC2Support::find_unswitching_candidate(const IdealLoopTree* loop, PhaseIdealLoop* phase) {
1604   // Find first invariant test that doesn't exit the loop
1605   LoopNode *head = loop->_head->as_Loop();
1606   IfNode* unswitch_iff = nullptr;
1607   Node* n = head->in(LoopNode::LoopBackControl);
1608   int loop_has_sfpts = -1;
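       // The gc state can only change at a safepoint, so the heap-stable test is
       // only considered a candidate when the loop body contains no safepoints
       // (leaf calls don't count).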
1609   while (n != head) {
1610     Node* n_dom = phase->idom(n);
1611     if (n->is_Region()) {
1612       if (n_dom->is_If()) {
1613         IfNode* iff = n_dom->as_If();
1614         if (iff->in(1)->is_Bool()) {
1615           BoolNode* bol = iff->in(1)->as_Bool();
1616           if (bol->in(1)->is_Cmp()) {
1617             // If the condition is invariant and not a loop exit,
1618             // we have found a reason to unswitch.
1619             if (is_heap_stable_test(iff) &&
1620                 (loop_has_sfpts == -1 || loop_has_sfpts == 0)) {
1621               assert(!loop->is_loop_exit(iff), "both branches should be in the loop");
1622               if (loop_has_sfpts == -1) {
1623                 for (uint i = 0; i < loop->_body.size(); i++) {
1624                   Node *m = loop->_body[i];
1625                   if (m->is_SafePoint() && !m->is_CallLeaf()) {
1626                     loop_has_sfpts = 1;
1627                     break;
1628                   }
1629                 }
1630                 if (loop_has_sfpts == -1) {
1631                   loop_has_sfpts = 0;
1632                 }
1633               }
1634               if (!loop_has_sfpts) {
1635                 unswitch_iff = iff;
1636               }
1637             }
1638           }
1639         }
1640       }
1641     }
1642     n = n_dom;
1643   }
1644   return unswitch_iff;
1645 }
1646 
1647 
1648 void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) {
1649   Node_List heap_stable_tests;
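       // DFS over the whole graph to collect the gc-state (heap-stable) tests.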
1650   stack.push(phase->C->start(), 0);
1651   do {
1652     Node* n = stack.node();
1653     uint i = stack.index();
1654 
1655     if (i < n->outcnt()) {
1656       Node* u = n->raw_out(i);
1657       stack.set_index(i+1);
1658       if (!visited.test_set(u->_idx)) {
1659         stack.push(u, 0);
1660       }
1661     } else {
1662       stack.pop();
1663       if (n->is_If() && is_heap_stable_test(n)) {
1664         heap_stable_tests.push(n);
1665       }
1666     }
1667   } while (stack.size() > 0);
1668 
1669   for (uint i = 0; i < heap_stable_tests.size(); i++) {
1670     Node* n = heap_stable_tests.at(i);
1671     assert(is_heap_stable_test(n), "only evacuation test");
1672     merge_back_to_back_tests(n, phase);
1673   }
1674 
1675   if (!phase->C->major_progress()) {
1676     VectorSet seen;
1677     for (uint i = 0; i < heap_stable_tests.size(); i++) {
1678       Node* n = heap_stable_tests.at(i);
1679       IdealLoopTree* loop = phase->get_loop(n);
1680       if (loop != phase->ltree_root() &&
1681           loop->_child == nullptr &&
1682           !loop->_irreducible) {
1683         Node* head = loop->_head;
1684         if (head->is_Loop() &&
1685             (!head->is_CountedLoop() || head->as_CountedLoop()->is_main_loop() || head->as_CountedLoop()->is_normal_loop()) &&
1686             !seen.test_set(head->_idx)) {
1687           IfNode* iff = find_unswitching_candidate(loop, phase);
1688           if (iff != nullptr) {
1689             Node* bol = iff->in(1);
1690             if (head->as_Loop()->is_strip_mined()) {
1691               head->as_Loop()->verify_strip_mined(0);
1692             }
1693             move_gc_state_test_out_of_loop(iff, phase);
1694 
1695             AutoNodeBudget node_budget(phase);
1696 
1697             if (loop->policy_unswitching(phase)) {
1698               if (head->as_Loop()->is_strip_mined()) {
1699                 OuterStripMinedLoopNode* outer = head->as_CountedLoop()->outer_loop();
1700                 hide_strip_mined_loop(outer, head->as_CountedLoop(), phase);
1701               }
1702               phase->do_unswitching(loop, old_new);
1703             } else {
1704               // Not proceeding with unswitching. Move load back in
1705               // the loop.
1706               phase->igvn().replace_input_of(iff, 1, bol);
1707             }
1708           }
1709         }
1710       }
1711     }
1712   }
1713 }
1714 
1715 #ifdef ASSERT
1716 static bool has_never_branch(Node* root) {
1717   for (uint i = 1; i < root->req(); i++) {
1718     Node* in = root->in(i);
1719     if (in != nullptr && in->Opcode() == Op_Halt && in->in(0)->is_Proj() && in->in(0)->in(0)->is_NeverBranch()) {
1720       return true;
1721     }
1722   }
1723   return false;
1724 }
1725 #endif
1726 
1727 void MemoryGraphFixer::collect_memory_nodes() {
1728   Node_Stack stack(0);
1729   VectorSet visited;
1730   Node_List regions;
1731 
1732   // Walk the raw memory graph and create a mapping from CFG node to
1733   // memory node. Exclude phis for now.
1734   stack.push(_phase->C->root(), 1);
1735   do {
1736     Node* n = stack.node();
1737     int opc = n->Opcode();
1738     uint i = stack.index();
1739     if (i < n->req()) {
1740       Node* mem = nullptr;
1741       if (opc == Op_Root) {
1742         Node* in = n->in(i);
1743         int in_opc = in->Opcode();
1744         if (in_opc == Op_Return || in_opc == Op_Rethrow) {
1745           mem = in->in(TypeFunc::Memory);
1746         } else if (in_opc == Op_Halt) {
1747           if (in->in(0)->is_Region()) {
1748             Node* r = in->in(0);
1749             for (uint j = 1; j < r->req(); j++) {
1750               assert(!r->in(j)->is_NeverBranch(), "");
1751             }
1752           } else {
1753             Node* proj = in->in(0);
1754             assert(proj->is_Proj(), "");
1755             Node* in = proj->in(0);
1756             assert(in->is_CallStaticJava() || in->is_NeverBranch() || in->Opcode() == Op_Catch || proj->is_IfProj(), "");
1757             if (in->is_CallStaticJava()) {
1758               mem = in->in(TypeFunc::Memory);
1759             } else if (in->Opcode() == Op_Catch) {
1760               Node* call = in->in(0)->in(0);
1761               assert(call->is_Call(), "");
1762               mem = call->in(TypeFunc::Memory);
1763             } else if (in->is_NeverBranch()) {
1764               mem = collect_memory_for_infinite_loop(in);
1765             }
1766           }
1767         } else {
1768 #ifdef ASSERT
1769           n->dump();
1770           in->dump();
1771 #endif
1772           ShouldNotReachHere();
1773         }
1774       } else {
1775         assert(n->is_Phi() && n->bottom_type() == Type::MEMORY, "");
1776         assert(n->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(n->adr_type()) == _alias, "");
1777         mem = n->in(i);
1778       }
1779       i++;
1780       stack.set_index(i);
1781       if (mem == nullptr) {
1782         continue;
1783       }
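           // Walk up the memory chain from this state: phis, projections and
           // stores are pushed (and recorded as the memory state at their control
           // when popped), while safepoints, membars and MergeMems are just
           // stepped through; stop at Start or at nodes already visited.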
1784       for (;;) {
1785         if (visited.test_set(mem->_idx) || mem->is_Start()) {
1786           break;
1787         }
1788         if (mem->is_Phi()) {
1789           stack.push(mem, 2);
1790           mem = mem->in(1);
1791         } else if (mem->is_Proj()) {
1792           stack.push(mem, mem->req());
1793           mem = mem->in(0);
1794         } else if (mem->is_SafePoint() || mem->is_MemBar()) {
1795           mem = mem->in(TypeFunc::Memory);
1796         } else if (mem->is_MergeMem()) {
1797           MergeMemNode* mm = mem->as_MergeMem();
1798           mem = mm->memory_at(_alias);
1799         } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) {
1800           assert(_alias == Compile::AliasIdxRaw, "");
1801           stack.push(mem, mem->req());
1802           mem = mem->in(MemNode::Memory);
1803         } else {
1804 #ifdef ASSERT
1805           mem->dump();
1806 #endif
1807           ShouldNotReachHere();
1808         }
1809       }
1810     } else {
1811       if (n->is_Phi()) {
1812         // Nothing
1813       } else if (!n->is_Root()) {
1814         Node* c = get_ctrl(n);
1815         _memory_nodes.map(c->_idx, n);
1816       }
1817       stack.pop();
1818     }
1819   } while (stack.is_nonempty());
1820 
1821   // Iterate over CFG nodes in rpo and propagate memory state to
1822   // compute memory state at regions, creating new phis if needed.
1823   Node_List rpo_list;
1824   visited.clear();
1825   _phase->rpo(_phase->C->root(), stack, visited, rpo_list);
1826   Node* root = rpo_list.pop();
1827   assert(root == _phase->C->root(), "");
1828 
1829   const bool trace = false;
1830 #ifdef ASSERT
1831   if (trace) {
1832     for (int i = rpo_list.size() - 1; i >= 0; i--) {
1833       Node* c = rpo_list.at(i);
1834       if (_memory_nodes[c->_idx] != nullptr) {
1835         tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump();
1836       }
1837     }
1838   }
1839 #endif
1840   uint last = _phase->C->unique();
1841 
1842 #ifdef ASSERT
1843   uint16_t max_depth = 0;
1844   for (LoopTreeIterator iter(_phase->ltree_root()); !iter.done(); iter.next()) {
1845     IdealLoopTree* lpt = iter.current();
1846     max_depth = MAX2(max_depth, lpt->_nest);
1847   }
1848 #endif
1849 
1850   bool progress = true;
1851   int iteration = 0;
1852   Node_List dead_phis;
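       // Iterate to a fixed point over the CFG in reverse post order: at regions,
       // record the unique incoming memory state, or build/reuse a memory phi when
       // the predecessors disagree (NodeSentinel marks disagreement); everywhere
       // else the state is inherited from the immediate dominator.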
1853   while (progress) {
1854     progress = false;
1855     iteration++;
1856     assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
1857     if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); }
1858 
1859     for (int i = rpo_list.size() - 1; i >= 0; i--) {
1860       Node* c = rpo_list.at(i);
1861 
1862       Node* prev_mem = _memory_nodes[c->_idx];
1863       if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
1864         Node* prev_region = regions[c->_idx];
1865         Node* unique = nullptr;
1866         for (uint j = 1; j < c->req() && unique != NodeSentinel; j++) {
1867           Node* m = _memory_nodes[c->in(j)->_idx];
1868           assert(m != nullptr || (c->is_Loop() && j == LoopNode::LoopBackControl && iteration == 1) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "expect memory state");
1869           if (m != nullptr) {
1870             if (m == prev_region && ((c->is_Loop() && j == LoopNode::LoopBackControl) || (prev_region->is_Phi() && prev_region->in(0) == c))) {
1871               assert((c->is_Loop() && j == LoopNode::LoopBackControl) || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
1872               // continue
1873             } else if (unique == nullptr) {
1874               unique = m;
1875             } else if (m == unique) {
1876               // continue
1877             } else {
1878               unique = NodeSentinel;
1879             }
1880           }
1881         }
1882         assert(unique != nullptr, "empty phi???");
1883         if (unique != NodeSentinel) {
1884           if (prev_region != nullptr && prev_region->is_Phi() && prev_region->in(0) == c) {
1885             dead_phis.push(prev_region);
1886           }
1887           regions.map(c->_idx, unique);
1888         } else {
1889           Node* phi = nullptr;
1890           if (prev_region != nullptr && prev_region->is_Phi() && prev_region->in(0) == c && prev_region->_idx >= last) {
1891             phi = prev_region;
1892             for (uint k = 1; k < c->req(); k++) {
1893               Node* m = _memory_nodes[c->in(k)->_idx];
1894               assert(m != nullptr, "expect memory state");
1895               phi->set_req(k, m);
1896             }
1897           } else {
1898             for (DUIterator_Fast jmax, j = c->fast_outs(jmax); j < jmax && phi == nullptr; j++) {
1899               Node* u = c->fast_out(j);
1900               if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
1901                   (u->adr_type() == TypePtr::BOTTOM || _phase->C->get_alias_index(u->adr_type()) == _alias)) {
1902                 phi = u;
1903                 for (uint k = 1; k < c->req() && phi != nullptr; k++) {
1904                   Node* m = _memory_nodes[c->in(k)->_idx];
1905                   assert(m != nullptr, "expect memory state");
1906                   if (u->in(k) != m) {
1907                     phi = NodeSentinel;
1908                   }
1909                 }
1910               }
1911             }
1912             if (phi == NodeSentinel) {
1913               phi = new PhiNode(c, Type::MEMORY, _phase->C->get_adr_type(_alias));
1914               for (uint k = 1; k < c->req(); k++) {
1915                 Node* m = _memory_nodes[c->in(k)->_idx];
1916                 assert(m != nullptr, "expect memory state");
1917                 phi->init_req(k, m);
1918               }
1919             }
1920           }
1921           if (phi != nullptr) {
1922             regions.map(c->_idx, phi);
1923           } else {
1924             assert(c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
1925           }
1926         }
1927         Node* current_region = regions[c->_idx];
1928         if (current_region != prev_region) {
1929           progress = true;
1930           if (prev_region == prev_mem) {
1931             _memory_nodes.map(c->_idx, current_region);
1932           }
1933         }
1934       } else if (prev_mem == nullptr || prev_mem->is_Phi() || ctrl_or_self(prev_mem) != c) {
1935         Node* m = _memory_nodes[_phase->idom(c)->_idx];
1936         assert(m != nullptr || c->Opcode() == Op_Halt, "expect memory state");
1937         if (m != prev_mem) {
1938           _memory_nodes.map(c->_idx, m);
1939           progress = true;
1940         }
1941       }
1942 #ifdef ASSERT
1943       if (trace) { tty->print("X %d", c->_idx);  _memory_nodes[c->_idx]->dump(); }
1944 #endif
1945     }
1946   }
1947 
1948   // Replace existing phi with computed memory state for that region
1949   // if different (could be a new phi or a dominating memory node if
1950   // that phi was found to be useless).
1951   while (dead_phis.size() > 0) {
1952     Node* n = dead_phis.pop();
1953     n->replace_by(_phase->C->top());
1954     n->destruct(&_phase->igvn());
1955   }
1956   for (int i = rpo_list.size() - 1; i >= 0; i--) {
1957     Node* c = rpo_list.at(i);
1958     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
1959       Node* n = regions[c->_idx];
1960       assert(n != nullptr || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
1961       if (n != nullptr && n->is_Phi() && n->_idx >= last && n->in(0) == c) {
1962         _phase->register_new_node(n, c);
1963       }
1964     }
1965   }
1966   for (int i = rpo_list.size() - 1; i >= 0; i--) {
1967     Node* c = rpo_list.at(i);
1968     if (c->is_Region() && (_include_lsm || !c->is_OuterStripMinedLoop())) {
1969       Node* n = regions[c->_idx];
1970       assert(n != nullptr || c->unique_ctrl_out()->Opcode() == Op_Halt, "expected memory state");
1971       for (DUIterator_Fast imax, i = c->fast_outs(imax); i < imax; i++) {
1972         Node* u = c->fast_out(i);
1973         if (u->is_Phi() && u->bottom_type() == Type::MEMORY &&
1974             u != n) {
1975           assert(c->unique_ctrl_out()->Opcode() != Op_Halt, "expected memory state");
1976           if (u->adr_type() == TypePtr::BOTTOM) {
1977             fix_memory_uses(u, n, n, c);
1978           } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
1979             _phase->lazy_replace(u, n);
1980             --i; --imax;
1981           }
1982         }
1983       }
1984     }
1985   }
1986 }
1987 
1988 Node* MemoryGraphFixer::collect_memory_for_infinite_loop(const Node* in) {
1989   Node* mem = nullptr;
1990   Node* head = in->in(0);
1991   assert(head->is_Region(), "unexpected infinite loop graph shape");
1992 
1993   Node* phi_mem = nullptr;
1994   for (DUIterator_Fast jmax, j = head->fast_outs(jmax); j < jmax; j++) {
1995     Node* u = head->fast_out(j);
1996     if (u->is_Phi() && u->bottom_type() == Type::MEMORY) {
1997       if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
1998         assert(phi_mem == nullptr || phi_mem->adr_type() == TypePtr::BOTTOM, "");
1999         phi_mem = u;
2000       } else if (u->adr_type() == TypePtr::BOTTOM) {
2001         assert(phi_mem == nullptr || _phase->C->get_alias_index(phi_mem->adr_type()) == _alias, "");
2002         if (phi_mem == nullptr) {
2003           phi_mem = u;
2004         }
2005       }
2006     }
2007   }
2008   if (phi_mem == nullptr) {
2009     ResourceMark rm;
2010     Node_Stack stack(0);
2011     stack.push(head, 1);
2012     do {
2013       Node* n = stack.node();
2014       uint i = stack.index();
2015       if (i >= n->req()) {
2016         stack.pop();
2017       } else {
2018         stack.set_index(i + 1);
2019         Node* c = n->in(i);
2020         assert(c != head, "should have found a safepoint on the way");
2021         if (stack.size() != 1 || _phase->is_dominator(head, c)) {
2022           for (;;) {
2023             if (c->is_Region()) {
2024               stack.push(c, 1);
2025               break;
2026             } else if (c->is_SafePoint() && !c->is_CallLeaf()) {
2027               Node* m = c->in(TypeFunc::Memory);
2028               if (m->is_MergeMem()) {
2029                 m = m->as_MergeMem()->memory_at(_alias);
2030               }
2031               assert(mem == nullptr || mem == m, "several memory states");
2032               mem = m;
2033               break;
2034             } else {
2035               assert(c != c->in(0), "");
2036               c = c->in(0);
2037             }
2038           }
2039         }
2040       }
2041     } while (stack.size() > 0);
2042     assert(mem != nullptr, "should have found safepoint");
2043   } else {
2044     mem = phi_mem;
2045   }
2046   return mem;
2047 }
2048 
2049 Node* MemoryGraphFixer::get_ctrl(Node* n) const {
2050   Node* c = _phase->get_ctrl(n);
2051   if (n->is_Proj() && n->in(0) != nullptr && n->in(0)->is_Call()) {
2052     assert(c == n->in(0), "");
2053     CallNode* call = c->as_Call();
2054     CallProjections projs;
2055     call->extract_projections(&projs, true, false);
2056     if (projs.catchall_memproj != nullptr) {
2057       if (projs.fallthrough_memproj == n) {
2058         c = projs.fallthrough_catchproj;
2059       } else {
2060         assert(projs.catchall_memproj == n, "");
2061         c = projs.catchall_catchproj;
2062       }
2063     }
2064   }
2065   return c;
2066 }
2067 
2068 Node* MemoryGraphFixer::ctrl_or_self(Node* n) const {
2069   if (_phase->has_ctrl(n)) {
2070     return get_ctrl(n);
2071   } else {
2072     assert(n->is_CFG(), "must be a CFG node");
2073     return n;
2074   }
2075 }
2076 
2077 bool MemoryGraphFixer::mem_is_valid(Node* m, Node* c) const {
2078   return m != nullptr && get_ctrl(m) == c;
2079 }
2080 
2081 Node* MemoryGraphFixer::find_mem(Node* ctrl, Node* n) const {
2082   assert(n == nullptr || _phase->ctrl_or_self(n) == ctrl, "");
2083   assert(!ctrl->is_Call() || ctrl == n, "projection expected");
2084 #ifdef ASSERT
2085   if ((ctrl->is_Proj() && ctrl->in(0)->is_Call()) ||
2086       (ctrl->is_Catch() && ctrl->in(0)->in(0)->is_Call())) {
2087     CallNode* call = ctrl->is_Proj() ? ctrl->in(0)->as_Call() : ctrl->in(0)->in(0)->as_Call();
2088     int mems = 0;
2089     for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) {
2090       Node* u = call->fast_out(i);
2091       if (u->bottom_type() == Type::MEMORY) {
2092         mems++;
2093       }
2094     }
2095     assert(mems <= 1, "No node right after call if multiple mem projections");
2096   }
2097 #endif
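       // Start from the memory state recorded at ctrl and walk up the dominator
       // tree until a state that is valid at that control is found. When a
       // consumer n is given, also step up the memory chain past states at ctrl
       // that are not above n.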
2098   Node* mem = _memory_nodes[ctrl->_idx];
2099   Node* c = ctrl;
2100   while (!mem_is_valid(mem, c) &&
2101          (!c->is_CatchProj() || mem == nullptr || c->in(0)->in(0)->in(0) != get_ctrl(mem))) {
2102     c = _phase->idom(c);
2103     mem = _memory_nodes[c->_idx];
2104   }
2105   if (n != nullptr && mem_is_valid(mem, c)) {
2106     while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
2107       mem = next_mem(mem, _alias);
2108     }
2109     if (mem->is_MergeMem()) {
2110       mem = mem->as_MergeMem()->memory_at(_alias);
2111     }
2112     if (!mem_is_valid(mem, c)) {
2113       do {
2114         c = _phase->idom(c);
2115         mem = _memory_nodes[c->_idx];
2116       } while (!mem_is_valid(mem, c) &&
2117                (!c->is_CatchProj() || mem == nullptr || c->in(0)->in(0)->in(0) != get_ctrl(mem)));
2118     }
2119   }
2120   assert(mem->bottom_type() == Type::MEMORY, "");
2121   return mem;
2122 }
2123 
2124 bool MemoryGraphFixer::has_mem_phi(Node* region) const {
2125   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
2126     Node* use = region->fast_out(i);
2127     if (use->is_Phi() && use->bottom_type() == Type::MEMORY &&
2128         (_phase->C->get_alias_index(use->adr_type()) == _alias)) {
2129       return true;
2130     }
2131   }
2132   return false;
2133 }
2134 
2135 void MemoryGraphFixer::fix_mem(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl, Node* new_mem, Unique_Node_List& uses) {
2136   assert(_phase->ctrl_or_self(new_mem) == new_ctrl, "");
2137   const bool trace = false;
2138   DEBUG_ONLY(if (trace) { tty->print("ZZZ control is"); ctrl->dump(); });
2139   DEBUG_ONLY(if (trace) { tty->print("ZZZ mem is"); mem->dump(); });
2140   GrowableArray<Node*> phis;
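       // Two cases: if the memory state at ctrl sits below the barrier's input
       // memory at the end of a chain of raw stores, splice new_mem into that
       // chain right below mem. Otherwise walk the CFG forward from new_ctrl,
       // creating memory phis at merge points. In both cases the loops over mem's
       // uses below rewire consumers to the appropriate state.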
2141   if (mem_for_ctrl != mem) {
2142     Node* old = mem_for_ctrl;
2143     Node* prev = nullptr;
2144     while (old != mem) {
2145       prev = old;
2146       if (old->is_Store() || old->is_ClearArray() || old->is_LoadStore()) {
2147         assert(_alias == Compile::AliasIdxRaw, "");
2148         old = old->in(MemNode::Memory);
2149       } else if (old->Opcode() == Op_SCMemProj) {
2150         assert(_alias == Compile::AliasIdxRaw, "");
2151         old = old->in(0);
2152       } else {
2153         ShouldNotReachHere();
2154       }
2155     }
2156     assert(prev != nullptr, "");
2157     if (new_ctrl != ctrl) {
2158       _memory_nodes.map(ctrl->_idx, mem);
2159       _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
2160     }
2161     uint input = (uint)MemNode::Memory;
2162     _phase->igvn().replace_input_of(prev, input, new_mem);
2163   } else {
2164     uses.clear();
2165     _memory_nodes.map(new_ctrl->_idx, new_mem);
2166     uses.push(new_ctrl);
2167     for (uint next = 0; next < uses.size(); next++) {
2168       Node *n = uses.at(next);
2169       assert(n->is_CFG(), "");
2170       DEBUG_ONLY(if (trace) { tty->print("ZZZ ctrl"); n->dump(); });
2171       for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2172         Node* u = n->fast_out(i);
2173         if (!u->is_Root() && u->is_CFG() && u != n) {
2174           Node* m = _memory_nodes[u->_idx];
2175           if (u->is_Region() && (!u->is_OuterStripMinedLoop() || _include_lsm) &&
2176               !has_mem_phi(u) &&
2177               u->unique_ctrl_out()->Opcode() != Op_Halt) {
2178             DEBUG_ONLY(if (trace) { tty->print("ZZZ region"); u->dump(); });
2179             DEBUG_ONLY(if (trace && m != nullptr) { tty->print("ZZZ mem"); m->dump(); });
2180 
2181             if (!mem_is_valid(m, u) || !m->is_Phi()) {
2182               bool push = true;
2183               bool create_phi = true;
2184               if (_phase->is_dominator(new_ctrl, u)) {
2185                 create_phi = false;
2186               }
2187               if (create_phi) {
2188                 Node* phi = new PhiNode(u, Type::MEMORY, _phase->C->get_adr_type(_alias));
2189                 _phase->register_new_node(phi, u);
2190                 phis.push(phi);
2191                 DEBUG_ONLY(if (trace) { tty->print("ZZZ new phi"); phi->dump(); });
2192                 if (!mem_is_valid(m, u)) {
2193                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting mem"); phi->dump(); });
2194                   _memory_nodes.map(u->_idx, phi);
2195                 } else {
2196                   DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
2197                   for (;;) {
2198                     assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
2199                     Node* next = nullptr;
2200                     if (m->is_Proj()) {
2201                       next = m->in(0);
2202                     } else {
2203                       assert(m->is_Mem() || m->is_LoadStore(), "");
2204                       assert(_alias == Compile::AliasIdxRaw, "");
2205                       next = m->in(MemNode::Memory);
2206                     }
2207                     if (_phase->get_ctrl(next) != u) {
2208                       break;
2209                     }
2210                     if (next->is_MergeMem()) {
2211                       assert(_phase->get_ctrl(next->as_MergeMem()->memory_at(_alias)) != u, "");
2212                       break;
2213                     }
2214                     if (next->is_Phi()) {
2215                       assert(next->adr_type() == TypePtr::BOTTOM && next->in(0) == u, "");
2216                       break;
2217                     }
2218                     m = next;
2219                   }
2220 
2221                   DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); });
2222                   assert(m->is_Mem() || m->is_LoadStore(), "");
2223                   uint input = (uint)MemNode::Memory;
2224                   _phase->igvn().replace_input_of(m, input, phi);
2225                   push = false;
2226                 }
2227               } else {
2228                 DEBUG_ONLY(if (trace) { tty->print("ZZZ skipping region"); u->dump(); });
2229               }
2230               if (push) {
2231                 uses.push(u);
2232               }
2233             }
2234           } else if (!mem_is_valid(m, u) &&
2235                      !(u->Opcode() == Op_CProj && u->in(0)->is_NeverBranch() && u->as_Proj()->_con == 1)) {
2236             uses.push(u);
2237           }
2238         }
2239       }
2240     }
2241     for (int i = 0; i < phis.length(); i++) {
2242       Node* n = phis.at(i);
2243       Node* r = n->in(0);
2244       DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi"); n->dump(); });
2245       for (uint j = 1; j < n->req(); j++) {
2246         Node* m = find_mem(r->in(j), nullptr);
2247         _phase->igvn().replace_input_of(n, j, m);
2248         DEBUG_ONLY(if (trace) { tty->print("ZZZ fixing new phi: %d", j); m->dump(); });
2249       }
2250     }
2251   }
2252   uint last = _phase->C->unique();
2253   MergeMemNode* mm = nullptr;
2254   int alias = _alias;
2255   DEBUG_ONLY(if (trace) { tty->print("ZZZ raw mem is"); mem->dump(); });
2256   // Process loads first to not miss an anti-dependency: if the memory
2257   // edge of a store is updated before a load is processed then an
2258   // anti-dependency may be missed.
2259   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2260     Node* u = mem->out(i);
2261     if (u->_idx < last && u->is_Load() && _phase->C->get_alias_index(u->adr_type()) == alias) {
2262       Node* m = find_mem(_phase->get_ctrl(u), u);
2263       if (m != mem) {
2264         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2265         _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2266         --i;
2267       }
2268     }
2269   }
2270   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2271     Node* u = mem->out(i);
2272     if (u->_idx < last) {
2273       if (u->is_Mem()) {
2274         if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2275           Node* m = find_mem(_phase->get_ctrl(u), u);
2276           if (m != mem) {
2277             DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2278             _phase->igvn().replace_input_of(u, MemNode::Memory, m);
2279             --i;
2280           }
2281         }
2282       } else if (u->is_MergeMem()) {
2283         MergeMemNode* u_mm = u->as_MergeMem();
2284         if (u_mm->memory_at(alias) == mem) {
2285           MergeMemNode* newmm = nullptr;
2286           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2287             Node* uu = u->fast_out(j);
2288             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2289             if (uu->is_Phi()) {
2290               assert(uu->adr_type() == TypePtr::BOTTOM, "");
2291               Node* region = uu->in(0);
2292               int nb = 0;
2293               for (uint k = 1; k < uu->req(); k++) {
2294                 if (uu->in(k) == u) {
2295                   Node* m = find_mem(region->in(k), nullptr);
2296                   if (m != mem) {
2297                     DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", k); uu->dump(); });
2298                     newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2299                     if (newmm != u) {
2300                       _phase->igvn().replace_input_of(uu, k, newmm);
2301                       nb++;
2302                       --jmax;
2303                     }
2304                   }
2305                 }
2306               }
2307               if (nb > 0) {
2308                 --j;
2309               }
2310             } else {
2311               Node* m = find_mem(_phase->ctrl_or_self(uu), uu);
2312               if (m != mem) {
2313                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); uu->dump(); });
2314                 newmm = clone_merge_mem(u, mem, m, _phase->ctrl_or_self(m), i);
2315                 if (newmm != u) {
2316                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2317                   --j, --jmax;
2318                 }
2319               }
2320             }
2321           }
2322         }
2323       } else if (u->is_Phi()) {
2324         assert(u->bottom_type() == Type::MEMORY, "what else?");
2325         if (_phase->C->get_alias_index(u->adr_type()) == alias || u->adr_type() == TypePtr::BOTTOM) {
2326           Node* region = u->in(0);
2327           bool replaced = false;
2328           for (uint j = 1; j < u->req(); j++) {
2329             if (u->in(j) == mem) {
2330               Node* m = find_mem(region->in(j), nullptr);
2331               Node* nnew = m;
2332               if (m != mem) {
2333                 if (u->adr_type() == TypePtr::BOTTOM) {
2334                   mm = allocate_merge_mem(mem, m, _phase->ctrl_or_self(m));
2335                   nnew = mm;
2336                 }
2337                 DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of phi %d", j); u->dump(); });
2338                 _phase->igvn().replace_input_of(u, j, nnew);
2339                 replaced = true;
2340               }
2341             }
2342           }
2343           if (replaced) {
2344             --i;
2345           }
2346         }
2347       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2348                  u->adr_type() == nullptr) {
2349         assert(u->adr_type() != nullptr ||
2350                u->Opcode() == Op_Rethrow ||
2351                u->Opcode() == Op_Return ||
2352                u->Opcode() == Op_SafePoint ||
2353                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2354                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2355                u->Opcode() == Op_CallLeaf, "");
2356         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2357         if (m != mem) {
2358           mm = allocate_merge_mem(mem, m, _phase->get_ctrl(m));
2359           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2360           --i;
2361         }
2362       } else if (_phase->C->get_alias_index(u->adr_type()) == alias) {
2363         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2364         if (m != mem) {
2365           DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2366           _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2367           --i;
2368         }
2369       } else if (u->adr_type() != TypePtr::BOTTOM &&
2370                  _memory_nodes[_phase->ctrl_or_self(u)->_idx] == u) {
2371         Node* m = find_mem(_phase->ctrl_or_self(u), u);
2372         assert(m != mem, "");
2373         // u is on the wrong slice...
2374         assert(u->is_ClearArray(), "");
2375         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting memory of use"); u->dump(); });
2376         _phase->igvn().replace_input_of(u, u->find_edge(mem), m);
2377         --i;
2378       }
2379     }
2380   }
2381 #ifdef ASSERT
2382   assert(new_mem->outcnt() > 0, "");
2383   for (int i = 0; i < phis.length(); i++) {
2384     Node* n = phis.at(i);
2385     assert(n->outcnt() > 0, "new phi must have uses now");
2386   }
2387 #endif
2388 }
2389 
2390 void MemoryGraphFixer::record_new_ctrl(Node* ctrl, Node* new_ctrl, Node* mem, Node* mem_for_ctrl) {
2391   if (mem_for_ctrl != mem && new_ctrl != ctrl) {
2392     _memory_nodes.map(ctrl->_idx, mem);
2393     _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
2394   }
2395 }
2396 
2397 MergeMemNode* MemoryGraphFixer::allocate_merge_mem(Node* mem, Node* rep_proj, Node* rep_ctrl) const {
2398   MergeMemNode* mm = MergeMemNode::make(mem);
2399   mm->set_memory_at(_alias, rep_proj);
2400   _phase->register_new_node(mm, rep_ctrl);
2401   return mm;
2402 }
2403 
2404 MergeMemNode* MemoryGraphFixer::clone_merge_mem(Node* u, Node* mem, Node* rep_proj, Node* rep_ctrl, DUIterator& i) const {
2405   MergeMemNode* newmm = nullptr;
2406   MergeMemNode* u_mm = u->as_MergeMem();
2407   Node* c = _phase->get_ctrl(u);
2408   if (_phase->is_dominator(c, rep_ctrl)) {
2409     c = rep_ctrl;
2410   } else {
2411     assert(_phase->is_dominator(rep_ctrl, c), "one must dominate the other");
2412   }
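       // A MergeMem with a single use can be updated in place instead of cloned.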
2413   if (u->outcnt() == 1) {
2414     if (u->req() > (uint)_alias && u->in(_alias) == mem) {
2415       _phase->igvn().replace_input_of(u, _alias, rep_proj);
2416       --i;
2417     } else {
2418       _phase->igvn().rehash_node_delayed(u);
2419       u_mm->set_memory_at(_alias, rep_proj);
2420     }
2421     newmm = u_mm;
2422     _phase->set_ctrl_and_loop(u, c);
2423   } else {
2424     // We can't simply clone u and then change one of its inputs, because
2425     // that adds and then removes an edge, which messes with the
2426     // DUIterator.
2427     newmm = MergeMemNode::make(u_mm->base_memory());
2428     for (uint j = 0; j < u->req(); j++) {
2429       if (j < newmm->req()) {
2430         if (j == (uint)_alias) {
2431           newmm->set_req(j, rep_proj);
2432         } else if (newmm->in(j) != u->in(j)) {
2433           newmm->set_req(j, u->in(j));
2434         }
2435       } else if (j == (uint)_alias) {
2436         newmm->add_req(rep_proj);
2437       } else {
2438         newmm->add_req(u->in(j));
2439       }
2440     }
2441     if ((uint)_alias >= u->req()) {
2442       newmm->set_memory_at(_alias, rep_proj);
2443     }
2444     _phase->register_new_node(newmm, c);
2445   }
2446   return newmm;
2447 }
2448 
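     // A phi on the bottom memory slice is only processed if its region has no
     // dedicated phi for our alias; when such a phi exists, it is the one that
     // carries the slice we care about.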
2449 bool MemoryGraphFixer::should_process_phi(Node* phi) const {
2450   if (phi->adr_type() == TypePtr::BOTTOM) {
2451     Node* region = phi->in(0);
2452     for (DUIterator_Fast jmax, j = region->fast_outs(jmax); j < jmax; j++) {
2453       Node* uu = region->fast_out(j);
2454       if (uu->is_Phi() && uu != phi && uu->bottom_type() == Type::MEMORY && _phase->C->get_alias_index(uu->adr_type()) == _alias) {
2455         return false;
2456       }
2457     }
2458     return true;
2459   }
2460   return _phase->C->get_alias_index(phi->adr_type()) == _alias;
2461 }
2462 
2463 void MemoryGraphFixer::fix_memory_uses(Node* mem, Node* replacement, Node* rep_proj, Node* rep_ctrl) const {
2464   uint last = _phase->C->unique();
2465   MergeMemNode* mm = nullptr;
2466   assert(mem->bottom_type() == Type::MEMORY, "");
2467   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
2468     Node* u = mem->out(i);
2469     if (u != replacement && u->_idx < last) {
2470       if (u->is_MergeMem()) {
2471         MergeMemNode* u_mm = u->as_MergeMem();
2472         if (u_mm->memory_at(_alias) == mem) {
2473           MergeMemNode* newmm = nullptr;
2474           for (DUIterator_Fast jmax, j = u->fast_outs(jmax); j < jmax; j++) {
2475             Node* uu = u->fast_out(j);
2476             assert(!uu->is_MergeMem(), "chain of MergeMems?");
2477             if (uu->is_Phi()) {
2478               if (should_process_phi(uu)) {
2479                 Node* region = uu->in(0);
2480                 int nb = 0;
2481                 for (uint k = 1; k < uu->req(); k++) {
2482                   if (uu->in(k) == u && _phase->is_dominator(rep_ctrl, region->in(k))) {
2483                     if (newmm == nullptr) {
2484                       newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2485                     }
2486                     if (newmm != u) {
2487                       _phase->igvn().replace_input_of(uu, k, newmm);
2488                       nb++;
2489                       --jmax;
2490                     }
2491                   }
2492                 }
2493                 if (nb > 0) {
2494                   --j;
2495                 }
2496               }
2497             } else {
2498               if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
2499                 if (newmm == nullptr) {
2500                   newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
2501                 }
2502                 if (newmm != u) {
2503                   _phase->igvn().replace_input_of(uu, uu->find_edge(u), newmm);
2504                   --j, --jmax;
2505                 }
2506               }
2507             }
2508           }
2509         }
2510       } else if (u->is_Phi()) {
2511         assert(u->bottom_type() == Type::MEMORY, "what else?");
2512         Node* region = u->in(0);
2513         if (should_process_phi(u)) {
2514           bool replaced = false;
2515           for (uint j = 1; j < u->req(); j++) {
2516             if (u->in(j) == mem && _phase->is_dominator(rep_ctrl, region->in(j))) {
2517               Node* nnew = rep_proj;
2518               if (u->adr_type() == TypePtr::BOTTOM) {
2519                 if (mm == nullptr) {
2520                   mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
2521                 }
2522                 nnew = mm;
2523               }
2524               _phase->igvn().replace_input_of(u, j, nnew);
2525               replaced = true;
2526             }
2527           }
2528           if (replaced) {
2529             --i;
2530           }
2531 
2532         }
2533       } else if ((u->adr_type() == TypePtr::BOTTOM && u->Opcode() != Op_StrInflatedCopy) ||
2534                  u->adr_type() == nullptr) {
2535         assert(u->adr_type() != nullptr ||
2536                u->Opcode() == Op_Rethrow ||
2537                u->Opcode() == Op_Return ||
2538                u->Opcode() == Op_SafePoint ||
2539                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
2540                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
2541                u->Opcode() == Op_CallLeaf, "%s", u->Name());
2542         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
2543           if (mm == nullptr) {
2544             mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
2545           }
2546           _phase->igvn().replace_input_of(u, u->find_edge(mem), mm);
2547           --i;
2548         }
2549       } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
2550         if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
2551           _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
2552           --i;
2553         }
2554       }
2555     }
2556   }
2557 }
2558 
2559 ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj, DecoratorSet decorators)
2560 : Node(ctrl, obj), _decorators(decorators) {
2561   ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this);
2562 }
2563 
2564 DecoratorSet ShenandoahLoadReferenceBarrierNode::decorators() const {
2565   return _decorators;
2566 }
2567 
2568 uint ShenandoahLoadReferenceBarrierNode::size_of() const {
2569   return sizeof(*this);
2570 }
2571 
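     // Only the decorators that affect barrier semantics (reference strength and
     // native vs. heap access) take part in GVN hashing and equality, so equivalent
     // barriers can be commoned while semantically different ones stay distinct.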
2572 static DecoratorSet mask_decorators(DecoratorSet decorators) {
2573   return decorators & (ON_STRONG_OOP_REF | ON_WEAK_OOP_REF | ON_PHANTOM_OOP_REF | ON_UNKNOWN_OOP_REF | IN_NATIVE);
2574 }
2575 
2576 uint ShenandoahLoadReferenceBarrierNode::hash() const {
2577   uint hash = Node::hash();
2578   hash += mask_decorators(_decorators);
2579   return hash;
2580 }
2581 
2582 bool ShenandoahLoadReferenceBarrierNode::cmp(const Node& n) const {
2583   return Node::cmp(n) && n.Opcode() == Op_ShenandoahLoadReferenceBarrier &&
2584          mask_decorators(_decorators) == mask_decorators(((const ShenandoahLoadReferenceBarrierNode&)n)._decorators);
2585 }
2586 
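     // A strong-access barrier leaves the value's type unchanged. Weak and phantom
     // accesses may additionally resolve to null when the referent has died, so their
     // type is widened with NULL_PTR. The same reasoning applies in Value() below.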
2587 const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
2588   if (in(ValueIn) == nullptr || in(ValueIn)->is_top()) {
2589     return Type::TOP;
2590   }
2591   const Type* t = in(ValueIn)->bottom_type();
2592   if (t == TypePtr::NULL_PTR) {
2593     return t;
2594   }
2595 
2596   if (ShenandoahBarrierSet::is_strong_access(decorators())) {
2597     return t;
2598   }
2599 
2600   return t->meet(TypePtr::NULL_PTR);
2601 }
2602 
2603 const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseGVN* phase) const {
2604   // If the input value is TOP, the result is TOP.
2605   const Type* t2 = phase->type(in(ValueIn));
2606   if (t2 == Type::TOP) return Type::TOP;
2607 
2608   if (t2 == TypePtr::NULL_PTR) {
2609     return t2;
2610   }
2611 
2612   if (ShenandoahBarrierSet::is_strong_access(decorators())) {
2613     return t2;
2614   }
2615 
2616   return t2->meet(TypePtr::NULL_PTR);
2617 }
2618 
2619 Node* ShenandoahLoadReferenceBarrierNode::Identity(PhaseGVN* phase) {
2620   Node* value = in(ValueIn);
2621   if (!needs_barrier(phase, value)) {
2622     return value;
2623   }
2624   return this;
2625 }
2626 
2627 bool ShenandoahLoadReferenceBarrierNode::needs_barrier(PhaseGVN* phase, Node* n) {
2628   Unique_Node_List visited;
2629   return needs_barrier_impl(phase, n, visited);
2630 }
2631 
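     // Recursively walk the def chain of the barrier's input. The barrier can be elided
     // for values that provably do not need one: null or constant oops, allocation and
     // call results, incoming parameters, and values already guarded by another barrier.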
2632 bool ShenandoahLoadReferenceBarrierNode::needs_barrier_impl(PhaseGVN* phase, Node* n, Unique_Node_List &visited) {
2633   if (n == nullptr) return false;
2634   if (visited.member(n)) {
2635     return false; // Been there.
2636   }
2637   visited.push(n);
2638 
2639   if (n->is_Allocate()) {
2640     // tty->print_cr("optimize barrier on alloc");
2641     return false;
2642   }
2643   if (n->is_Call()) {
2644     // tty->print_cr("optimize barrier on call");
2645     return false;
2646   }
2647 
2648   const Type* type = phase->type(n);
2649   if (type == Type::TOP) {
2650     return false;
2651   }
2652   if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) {
2653     // tty->print_cr("optimize barrier on null");
2654     return false;
2655   }
2656   if (type->make_oopptr() && type->make_oopptr()->const_oop() != nullptr) {
2657     // tty->print_cr("optimize barrier on constant");
2658     return false;
2659   }
2660 
2661   switch (n->Opcode()) {
2662     case Op_AddP:
2663       return true; // TODO: Can refine?
2664     case Op_LoadP:
2665     case Op_ShenandoahCompareAndExchangeN:
2666     case Op_ShenandoahCompareAndExchangeP:
2667     case Op_CompareAndExchangeN:
2668     case Op_CompareAndExchangeP:
2669     case Op_GetAndSetN:
2670     case Op_GetAndSetP:
2671       return true;
2672     case Op_Phi: {
2673       for (uint i = 1; i < n->req(); i++) {
2674         if (needs_barrier_impl(phase, n->in(i), visited)) return true;
2675       }
2676       return false;
2677     }
2678     case Op_CheckCastPP:
2679     case Op_CastPP:
2680       return needs_barrier_impl(phase, n->in(1), visited);
2681     case Op_Proj:
2682       return needs_barrier_impl(phase, n->in(0), visited);
2683     case Op_ShenandoahLoadReferenceBarrier:
2684       // tty->print_cr("optimize barrier on barrier");
2685       return false;
2686     case Op_Parm:
2687       // tty->print_cr("optimize barrier on input arg");
2688       return false;
2689     case Op_DecodeN:
2690     case Op_EncodeP:
2691       return needs_barrier_impl(phase, n->in(1), visited);
2692     case Op_LoadN:
2693       return true;
2694     case Op_CMoveN:
2695     case Op_CMoveP:
2696       return needs_barrier_impl(phase, n->in(2), visited) ||
2697              needs_barrier_impl(phase, n->in(3), visited);
2698     case Op_CreateEx:
2699       return false;
2700     default:
2701       break;
2702   }
2703 #ifdef ASSERT
2704   tty->print("need barrier on?: ");
2705   tty->print_cr("ins:");
2706   n->dump(2);
2707   tty->print_cr("outs:");
2708   n->dump(-2);
2709   ShouldNotReachHere();
2710 #endif
2711   return true;
2712 }