/*
 * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "opto/c2compiler.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/macro.hpp"
#include "opto/locknode.hpp"
#include "opto/phaseX.hpp"
#include "opto/movenode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/castnode.hpp"
#include "opto/rootnode.hpp"
#include "utilities/macros.hpp"

ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  // If ReduceAllocationMerges is enabled we might call split_through_phi during
  // split_unique_types and that will create additional nodes that need to be
  // pushed to the ConnectionGraph. The code below bumps the initial capacity of
  // _nodes by 10% to account for these additional nodes. If capacity is exceeded
  // the array will be reallocated.
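  // For example (illustrative numbers only): with C->unique() == 1000 and
  // ReduceAllocationMerges enabled, the initial capacity below is 1100.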
  _nodes(C->comp_arena(), C->do_reduce_allocation_merges() ? C->unique()*1.10 : C->unique(), C->unique(), nullptr),
  _in_worklist(C->comp_arena()),
  _next_pidx(0),
  _collecting(true),
  _verify(false),
  _compile(C),
  _igvn(igvn),
  _invocation(invocation),
  _build_iterations(0),
  _build_time(0.),
  _node_map(C->comp_arena()) {
  // Add unknown java object.
  add_java_object(C->top(), PointsToNode::GlobalEscape);
  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  set_not_scalar_replaceable(phantom_obj NOT_PRODUCT(COMMA "Phantom object"));
  // Add ConP and ConN null oop nodes
  Node* oop_null = igvn->zerocon(T_OBJECT);
  assert(oop_null->_idx < nodes_size(), "should be created already");
  add_java_object(oop_null, PointsToNode::NoEscape);
  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  set_not_scalar_replaceable(null_obj NOT_PRODUCT(COMMA "Null object"));
  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    assert(noop_null->_idx < nodes_size(), "should be created already");
    map_ideal_node(noop_null, null_obj);
  }
}

bool ConnectionGraph::has_candidates(Compile *C) {
  // EA brings benefits only when the code has allocations and/or locks which
  // are represented by ideal Macro nodes.
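  // For example (illustrative): a 'new' expression, a 'synchronized' block on
  // a locally allocated object, or a call to a boxing method such as
  // Integer.valueOf() each leave such a macro node in the graph.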
  int cnt = C->macro_count();
  for (int i = 0; i < cnt; i++) {
    Node *n = C->macro_node(i);
    if (n->is_Allocate()) {
      return true;
    }
    if (n->is_Lock()) {
      Node* obj = n->as_Lock()->obj_node()->uncast();
      if (!(obj->is_Parm() || obj->is_Con())) {
        return true;
      }
    }
    if (n->is_CallStaticJava() &&
        n->as_CallStaticJava()->is_boxing_method()) {
      return true;
    }
  }
  return false;
}

void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  Compile::TracePhase tp(Phase::_t_escapeAnalysis);
  ResourceMark rm;

  // Add ConP and ConN null oop nodes before ConnectionGraph construction
  // to create space for them in ConnectionGraph::_nodes[].
  Node* oop_null = igvn->zerocon(T_OBJECT);
  Node* noop_null = igvn->zerocon(T_NARROWOOP);
  int invocation = 0;
  if (C->congraph() != nullptr) {
    invocation = C->congraph()->_invocation + 1;
  }
  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn, invocation);
  // Perform escape analysis
  if (congraph->compute_escape()) {
    // There are non escaping objects.
    C->set_congraph(congraph);
  }
  // Cleanup.
  if (oop_null->outcnt() == 0) {
    igvn->hash_delete(oop_null);
  }
  if (noop_null->outcnt() == 0) {
    igvn->hash_delete(noop_null);
  }
}

bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;

  // Worklists used by EA.
  Unique_Node_List delayed_worklist;
  Unique_Node_List reducible_merges;
  GrowableArray<Node*> alloc_worklist;
  GrowableArray<Node*> ptr_cmp_worklist;
  GrowableArray<MemBarStoreStoreNode*> storestore_worklist;
  GrowableArray<ArrayCopyNode*>  arraycopy_worklist;
  GrowableArray<PointsToNode*>   ptnodes_worklist;
  GrowableArray<JavaObjectNode*> java_objects_worklist;
  GrowableArray<JavaObjectNode*> non_escaped_allocs_worklist;
  GrowableArray<FieldNode*>      oop_fields_worklist;
  GrowableArray<SafePointNode*>  sfn_worklist;
  GrowableArray<MergeMemNode*>   mergemem_worklist;
  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )

  { Compile::TracePhase tp(Phase::_t_connectionGraph);

  // 1. Populate Connection Graph (CG) with PointsTo nodes.
  ideal_nodes.map(C->live_nodes(), nullptr);  // preallocate space
  // Initialize worklist
  if (C->root() != nullptr) {
    ideal_nodes.push(C->root());
  }
  // Processed ideal nodes are unique on ideal_nodes list
  // but several ideal nodes are mapped to the phantom_obj.
  // To avoid duplicated entries on the following worklists
  // add the phantom_obj only once to them.
  ptnodes_worklist.append(phantom_obj);
  java_objects_worklist.append(phantom_obj);
  for( uint next = 0; next < ideal_nodes.size(); ++next ) {
    Node* n = ideal_nodes.at(next);
    if ((n->Opcode() == Op_LoadX || n->Opcode() == Op_StoreX) &&
        !n->in(MemNode::Address)->is_AddP() &&
        _igvn->type(n->in(MemNode::Address))->isa_oopptr()) {
      // Load/Store at mark word address is at offset 0 so has no AddP which confuses EA
      Node* addp = new AddPNode(n->in(MemNode::Address), n->in(MemNode::Address), _igvn->MakeConX(0));
      _igvn->register_new_node_with_optimizer(addp);
      _igvn->replace_input_of(n, MemNode::Address, addp);
      ideal_nodes.push(addp);
      _nodes.at_put_grow(addp->_idx, nullptr, nullptr);
    }
    // Create PointsTo nodes and add them to Connection Graph. Called
    // only once per ideal node since ideal_nodes is a Unique_Node_List.
    add_node_to_connection_graph(n, &delayed_worklist);
    PointsToNode* ptn = ptnode_adr(n->_idx);
    if (ptn != nullptr && ptn != phantom_obj) {
      ptnodes_worklist.append(ptn);
      if (ptn->is_JavaObject()) {
        java_objects_worklist.append(ptn->as_JavaObject());
        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
          // Only results of allocations and java static calls are interesting.
          non_escaped_allocs_worklist.append(ptn->as_JavaObject());
        }
      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        oop_fields_worklist.append(ptn->as_Field());
      }
    }
    // Collect some interesting nodes for further use.
    switch (n->Opcode()) {
      case Op_MergeMem:
        // Collect all MergeMem nodes to add memory slices for
        // scalar replaceable objects in split_unique_types().
        mergemem_worklist.append(n->as_MergeMem());
        break;
      case Op_CmpP:
      case Op_CmpN:
        // Collect compare pointers nodes.
        if (OptimizePtrCompare) {
          ptr_cmp_worklist.append(n);
        }
        break;
      case Op_MemBarStoreStore:
        // Collect all MemBarStoreStore nodes so that depending on the
        // escape status of the associated Allocate node some of them
        // may be eliminated.
        if (!UseStoreStoreForCtor || n->req() > MemBarNode::Precedent) {
          storestore_worklist.append(n->as_MemBarStoreStore());
        }
        break;
      case Op_MemBarRelease:
        if (n->req() > MemBarNode::Precedent) {
          record_for_optimizer(n);
        }
        break;
#ifdef ASSERT
      case Op_AddP:
        // Collect address nodes for graph verification.
        addp_worklist.append(n);
        break;
#endif
      case Op_ArrayCopy:
        // Keep a list of ArrayCopy nodes so that if one of its inputs is
        // non-escaping, we can record a unique type
        arraycopy_worklist.append(n->as_ArrayCopy());
        break;
      default:
        // not interested now, ignore...
        break;
    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);   // Get user
      ideal_nodes.push(m);
    }
    if (n->is_SafePoint()) {
      sfn_worklist.append(n->as_SafePoint());
    }
  }

#ifndef PRODUCT
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    tty->print("+++++ Initial worklist for ");
    _compile->method()->print_name();
    tty->print_cr(" (ea_inv=%d)", _invocation);
    for (int i = 0; i < ptnodes_worklist.length(); i++) {
      PointsToNode* ptn = ptnodes_worklist.at(i);
      ptn->dump();
    }
    tty->print_cr("+++++ Calculating escape states and scalar replaceability");
  }
#endif

  if (non_escaped_allocs_worklist.length() == 0) {
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false; // Nothing to do.
  }
  // Add final simple edges to graph.
  while(delayed_worklist.size() > 0) {
    Node* n = delayed_worklist.pop();
    add_final_edges(n);
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that no new simple edges could be created and all
    // local vars have edges.
    _verify = true;
    int ptnodes_length = ptnodes_worklist.length();
    for (int next = 0; next < ptnodes_length; ++next) {
      PointsToNode* ptn = ptnodes_worklist.at(next);
      add_final_edges(ptn->ideal_node());
      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
        ptn->dump();
        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
      }
    }
    _verify = false;
  }
#endif
  // The bytecode analyzer BCEscapeAnalyzer, used for Call node
  // processing, calls into CI to resolve symbols (types, fields, methods)
  // referenced in bytecode. During symbol resolution the VM may throw
  // an exception which CI cleans up and converts to a compilation failure.
  if (C->failing()) {
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  // 2. Finish Graph construction by propagating references to all
  //    java objects through graph.
  if (!complete_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                                 java_objects_worklist, oop_fields_worklist)) {
    // All objects escaped, or we hit the time or iteration limits.
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  // 3. Adjust scalar_replaceable state of nonescaping objects and push
  //    scalar replaceable allocations on alloc_worklist for processing
  //    in split_unique_types().
  GrowableArray<JavaObjectNode*> jobj_worklist;
  int non_escaped_length = non_escaped_allocs_worklist.length();
  bool found_nsr_alloc = false;
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
    bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      n->as_Allocate()->_is_non_escaping = noescape;
    }
    if (noescape && ptn->scalar_replaceable()) {
      adjust_scalar_replaceable_state(ptn, reducible_merges);
      if (ptn->scalar_replaceable()) {
        jobj_worklist.push(ptn);
      } else {
        found_nsr_alloc = true;
      }
    }
  }

  // Propagate NSR (Not Scalar Replaceable) state.
  if (found_nsr_alloc) {
    find_scalar_replaceable_allocs(jobj_worklist, reducible_merges);
  }

  // alloc_worklist will be processed in reverse push order.
  // Therefore the reducible Phis will be processed last, which is what we
  // want because by then the scalarizable inputs of the merge will already
  // have a unique instance type.
  for (uint i = 0; i < reducible_merges.size(); i++ ) {
    Node* n = reducible_merges.at(i);
    alloc_worklist.append(n);
  }

  for (int next = 0; next < jobj_worklist.length(); ++next) {
    JavaObjectNode* jobj = jobj_worklist.at(next);
    if (jobj->scalar_replaceable()) {
      alloc_worklist.append(jobj->ideal_node());
    }
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that graph is complete - no new edges could be added or needed.
    verify_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                            java_objects_worklist, addp_worklist);
  }
  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
         null_obj->edge_count() == 0 &&
         !null_obj->arraycopy_src() &&
         !null_obj->arraycopy_dst(), "sanity");
#endif

  _collecting = false;

  } // TracePhase t3("connectionGraph")

  // 4. Optimize ideal graph based on EA information.
  bool has_non_escaping_obj = (non_escaped_allocs_worklist.length() > 0);
  if (has_non_escaping_obj) {
    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
  }

#ifndef PRODUCT
  if (PrintEscapeAnalysis) {
    dump(ptnodes_worklist); // Dump ConnectionGraph
  }
#endif

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    int alloc_length = alloc_worklist.length();
    for (int next = 0; next < alloc_length; ++next) {
      Node* n = alloc_worklist.at(next);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
    }
  }

  if (VerifyReduceAllocationMerges) {
    for (uint i = 0; i < reducible_merges.size(); i++ ) {
      Node* n = reducible_merges.at(i);
      if (!can_reduce_phi(n->as_Phi())) {
        TraceReduceAllocationMerges = true;
        n->dump(2);
        n->dump(-2);
        assert(can_reduce_phi(n->as_Phi()), "Sanity: previous reducible Phi is no longer reducible before SUT.");
      }
    }
  }
#endif

  // 5. Separate memory graph for scalar replaceable allocations.
  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
  if (has_scalar_replaceable_candidates && EliminateAllocations) {
    assert(C->do_aliasing(), "Aliasing should be enabled");
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist, arraycopy_worklist, mergemem_worklist, reducible_merges);
    if (C->failing()) {
      NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
      return false;
    }
    C->print_method(PHASE_AFTER_EA, 2);

#ifdef ASSERT
  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if (!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if(!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    }
    tty->cr();
#endif
  }

  // 6. Reduce allocation merges used as debug information. This is done after
  // split_unique_types because the methods used to create SafePointScalarObject
  // need to traverse the memory graph to find values for object fields. We also
  // set to null the scalarized inputs of reducible Phis so that the Allocates
  // they point to can later be scalar replaced.
  bool delay = _igvn->delay_transform();
  _igvn->set_delay_transform(true);
  for (uint i = 0; i < reducible_merges.size(); i++) {
    Node* n = reducible_merges.at(i);
    if (n->outcnt() > 0) {
      if (!reduce_phi_on_safepoints(n->as_Phi())) {
        NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
        C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
        return false;
      }

      // Now we set the scalar replaceable inputs of ophi to null, which is
      // the last piece that would prevent it from being scalar replaceable.
      reset_scalar_replaceable_entries(n->as_Phi());
    }
  }
  _igvn->set_delay_transform(delay);

  // Annotate at safepoints if they have <= ArgEscape objects in their scope and at
  // java calls if they pass ArgEscape objects as parameters.
  if (has_non_escaping_obj &&
      (C->env()->should_retain_local_variables() ||
       C->env()->jvmti_can_get_owned_monitor_info() ||
       C->env()->jvmti_can_walk_any_space() ||
       DeoptimizeObjectsALot)) {
    int sfn_length = sfn_worklist.length();
    for (int next = 0; next < sfn_length; next++) {
      SafePointNode* sfn = sfn_worklist.at(next);
      sfn->set_has_ea_local_in_scope(has_ea_local_in_scope(sfn));
      if (sfn->is_CallJava()) {
        CallJavaNode* call = sfn->as_CallJava();
        call->set_arg_escape(has_arg_escape(call));
      }
    }
  }

  NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
  return has_non_escaping_obj;
}

// Check if it's profitable to reduce the Phi passed as parameter.  Returns true
// if at least one scalar replaceable allocation participates in the merge.
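//
// For example (an illustrative sketch): a Phi merging the result of a
// 'new Point()' that PhaseMacroExpand::can_eliminate_allocation() accepts
// with an incoming method parameter qualifies, because at least one input
// is a scalar replaceable allocation.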
bool ConnectionGraph::can_reduce_phi_check_inputs(PhiNode* ophi) const {
  bool found_sr_allocate = false;

  for (uint i = 1; i < ophi->req(); i++) {
    JavaObjectNode* ptn = unique_java_object(ophi->in(i));
    if (ptn != nullptr && ptn->scalar_replaceable()) {
      AllocateNode* alloc = ptn->ideal_node()->as_Allocate();

      // Don't handle arrays.
      if (alloc->Opcode() != Op_Allocate) {
        assert(alloc->Opcode() == Op_AllocateArray, "Unexpected type of allocation.");
        continue;
      }

      if (PhaseMacroExpand::can_eliminate_allocation(_igvn, alloc, nullptr)) {
        found_sr_allocate = true;
      } else {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("%dth input of Phi %d is SR but can't be eliminated.", i, ophi->_idx);)
        ptn->set_scalar_replaceable(false);
      }
    }
  }

  NOT_PRODUCT(if (TraceReduceAllocationMerges && !found_sr_allocate) tty->print_cr("Can NOT reduce Phi %d on invocation %d. No SR Allocate as input.", ophi->_idx, _invocation);)
  return found_sr_allocate;
}

// We can reduce the Cmp if it's a comparison between the Phi and a constant.
// I require the 'other' input to be a constant so that I can move the Cmp
// around safely.
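//
// For example (illustrative): a null check such as "if (p == null)" lowers to
// Bool(CmpP(phi, null)), which satisfies both constraints as long as the Bool
// is the only user of the CmpP.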
bool ConnectionGraph::can_reduce_cmp(Node* n, Node* cmp) const {
  assert(cmp->Opcode() == Op_CmpP || cmp->Opcode() == Op_CmpN, "not expected node: %s", cmp->Name());
  Node* left = cmp->in(1);
  Node* right = cmp->in(2);

  return (left == n || right == n) &&
         (left->is_Con() || right->is_Con()) &&
         cmp->outcnt() == 1;
}

// We are going to check if any of the SafePointScalarMerge entries
// in the SafePoint reference the Phi that we are checking.
bool ConnectionGraph::has_been_reduced(PhiNode* n, SafePointNode* sfpt) const {
  JVMState *jvms = sfpt->jvms();

  for (uint i = jvms->debug_start(); i < jvms->debug_end(); i++) {
    Node* sfpt_in = sfpt->in(i);
    if (sfpt_in->is_SafePointScalarMerge()) {
      SafePointScalarMergeNode* smerge = sfpt_in->as_SafePointScalarMerge();
      Node* nsr_ptr = sfpt->in(smerge->merge_pointer_idx(jvms));
      if (nsr_ptr == n) {
        return true;
      }
    }
  }

  return false;
}

// Check if we are able to untangle the merge. The following patterns are
// supported:
//  - Phi -> SafePoints
//  - Phi -> CmpP/N
//  - Phi -> AddP -> Load
//  - Phi -> CastPP -> SafePoints
//  - Phi -> CastPP -> AddP -> Load
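//
// An illustrative Java-level shape that produces the "Phi -> AddP -> Load"
// pattern above (a sketch, not taken from any particular program):
//
//   Point p = cond ? new Point() : cachedPoint;  // Phi merging two oops
//   int x = p.x;                                 // Phi -> AddP -> LoadI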
bool ConnectionGraph::can_reduce_check_users(Node* n, uint nesting) const {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* use = n->fast_out(i);

    if (use->is_SafePoint()) {
      if (use->is_Call() && use->as_Call()->has_non_debug_use(n)) {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Call has non_debug_use().", n->_idx, _invocation);)
        return false;
      } else if (has_been_reduced(n->is_Phi() ? n->as_Phi() : n->as_CastPP()->in(1)->as_Phi(), use->as_SafePoint())) {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. It has already been reduced.", n->_idx, _invocation);)
        return false;
      }
    } else if (use->is_AddP()) {
      Node* addp = use;
      for (DUIterator_Fast jmax, j = addp->fast_outs(jmax); j < jmax; j++) {
        Node* use_use = addp->fast_out(j);
        const Type* load_type = _igvn->type(use_use);

        if (!use_use->is_Load() || !use_use->as_Load()->can_split_through_phi_base(_igvn)) {
          NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. AddP user isn't a [splittable] Load(): %s", n->_idx, _invocation, use_use->Name());)
          return false;
        } else if (load_type->isa_narrowklass() || load_type->isa_klassptr()) {
          NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. [Narrow] Klass Load: %s", n->_idx, _invocation, use_use->Name());)
          return false;
        }
      }
    } else if (nesting > 0) {
      NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Unsupported user %s at nesting level %d.", n->_idx, _invocation, use->Name(), nesting);)
      return false;
    } else if (use->is_CastPP()) {
      const Type* cast_t = _igvn->type(use);
      if (cast_t == nullptr || cast_t->make_ptr()->isa_instptr() == nullptr) {
#ifndef PRODUCT
        if (TraceReduceAllocationMerges) {
          tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP is not to an instance.", n->_idx, _invocation);
          use->dump();
        }
#endif
        return false;
      }

      bool is_trivial_control = use->in(0) == nullptr || use->in(0) == n->in(0);
      if (!is_trivial_control) {
        // If it's not a trivial control then we check if we can reduce the
        // CmpP/N used by the If controlling the cast.
        if (use->in(0)->is_IfTrue() || use->in(0)->is_IfFalse()) {
          Node* iff = use->in(0)->in(0);
          // We may have an OpaqueNotNull node between If and Bool nodes. But we could also have a sub class of IfNode,
          // for example, an OuterStripMinedLoopEnd or a Parse Predicate. Bail out in all these cases.
          bool can_reduce = (iff->Opcode() == Op_If) && iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp();
          if (can_reduce) {
            Node* iff_cmp = iff->in(1)->in(1);
            int opc = iff_cmp->Opcode();
            can_reduce = (opc == Op_CmpP || opc == Op_CmpN) && can_reduce_cmp(n, iff_cmp);
          }
          if (!can_reduce) {
#ifndef PRODUCT
            if (TraceReduceAllocationMerges) {
              tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP %d doesn't have simple control.", n->_idx, _invocation, use->_idx);
              n->dump(5);
            }
#endif
            return false;
          }
        }
      }

      if (!can_reduce_check_users(use, nesting+1)) {
        return false;
      }
    } else if (use->Opcode() == Op_CmpP || use->Opcode() == Op_CmpN) {
      if (!can_reduce_cmp(n, use)) {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. CmpP/N %d isn't reducible.", n->_idx, _invocation, use->_idx);)
        return false;
      }
    } else {
      NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. One of the uses is: %d %s", n->_idx, _invocation, use->_idx, use->Name());)
      return false;
    }
  }

  return true;
}

// Returns true if: 1) It's profitable to reduce the merge, and 2) The Phi is
// only used in certain code shapes. Check the comments in
// 'can_reduce_phi_check_inputs' and 'can_reduce_check_users' for more
// details.
bool ConnectionGraph::can_reduce_phi(PhiNode* ophi) const {
  // If there was an error attempting to reduce allocation merges for this
  // method we might have disabled the compilation and be retrying with RAM
  // disabled.
  if (!_compile->do_reduce_allocation_merges() || ophi->region()->Opcode() != Op_Region) {
    return false;
  }

  const Type* phi_t = _igvn->type(ophi);
  if (phi_t == nullptr ||
      phi_t->make_ptr() == nullptr ||
      phi_t->make_ptr()->isa_aryptr() != nullptr) {
    return false;
  }

  if (!can_reduce_phi_check_inputs(ophi) || !can_reduce_check_users(ophi, /* nesting: */ 0)) {
    return false;
  }

  NOT_PRODUCT(if (TraceReduceAllocationMerges) { tty->print_cr("Can reduce Phi %d during invocation %d: ", ophi->_idx, _invocation); })
  return true;
}

// This method will return a CmpP/N that we need to use on the If controlling a
// CastPP after it was split. This method is only called on bases that are
// nullable, therefore we always need a controlling If for the split CastPP.
//
// 'curr_ctrl' is the control of the CastPP that we want to split through phi.
// If the CastPP currently doesn't have a control then the CmpP/N will be
// against the null constant, otherwise it will be against the constant input of
// the existing CmpP/N. It's guaranteed that there will be a CmpP/N in the latter
// case because we have constraints on it and because the CastPP has a control
// input.
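//
// For example (illustrative): if the CastPP has no control, this returns
// CmpP/N(base, null); if it is controlled by If(Bool(CmpP/N(phi, C))) for
// some constant C, this returns CmpP/N(base, C).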
Node* ConnectionGraph::specialize_cmp(Node* base, Node* curr_ctrl) {
  const Type* t = base->bottom_type();
  Node* con = nullptr;

  if (curr_ctrl == nullptr || curr_ctrl->is_Region()) {
    con = _igvn->zerocon(t->basic_type());
  } else {
    // can_reduce_check_users() verified graph: true/false -> if -> bool -> cmp
    assert(curr_ctrl->in(0)->Opcode() == Op_If, "unexpected node %s", curr_ctrl->in(0)->Name());
    Node* bol = curr_ctrl->in(0)->in(1);
    assert(bol->is_Bool(), "unexpected node %s", bol->Name());
    Node* curr_cmp = bol->in(1);
    assert(curr_cmp->Opcode() == Op_CmpP || curr_cmp->Opcode() == Op_CmpN, "unexpected node %s", curr_cmp->Name());
    con = curr_cmp->in(1)->is_Con() ? curr_cmp->in(1) : curr_cmp->in(2);
  }

  return CmpNode::make(base, con, t->basic_type());
}

// This method 'specializes' the CastPP passed as parameter to the base passed
// as parameter. Note that the existing CastPP input is a Phi. "Specialize"
// means that the CastPP now will be specific for a given base instead of a Phi.
// An If-Then-Else-Region block is inserted to control the CastPP. The control
// of the CastPP is a copy of the current one (if there is one) or a check
// against null.
//
// Before:
//
//    C1     C2  ... Cn
//     \      |      /
//      \     |     /
//       \    |    /
//        \   |   /
//         \  |  /
//          \ | /
//           \|/
//          Region     B1      B2  ... Bn
//            |          \      |      /
//            |           \     |     /
//            |            \    |    /
//            |             \   |   /
//            |              \  |  /
//            |               \ | /
//            ---------------> Phi
//                              |
//                      X       |
//                      |       |
//                      |       |
//                      ------> CastPP
//
// After (only partial illustration; base = B2, current_control = C2):
//
//                      C2
//                      |
//                      If
//                     / \
//                    /   \
//                   T     F
//                  /\     /
//                 /  \   /
//                /    \ /
//      C1    CastPP   Reg        Cn
//       |              |          |
//       |              |          |
//       |              |          |
//       -------------- | ----------
//                    | | |
//                    Region
//
Node* ConnectionGraph::specialize_castpp(Node* castpp, Node* base, Node* current_control) {
  Node* control_successor  = current_control->unique_ctrl_out();
  Node* cmp                = _igvn->transform(specialize_cmp(base, castpp->in(0)));
  Node* bol                = _igvn->transform(new BoolNode(cmp, BoolTest::ne));
  IfNode* if_ne            = _igvn->transform(new IfNode(current_control, bol, PROB_MIN, COUNT_UNKNOWN))->as_If();
  Node* not_eq_control     = _igvn->transform(new IfTrueNode(if_ne));
  Node* yes_eq_control     = _igvn->transform(new IfFalseNode(if_ne));
  Node* end_region         = _igvn->transform(new RegionNode(3));

  // Insert the new if-else-region block into the graph
  end_region->set_req(1, not_eq_control);
  end_region->set_req(2, yes_eq_control);
  control_successor->replace_edge(current_control, end_region, _igvn);

  _igvn->_worklist.push(current_control);
  _igvn->_worklist.push(control_successor);

  return _igvn->transform(ConstraintCastNode::make_cast_for_type(not_eq_control, base, _igvn->type(castpp), ConstraintCastNode::UnconditionalDependency, nullptr));
}

Node* ConnectionGraph::split_castpp_load_through_phi(Node* curr_addp, Node* curr_load, Node* region, GrowableArray<Node*>* bases_for_loads, GrowableArray<Node *>  &alloc_worklist) {
  const Type* load_type = _igvn->type(curr_load);
  Node* nsr_value = _igvn->zerocon(load_type->basic_type());
  Node* memory = curr_load->in(MemNode::Memory);

  // The data_phi merging the loads needs to be nullable if
  // we are loading pointers.
  if (load_type->make_ptr() != nullptr) {
    if (load_type->isa_narrowoop()) {
      load_type = load_type->meet(TypeNarrowOop::NULL_PTR);
    } else if (load_type->isa_ptr()) {
      load_type = load_type->meet(TypePtr::NULL_PTR);
    } else {
      assert(false, "Unexpected load ptr type.");
    }
  }

  Node* data_phi = PhiNode::make(region, nsr_value, load_type);

  for (int i = 1; i < bases_for_loads->length(); i++) {
    Node* base = bases_for_loads->at(i);
    Node* cmp_region = nullptr;
    if (base != nullptr) {
      if (base->is_CFG()) { // means that we added a CastPP as child of this CFG node
        cmp_region = base->unique_ctrl_out_or_null();
        assert(cmp_region != nullptr, "There should be.");
        base = base->find_out_with(Op_CastPP);
      }

      Node* addr = _igvn->transform(new AddPNode(base, base, curr_addp->in(AddPNode::Offset)));
      Node* mem = (memory->is_Phi() && (memory->in(0) == region)) ? memory->in(i) : memory;
      Node* load = curr_load->clone();
      load->set_req(0, nullptr);
      load->set_req(1, mem);
      load->set_req(2, addr);

      if (cmp_region != nullptr) { // see comment on previous if
        Node* intermediate_phi = PhiNode::make(cmp_region, nsr_value, load_type);
        intermediate_phi->set_req(1, _igvn->transform(load));
        load = intermediate_phi;
      }

      data_phi->set_req(i, _igvn->transform(load));
    } else {
      // Just use the default, which is already in phi
    }
  }

  // Takes care of updating CG and split_unique_types worklists due
  // to cloned AddP->Load.
  updates_after_load_split(data_phi, curr_load, alloc_worklist);

  return _igvn->transform(data_phi);
}

// This method only reduces CastPP field loads; SafePoints are handled
// separately. The idea here is basically to clone the CastPP and place copies
// on each input of the Phi, including non-scalar replaceable inputs.
// Experimentation shows that the resulting IR graph is simpler that way than if
// we just split the cast through scalar-replaceable inputs.
//
// The reduction process requires that CastPP's control be one of:
//  1) no control,
//  2) the same region as Ophi, or
//  3) an IfTrue/IfFalse coming from a CmpP/N between Ophi and a constant.
//
// After splitting the CastPP we'll put it under an If-Then-Else-Region control
// flow. If the CastPP originally had an IfTrue/False control input then we'll
// use a similar CmpP/N to control the new If-Then-Else-Region. Otherwise, we'll
// just use a CmpP/N against the null constant.
//
// The If-Then-Else-Region isn't always needed. For instance, if the input to
// the split cast was not nullable (or if it was the null constant) then we
// don't need (and shouldn't) use a CastPP at all.
//
// After the casts are split, we'll split the AddP->Loads through the Phi and
// connect them to the just split CastPPs.
//
// Before (CastPP control is same as Phi):
//
//          Region     Allocate   Null    Call
//            |             \      |      /
//            |              \     |     /
//            |               \    |    /
//            |                \   |   /
//            |                 \  |  /
//            |                  \ | /
//            ------------------> Phi            # Oop Phi
//            |                    |
//            |                    |
//            |                    |
//            |                    |
//            ----------------> CastPP
//                                 |
//                               AddP
//                                 |
//                               Load
//
// After (Very much simplified):
//
//                         Call  Null
//                            \  /
//                            CmpP
//                             |
//                           Bool#NE
//                             |
//                             If
//                            / \
//                           T   F
//                          / \ /
//                         /   R
//                     CastPP  |
//                       |     |
//                     AddP    |
//                       |     |
//                     Load    |
//                         \   |   0
//            Allocate      \  |  /
//                \          \ | /
//               AddP         Phi
//                  \         /
//                 Load      /
//                    \  0  /
//                     \ | /
//                      \|/
//                      Phi        # "Field" Phi
//
void ConnectionGraph::reduce_phi_on_castpp_field_load(Node* curr_castpp, GrowableArray<Node *>  &alloc_worklist, GrowableArray<Node *>  &memnode_worklist) {
  Node* ophi = curr_castpp->in(1);
  assert(ophi->is_Phi(), "Expected this to be a Phi node.");

  // Identify which base should be used for AddP->Load later when splitting the
  // CastPP->Loads through ophi. Three kinds of values may be stored in this
  // array, depending on the nullability status of the corresponding input in
  // ophi.
  //
  //  - nullptr:    Meaning that the base is actually the null constant and therefore
  //                we won't try to load from it.
  //
  //  - CFG Node:   Meaning that the base is a CastPP that was specialized for
  //                this input of Ophi. I.e., we added an If->Then->Else-Region
  //                that will 'activate' the CastPP only when the input is not null.
  //
  //  - Other Node: Meaning that the base is not nullable and therefore we'll try
  //                to load directly from it.
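  //
  // For example (an illustrative sketch): if 'ophi' is
  // Phi(Region, param, alloc, null), where 'param' is nullable and 'alloc' is
  // provably non-null, the array will hold the control of the specialized
  // CastPP at index 1, 'alloc' at index 2, and nullptr at index 3.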
  GrowableArray<Node*> bases_for_loads(ophi->req(), ophi->req(), nullptr);

  for (uint i = 1; i < ophi->req(); i++) {
    Node* base = ophi->in(i);
    const Type* base_t = _igvn->type(base);

    if (base_t->maybe_null()) {
      if (base->is_Con()) {
        // Nothing to do as bases_for_loads[i] is already null
      } else {
        Node* new_castpp = specialize_castpp(curr_castpp, base, ophi->in(0)->in(i));
        bases_for_loads.at_put(i, new_castpp->in(0)); // Use the ctrl of the new node just as a flag
      }
    } else {
      bases_for_loads.at_put(i, base);
    }
  }

  // Now let's split the CastPP->Loads through the Phi
  for (int i = curr_castpp->outcnt()-1; i >= 0;) {
    Node* use = curr_castpp->raw_out(i);
    if (use->is_AddP()) {
      for (int j = use->outcnt()-1; j >= 0;) {
        Node* use_use = use->raw_out(j);
        assert(use_use->is_Load(), "Expected this to be a Load node.");

        // We can't make an unconditional load from a nullable input. The
        // 'split_castpp_load_through_phi' method will add an
        // 'If-Then-Else-Region' around nullable bases and only load from them
        // when the input is not null.
        Node* phi = split_castpp_load_through_phi(use, use_use, ophi->in(0), &bases_for_loads, alloc_worklist);
        _igvn->replace_node(use_use, phi);

        --j;
        j = MIN2(j, (int)use->outcnt()-1);
      }

      _igvn->remove_dead_node(use);
    }
    --i;
    i = MIN2(i, (int)curr_castpp->outcnt()-1);
  }
}

// This method splits a given CmpP/N through the Phi used in one of its inputs.
// As a result we convert a comparison with a pointer to a comparison with an
// integer.
// The only requirement is that one of the inputs of the CmpP/N must be a Phi
// while the other must be a constant.
// The splitting process is basically just cloning the CmpP/N above the input
// Phi. However, some (most) of the cloned CmpP/Ns won't be required because we
// can prove at compile time the result of the comparison.
//
// Before:
//
//             in1    in2 ... inN
//              \      |      /
//               \     |     /
//                \    |    /
//                 \   |   /
//                  \  |  /
//                   \ | /
//                    Phi
//                     |   Other
//                     |    /
//                     |   /
//                     |  /
//                    CmpP/N
//
// After:
//
//        in1  Other   in2 Other  inN  Other
//         |    |      |   |      |    |
//         \    |      |   |      |    |
//          \  /       |   /      |    /
//          CmpP/N    CmpP/N     CmpP/N
//          Bool      Bool       Bool
//            \        |        /
//             \       |       /
//              \      |      /
//               \     |     /
//                \    |    /
//                 \   |   /
//                  \  |  /
//                   \ | /
//                    Phi
//                     |
//                     |   Zero
//                     |    /
//                     |   /
//                     |  /
//                     CmpI
//
//
void ConnectionGraph::reduce_phi_on_cmp(Node* cmp) {
  Node* ophi = cmp->in(1)->is_Con() ? cmp->in(2) : cmp->in(1);
  assert(ophi->is_Phi(), "Expected this to be a Phi node.");

  Node* other = cmp->in(1)->is_Con() ? cmp->in(1) : cmp->in(2);
  Node* zero = _igvn->intcon(0);
  Node* one = _igvn->intcon(1);
  BoolTest::mask mask = cmp->unique_out()->as_Bool()->_test._test;

  // This Phi will merge the result of the Cmps split through the Phi
  Node* res_phi = PhiNode::make(ophi->in(0), zero, TypeInt::INT);

  for (uint i=1; i<ophi->req(); i++) {
    Node* ophi_input = ophi->in(i);
    Node* res_phi_input = nullptr;

    const TypeInt* tcmp = optimize_ptr_compare(ophi_input, other);
    if (tcmp->singleton()) {
      if ((mask == BoolTest::mask::eq && tcmp == TypeInt::CC_EQ) ||
          (mask == BoolTest::mask::ne && tcmp == TypeInt::CC_GT)) {
        res_phi_input = one;
      } else {
        res_phi_input = zero;
      }
    } else {
      Node* ncmp = _igvn->transform(cmp->clone());
      ncmp->set_req(1, ophi_input);
      ncmp->set_req(2, other);
      Node* bol = _igvn->transform(new BoolNode(ncmp, mask));
      res_phi_input = bol->as_Bool()->as_int_value(_igvn);
    }

    res_phi->set_req(i, res_phi_input);
  }

  // This CmpI checks whether the output of 'res_phi' is TRUE, according to 'mask'.
  Node* new_cmp = _igvn->transform(new CmpINode(_igvn->transform(res_phi), (mask == BoolTest::mask::eq) ? one : zero));
  _igvn->replace_node(cmp, new_cmp);
}

// Push the newly created AddP on alloc_worklist and patch
// the connection graph. Note that the changes in the CG below
// won't affect the ES of objects since the new nodes have the
// same status as the old ones.
void ConnectionGraph::updates_after_load_split(Node* data_phi, Node* previous_load, GrowableArray<Node *>  &alloc_worklist) {
  assert(data_phi != nullptr, "Output of split_through_phi is null.");
  assert(data_phi != previous_load, "Output of split_through_phi is same as input.");
  assert(data_phi->is_Phi(), "Output of split_through_phi isn't a Phi.");

  if (data_phi == nullptr || !data_phi->is_Phi()) {
    // Make this a retry?
    return;
  }

  Node* previous_addp = previous_load->in(MemNode::Address);
  FieldNode* fn = ptnode_adr(previous_addp->_idx)->as_Field();
  for (uint i = 1; i < data_phi->req(); i++) {
    Node* new_load = data_phi->in(i);

    if (new_load->is_Phi()) {
      // new_load is currently the "intermediate_phi" from a specialized
      // CastPP.
      new_load = new_load->in(1);
    }

    // "new_load" might actually be a constant, parameter, etc.
    if (new_load->is_Load()) {
      Node* new_addp = new_load->in(MemNode::Address);
      Node* base = get_addp_base(new_addp);

      // The base might not be something that we can create a unique
      // type for. If that's the case we are done with that input.
      PointsToNode* jobj_ptn = unique_java_object(base);
      if (jobj_ptn == nullptr || !jobj_ptn->scalar_replaceable()) {
        continue;
      }

      // Push to alloc_worklist since the base has a unique type
      alloc_worklist.append_if_missing(new_addp);

      // Now let's add the node to the connection graph
      _nodes.at_grow(new_addp->_idx, nullptr);
      add_field(new_addp, fn->escape_state(), fn->offset());
      add_base(ptnode_adr(new_addp->_idx)->as_Field(), ptnode_adr(base->_idx));

      // If the load doesn't load an object then it won't be
      // part of the connection graph
      PointsToNode* curr_load_ptn = ptnode_adr(previous_load->_idx);
      if (curr_load_ptn != nullptr) {
        _nodes.at_grow(new_load->_idx, nullptr);
        add_local_var(new_load, curr_load_ptn->escape_state());
        add_edge(ptnode_adr(new_load->_idx), ptnode_adr(new_addp->_idx)->as_Field());
      }
    }
  }
}

void ConnectionGraph::reduce_phi_on_field_access(Node* previous_addp, GrowableArray<Node *>  &alloc_worklist) {
  // We'll pass this to 'split_through_phi' so that it'll do the split even
  // though the load doesn't have a unique instance type.
  bool ignore_missing_instance_id = true;

  // All AddPs are present in the connection graph
  FieldNode* fn = ptnode_adr(previous_addp->_idx)->as_Field();

  // Iterate over AddP looking for a Load
  for (int k = previous_addp->outcnt()-1; k >= 0;) {
    Node* previous_load = previous_addp->raw_out(k);
    if (previous_load->is_Load()) {
      Node* data_phi = previous_load->as_Load()->split_through_phi(_igvn, ignore_missing_instance_id);

      // Takes care of updating CG and split_unique_types worklists due to cloned
      // AddP->Load.
      updates_after_load_split(data_phi, previous_load, alloc_worklist);

      _igvn->replace_node(previous_load, data_phi);
    }
    --k;
    k = MIN2(k, (int)previous_addp->outcnt()-1);
  }

  // Remove the old AddP from the processing list because it's dead now
  assert(previous_addp->outcnt() == 0, "AddP should be dead now.");
  alloc_worklist.remove_if_existing(previous_addp);
}

// Create a 'selector' Phi based on the inputs of 'ophi'. If index 'i' of the
// selector is:
//    -> a '-1' constant, the i'th input of the original Phi is NSR.
//    -> an 'x' constant >= 0, the i'th input of the original Phi will be SR and
//       the info about the scalarized object will be at index x of ObjectMergeValue::possible_objects
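//
// For example (illustrative): if 'ophi' merges {SR alloc, NSR pointer, SR alloc},
// the selector Phi merges the constants {0, -1, 1} and
// ObjectMergeValue::possible_objects will describe the two SR allocations.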
PhiNode* ConnectionGraph::create_selector(PhiNode* ophi) const {
  Node* minus_one = _igvn->register_new_node_with_optimizer(ConINode::make(-1));
  Node* selector  = _igvn->register_new_node_with_optimizer(PhiNode::make(ophi->region(), minus_one, TypeInt::INT));
  uint number_of_sr_objects = 0;
  for (uint i = 1; i < ophi->req(); i++) {
    Node* base = ophi->in(i);
    JavaObjectNode* ptn = unique_java_object(base);

    if (ptn != nullptr && ptn->scalar_replaceable()) {
      Node* sr_obj_idx = _igvn->register_new_node_with_optimizer(ConINode::make(number_of_sr_objects));
      selector->set_req(i, sr_obj_idx);
      number_of_sr_objects++;
    }
  }

  return selector->as_Phi();
}

// Returns true if the AddP node 'n' has at least one base that is a reducible
// merge. If the base is a CastPP/CheckCastPP then the input of the cast is
// checked instead.
bool ConnectionGraph::has_reducible_merge_base(AddPNode* n, Unique_Node_List &reducible_merges) {
  PointsToNode* ptn = ptnode_adr(n->_idx);
  if (ptn == nullptr || !ptn->is_Field() || ptn->as_Field()->base_count() < 2) {
    return false;
  }

  for (BaseIterator i(ptn->as_Field()); i.has_next(); i.next()) {
    Node* base = i.get()->ideal_node();

    if (reducible_merges.member(base)) {
      return true;
    }

    if (base->is_CastPP() || base->is_CheckCastPP()) {
      base = base->in(1);
      if (reducible_merges.member(base)) {
        return true;
      }
    }
  }

  return false;
}

// This method will call its helper method to reduce SafePoint nodes that use
// 'ophi' or a casted version of 'ophi'. All SafePoint nodes using the same
// "version" of Phi use the same debug information (regarding the Phi).
// Therefore, I collect all safepoints and patch them all at once.
//
// The safepoints using the Phi node have to be processed before safepoints of
// CastPP nodes. The reason is, when reducing a CastPP we add a reference (the
// NSR merge pointer) to the input of the CastPP (i.e., the Phi) in the
// safepoint. If we process CastPP's safepoints before Phi's safepoints the
// algorithm that processes Phi's safepoints will think that the added Phi
// reference is a regular reference.
bool ConnectionGraph::reduce_phi_on_safepoints(PhiNode* ophi) {
  PhiNode* selector = create_selector(ophi);
  Unique_Node_List safepoints;
  Unique_Node_List casts;

  // Just collect the users of the Phis for later processing
  // in the needed order.
  for (uint i = 0; i < ophi->outcnt(); i++) {
    Node* use = ophi->raw_out(i);
    if (use->is_SafePoint()) {
      safepoints.push(use);
    } else if (use->is_CastPP()) {
      casts.push(use);
    } else {
      assert(use->outcnt() == 0, "Only CastPP & SafePoint users should be left.");
    }
  }

  // Need to process safepoints using the Phi first
  if (!reduce_phi_on_safepoints_helper(ophi, nullptr, selector, safepoints)) {
    return false;
  }

  // Now process CastPP->safepoints
  for (uint i = 0; i < casts.size(); i++) {
    Node* cast = casts.at(i);
    Unique_Node_List cast_sfpts;

    for (DUIterator_Fast jmax, j = cast->fast_outs(jmax); j < jmax; j++) {
      Node* use_use = cast->fast_out(j);
      if (use_use->is_SafePoint()) {
        cast_sfpts.push(use_use);
      } else {
        assert(use_use->outcnt() == 0, "Only SafePoint users should be left.");
      }
    }

    if (!reduce_phi_on_safepoints_helper(ophi, cast, selector, cast_sfpts)) {
      return false;
    }
  }

  return true;
}

// This method will create a SafePointScalarMergeNode for each SafePoint in
// 'safepoints'. It will then iterate over the inputs of 'ophi' and create a
// SafePointScalarObjectNode for each scalar replaceable input. Each
// SafePointScalarMergeNode may describe multiple scalar replaced objects -
// see the detailed description in the SafePointScalarMergeNode class header.
bool ConnectionGraph::reduce_phi_on_safepoints_helper(Node* ophi, Node* cast, Node* selector, Unique_Node_List& safepoints) {
  PhaseMacroExpand mexp(*_igvn);
  Node* original_sfpt_parent = cast != nullptr ? cast : ophi;
  const TypeOopPtr* merge_t = _igvn->type(original_sfpt_parent)->make_oopptr();

  Node* nsr_merge_pointer = ophi;
  if (cast != nullptr) {
    const Type* new_t = merge_t->meet(TypePtr::NULL_PTR);
    nsr_merge_pointer = _igvn->transform(ConstraintCastNode::make_cast_for_type(cast->in(0), cast->in(1), new_t, ConstraintCastNode::RegularDependency, nullptr));
  }

  for (uint spi = 0; spi < safepoints.size(); spi++) {
    SafePointNode* sfpt = safepoints.at(spi)->as_SafePoint();
    JVMState *jvms      = sfpt->jvms();
    uint merge_idx      = (sfpt->req() - jvms->scloff());
    int debug_start     = jvms->debug_start();

    SafePointScalarMergeNode* smerge = new SafePointScalarMergeNode(merge_t, merge_idx);
    smerge->init_req(0, _compile->root());
    _igvn->register_new_node_with_optimizer(smerge);

    // The next two inputs are:
    //  (1) A copy of the original pointer to NSR objects.
    //  (2) A selector, used to decide if we need to rematerialize an object
    //      or use the pointer to an NSR object.
    // See more details of these fields in the declaration of SafePointScalarMergeNode.
    sfpt->add_req(nsr_merge_pointer);
    sfpt->add_req(selector);

    for (uint i = 1; i < ophi->req(); i++) {
      Node* base = ophi->in(i);
      JavaObjectNode* ptn = unique_java_object(base);

      // If the base is not scalar replaceable we don't need to register information about
      // it at this time.
      if (ptn == nullptr || !ptn->scalar_replaceable()) {
        continue;
      }

      AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
      Unique_Node_List value_worklist;
#ifdef ASSERT
      const Type* res_type = alloc->result_cast()->bottom_type();
      if (res_type->is_inlinetypeptr() && !Compile::current()->has_circular_inline_type()) {
        PhiNode* phi = ophi->as_Phi();
        assert(!ophi->as_Phi()->can_push_inline_types_down(_igvn), "missed earlier scalarization opportunity");
      }
#endif
      SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt, &value_worklist);
      if (sobj == nullptr) {
        _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
        return false;
      }

      // Now make a pass over the debug information replacing any references
      // to the allocated object with "sobj"
      Node* ccpp = alloc->result_cast();
      sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);

      // Register the scalarized object as a candidate for reallocation
      smerge->add_req(sobj);

      // Scalarize inline types that were added to the safepoint.
      // Don't allow linking a constant oop (if available) for flat array elements
      // because Deoptimization::reassign_flat_array_elements needs field values.
      const bool allow_oop = !merge_t->is_flat();
      for (uint j = 0; j < value_worklist.size(); ++j) {
        InlineTypeNode* vt = value_worklist.at(j)->as_InlineType();
        vt->make_scalar_in_safepoints(_igvn, allow_oop);
      }
    }

    // Replaces debug information references to "original_sfpt_parent" in "sfpt" with references to "smerge"
    sfpt->replace_edges_in_range(original_sfpt_parent, smerge, debug_start, jvms->debug_end(), _igvn);

    // The call to 'replace_edges_in_range' above might have removed the
    // reference to ophi that we need at _merge_pointer_idx. The line below
    // makes sure the reference is maintained.
1305     sfpt->set_req(smerge->merge_pointer_idx(jvms), nsr_merge_pointer);
1306     _igvn->_worklist.push(sfpt);
1307   }
1308 
1309   return true;
1310 }
1311 
1312 void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node *>  &alloc_worklist, GrowableArray<Node *>  &memnode_worklist) {
1313   bool delay = _igvn->delay_transform();
1314   _igvn->set_delay_transform(true);
1315   _igvn->hash_delete(ophi);
1316 
  // Copy all users first because some will be removed and others won't.
  // Ophi may also acquire new users as part of Cast reduction.
  // CastPPs also need to be processed before CmpPs.
1320   Unique_Node_List castpps;
1321   Unique_Node_List others;
1322   for (DUIterator_Fast imax, i = ophi->fast_outs(imax); i < imax; i++) {
1323     Node* use = ophi->fast_out(i);
1324 
1325     if (use->is_CastPP()) {
1326       castpps.push(use);
1327     } else if (use->is_AddP() || use->is_Cmp()) {
1328       others.push(use);
1329     } else if (use->is_SafePoint()) {
1330       // processed later
1331     } else {
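      // Any other user is unexpected: the assert below always fails here and
      // reports the offending node.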
1332       assert(use->is_SafePoint(), "Unexpected user of reducible Phi %d -> %d:%s:%d", ophi->_idx, use->_idx, use->Name(), use->outcnt());
1333     }
1334   }
1335 
1336   // CastPPs need to be processed before Cmps because during the process of
1337   // splitting CastPPs we make reference to the inputs of the Cmp that is used
1338   // by the If controlling the CastPP.
1339   for (uint i = 0; i < castpps.size(); i++) {
1340     reduce_phi_on_castpp_field_load(castpps.at(i), alloc_worklist, memnode_worklist);
1341   }
1342 
1343   for (uint i = 0; i < others.size(); i++) {
1344     Node* use = others.at(i);
1345 
1346     if (use->is_AddP()) {
1347       reduce_phi_on_field_access(use, alloc_worklist);
    } else if (use->is_Cmp()) {
1349       reduce_phi_on_cmp(use);
1350     }
1351   }
1352 
1353   _igvn->set_delay_transform(delay);
1354 }
1355 
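// Rewrite 'ophi' into a new Phi in which every scalar-replaceable input is
// replaced by null, and widen ConstraintCast users that feed only SafePoints
// so that their type admits null as well. After the merge is reduced, the
// merge pointer only needs to carry the not-scalar-replaced (NSR) inputs.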
1356 void ConnectionGraph::reset_scalar_replaceable_entries(PhiNode* ophi) {
1357   Node* null_ptr            = _igvn->makecon(TypePtr::NULL_PTR);
1358   const TypeOopPtr* merge_t = _igvn->type(ophi)->make_oopptr();
1359   const Type* new_t         = merge_t->meet(TypePtr::NULL_PTR);
1360   Node* new_phi             = _igvn->register_new_node_with_optimizer(PhiNode::make(ophi->region(), null_ptr, new_t));
1361 
1362   for (uint i = 1; i < ophi->req(); i++) {
1363     Node* base          = ophi->in(i);
1364     JavaObjectNode* ptn = unique_java_object(base);
1365 
1366     if (ptn != nullptr && ptn->scalar_replaceable()) {
1367       new_phi->set_req(i, null_ptr);
1368     } else {
1369       new_phi->set_req(i, ophi->in(i));
1370     }
1371   }
1372 
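  // replace_node() below may remove several users of 'ophi' at once, so iterate
  // from the end and re-clamp the index against the current outcnt() after each
  // step instead of caching the bound.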
1373   for (int i = ophi->outcnt()-1; i >= 0;) {
1374     Node* out = ophi->raw_out(i);
1375 
1376     if (out->is_ConstraintCast()) {
1377       const Type* out_t = _igvn->type(out)->make_ptr();
1378       const Type* out_new_t = out_t->meet(TypePtr::NULL_PTR);
1379       bool change = out_new_t != out_t;
1380 
1381       for (int j = out->outcnt()-1; change && j >= 0; --j) {
1382         Node* out2 = out->raw_out(j);
1383         if (!out2->is_SafePoint()) {
1384           change = false;
1385           break;
1386         }
1387       }
1388 
1389       if (change) {
1390         Node* new_cast = ConstraintCastNode::make_cast_for_type(out->in(0), out->in(1), out_new_t, ConstraintCastNode::StrongDependency, nullptr);
1391         _igvn->replace_node(out, new_cast);
1392         _igvn->register_new_node_with_optimizer(new_cast);
1393       }
1394     }
1395 
1396     --i;
1397     i = MIN2(i, (int)ophi->outcnt()-1);
1398   }
1399 
1400   _igvn->replace_node(ophi, new_phi);
1401 }
1402 
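// Verify structural invariants of Reduce Allocation Merges (RAM) nodes over all
// nodes reachable from 'root': SafePointScalarMerge inputs must be null/top or
// SafePointScalarObject, merges must not be nested, and only SafePoints may use
// them.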
1403 void ConnectionGraph::verify_ram_nodes(Compile* C, Node* root) {
1404   if (!C->do_reduce_allocation_merges()) return;
1405 
1406   Unique_Node_List ideal_nodes;
1407   ideal_nodes.map(C->live_nodes(), nullptr);  // preallocate space
1408   ideal_nodes.push(root);
1409 
1410   for (uint next = 0; next < ideal_nodes.size(); ++next) {
1411     Node* n = ideal_nodes.at(next);
1412 
1413     if (n->is_SafePointScalarMerge()) {
1414       SafePointScalarMergeNode* merge = n->as_SafePointScalarMerge();
1415 
1416       // Validate inputs of merge
1417       for (uint i = 1; i < merge->req(); i++) {
1418         if (merge->in(i) != nullptr && !merge->in(i)->is_top() && !merge->in(i)->is_SafePointScalarObject()) {
1419           assert(false, "SafePointScalarMerge inputs should be null/top or SafePointScalarObject.");
1420           C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
1421         }
1422       }
1423 
1424       // Validate users of merge
1425       for (DUIterator_Fast imax, i = merge->fast_outs(imax); i < imax; i++) {
1426         Node* sfpt = merge->fast_out(i);
1427         if (sfpt->is_SafePoint()) {
1428           int merge_idx = merge->merge_pointer_idx(sfpt->as_SafePoint()->jvms());
1429 
1430           if (sfpt->in(merge_idx) != nullptr && sfpt->in(merge_idx)->is_SafePointScalarMerge()) {
1431             assert(false, "SafePointScalarMerge nodes can't be nested.");
1432             C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
1433           }
1434         } else {
1435           assert(false, "Only safepoints can use SafePointScalarMerge nodes.");
1436           C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
1437         }
1438       }
1439     }
1440 
1441     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1442       Node* m = n->fast_out(i);
1443       ideal_nodes.push(m);
1444     }
1445   }
1446 }
1447 
1448 // Returns true if there is an object in the scope of sfn that does not escape globally.
1449 bool ConnectionGraph::has_ea_local_in_scope(SafePointNode* sfn) {
1450   Compile* C = _compile;
1451   for (JVMState* jvms = sfn->jvms(); jvms != nullptr; jvms = jvms->caller()) {
1452     if (C->env()->should_retain_local_variables() || C->env()->jvmti_can_walk_any_space() ||
1453         DeoptimizeObjectsALot) {
      // JVMTI agents can access locals. Must provide info about local objects at runtime.
1455       int num_locs = jvms->loc_size();
1456       for (int idx = 0; idx < num_locs; idx++) {
1457         Node* l = sfn->local(jvms, idx);
1458         if (not_global_escape(l)) {
1459           return true;
1460         }
1461       }
1462     }
1463     if (C->env()->jvmti_can_get_owned_monitor_info() ||
1464         C->env()->jvmti_can_walk_any_space() || DeoptimizeObjectsALot) {
      // JVMTI agents can read monitors. Must provide info about locked objects at runtime.
1466       int num_mon = jvms->nof_monitors();
1467       for (int idx = 0; idx < num_mon; idx++) {
1468         Node* m = sfn->monitor_obj(jvms, idx);
1469         if (m != nullptr && not_global_escape(m)) {
1470           return true;
1471         }
1472       }
1473     }
1474   }
1475   return false;
1476 }
1477 
1478 // Returns true if at least one of the arguments to the call is an object
1479 // that does not escape globally.
1480 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
1481   if (call->method() != nullptr) {
1482     uint max_idx = TypeFunc::Parms + call->method()->arg_size();
1483     for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
1484       Node* p = call->in(idx);
1485       if (not_global_escape(p)) {
1486         return true;
1487       }
1488     }
1489   } else {
1490     const char* name = call->as_CallStaticJava()->_name;
1491     assert(name != nullptr, "no name");
1492     // no arg escapes through uncommon traps
1493     if (strcmp(name, "uncommon_trap") != 0) {
1494       // process_call_arguments() assumes that all arguments escape globally
1495       const TypeTuple* d = call->tf()->domain_sig();
1496       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1497         const Type* at = d->field_at(i);
1498         if (at->isa_oopptr() != nullptr) {
1499           return true;
1500         }
1501       }
1502     }
1503   }
1504   return false;
1505 }
1506 
1507 
1508 
1509 // Utility function for nodes that load an object
1510 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
1511   // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1512   // ThreadLocal has RawPtr type.
1513   const Type* t = _igvn->type(n);
1514   if (t->make_ptr() != nullptr) {
1515     Node* adr = n->in(MemNode::Address);
1516 #ifdef ASSERT
1517     if (!adr->is_AddP()) {
1518       assert(_igvn->type(adr)->isa_rawptr(), "sanity");
1519     } else {
1520       assert((ptnode_adr(adr->_idx) == nullptr ||
1521               ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
1522     }
1523 #endif
1524     add_local_var_and_edge(n, PointsToNode::NoEscape,
1525                            adr, delayed_worklist);
1526   }
1527 }
1528 
1529 // Populate Connection Graph with PointsTo nodes and create simple
1530 // connection graph edges.
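// Edges to nodes that may not have been processed yet are postponed via
// 'delayed_worklist' and added later by add_final_edges().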
1531 void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
1532   assert(!_verify, "this method should not be called for verification");
1533   PhaseGVN* igvn = _igvn;
1534   uint n_idx = n->_idx;
1535   PointsToNode* n_ptn = ptnode_adr(n_idx);
1536   if (n_ptn != nullptr) {
1537     return; // No need to redefine PointsTo node during first iteration.
1538   }
1539   int opcode = n->Opcode();
1540   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);
1541   if (gc_handled) {
1542     return; // Ignore node if already handled by GC.
1543   }
1544 
1545   if (n->is_Call()) {
1546     // Arguments to allocation and locking don't escape.
1547     if (n->is_AbstractLock()) {
1548       // Put Lock and Unlock nodes on IGVN worklist to process them during
1549       // first IGVN optimization when escape information is still available.
1550       record_for_optimizer(n);
1551     } else if (n->is_Allocate()) {
1552       add_call_node(n->as_Call());
1553       record_for_optimizer(n);
1554     } else {
1555       if (n->is_CallStaticJava()) {
1556         const char* name = n->as_CallStaticJava()->_name;
1557         if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
1558           return; // Skip uncommon traps
1559         }
1560       }
1561       // Don't mark as processed since call's arguments have to be processed.
1562       delayed_worklist->push(n);
1563       // Check if a call returns an object.
1564       if ((n->as_Call()->returns_pointer() &&
1565            n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
1566           (n->is_CallStaticJava() &&
1567            n->as_CallStaticJava()->is_boxing_method())) {
1568         add_call_node(n->as_Call());
1569       } else if (n->as_Call()->tf()->returns_inline_type_as_fields()) {
1570         bool returns_oop = false;
1571         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !returns_oop; i++) {
1572           ProjNode* pn = n->fast_out(i)->as_Proj();
1573           if (pn->_con >= TypeFunc::Parms && pn->bottom_type()->isa_ptr()) {
1574             returns_oop = true;
1575           }
1576         }
1577         if (returns_oop) {
1578           add_call_node(n->as_Call());
1579         }
1580       }
1581     }
1582     return;
1583   }
1584   // Put this check here to process call arguments since some call nodes
1585   // point to phantom_obj.
1586   if (n_ptn == phantom_obj || n_ptn == null_obj) {
1587     return; // Skip predefined nodes.
1588   }
1589   switch (opcode) {
1590     case Op_AddP: {
1591       Node* base = get_addp_base(n);
1592       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1593       // Field nodes are created for all field types. They are used in
1594       // adjust_scalar_replaceable_state() and split_unique_types().
1595       // Note, non-oop fields will have only base edges in Connection
1596       // Graph because such fields are not used for oop loads and stores.
1597       int offset = address_offset(n, igvn);
1598       add_field(n, PointsToNode::NoEscape, offset);
1599       if (ptn_base == nullptr) {
1600         delayed_worklist->push(n); // Process it later.
1601       } else {
1602         n_ptn = ptnode_adr(n_idx);
1603         add_base(n_ptn->as_Field(), ptn_base);
1604       }
1605       break;
1606     }
1607     case Op_CastX2P:
1608     case Op_CastI2N: {
1609       map_ideal_node(n, phantom_obj);
1610       break;
1611     }
1612     case Op_InlineType:
1613     case Op_CastPP:
1614     case Op_CheckCastPP:
1615     case Op_EncodeP:
1616     case Op_DecodeN:
1617     case Op_EncodePKlass:
1618     case Op_DecodeNKlass: {
1619       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
1620       break;
1621     }
1622     case Op_CMoveP: {
1623       add_local_var(n, PointsToNode::NoEscape);
      // Do not add edges during the first iteration because some of them may
      // not be defined yet.
1626       delayed_worklist->push(n);
1627       break;
1628     }
1629     case Op_ConP:
1630     case Op_ConN:
1631     case Op_ConNKlass: {
1632       // assume all oop constants globally escape except for null
1633       PointsToNode::EscapeState es;
1634       const Type* t = igvn->type(n);
1635       if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
1636         es = PointsToNode::NoEscape;
1637       } else {
1638         es = PointsToNode::GlobalEscape;
1639       }
1640       PointsToNode* ptn_con = add_java_object(n, es);
1641       set_not_scalar_replaceable(ptn_con NOT_PRODUCT(COMMA "Constant pointer"));
1642       break;
1643     }
1644     case Op_CreateEx: {
1645       // assume that all exception objects globally escape
1646       map_ideal_node(n, phantom_obj);
1647       break;
1648     }
1649     case Op_LoadKlass:
1650     case Op_LoadNKlass: {
1651       // Unknown class is loaded
1652       map_ideal_node(n, phantom_obj);
1653       break;
1654     }
1655     case Op_LoadP:
1656     case Op_LoadN: {
1657       add_objload_to_connection_graph(n, delayed_worklist);
1658       break;
1659     }
1660     case Op_Parm: {
1661       map_ideal_node(n, phantom_obj);
1662       break;
1663     }
1664     case Op_PartialSubtypeCheck: {
      // Produces Null or notNull and is used only in CmpP, so
      // phantom_obj can be used.
1667       map_ideal_node(n, phantom_obj); // Result is unknown
1668       break;
1669     }
1670     case Op_Phi: {
1671       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1672       // ThreadLocal has RawPtr type.
1673       const Type* t = n->as_Phi()->type();
1674       if (t->make_ptr() != nullptr) {
1675         add_local_var(n, PointsToNode::NoEscape);
        // Do not add edges during the first iteration because some of them may
        // not be defined yet.
1678         delayed_worklist->push(n);
1679       }
1680       break;
1681     }
1682     case Op_Proj: {
1683       // we are only interested in the oop result projection from a call
1684       if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() &&
1685           (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_ptr())) {
1686         assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
1687                n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
1688         add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1689       }
1690       break;
1691     }
1692     case Op_Rethrow: // Exception object escapes
1693     case Op_Return: {
1694       if (n->req() > TypeFunc::Parms &&
1695           igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
1696         // Treat Return value as LocalVar with GlobalEscape escape state.
1697         add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
1698       }
1699       break;
1700     }
1701     case Op_CompareAndExchangeP:
1702     case Op_CompareAndExchangeN:
1703     case Op_GetAndSetP:
1704     case Op_GetAndSetN: {
1705       add_objload_to_connection_graph(n, delayed_worklist);
1706       // fall-through
1707     }
1708     case Op_StoreP:
1709     case Op_StoreN:
1710     case Op_StoreNKlass:
1711     case Op_WeakCompareAndSwapP:
1712     case Op_WeakCompareAndSwapN:
1713     case Op_CompareAndSwapP:
1714     case Op_CompareAndSwapN: {
1715       add_to_congraph_unsafe_access(n, opcode, delayed_worklist);
1716       break;
1717     }
1718     case Op_AryEq:
1719     case Op_CountPositives:
1720     case Op_StrComp:
1721     case Op_StrEquals:
1722     case Op_StrIndexOf:
1723     case Op_StrIndexOfChar:
1724     case Op_StrInflatedCopy:
1725     case Op_StrCompressedCopy:
1726     case Op_VectorizedHashCode:
1727     case Op_EncodeISOArray: {
1728       add_local_var(n, PointsToNode::ArgEscape);
1729       delayed_worklist->push(n); // Process it later.
1730       break;
1731     }
1732     case Op_ThreadLocal: {
1733       PointsToNode* ptn_thr = add_java_object(n, PointsToNode::ArgEscape);
1734       set_not_scalar_replaceable(ptn_thr NOT_PRODUCT(COMMA "Constant pointer"));
1735       break;
1736     }
1737     case Op_Blackhole: {
1738       // All blackhole pointer arguments are globally escaping.
1739       // Only do this if there is at least one pointer argument.
      // Do not add edges during the first iteration because some of them may
      // not be defined yet; defer to the final step.
1742       for (uint i = 0; i < n->req(); i++) {
1743         Node* in = n->in(i);
1744         if (in != nullptr) {
1745           const Type* at = _igvn->type(in);
1746           if (!at->isa_ptr()) continue;
1747 
1748           add_local_var(n, PointsToNode::GlobalEscape);
1749           delayed_worklist->push(n);
1750           break;
1751         }
1752       }
1753       break;
1754     }
1755     default:
1756       ; // Do nothing for nodes not related to EA.
1757   }
1758   return;
1759 }
1760 
1761 // Add final simple edges to graph.
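// Called once all PointsTo nodes exist, for nodes that were postponed on the
// delayed worklist by add_node_to_connection_graph() (and again in verification
// mode).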
1762 void ConnectionGraph::add_final_edges(Node *n) {
1763   PointsToNode* n_ptn = ptnode_adr(n->_idx);
1764 #ifdef ASSERT
1765   if (_verify && n_ptn->is_JavaObject())
1766     return; // This method does not change graph for JavaObject.
1767 #endif
1768 
1769   if (n->is_Call()) {
1770     process_call_arguments(n->as_Call());
1771     return;
1772   }
1773   assert(n->is_Store() || n->is_LoadStore() ||
1774          ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
1775          "node should be registered already");
1776   int opcode = n->Opcode();
1777   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
1778   if (gc_handled) {
1779     return; // Ignore node if already handled by GC.
1780   }
1781   switch (opcode) {
1782     case Op_AddP: {
1783       Node* base = get_addp_base(n);
1784       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1785       assert(ptn_base != nullptr, "field's base should be registered");
1786       add_base(n_ptn->as_Field(), ptn_base);
1787       break;
1788     }
1789     case Op_InlineType:
1790     case Op_CastPP:
1791     case Op_CheckCastPP:
1792     case Op_EncodeP:
1793     case Op_DecodeN:
1794     case Op_EncodePKlass:
1795     case Op_DecodeNKlass: {
1796       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
1797       break;
1798     }
1799     case Op_CMoveP: {
1800       for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
1801         Node* in = n->in(i);
1802         if (in == nullptr) {
1803           continue;  // ignore null
1804         }
1805         Node* uncast_in = in->uncast();
1806         if (uncast_in->is_top() || uncast_in == n) {
1807           continue;  // ignore top or inputs which go back this node
1808         }
1809         PointsToNode* ptn = ptnode_adr(in->_idx);
1810         assert(ptn != nullptr, "node should be registered");
1811         add_edge(n_ptn, ptn);
1812       }
1813       break;
1814     }
1815     case Op_LoadP:
1816     case Op_LoadN: {
1817       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1818       // ThreadLocal has RawPtr type.
1819       assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1820       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1821       break;
1822     }
1823     case Op_Phi: {
1824       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1825       // ThreadLocal has RawPtr type.
1826       assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
1827       for (uint i = 1; i < n->req(); i++) {
1828         Node* in = n->in(i);
1829         if (in == nullptr) {
1830           continue;  // ignore null
1831         }
1832         Node* uncast_in = in->uncast();
1833         if (uncast_in->is_top() || uncast_in == n) {
1834           continue;  // ignore top or inputs which go back this node
1835         }
1836         PointsToNode* ptn = ptnode_adr(in->_idx);
1837         assert(ptn != nullptr, "node should be registered");
1838         add_edge(n_ptn, ptn);
1839       }
1840       break;
1841     }
1842     case Op_Proj: {
1843       // we are only interested in the oop result projection from a call
1844       assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
1845              n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
1846       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
1847       break;
1848     }
1849     case Op_Rethrow: // Exception object escapes
1850     case Op_Return: {
1851       assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
1852              "Unexpected node type");
1853       // Treat Return value as LocalVar with GlobalEscape escape state.
1854       add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
1855       break;
1856     }
1857     case Op_CompareAndExchangeP:
1858     case Op_CompareAndExchangeN:
1859     case Op_GetAndSetP:
    case Op_GetAndSetN: {
1861       assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1862       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1863       // fall-through
1864     }
1865     case Op_CompareAndSwapP:
1866     case Op_CompareAndSwapN:
1867     case Op_WeakCompareAndSwapP:
1868     case Op_WeakCompareAndSwapN:
1869     case Op_StoreP:
1870     case Op_StoreN:
    case Op_StoreNKlass: {
1872       add_final_edges_unsafe_access(n, opcode);
1873       break;
1874     }
1875     case Op_VectorizedHashCode:
1876     case Op_AryEq:
1877     case Op_CountPositives:
1878     case Op_StrComp:
1879     case Op_StrEquals:
1880     case Op_StrIndexOf:
1881     case Op_StrIndexOfChar:
1882     case Op_StrInflatedCopy:
1883     case Op_StrCompressedCopy:
1884     case Op_EncodeISOArray: {
1885       // char[]/byte[] arrays passed to string intrinsic do not escape but
1886       // they are not scalar replaceable. Adjust escape state for them.
1887       // Start from in(2) edge since in(1) is memory edge.
1888       for (uint i = 2; i < n->req(); i++) {
1889         Node* adr = n->in(i);
1890         const Type* at = _igvn->type(adr);
1891         if (!adr->is_top() && at->isa_ptr()) {
1892           assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
1893                  at->isa_ptr() != nullptr, "expecting a pointer");
1894           if (adr->is_AddP()) {
1895             adr = get_addp_base(adr);
1896           }
1897           PointsToNode* ptn = ptnode_adr(adr->_idx);
1898           assert(ptn != nullptr, "node should be registered");
1899           add_edge(n_ptn, ptn);
1900         }
1901       }
1902       break;
1903     }
1904     case Op_Blackhole: {
1905       // All blackhole pointer arguments are globally escaping.
1906       for (uint i = 0; i < n->req(); i++) {
1907         Node* in = n->in(i);
1908         if (in != nullptr) {
1909           const Type* at = _igvn->type(in);
1910           if (!at->isa_ptr()) continue;
1911 
1912           if (in->is_AddP()) {
1913             in = get_addp_base(in);
1914           }
1915 
1916           PointsToNode* ptn = ptnode_adr(in->_idx);
1917           assert(ptn != nullptr, "should be defined already");
1918           set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "blackhole"));
1919           add_edge(n_ptn, ptn);
1920         }
1921       }
1922       break;
1923     }
1924     default: {
1925       // This method should be called only for EA specific nodes which may
1926       // miss some edges when they were created.
1927 #ifdef ASSERT
1928       n->dump(1);
1929 #endif
1930       guarantee(false, "unknown node");
1931     }
1932   }
1933   return;
1934 }
1935 
1936 void ConnectionGraph::add_to_congraph_unsafe_access(Node* n, uint opcode, Unique_Node_List* delayed_worklist) {
1937   Node* adr = n->in(MemNode::Address);
1938   const Type* adr_type = _igvn->type(adr);
1939   adr_type = adr_type->make_ptr();
1940   if (adr_type == nullptr) {
1941     return; // skip dead nodes
1942   }
1943   if (adr_type->isa_oopptr()
1944       || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
1945           && adr_type == TypeRawPtr::NOTNULL
1946           && is_captured_store_address(adr))) {
1947     delayed_worklist->push(n); // Process it later.
1948 #ifdef ASSERT
    assert(adr->is_AddP(), "expecting an AddP");
1950     if (adr_type == TypeRawPtr::NOTNULL) {
1951       // Verify a raw address for a store captured by Initialize node.
1952       int offs = (int) _igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
1953       assert(offs != Type::OffsetBot, "offset must be a constant");
1954     }
1955 #endif
1956   } else {
    // Ignore the copy of the displaced header to the BoxNode (OSR compilation).
1958     if (adr->is_BoxLock()) {
1959       return;
1960     }
1961     // Stored value escapes in unsafe access.
1962     if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
1963       delayed_worklist->push(n); // Process unsafe access later.
1964       return;
1965     }
1966 #ifdef ASSERT
1967     n->dump(1);
1968     assert(false, "not unsafe");
1969 #endif
1970   }
1971 }
1972 
1973 bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) {
1974   Node* adr = n->in(MemNode::Address);
1975   const Type *adr_type = _igvn->type(adr);
1976   adr_type = adr_type->make_ptr();
1977 #ifdef ASSERT
1978   if (adr_type == nullptr) {
1979     n->dump(1);
1980     assert(adr_type != nullptr, "dead node should not be on list");
1981     return true;
1982   }
1983 #endif
1984 
1985   if (adr_type->isa_oopptr()
1986       || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
1987            && adr_type == TypeRawPtr::NOTNULL
1988            && is_captured_store_address(adr))) {
1989     // Point Address to Value
1990     PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
1991     assert(adr_ptn != nullptr &&
1992            adr_ptn->as_Field()->is_oop(), "node should be registered");
1993     Node* val = n->in(MemNode::ValueIn);
1994     PointsToNode* ptn = ptnode_adr(val->_idx);
1995     assert(ptn != nullptr, "node should be registered");
1996     add_edge(adr_ptn, ptn);
1997     return true;
1998   } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
1999     // Stored value escapes in unsafe access.
2000     Node* val = n->in(MemNode::ValueIn);
2001     PointsToNode* ptn = ptnode_adr(val->_idx);
2002     assert(ptn != nullptr, "node should be registered");
2003     set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
2004     // Add edge to object for unsafe access with offset.
2005     PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
2006     assert(adr_ptn != nullptr, "node should be registered");
2007     if (adr_ptn->is_Field()) {
2008       assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
2009       add_edge(adr_ptn, ptn);
2010     }
2011     return true;
2012   }
2013 #ifdef ASSERT
2014   n->dump(1);
2015   assert(false, "not unsafe");
2016 #endif
2017   return false;
2018 }
2019 
2020 void ConnectionGraph::add_call_node(CallNode* call) {
2021   assert(call->returns_pointer() || call->tf()->returns_inline_type_as_fields(), "only for call which returns pointer");
2022   uint call_idx = call->_idx;
2023   if (call->is_Allocate()) {
2024     Node* k = call->in(AllocateNode::KlassNode);
2025     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
    assert(kt != nullptr, "TypeKlassPtr required.");
2027     PointsToNode::EscapeState es = PointsToNode::NoEscape;
2028     bool scalar_replaceable = true;
2029     NOT_PRODUCT(const char* nsr_reason = "");
2030     if (call->is_AllocateArray()) {
2031       if (!kt->isa_aryklassptr()) { // StressReflectiveCode
2032         es = PointsToNode::GlobalEscape;
2033       } else {
2034         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
2035         if (length < 0) {
2036           // Not scalar replaceable if the length is not constant.
2037           scalar_replaceable = false;
2038           NOT_PRODUCT(nsr_reason = "has a non-constant length");
2039         } else if (length > EliminateAllocationArraySizeLimit) {
2040           // Not scalar replaceable if the length is too big.
2041           scalar_replaceable = false;
2042           NOT_PRODUCT(nsr_reason = "has a length that is too big");
2043         }
2044       }
2045     } else {  // Allocate instance
2046       if (!kt->isa_instklassptr()) { // StressReflectiveCode
2047         es = PointsToNode::GlobalEscape;
2048       } else {
2049         const TypeInstKlassPtr* ikt = kt->is_instklassptr();
2050         ciInstanceKlass* ik = ikt->klass_is_exact() ? ikt->exact_klass()->as_instance_klass() : ikt->instance_klass();
2051         if (ik->is_subclass_of(_compile->env()->Thread_klass()) ||
2052             ik->is_subclass_of(_compile->env()->Reference_klass()) ||
2053             !ik->can_be_instantiated() ||
2054             ik->has_finalizer()) {
2055           es = PointsToNode::GlobalEscape;
2056         } else {
          int nfields = ik->nof_nonstatic_fields();
2058           if (nfields > EliminateAllocationFieldsLimit) {
2059             // Not scalar replaceable if there are too many fields.
2060             scalar_replaceable = false;
2061             NOT_PRODUCT(nsr_reason = "has too many fields");
2062           }
2063         }
2064       }
2065     }
2066     add_java_object(call, es);
2067     PointsToNode* ptn = ptnode_adr(call_idx);
2068     if (!scalar_replaceable && ptn->scalar_replaceable()) {
2069       set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA nsr_reason));
2070     }
2071   } else if (call->is_CallStaticJava()) {
2072     // Call nodes could be different types:
2073     //
2074     // 1. CallDynamicJavaNode (what happened during call is unknown):
2075     //
2076     //    - mapped to GlobalEscape JavaObject node if oop is returned;
2077     //
2078     //    - all oop arguments are escaping globally;
2079     //
2080     // 2. CallStaticJavaNode (execute bytecode analysis if possible):
2081     //
2082     //    - the same as CallDynamicJavaNode if can't do bytecode analysis;
2083     //
2084     //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
2085     //    - mapped to NoEscape JavaObject node if non-escaping object allocated
2086     //      during call is returned;
    //    - mapped to ArgEscape LocalVar node pointing to object arguments
    //      which are returned and do not escape during the call;
2089     //
2090     //    - oop arguments escaping status is defined by bytecode analysis;
2091     //
2092     // For a static call, we know exactly what method is being called.
2093     // Use bytecode estimator to record whether the call's return value escapes.
2094     ciMethod* meth = call->as_CallJava()->method();
2095     if (meth == nullptr) {
2096       const char* name = call->as_CallStaticJava()->_name;
2097       assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0 ||
2098              strncmp(name, "C2 Runtime load_unknown_inline", 30) == 0, "TODO: add failed case check");
2099       // Returns a newly allocated non-escaped object.
2100       add_java_object(call, PointsToNode::NoEscape);
2101       set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multinewarray"));
2102     } else if (meth->is_boxing_method()) {
2103       // Returns boxing object
2104       PointsToNode::EscapeState es;
2105       vmIntrinsics::ID intr = meth->intrinsic_id();
2106       if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
2107         // It does not escape if object is always allocated.
2108         es = PointsToNode::NoEscape;
2109       } else {
2110         // It escapes globally if object could be loaded from cache.
2111         es = PointsToNode::GlobalEscape;
2112       }
2113       add_java_object(call, es);
2114       if (es == PointsToNode::GlobalEscape) {
2115         set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache"));
2116       }
2117     } else {
2118       BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
2119       call_analyzer->copy_dependencies(_compile->dependencies());
2120       if (call_analyzer->is_return_allocated()) {
2121         // Returns a newly allocated non-escaped object, simply
2122         // update dependency information.
2123         // Mark it as NoEscape so that objects referenced by
        // its fields will be marked as NoEscape at least.
2125         add_java_object(call, PointsToNode::NoEscape);
2126         set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
2127       } else {
2128         // Determine whether any arguments are returned.
2129         const TypeTuple* d = call->tf()->domain_cc();
2130         bool ret_arg = false;
2131         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2132           if (d->field_at(i)->isa_ptr() != nullptr &&
2133               call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
2134             ret_arg = true;
2135             break;
2136           }
2137         }
2138         if (ret_arg) {
2139           add_local_var(call, PointsToNode::ArgEscape);
2140         } else {
2141           // Returns unknown object.
2142           map_ideal_node(call, phantom_obj);
2143         }
2144       }
2145     }
2146   } else {
    // Another type of call; assume the worst case:
2148     // returned value is unknown and globally escapes.
2149     assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
2150     map_ideal_node(call, phantom_obj);
2151   }
2152 }
2153 
2154 void ConnectionGraph::process_call_arguments(CallNode *call) {
2155     bool is_arraycopy = false;
2156     switch (call->Opcode()) {
2157 #ifdef ASSERT
2158     case Op_Allocate:
2159     case Op_AllocateArray:
2160     case Op_Lock:
2161     case Op_Unlock:
2162       assert(false, "should be done already");
2163       break;
2164 #endif
2165     case Op_ArrayCopy:
2166     case Op_CallLeafNoFP:
2167       // Most array copies are ArrayCopy nodes at this point but there
2168       // are still a few direct calls to the copy subroutines (See
2169       // PhaseStringOpts::copy_string())
2170       is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
2171         call->as_CallLeaf()->is_call_to_arraycopystub();
2172       // fall through
2173     case Op_CallLeafVector:
2174     case Op_CallLeaf: {
      // Stub calls: objects do not escape but they are not scalar replaceable.
2176       // Adjust escape state for outgoing arguments.
2177       const TypeTuple * d = call->tf()->domain_sig();
2178       bool src_has_oops = false;
2179       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2180         const Type* at = d->field_at(i);
2181         Node *arg = call->in(i);
2182         if (arg == nullptr) {
2183           continue;
2184         }
2185         const Type *aat = _igvn->type(arg);
2186         if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
2187           continue;
2188         }
2189         if (arg->is_AddP()) {
2190           //
2191           // The inline_native_clone() case when the arraycopy stub is called
2192           // after the allocation before Initialize and CheckCastPP nodes.
2193           // Or normal arraycopy for object arrays case.
2194           //
2195           // Set AddP's base (Allocate) as not scalar replaceable since
2196           // pointer to the base (with offset) is passed as argument.
2197           //
2198           arg = get_addp_base(arg);
2199         }
2200         PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2201         assert(arg_ptn != nullptr, "should be registered");
2202         PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
2203         if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
          assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                 aat->isa_ptr() != nullptr, "expecting a pointer");
2206           bool arg_has_oops = aat->isa_oopptr() &&
2207                               (aat->isa_instptr() ||
2208                                (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)) ||
2209                                (aat->isa_aryptr() && aat->isa_aryptr()->elem() != nullptr &&
2210                                                                aat->isa_aryptr()->is_flat() &&
2211                                                                aat->isa_aryptr()->elem()->inline_klass()->contains_oops()));
2212           if (i == TypeFunc::Parms) {
2213             src_has_oops = arg_has_oops;
2214           }
2215           //
2216           // src or dst could be j.l.Object when other is basic type array:
2217           //
2218           //   arraycopy(char[],0,Object*,0,size);
2219           //   arraycopy(Object*,0,char[],0,size);
2220           //
2221           // Don't add edges in such cases.
2222           //
2223           bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
2224                                        arg_has_oops && (i > TypeFunc::Parms);
2225 #ifdef ASSERT
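          // Allowlist of leaf calls whose escape behavior is known; an
          // unexpected CallLeaf trips the fatal() below so that new stubs get
          // reviewed for their effect on escape analysis.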
2226           if (!(is_arraycopy ||
2227                 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
2228                 (call->as_CallLeaf()->_name != nullptr &&
2229                  (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
2230                   strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
2231                   strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
2232                   strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
2233                   strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
2234                   strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
2235                   strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
2236                   strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_encryptAESCrypt") == 0 ||
2237                   strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_decryptAESCrypt") == 0 ||
2238                   strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
2239                   strcmp(call->as_CallLeaf()->_name, "galoisCounterMode_AESCrypt") == 0 ||
2240                   strcmp(call->as_CallLeaf()->_name, "poly1305_processBlocks") == 0 ||
2241                   strcmp(call->as_CallLeaf()->_name, "intpoly_montgomeryMult_P256") == 0 ||
2242                   strcmp(call->as_CallLeaf()->_name, "intpoly_assign") == 0 ||
2243                   strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
2244                   strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 ||
2245                   strcmp(call->as_CallLeaf()->_name, "dilithiumAlmostNtt") == 0 ||
2246                   strcmp(call->as_CallLeaf()->_name, "dilithiumAlmostInverseNtt") == 0 ||
2247                   strcmp(call->as_CallLeaf()->_name, "dilithiumNttMult") == 0 ||
2248                   strcmp(call->as_CallLeaf()->_name, "dilithiumMontMulByConstant") == 0 ||
2249                   strcmp(call->as_CallLeaf()->_name, "dilithiumDecomposePoly") == 0 ||
2250                   strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
2251                   strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
2252                   strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
2253                   strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
2254                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
2255                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
2256                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
2257                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
2258                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
2259                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
2260                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
2261                   strcmp(call->as_CallLeaf()->_name, "double_keccak") == 0 ||
2262                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
2263                   strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
2264                   strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
2265                   strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
2266                   strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
2267                   strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
2268                   strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
2269                   strcmp(call->as_CallLeaf()->_name, "load_unknown_inline") == 0 ||
2270                   strcmp(call->as_CallLeaf()->_name, "store_unknown_inline") == 0 ||
2271                   strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
2272                   strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
2273                   strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
2274                   strcmp(call->as_CallLeaf()->_name, "stringIndexOf") == 0 ||
2275                   strcmp(call->as_CallLeaf()->_name, "arraysort_stub") == 0 ||
2276                   strcmp(call->as_CallLeaf()->_name, "array_partition_stub") == 0 ||
2277                   strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0 ||
2278                   strcmp(call->as_CallLeaf()->_name, "unsafe_setmemory") == 0)
2279                  ))) {
2280             call->dump();
2281             fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
2282           }
2283 #endif
2284           // Always process arraycopy's destination object since
2285           // we need to add all possible edges to references in
2286           // source object.
2287           if (arg_esc >= PointsToNode::ArgEscape &&
2288               !arg_is_arraycopy_dest) {
2289             continue;
2290           }
2291           PointsToNode::EscapeState es = PointsToNode::ArgEscape;
2292           if (call->is_ArrayCopy()) {
2293             ArrayCopyNode* ac = call->as_ArrayCopy();
2294             if (ac->is_clonebasic() ||
2295                 ac->is_arraycopy_validated() ||
2296                 ac->is_copyof_validated() ||
2297                 ac->is_copyofrange_validated()) {
2298               es = PointsToNode::NoEscape;
2299             }
2300           }
2301           set_escape_state(arg_ptn, es NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2302           if (arg_is_arraycopy_dest) {
2303             Node* src = call->in(TypeFunc::Parms);
2304             if (src->is_AddP()) {
2305               src = get_addp_base(src);
2306             }
2307             PointsToNode* src_ptn = ptnode_adr(src->_idx);
2308             assert(src_ptn != nullptr, "should be registered");
2309             if (arg_ptn != src_ptn) {
              // Special arraycopy edge:
              // A destination object's field can't have the source object
              // as base since the objects' escape states are not related.
              // Only the escape state of the destination object's fields affects
              // the escape state of fields in the source object.
2315               add_arraycopy(call, es, src_ptn, arg_ptn);
2316             }
2317           }
2318         }
2319       }
2320       break;
2321     }
2322     case Op_CallStaticJava: {
2323       // For a static call, we know exactly what method is being called.
      // Use bytecode estimator to record the call's escape effects.
2325 #ifdef ASSERT
2326       const char* name = call->as_CallStaticJava()->_name;
2327       assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
2328 #endif
2329       ciMethod* meth = call->as_CallJava()->method();
2330       if ((meth != nullptr) && meth->is_boxing_method()) {
2331         break; // Boxing methods do not modify any oops.
2332       }
      BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
2334       // fall-through if not a Java method or no analyzer information
2335       if (call_analyzer != nullptr) {
2336         PointsToNode* call_ptn = ptnode_adr(call->_idx);
2337         const TypeTuple* d = call->tf()->domain_cc();
2338         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2339           const Type* at = d->field_at(i);
2340           int k = i - TypeFunc::Parms;
2341           Node* arg = call->in(i);
2342           PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2343           if (at->isa_ptr() != nullptr &&
2344               call_analyzer->is_arg_returned(k)) {
2345             // The call returns arguments.
2346             if (call_ptn != nullptr) { // Is call's result used?
2347               assert(call_ptn->is_LocalVar(), "node should be registered");
2348               assert(arg_ptn != nullptr, "node should be registered");
2349               add_edge(call_ptn, arg_ptn);
2350             }
2351           }
2352           if (at->isa_oopptr() != nullptr &&
2353               arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
2354             if (!call_analyzer->is_arg_stack(k)) {
2355               // The argument global escapes
2356               set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2357             } else {
2358               set_escape_state(arg_ptn, PointsToNode::ArgEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2359               if (!call_analyzer->is_arg_local(k)) {
2360                 // The argument itself doesn't escape, but any fields might
2361                 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2362               }
2363             }
2364           }
2365         }
2366         if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
2367           // The call returns arguments.
2368           assert(call_ptn->edge_count() > 0, "sanity");
2369           if (!call_analyzer->is_return_local()) {
2370             // Returns also unknown object.
2371             add_edge(call_ptn, phantom_obj);
2372           }
2373         }
2374         break;
2375       }
2376     }
2377     default: {
2378       // Fall-through here if not a Java method or no analyzer information
2379       // or some other type of call, assume the worst case: all arguments
2380       // globally escape.
2381       const TypeTuple* d = call->tf()->domain_cc();
2382       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2383         const Type* at = d->field_at(i);
2384         if (at->isa_oopptr() != nullptr) {
2385           Node* arg = call->in(i);
2386           if (arg->is_AddP()) {
2387             arg = get_addp_base(arg);
2388           }
2389           assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
2390           set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2391         }
2392       }
2393     }
2394   }
2395 }
2396 
2397 
2398 // Finish Graph construction.
2399 bool ConnectionGraph::complete_connection_graph(
2400                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
2401                          GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
2402                          GrowableArray<JavaObjectNode*>& java_objects_worklist,
2403                          GrowableArray<FieldNode*>&      oop_fields_worklist) {
  // Normally only 1-3 passes are needed to build the Connection Graph, depending
  // on graph complexity. We have observed 8 passes in jvm2008 compiler.compiler.
  // Set the limit to 20 to catch situations where something went wrong and
  // bail out of Escape Analysis.
  // Also limit build time to 20 sec (60 in debug VM); see the EscapeAnalysisTimeout flag.
2409 #define GRAPH_BUILD_ITER_LIMIT 20
2410 
  // Propagate GlobalEscape and ArgEscape escape states and check that
  // we still have non-escaping objects. The method pushes Field nodes
  // which reference phantom_object onto _worklist.
2414   if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
2415     return false; // Nothing to do.
2416   }
2417   // Now propagate references to all JavaObject nodes.
2418   int java_objects_length = java_objects_worklist.length();
2419   elapsedTimer build_time;
2420   build_time.start();
2421   elapsedTimer time;
2422   bool timeout = false;
2423   int new_edges = 1;
2424   int iterations = 0;
2425   do {
2426     while ((new_edges > 0) &&
2427            (iterations++ < GRAPH_BUILD_ITER_LIMIT)) {
2428       double start_time = time.seconds();
2429       time.start();
2430       new_edges = 0;
2431       // Propagate references to phantom_object for nodes pushed on _worklist
2432       // by find_non_escaped_objects() and find_field_value().
2433       new_edges += add_java_object_edges(phantom_obj, false);
2434       for (int next = 0; next < java_objects_length; ++next) {
2435         JavaObjectNode* ptn = java_objects_worklist.at(next);
2436         new_edges += add_java_object_edges(ptn, true);
2437 
2438 #define SAMPLE_SIZE 4
2439         if ((next % SAMPLE_SIZE) == 0) {
          // Every SAMPLE_SIZE iterations, estimate how much time it will take
          // to complete graph construction.
2442           time.stop();
          // Poll for requests from the shutdown mechanism to quiesce the compiler
          // because Connection Graph construction may take a long time.
2445           CompileBroker::maybe_block();
2446           double stop_time = time.seconds();
2447           double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
2448           double time_until_end = time_per_iter * (double)(java_objects_length - next);
2449           if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
2450             timeout = true;
2451             break; // Timeout
2452           }
2453           start_time = stop_time;
2454           time.start();
2455         }
2456 #undef SAMPLE_SIZE
2457 
2458       }
2459       if (timeout) break;
2460       if (new_edges > 0) {
2461         // Update escape states on each iteration if graph was updated.
2462         if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
2463           return false; // Nothing to do.
2464         }
2465       }
2466       time.stop();
2467       if (time.seconds() >= EscapeAnalysisTimeout) {
2468         timeout = true;
2469         break;
2470       }
2471     }
2472     if ((iterations < GRAPH_BUILD_ITER_LIMIT) && !timeout) {
2473       time.start();
2474       // Find fields which have unknown value.
2475       int fields_length = oop_fields_worklist.length();
2476       for (int next = 0; next < fields_length; next++) {
2477         FieldNode* field = oop_fields_worklist.at(next);
2478         if (field->edge_count() == 0) {
2479           new_edges += find_field_value(field);
          // This code may have added new edges to phantom_object.
          // Another cycle is needed to propagate references to phantom_object.
2482         }
2483       }
2484       time.stop();
2485       if (time.seconds() >= EscapeAnalysisTimeout) {
2486         timeout = true;
2487         break;
2488       }
2489     } else {
2490       new_edges = 0; // Bailout
2491     }
2492   } while (new_edges > 0);
2493 
2494   build_time.stop();
2495   _build_time = build_time.seconds();
2496   _build_iterations = iterations;
2497 
2498   // Bailout if passed limits.
2499   if ((iterations >= GRAPH_BUILD_ITER_LIMIT) || timeout) {
2500     Compile* C = _compile;
2501     if (C->log() != nullptr) {
2502       C->log()->begin_elem("connectionGraph_bailout reason='reached ");
2503       C->log()->text("%s", timeout ? "time" : "iterations");
2504       C->log()->end_elem(" limit'");
2505     }
2506     assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build during invocation %d (%f sec, %d iterations) with %d nodes and worklist size %d",
2507            _invocation, _build_time, _build_iterations, nodes_size(), ptnodes_worklist.length());
2508     // Possible infinite build_connection_graph loop,
2509     // bailout (no changes to ideal graph were made).
2510     return false;
2511   }
2512 
2513 #undef GRAPH_BUILD_ITER_LIMIT
2514 
2515   // Find fields initialized by null for non-escaping Allocations.
2516   int non_escaped_length = non_escaped_allocs_worklist.length();
2517   for (int next = 0; next < non_escaped_length; next++) {
2518     JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
2519     PointsToNode::EscapeState es = ptn->escape_state();
2520     assert(es <= PointsToNode::ArgEscape, "sanity");
2521     if (es == PointsToNode::NoEscape) {
2522       if (find_init_values_null(ptn, _igvn) > 0) {
2523         // Adding references to null object does not change escape states
2524         // since it does not escape. Also no fields are added to null object.
2525         add_java_object_edges(null_obj, false);
2526       }
2527     }
2528     Node* n = ptn->ideal_node();
2529     if (n->is_Allocate()) {
2530       // The object allocated by this Allocate node will never be
      // seen by another thread. Mark it so that when it is
2532       // expanded no MemBarStoreStore is added.
2533       InitializeNode* ini = n->as_Allocate()->initialization();
      if (ini != nullptr) {
        ini->set_does_not_escape();
      }
2536     }
2537   }
2538   return true; // Finished graph construction.
2539 }
2540 
2541 // Propagate GlobalEscape and ArgEscape escape states to all nodes
2542 // and check that we still have non-escaping java objects.
2543 bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
2544                                                GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist) {
2545   GrowableArray<PointsToNode*> escape_worklist;
2546   // First, put all nodes with GlobalEscape and ArgEscape states on worklist.
2547   int ptnodes_length = ptnodes_worklist.length();
2548   for (int next = 0; next < ptnodes_length; ++next) {
2549     PointsToNode* ptn = ptnodes_worklist.at(next);
2550     if (ptn->escape_state() >= PointsToNode::ArgEscape ||
2551         ptn->fields_escape_state() >= PointsToNode::ArgEscape) {
2552       escape_worklist.push(ptn);
2553     }
2554   }
  // Propagate escape states to referenced nodes (the edges list).
2556   while (escape_worklist.length() > 0) {
2557     PointsToNode* ptn = escape_worklist.pop();
2558     PointsToNode::EscapeState es  = ptn->escape_state();
2559     PointsToNode::EscapeState field_es = ptn->fields_escape_state();
2560     if (ptn->is_Field() && ptn->as_Field()->is_oop() &&
2561         es >= PointsToNode::ArgEscape) {
2562       // GlobalEscape or ArgEscape state of field means it has unknown value.
2563       if (add_edge(ptn, phantom_obj)) {
2564         // New edge was added
2565         add_field_uses_to_worklist(ptn->as_Field());
2566       }
2567     }
2568     for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2569       PointsToNode* e = i.get();
2570       if (e->is_Arraycopy()) {
2571         assert(ptn->arraycopy_dst(), "sanity");
2572         // Propagate only fields escape state through arraycopy edge.
2573         if (e->fields_escape_state() < field_es) {
2574           set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2575           escape_worklist.push(e);
2576         }
2577       } else if (es >= field_es) {
2578         // fields_escape_state is also set to 'es' if it is less than 'es'.
2579         if (e->escape_state() < es) {
2580           set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2581           escape_worklist.push(e);
2582         }
2583       } else {
2584         // Propagate field escape state.
2585         bool es_changed = false;
2586         if (e->fields_escape_state() < field_es) {
2587           set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2588           es_changed = true;
2589         }
2590         if ((e->escape_state() < field_es) &&
2591             e->is_Field() && ptn->is_JavaObject() &&
2592             e->as_Field()->is_oop()) {
2593           // Change escape state of referenced fields.
2594           set_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2595           es_changed = true;
2596         } else if (e->escape_state() < es) {
2597           set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2598           es_changed = true;
2599         }
2600         if (es_changed) {
2601           escape_worklist.push(e);
2602         }
2603       }
2604     }
2605   }
2606   // Remove escaped objects from non_escaped list.
2607   for (int next = non_escaped_allocs_worklist.length()-1; next >= 0 ; --next) {
2608     JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
2609     if (ptn->escape_state() >= PointsToNode::GlobalEscape) {
2610       non_escaped_allocs_worklist.delete_at(next);
2611     }
2612     if (ptn->escape_state() == PointsToNode::NoEscape) {
2613       // Find fields in non-escaped allocations which have unknown value.
2614       find_init_values_phantom(ptn);
2615     }
2616   }
2617   return (non_escaped_allocs_worklist.length() > 0);
2618 }
2619 
// Add all references to the JavaObject node by walking over all uses.
2621 int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {
2622   int new_edges = 0;
2623   if (populate_worklist) {
    // Populate _worklist with the uses of jobj's uses.
2625     for (UseIterator i(jobj); i.has_next(); i.next()) {
2626       PointsToNode* use = i.get();
2627       if (use->is_Arraycopy()) {
2628         continue;
2629       }
2630       add_uses_to_worklist(use);
2631       if (use->is_Field() && use->as_Field()->is_oop()) {
2632         // Put on worklist all field's uses (loads) and
2633         // related field nodes (same base and offset).
2634         add_field_uses_to_worklist(use->as_Field());
2635       }
2636     }
2637   }
2638   for (int l = 0; l < _worklist.length(); l++) {
2639     PointsToNode* use = _worklist.at(l);
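    // A "base use" marks a Field use recording the field's base object,
    // as opposed to a stored-value edge (see PointsToNode::is_base_use()).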
2640     if (PointsToNode::is_base_use(use)) {
2641       // Add reference from jobj to field and from field to jobj (field's base).
2642       use = PointsToNode::get_use_node(use)->as_Field();
2643       if (add_base(use->as_Field(), jobj)) {
2644         new_edges++;
2645       }
2646       continue;
2647     }
2648     assert(!use->is_JavaObject(), "sanity");
2649     if (use->is_Arraycopy()) {
2650       if (jobj == null_obj) { // null object does not have field edges
2651         continue;
2652       }
      // Add an edge from the Arraycopy node to the arraycopy's source java object
2654       if (add_edge(use, jobj)) {
2655         jobj->set_arraycopy_src();
2656         new_edges++;
2657       }
2658       // and stop here.
2659       continue;
2660     }
2661     if (!add_edge(use, jobj)) {
2662       continue; // No new edge added, there was such edge already.
2663     }
2664     new_edges++;
2665     if (use->is_LocalVar()) {
2666       add_uses_to_worklist(use);
2667       if (use->arraycopy_dst()) {
2668         for (EdgeIterator i(use); i.has_next(); i.next()) {
2669           PointsToNode* e = i.get();
2670           if (e->is_Arraycopy()) {
2671             if (jobj == null_obj) { // null object does not have field edges
2672               continue;
2673             }
2674             // Add edge from arraycopy's destination java object to Arraycopy node.
2675             if (add_edge(jobj, e)) {
2676               new_edges++;
2677               jobj->set_arraycopy_dst();
2678             }
2679           }
2680         }
2681       }
2682     } else {
      // Added a new edge from a field node to the stored value.
2684       // Put on worklist all field's uses (loads) and
2685       // related field nodes (same base and offset).
2686       add_field_uses_to_worklist(use->as_Field());
2687     }
2688   }
2689   _worklist.clear();
2690   _in_worklist.reset();
2691   return new_edges;
2692 }
2693 
2694 // Put on worklist all related field nodes.
2695 void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
2696   assert(field->is_oop(), "sanity");
2697   int offset = field->offset();
2698   add_uses_to_worklist(field);
2699   // Loop over all bases of this field and push on worklist Field nodes
2700   // with the same offset and base (since they may reference the same field).
2701   for (BaseIterator i(field); i.has_next(); i.next()) {
2702     PointsToNode* base = i.get();
2703     add_fields_to_worklist(field, base);
2704     // Check if the base was source object of arraycopy and go over arraycopy's
2705     // destination objects since values stored to a field of source object are
2706     // accessible by uses (loads) of fields of destination objects.
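    // For example (a sketch): after 'System.arraycopy(src, 0, dst, 0, n)',
    // a load of 'dst[0]' may observe a value that was stored into 'src[0]'.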
2707     if (base->arraycopy_src()) {
2708       for (UseIterator j(base); j.has_next(); j.next()) {
2709         PointsToNode* arycp = j.get();
2710         if (arycp->is_Arraycopy()) {
2711           for (UseIterator k(arycp); k.has_next(); k.next()) {
2712             PointsToNode* abase = k.get();
2713             if (abase->arraycopy_dst() && abase != base) {
2714               // Look for the same arraycopy reference.
2715               add_fields_to_worklist(field, abase);
2716             }
2717           }
2718         }
2719       }
2720     }
2721   }
2722 }
2723 
2724 // Put on worklist all related field nodes.
2725 void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
2726   int offset = field->offset();
2727   if (base->is_LocalVar()) {
2728     for (UseIterator j(base); j.has_next(); j.next()) {
2729       PointsToNode* f = j.get();
2730       if (PointsToNode::is_base_use(f)) { // Field
2731         f = PointsToNode::get_use_node(f);
2732         if (f == field || !f->as_Field()->is_oop()) {
2733           continue;
2734         }
2735         int offs = f->as_Field()->offset();
2736         if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
2737           add_to_worklist(f);
2738         }
2739       }
2740     }
2741   } else {
2742     assert(base->is_JavaObject(), "sanity");
2743     if (// Skip phantom_object since it is only used to indicate that
2744         // this field's content globally escapes.
2745         (base != phantom_obj) &&
2746         // null object node does not have fields.
2747         (base != null_obj)) {
2748       for (EdgeIterator i(base); i.has_next(); i.next()) {
2749         PointsToNode* f = i.get();
2750         // Skip arraycopy edge since store to destination object field
2751         // does not update value in source object field.
2752         if (f->is_Arraycopy()) {
2753           assert(base->arraycopy_dst(), "sanity");
2754           continue;
2755         }
2756         if (f == field || !f->as_Field()->is_oop()) {
2757           continue;
2758         }
2759         int offs = f->as_Field()->offset();
2760         if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
2761           add_to_worklist(f);
2762         }
2763       }
2764     }
2765   }
2766 }
2767 
2768 // Find fields which have unknown value.
2769 int ConnectionGraph::find_field_value(FieldNode* field) {
2770   // Escaped fields should have init value already.
2771   assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
2772   int new_edges = 0;
2773   for (BaseIterator i(field); i.has_next(); i.next()) {
2774     PointsToNode* base = i.get();
2775     if (base->is_JavaObject()) {
2776       // Skip Allocate's fields which will be processed later.
2777       if (base->ideal_node()->is_Allocate()) {
2778         return 0;
2779       }
2780       assert(base == null_obj, "only null ptr base expected here");
2781     }
2782   }
2783   if (add_edge(field, phantom_obj)) {
2784     // New edge was added
2785     new_edges++;
2786     add_field_uses_to_worklist(field);
2787   }
2788   return new_edges;
2789 }
2790 
// Find fields' initializing values for allocations.
2792 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
2793   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2794   PointsToNode* init_val = phantom_obj;
2795   Node* alloc = pta->ideal_node();
2796 
  // Do nothing for Allocate nodes since their field values are
2798   // "known" unless they are initialized by arraycopy/clone.
2799   if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
2800     if (alloc->as_Allocate()->in(AllocateNode::InitValue) != nullptr) {
2801       // Non-flat inline type arrays are initialized with
2802       // an init value instead of null. Handle them here.
2803       init_val = ptnode_adr(alloc->as_Allocate()->in(AllocateNode::InitValue)->_idx);
2804       assert(init_val != nullptr, "init value should be registered");
2805     } else {
2806       return 0;
2807     }
2808   }
  // A non-escaped allocation returned from a Java or runtime call has unknown values in its fields.
2810   assert(pta->arraycopy_dst() || alloc->is_CallStaticJava() || init_val != phantom_obj, "sanity");
2811 #ifdef ASSERT
2812   if (alloc->is_CallStaticJava() && alloc->as_CallStaticJava()->method() == nullptr) {
2813     const char* name = alloc->as_CallStaticJava()->_name;
2814     assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0 ||
2815            strncmp(name, "C2 Runtime load_unknown_inline", 30) == 0, "sanity");
2816   }
2817 #endif
2819   int new_edges = 0;
2820   for (EdgeIterator i(pta); i.has_next(); i.next()) {
2821     PointsToNode* field = i.get();
2822     if (field->is_Field() && field->as_Field()->is_oop()) {
2823       if (add_edge(field, init_val)) {
2824         // New edge was added
2825         new_edges++;
2826         add_field_uses_to_worklist(field->as_Field());
2827       }
2828     }
2829   }
2830   return new_edges;
2831 }
2832 
// Find fields' initializing values for allocations.
2834 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
2835   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2836   Node* alloc = pta->ideal_node();
  // Do nothing for Call nodes since their field values are unknown.
2838   if (!alloc->is_Allocate() || alloc->as_Allocate()->in(AllocateNode::InitValue) != nullptr) {
2839     return 0;
2840   }
2841   InitializeNode* ini = alloc->as_Allocate()->initialization();
2842   bool visited_bottom_offset = false;
2843   GrowableArray<int> offsets_worklist;
2844   int new_edges = 0;
2845 
  // Check if an oop field's initializing value is recorded and add
  // an edge to the null object if the field's value is not recorded.
  // The Connection Graph does not record a default initialization by null
  // captured by an Initialize node.
2850   //
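  // For example (a sketch): 'new Foo()' with no captured store to some oop
  // field leaves that field null, so an edge to null_obj is added for it here.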
2851   for (EdgeIterator i(pta); i.has_next(); i.next()) {
2852     PointsToNode* field = i.get(); // Field (AddP)
2853     if (!field->is_Field() || !field->as_Field()->is_oop()) {
2854       continue; // Not oop field
2855     }
2856     int offset = field->as_Field()->offset();
2857     if (offset == Type::OffsetBot) {
2858       if (!visited_bottom_offset) {
        // OffsetBot is used to reference an array's element:
        // always add a reference to null to all Field nodes since we don't
        // know which element is referenced.
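        // (For example, a store 'a[i] = x' with a non-constant index 'i'
        // is represented by a Field node with OffsetBot.)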
2862         if (add_edge(field, null_obj)) {
2863           // New edge was added
2864           new_edges++;
2865           add_field_uses_to_worklist(field->as_Field());
2866           visited_bottom_offset = true;
2867         }
2868       }
2869     } else {
2870       // Check only oop fields.
2871       const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
2872       if (adr_type->isa_rawptr()) {
2873 #ifdef ASSERT
        // Raw pointers are used for initializing stores so skip this field
        // since its value should be recorded already.
2876         Node* base = get_addp_base(field->ideal_node());
2877         assert(adr_type->isa_rawptr() && is_captured_store_address(field->ideal_node()), "unexpected pointer type");
2878 #endif
2879         continue;
2880       }
2881       if (!offsets_worklist.contains(offset)) {
2882         offsets_worklist.append(offset);
2883         Node* value = nullptr;
2884         if (ini != nullptr) {
2885           // StoreP::memory_type() == T_ADDRESS
2886           BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
2887           Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
          // Make sure the initializing store has the same type as this AddP.
          // This AddP may reference a non-existing field because it is on a
          // dead branch of a bimorphic call which is not eliminated yet.
2891           if (store != nullptr && store->is_Store() &&
2892               store->as_Store()->memory_type() == ft) {
2893             value = store->in(MemNode::ValueIn);
2894 #ifdef ASSERT
2895             if (VerifyConnectionGraph) {
2896               // Verify that AddP already points to all objects the value points to.
2897               PointsToNode* val = ptnode_adr(value->_idx);
2898               assert((val != nullptr), "should be processed already");
2899               PointsToNode* missed_obj = nullptr;
2900               if (val->is_JavaObject()) {
2901                 if (!field->points_to(val->as_JavaObject())) {
2902                   missed_obj = val;
2903                 }
2904               } else {
2905                 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
2906                   tty->print_cr("----------init store has invalid value -----");
2907                   store->dump();
2908                   val->dump();
2909                   assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
2910                 }
2911                 for (EdgeIterator j(val); j.has_next(); j.next()) {
2912                   PointsToNode* obj = j.get();
2913                   if (obj->is_JavaObject()) {
2914                     if (!field->points_to(obj->as_JavaObject())) {
2915                       missed_obj = obj;
2916                       break;
2917                     }
2918                   }
2919                 }
2920               }
2921               if (missed_obj != nullptr) {
2922                 tty->print_cr("----------field---------------------------------");
2923                 field->dump();
2924                 tty->print_cr("----------missed reference to object------------");
2925                 missed_obj->dump();
2926                 tty->print_cr("----------object referenced by init store-------");
2927                 store->dump();
2928                 val->dump();
2929                 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
2930               }
2931             }
2932 #endif
2933           } else {
            // There could be initializing stores which follow the allocation.
            // For example, a volatile field store is not collected
            // by the Initialize node.
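            //
            // A sketch ('vol' is a hypothetical volatile field):
            //
            //    Foo foo = new Foo();
            //    foo.vol = x; // volatile store, not captured by Initialize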
2937             //
            // Need to check for dependent loads to separate such stores from
            // stores which follow loads. For now, add the initial value null so
            // that the pointer compare optimization works correctly.
2941           }
2942         }
2943         if (value == nullptr) {
2944           // A field's initializing value was not recorded. Add null.
2945           if (add_edge(field, null_obj)) {
2946             // New edge was added
2947             new_edges++;
2948             add_field_uses_to_worklist(field->as_Field());
2949           }
2950         }
2951       }
2952     }
2953   }
2954   return new_edges;
2955 }
2956 
2957 // Adjust scalar_replaceable state after Connection Graph is built.
2958 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj, Unique_Node_List &reducible_merges) {
2959   // A Phi 'x' is a _candidate_ to be reducible if 'can_reduce_phi(x)'
2960   // returns true. If one of the constraints in this method set 'jobj' to NSR
2961   // then the candidate Phi is discarded. If the Phi has another SR 'jobj' as
2962   // input, 'adjust_scalar_replaceable_state' will eventually be called with
2963   // that other object and the Phi will become a reducible Phi.
2964   // There could be multiple merges involving the same jobj.
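  //
  // A typical candidate merge (a sketch):
  //
  //    Point p = cond ? new Point() : new Point(); // Phi of two allocations
  //
  // If the Phi can be reduced, each input allocation may still be scalar
  // replaced.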
2965   Unique_Node_List candidates;
2966 
2967   // Search for non-escaping objects which are not scalar replaceable
2968   // and mark them to propagate the state to referenced objects.
2969 
2970   for (UseIterator i(jobj); i.has_next(); i.next()) {
2971     PointsToNode* use = i.get();
2972     if (use->is_Arraycopy()) {
2973       continue;
2974     }
2975     if (use->is_Field()) {
2976       FieldNode* field = use->as_Field();
2977       assert(field->is_oop() && field->scalar_replaceable(), "sanity");
      // 1. An object is not scalar replaceable if the field into which it is
      // stored has an unknown offset (stored into an unknown element of an array).
2980       if (field->offset() == Type::OffsetBot) {
2981         set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored at unknown offset"));
2982         return;
2983       }
2984       for (BaseIterator i(field); i.has_next(); i.next()) {
2985         PointsToNode* base = i.get();
2986         // 2. An object is not scalar replaceable if the field into which it is
        // stored has multiple bases, one of which is null.
2988         if ((base == null_obj) && (field->base_count() > 1)) {
2989           set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with potentially null base"));
2990           return;
2991         }
2992         // 2.5. An object is not scalar replaceable if the field into which it is
        // stored has an NSR base.
2994         if (!base->scalar_replaceable()) {
2995           set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base"));
2996           return;
2997         }
2998       }
2999     }
3000     assert(use->is_Field() || use->is_LocalVar(), "sanity");
3001     // 3. An object is not scalar replaceable if it is merged with other objects
3002     // and we can't remove the merge
3003     for (EdgeIterator j(use); j.has_next(); j.next()) {
3004       PointsToNode* ptn = j.get();
3005       if (ptn->is_JavaObject() && ptn != jobj) {
3006         Node* use_n = use->ideal_node();
3007 
        // These other local vars may point to multiple objects through a Phi.
        // In this case we skip them and see if we can reduce the Phi.
3010         if (use_n->is_CastPP() || use_n->is_CheckCastPP()) {
3011           use_n = use_n->in(1);
3012         }
3013 
3014         // If it's already a candidate or confirmed reducible merge we can skip verification
3015         if (candidates.member(use_n) || reducible_merges.member(use_n)) {
3016           continue;
3017         }
3018 
3019         if (use_n->is_Phi() && can_reduce_phi(use_n->as_Phi())) {
3020           candidates.push(use_n);
3021         } else {
3022           // Mark all objects as NSR if we can't remove the merge
3023           set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA trace_merged_message(ptn)));
3024           set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA trace_merged_message(jobj)));
3025         }
3026       }
3027     }
3028     if (!jobj->scalar_replaceable()) {
3029       return;
3030     }
3031   }
3032 
3033   for (EdgeIterator j(jobj); j.has_next(); j.next()) {
3034     if (j.get()->is_Arraycopy()) {
3035       continue;
3036     }
3037 
3038     // Non-escaping object node should point only to field nodes.
3039     FieldNode* field = j.get()->as_Field();
3040     int offset = field->as_Field()->offset();
3041 
    // 4. An object is not scalar replaceable if it has a field with an unknown
    // offset (an array element is accessed in a loop).
3044     if (offset == Type::OffsetBot) {
3045       set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "has field with unknown offset"));
3046       return;
3047     }
    // 5. Currently an object is not scalar replaceable if a LoadStore node
    // accesses its field since the field value is unknown after it.
3050     //
3051     Node* n = field->ideal_node();
3052 
3053     // Test for an unsafe access that was parsed as maybe off heap
3054     // (with a CheckCastPP to raw memory).
3055     assert(n->is_AddP(), "expect an address computation");
3056     if (n->in(AddPNode::Base)->is_top() &&
3057         n->in(AddPNode::Address)->Opcode() == Op_CheckCastPP) {
3058       assert(n->in(AddPNode::Address)->bottom_type()->isa_rawptr(), "raw address so raw cast expected");
3059       assert(_igvn->type(n->in(AddPNode::Address)->in(1))->isa_oopptr(), "cast pattern at unsafe access expected");
3060       set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used as base of mixed unsafe access"));
3061       return;
3062     }
3063 
3064     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3065       Node* u = n->fast_out(i);
3066       if (u->is_LoadStore() || (u->is_Mem() && u->as_Mem()->is_mismatched_access())) {
3067         set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used in LoadStore or mismatched access"));
3068         return;
3069       }
3070     }
3071 
    // 6. Or the address may point to more than one object. This may produce
    // a false positive result (set not scalar replaceable)
3074     // since the flow-insensitive escape analysis can't separate
3075     // the case when stores overwrite the field's value from the case
3076     // when stores happened on different control branches.
3077     //
    // Note: this will disable scalar replacement in some cases:
    //
    //    Point p[] = new Point[1];
    //    p[0] = new Point(); // Will not be scalar replaced
    //
    // but it will save us from incorrect optimizations in cases like:
    //
    //    Point p[] = new Point[1];
    //    if ( x ) p[0] = new Point(); // Will not be scalar replaced
3087     //
3088     if (field->base_count() > 1 && candidates.size() == 0) {
3089       if (has_non_reducible_merge(field, reducible_merges)) {
3090         for (BaseIterator i(field); i.has_next(); i.next()) {
3091           PointsToNode* base = i.get();
          // Don't take into account LocalVar nodes which
          // may point to only one object, which should also be
          // this field's base by now.
3095           if (base->is_JavaObject() && base != jobj) {
3096             // Mark all bases.
3097             set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "may point to more than one object"));
3098             set_not_scalar_replaceable(base NOT_PRODUCT(COMMA "may point to more than one object"));
3099           }
3100         }
3101 
3102         if (!jobj->scalar_replaceable()) {
3103           return;
3104         }
3105       }
3106     }
3107   }
3108 
3109   // The candidate is truly a reducible merge only if none of the other
3110   // constraints ruled it as NSR. There could be multiple merges involving the
3111   // same jobj.
3112   assert(jobj->scalar_replaceable(), "sanity");
3113   for (uint i = 0; i < candidates.size(); i++ ) {
3114     Node* candidate = candidates.at(i);
3115     reducible_merges.push(candidate);
3116   }
3117 }
3118 
3119 bool ConnectionGraph::has_non_reducible_merge(FieldNode* field, Unique_Node_List& reducible_merges) {
3120   for (BaseIterator i(field); i.has_next(); i.next()) {
3121     Node* base = i.get()->ideal_node();
3122     if (base->is_Phi() && !reducible_merges.member(base)) {
3123       return true;
3124     }
3125   }
3126   return false;
3127 }
3128 
3129 void ConnectionGraph::revisit_reducible_phi_status(JavaObjectNode* jobj, Unique_Node_List& reducible_merges) {
3130   assert(jobj != nullptr && !jobj->scalar_replaceable(), "jobj should be set as NSR before calling this function.");
3131 
3132   // Look for 'phis' that refer to 'jobj' as the last
3133   // remaining scalar replaceable input.
3134   uint reducible_merges_cnt = reducible_merges.size();
3135   for (uint i = 0; i < reducible_merges_cnt; i++) {
3136     Node* phi = reducible_merges.at(i);
3137 
    // This 'Phi' is still 'good' if it points to
    // at least one scalar replaceable object. Note that 'jobj'
    // was/should be marked as NSR before calling this function.
3141     bool good_phi = false;
3142 
3143     for (uint j = 1; j < phi->req(); j++) {
3144       JavaObjectNode* phi_in_obj = unique_java_object(phi->in(j));
3145       if (phi_in_obj != nullptr && phi_in_obj->scalar_replaceable()) {
3146         good_phi = true;
3147         break;
3148       }
3149     }
3150 
3151     if (!good_phi) {
3152       NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Phi %d became non-reducible after node %d became NSR.", phi->_idx, jobj->ideal_node()->_idx);)
3153       reducible_merges.remove(i);
3154 
3155       // Decrement the index because the 'remove' call above actually
3156       // moves the last entry of the list to position 'i'.
3157       i--;
3158 
3159       reducible_merges_cnt--;
3160     }
3161   }
3162 }
3163 
3164 // Propagate NSR (Not scalar replaceable) state.
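// Marking one allocation NSR may in turn make allocations stored into its
// fields NSR, so the loop below iterates until a fixed point is reached.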
3165 void ConnectionGraph::find_scalar_replaceable_allocs(GrowableArray<JavaObjectNode*>& jobj_worklist, Unique_Node_List &reducible_merges) {
3166   int jobj_length = jobj_worklist.length();
3167   bool found_nsr_alloc = true;
3168   while (found_nsr_alloc) {
3169     found_nsr_alloc = false;
3170     for (int next = 0; next < jobj_length; ++next) {
3171       JavaObjectNode* jobj = jobj_worklist.at(next);
3172       for (UseIterator i(jobj); (jobj->scalar_replaceable() && i.has_next()); i.next()) {
3173         PointsToNode* use = i.get();
3174         if (use->is_Field()) {
3175           FieldNode* field = use->as_Field();
3176           assert(field->is_oop() && field->scalar_replaceable(), "sanity");
3177           assert(field->offset() != Type::OffsetBot, "sanity");
3178           for (BaseIterator i(field); i.has_next(); i.next()) {
3179             PointsToNode* base = i.get();
3180             // An object is not scalar replaceable if the field into which
            // it is stored has an NSR base.
3182             if ((base != null_obj) && !base->scalar_replaceable()) {
3183               set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base"));
3184               // Any merge that had only 'jobj' as scalar-replaceable will now be non-reducible,
3185               // because there is no point in reducing a Phi that won't improve the number of SR
3186               // objects.
3187               revisit_reducible_phi_status(jobj, reducible_merges);
3188               found_nsr_alloc = true;
3189               break;
3190             }
3191           }
3192         }
3193       }
3194     }
3195   }
3196 }
3197 
3198 #ifdef ASSERT
3199 void ConnectionGraph::verify_connection_graph(
3200                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
3201                          GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
3202                          GrowableArray<JavaObjectNode*>& java_objects_worklist,
3203                          GrowableArray<Node*>& addp_worklist) {
3204   // Verify that graph is complete - no new edges could be added.
3205   int java_objects_length = java_objects_worklist.length();
3206   int non_escaped_length  = non_escaped_allocs_worklist.length();
3207   int new_edges = 0;
3208   for (int next = 0; next < java_objects_length; ++next) {
3209     JavaObjectNode* ptn = java_objects_worklist.at(next);
3210     new_edges += add_java_object_edges(ptn, true);
3211   }
3212   assert(new_edges == 0, "graph was not complete");
3213   // Verify that escape state is final.
3214   int length = non_escaped_allocs_worklist.length();
3215   find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist);
3216   assert((non_escaped_length == non_escaped_allocs_worklist.length()) &&
3217          (non_escaped_length == length) &&
3218          (_worklist.length() == 0), "escape state was not final");
3219 
3220   // Verify fields information.
3221   int addp_length = addp_worklist.length();
3222   for (int next = 0; next < addp_length; ++next ) {
3223     Node* n = addp_worklist.at(next);
3224     FieldNode* field = ptnode_adr(n->_idx)->as_Field();
3225     if (field->is_oop()) {
3226       // Verify that field has all bases
3227       Node* base = get_addp_base(n);
3228       PointsToNode* ptn = ptnode_adr(base->_idx);
3229       if (ptn->is_JavaObject()) {
3230         assert(field->has_base(ptn->as_JavaObject()), "sanity");
3231       } else {
3232         assert(ptn->is_LocalVar(), "sanity");
3233         for (EdgeIterator i(ptn); i.has_next(); i.next()) {
3234           PointsToNode* e = i.get();
3235           if (e->is_JavaObject()) {
3236             assert(field->has_base(e->as_JavaObject()), "sanity");
3237           }
3238         }
3239       }
3240       // Verify that all fields have initializing values.
3241       if (field->edge_count() == 0) {
3242         tty->print_cr("----------field does not have references----------");
3243         field->dump();
3244         for (BaseIterator i(field); i.has_next(); i.next()) {
3245           PointsToNode* base = i.get();
3246           tty->print_cr("----------field has next base---------------------");
3247           base->dump();
3248           if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) {
3249             tty->print_cr("----------base has fields-------------------------");
3250             for (EdgeIterator j(base); j.has_next(); j.next()) {
3251               j.get()->dump();
3252             }
3253             tty->print_cr("----------base has references---------------------");
3254             for (UseIterator j(base); j.has_next(); j.next()) {
3255               j.get()->dump();
3256             }
3257           }
3258         }
3259         for (UseIterator i(field); i.has_next(); i.next()) {
3260           i.get()->dump();
3261         }
3262         assert(field->edge_count() > 0, "sanity");
3263       }
3264     }
3265   }
3266 }
3267 #endif
3268 
3269 // Optimize ideal graph.
3270 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
3271                                            GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
3272   Compile* C = _compile;
3273   PhaseIterGVN* igvn = _igvn;
3274   if (EliminateLocks) {
3275     // Mark locks before changing ideal graph.
3276     int cnt = C->macro_count();
3277     for (int i = 0; i < cnt; i++) {
3278       Node *n = C->macro_node(i);
3279       if (n->is_AbstractLock()) { // Lock and Unlock nodes
3280         AbstractLockNode* alock = n->as_AbstractLock();
3281         if (!alock->is_non_esc_obj()) {
3282           const Type* obj_type = igvn->type(alock->obj_node());
3283           if (can_eliminate_lock(alock) && !obj_type->is_inlinetypeptr()) {
3284             assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
            // The lock could be marked eliminated by the lock coarsening
            // code during the first IGVN pass before EA. Replace the coarsened
            // flag to eliminate all associated locks/unlocks.
3288 #ifdef ASSERT
3289             alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
3290 #endif
3291             alock->set_non_esc_obj();
3292           }
3293         }
3294       }
3295     }
3296   }
3297 
3298   if (OptimizePtrCompare) {
3299     for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
3300       Node *n = ptr_cmp_worklist.at(i);
3301       assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
3302       const TypeInt* tcmp = optimize_ptr_compare(n->in(1), n->in(2));
3303       if (tcmp->singleton()) {
3304         Node* cmp = igvn->makecon(tcmp);
3305 #ifndef PRODUCT
3306         if (PrintOptimizePtrCompare) {
3307           tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
3308           if (Verbose) {
3309             n->dump(1);
3310           }
3311         }
3312 #endif
3313         igvn->replace_node(n, cmp);
3314       }
3315     }
3316   }
3317 
  // For MemBarStoreStore nodes added in library_call.cpp, check the
  // escape status of the associated AllocateNode and optimize out the
  // MemBarStoreStore node if the allocated object never escapes.
3321   for (int i = 0; i < storestore_worklist.length(); i++) {
3322     Node* storestore = storestore_worklist.at(i);
3323     Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
3324     if (alloc->is_Allocate() && not_global_escape(alloc)) {
3325       if (alloc->in(AllocateNode::InlineType) != nullptr) {
3326         // Non-escaping inline type buffer allocations don't require a membar
3327         storestore->as_MemBar()->remove(_igvn);
3328       } else {
3329         MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
3330         mb->init_req(TypeFunc::Memory,  storestore->in(TypeFunc::Memory));
3331         mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
3332         igvn->register_new_node_with_optimizer(mb);
3333         igvn->replace_node(storestore, mb);
3334       }
3335     }
3336   }
3337 }
3338 
// Optimize object pointer compares.
3340 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* left, Node* right) {
3341   assert(OptimizePtrCompare, "sanity");
3342   const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
3343   const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
3344   const TypeInt* UNKNOWN = TypeInt::CC;    // [-1, 0,1]
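  // For example (a sketch): a reference compare such as
  //
  //    if (a == b) { ... } // 'a' is a non-escaping allocation
  //
  // folds to NE when 'b' provably does not point to the allocation
  // referenced by 'a' (this also covers comparisons against null).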
3345 
3346   PointsToNode* ptn1 = ptnode_adr(left->_idx);
3347   PointsToNode* ptn2 = ptnode_adr(right->_idx);
3348   JavaObjectNode* jobj1 = unique_java_object(left);
3349   JavaObjectNode* jobj2 = unique_java_object(right);
3350 
  // The use of this method during allocation merge reduction may cause 'left'
  // or 'right' to be something (e.g., a Phi) that isn't in the connection graph
  // or that doesn't reference a unique java object.
3354   if (ptn1 == nullptr || ptn2 == nullptr ||
3355       jobj1 == nullptr || jobj2 == nullptr) {
3356     return UNKNOWN;
3357   }
3358 
3359   assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
3360   assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
3361 
3362   // Check simple cases first.
3363   if (jobj1 != nullptr) {
3364     if (jobj1->escape_state() == PointsToNode::NoEscape) {
3365       if (jobj1 == jobj2) {
        // Comparing the same non-escaping object.
3367         return EQ;
3368       }
3369       Node* obj = jobj1->ideal_node();
      // Comparing a non-escaping allocation.
3371       if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
3372           !ptn2->points_to(jobj1)) {
3373         return NE; // This includes nullness check.
3374       }
3375     }
3376   }
3377   if (jobj2 != nullptr) {
3378     if (jobj2->escape_state() == PointsToNode::NoEscape) {
3379       Node* obj = jobj2->ideal_node();
      // Comparing a non-escaping allocation.
3381       if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
3382           !ptn1->points_to(jobj2)) {
3383         return NE; // This includes nullness check.
3384       }
3385     }
3386   }
3387   if (jobj1 != nullptr && jobj1 != phantom_obj &&
3388       jobj2 != nullptr && jobj2 != phantom_obj &&
3389       jobj1->ideal_node()->is_Con() &&
3390       jobj2->ideal_node()->is_Con()) {
    // Klass or String constant comparison. Need to be careful with
    // compressed pointers - compare types of ConN and ConP instead of nodes.
3393     const Type* t1 = jobj1->ideal_node()->get_ptr_type();
3394     const Type* t2 = jobj2->ideal_node()->get_ptr_type();
3395     if (t1->make_ptr() == t2->make_ptr()) {
3396       return EQ;
3397     } else {
3398       return NE;
3399     }
3400   }
3401   if (ptn1->meet(ptn2)) {
3402     return UNKNOWN; // Sets are not disjoint
3403   }
3404 
3405   // Sets are disjoint.
3406   bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj);
3407   bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj);
3408   bool set1_has_null_ptr    = ptn1->points_to(null_obj);
3409   bool set2_has_null_ptr    = ptn2->points_to(null_obj);
3410   if ((set1_has_unknown_ptr && set2_has_null_ptr) ||
3411       (set2_has_unknown_ptr && set1_has_null_ptr)) {
3412     // Check nullness of unknown object.
3413     return UNKNOWN;
3414   }
3415 
3416   // Disjointness by itself is not sufficient since
3417   // alias analysis is not complete for escaped objects.
3418   // Disjoint sets are definitely unrelated only when
  // at least one set contains only non-escaping allocations.
3420   if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
3421     if (ptn1->non_escaping_allocation()) {
3422       return NE;
3423     }
3424   }
3425   if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
3426     if (ptn2->non_escaping_allocation()) {
3427       return NE;
3428     }
3429   }
3430   return UNKNOWN;
3431 }
3432 
3433 // Connection Graph construction functions.
3434 
3435 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) {
3436   PointsToNode* ptadr = _nodes.at(n->_idx);
3437   if (ptadr != nullptr) {
3438     assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity");
3439     return;
3440   }
3441   Compile* C = _compile;
3442   ptadr = new (C->comp_arena()) LocalVarNode(this, n, es);
3443   map_ideal_node(n, ptadr);
3444 }
3445 
3446 PointsToNode* ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {
3447   PointsToNode* ptadr = _nodes.at(n->_idx);
3448   if (ptadr != nullptr) {
3449     assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity");
3450     return ptadr;
3451   }
3452   Compile* C = _compile;
3453   ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es);
3454   map_ideal_node(n, ptadr);
3455   return ptadr;
3456 }
3457 
3458 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) {
3459   PointsToNode* ptadr = _nodes.at(n->_idx);
3460   if (ptadr != nullptr) {
3461     assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity");
3462     return;
3463   }
3464   bool unsafe = false;
3465   bool is_oop = is_oop_field(n, offset, &unsafe);
3466   if (unsafe) {
3467     es = PointsToNode::GlobalEscape;
3468   }
3469   Compile* C = _compile;
3470   FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop);
3471   map_ideal_node(n, field);
3472 }
3473 
3474 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,
3475                                     PointsToNode* src, PointsToNode* dst) {
3476   assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
3477   assert((src != null_obj) && (dst != null_obj), "not for ConP null");
3478   PointsToNode* ptadr = _nodes.at(n->_idx);
3479   if (ptadr != nullptr) {
3480     assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
3481     return;
3482   }
3483   Compile* C = _compile;
3484   ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
3485   map_ideal_node(n, ptadr);
3486   // Add edge from arraycopy node to source object.
3487   (void)add_edge(ptadr, src);
3488   src->set_arraycopy_src();
3489   // Add edge from destination object to arraycopy node.
3490   (void)add_edge(dst, ptadr);
3491   dst->set_arraycopy_dst();
3492 }
3493 
3494 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
3495   const Type* adr_type = n->as_AddP()->bottom_type();
3496   int field_offset = adr_type->isa_aryptr() ? adr_type->isa_aryptr()->field_offset().get() : Type::OffsetBot;
3497   BasicType bt = T_INT;
3498   if (offset == Type::OffsetBot && field_offset == Type::OffsetBot) {
3499     // Check only oop fields.
3500     if (!adr_type->isa_aryptr() ||
3501         adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
3502         adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) {
3503       // OffsetBot is used to reference array's element. Ignore first AddP.
3504       if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) {
3505         bt = T_OBJECT;
3506       }
3507     }
3508   } else if (offset != oopDesc::klass_offset_in_bytes()) {
3509     if (adr_type->isa_instptr()) {
3510       ciField* field = _compile->alias_type(adr_type->is_ptr())->field();
3511       if (field != nullptr) {
3512         bt = field->layout_type();
3513       } else {
3514         // Check for unsafe oop field access
3515         if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3516             n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3517             n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3518             BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
3519           bt = T_OBJECT;
3520           (*unsafe) = true;
3521         }
3522       }
3523     } else if (adr_type->isa_aryptr()) {
3524       if (offset == arrayOopDesc::length_offset_in_bytes()) {
3525         // Ignore array length load.
3526       } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) {
3527         // Ignore first AddP.
3528       } else {
3529         const Type* elemtype = adr_type->is_aryptr()->elem();
3530         if (adr_type->is_aryptr()->is_flat() && field_offset != Type::OffsetBot) {
3531           ciInlineKlass* vk = elemtype->inline_klass();
3532           field_offset += vk->payload_offset();
3533           bt = vk->get_field_by_offset(field_offset, false)->layout_type();
3534         } else {
3535           bt = elemtype->array_element_basic_type();
3536         }
3537       }
3538     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
3539       // Allocation initialization, ThreadLocal field access, unsafe access
3540       if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3541           n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3542           n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3543           BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
3544         bt = T_OBJECT;
3545       }
3546     }
3547   }
3548   // Note: T_NARROWOOP is not classed as a real reference type
3549   return (is_reference_type(bt) || bt == T_NARROWOOP);
3550 }
3551 
// Returns the unique java object pointed to, or null.
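// (A LocalVar may point to several java objects; callers such as
// optimize_ptr_compare() need a single unambiguous target, so null is
// returned when the pointed-to object is not unique.)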
3553 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const {
3554   // If the node was created after the escape computation we can't answer.
3555   uint idx = n->_idx;
3556   if (idx >= nodes_size()) {
3557     return nullptr;
3558   }
3559   PointsToNode* ptn = ptnode_adr(idx);
3560   if (ptn == nullptr) {
3561     return nullptr;
3562   }
3563   if (ptn->is_JavaObject()) {
3564     return ptn->as_JavaObject();
3565   }
3566   assert(ptn->is_LocalVar(), "sanity");
3567   // Check all java objects it points to.
3568   JavaObjectNode* jobj = nullptr;
3569   for (EdgeIterator i(ptn); i.has_next(); i.next()) {
3570     PointsToNode* e = i.get();
3571     if (e->is_JavaObject()) {
3572       if (jobj == nullptr) {
3573         jobj = e->as_JavaObject();
3574       } else if (jobj != e) {
3575         return nullptr;
3576       }
3577     }
3578   }
3579   return jobj;
3580 }
3581 
3582 // Return true if this node points only to non-escaping allocations.
3583 bool PointsToNode::non_escaping_allocation() {
3584   if (is_JavaObject()) {
3585     Node* n = ideal_node();
3586     if (n->is_Allocate() || n->is_CallStaticJava()) {
3587       return (escape_state() == PointsToNode::NoEscape);
3588     } else {
3589       return false;
3590     }
3591   }
3592   assert(is_LocalVar(), "sanity");
3593   // Check all java objects it points to.
3594   for (EdgeIterator i(this); i.has_next(); i.next()) {
3595     PointsToNode* e = i.get();
3596     if (e->is_JavaObject()) {
3597       Node* n = e->ideal_node();
3598       if ((e->escape_state() != PointsToNode::NoEscape) ||
3599           !(n->is_Allocate() || n->is_CallStaticJava())) {
3600         return false;
3601       }
3602     }
3603   }
3604   return true;
3605 }
3606 
3607 // Return true if we know the node does not escape globally.
3608 bool ConnectionGraph::not_global_escape(Node *n) {
3609   assert(!_collecting, "should not call during graph construction");
3610   // If the node was created after the escape computation we can't answer.
3611   uint idx = n->_idx;
3612   if (idx >= nodes_size()) {
3613     return false;
3614   }
3615   PointsToNode* ptn = ptnode_adr(idx);
3616   if (ptn == nullptr) {
3617     return false; // not in congraph (e.g. ConI)
3618   }
3619   PointsToNode::EscapeState es = ptn->escape_state();
3620   // If we have already computed a value, return it.
3621   if (es >= PointsToNode::GlobalEscape) {
3622     return false;
3623   }
3624   if (ptn->is_JavaObject()) {
3625     return true; // (es < PointsToNode::GlobalEscape);
3626   }
3627   assert(ptn->is_LocalVar(), "sanity");
3628   // Check all java objects it points to.
3629   for (EdgeIterator i(ptn); i.has_next(); i.next()) {
3630     if (i.get()->escape_state() >= PointsToNode::GlobalEscape) {
3631       return false;
3632     }
3633   }
3634   return true;
3635 }
3636 
// Return true if the locked object does not escape globally
// and the locked code region (identified by a BoxLockNode) is balanced:
3639 // all compiled code paths have corresponding Lock/Unlock pairs.
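// For example (a sketch): a 'synchronized (obj) { ... }' block on a
// non-escaping 'obj' compiles to a matching Lock/Unlock pair on every
// path, so the region is balanced and both nodes can be eliminated.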
3640 bool ConnectionGraph::can_eliminate_lock(AbstractLockNode* alock) {
3641   if (alock->is_balanced() && not_global_escape(alock->obj_node())) {
3642     if (EliminateNestedLocks) {
      // We can mark the whole locking region as Local only when
      // a single object is used for locking.
3645       alock->box_node()->as_BoxLock()->set_local();
3646     }
3647     return true;
3648   }
3649   return false;
3650 }
3651 
3652 // Helper functions
3653 
// Return true if this node is the specified node or points to it.
3655 bool PointsToNode::points_to(JavaObjectNode* ptn) const {
3656   if (is_JavaObject()) {
3657     return (this == ptn);
3658   }
3659   assert(is_LocalVar() || is_Field(), "sanity");
3660   for (EdgeIterator i(this); i.has_next(); i.next()) {
3661     if (i.get() == ptn) {
3662       return true;
3663     }
3664   }
3665   return false;
3666 }
3667 
// Return true if one node points to the other or their points-to sets intersect.
3669 bool PointsToNode::meet(PointsToNode* ptn) {
3670   if (this == ptn) {
3671     return true;
3672   } else if (ptn->is_JavaObject()) {
3673     return this->points_to(ptn->as_JavaObject());
3674   } else if (this->is_JavaObject()) {
3675     return ptn->points_to(this->as_JavaObject());
3676   }
3677   assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
3678   int ptn_count =  ptn->edge_count();
3679   for (EdgeIterator i(this); i.has_next(); i.next()) {
3680     PointsToNode* this_e = i.get();
3681     for (int j = 0; j < ptn_count; j++) {
3682       if (this_e == ptn->edge(j)) {
3683         return true;
3684       }
3685     }
3686   }
3687   return false;
3688 }
3689 
3690 #ifdef ASSERT
// Return true if this field's bases include the given java object.
3692 bool FieldNode::has_base(JavaObjectNode* jobj) const {
3693   for (BaseIterator i(this); i.has_next(); i.next()) {
3694     if (i.get() == jobj) {
3695       return true;
3696     }
3697   }
3698   return false;
3699 }
3700 #endif
3701 
3702 bool ConnectionGraph::is_captured_store_address(Node* addp) {
3703   // Handle simple case first.
3704   assert(_igvn->type(addp)->isa_oopptr() == nullptr, "should be raw access");
3705   if (addp->in(AddPNode::Address)->is_Proj() && addp->in(AddPNode::Address)->in(0)->is_Allocate()) {
3706     return true;
3707   } else if (addp->in(AddPNode::Address)->is_Phi()) {
3708     for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
3709       Node* addp_use = addp->fast_out(i);
3710       if (addp_use->is_Store()) {
3711         for (DUIterator_Fast jmax, j = addp_use->fast_outs(jmax); j < jmax; j++) {
3712           if (addp_use->fast_out(j)->is_Initialize()) {
3713             return true;
3714           }
3715         }
3716       }
3717     }
3718   }
3719   return false;
3720 }
3721 
3722 int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) {
3723   const Type *adr_type = phase->type(adr);
3724   if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) {
    // We are computing a raw address for a store captured by an Initialize
    // node; compute an appropriate address type. AddP cases #3 and #5 (see below).
3727     int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
3728     assert(offs != Type::OffsetBot ||
3729            adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
3730            "offset must be a constant or it is initialization of array");
3731     return offs;
3732   }
3733   return adr_type->is_ptr()->flat_offset();
3734 }
3735 
3736 Node* ConnectionGraph::get_addp_base(Node *addp) {
3737   assert(addp->is_AddP(), "must be AddP");
3738   //
3739   // AddP cases for Base and Address inputs:
3740   // case #1. Direct object's field reference:
3741   //     Allocate
3742   //       |
3743   //     Proj #5 ( oop result )
3744   //       |
3745   //     CheckCastPP (cast to instance type)
3746   //      | |
3747   //     AddP  ( base == address )
3748   //
3749   // case #2. Indirect object's field reference:
3750   //      Phi
3751   //       |
3752   //     CastPP (cast to instance type)
3753   //      | |
3754   //     AddP  ( base == address )
3755   //
3756   // case #3. Raw object's field reference for Initialize node:
3757   //      Allocate
3758   //        |
3759   //      Proj #5 ( oop result )
3760   //  top   |
3761   //     \  |
3762   //     AddP  ( base == top )
3763   //
3764   // case #4. Array's element reference:
3765   //   {CheckCastPP | CastPP}
3766   //     |  | |
3767   //     |  AddP ( array's element offset )
3768   //     |  |
3769   //     AddP ( array's offset )
3770   //
3771   // case #5. Raw object's field reference for arraycopy stub call:
3772   //          The inline_native_clone() case when the arraycopy stub is called
3773   //          after the allocation before Initialize and CheckCastPP nodes.
3774   //      Allocate
3775   //        |
3776   //      Proj #5 ( oop result )
3777   //       | |
3778   //       AddP  ( base == address )
3779   //
3780   // case #6. Constant Pool, ThreadLocal, CastX2P or
3781   //          Raw object's field reference:
3782   //      {ConP, ThreadLocal, CastX2P, raw Load}
3783   //  top   |
3784   //     \  |
3785   //     AddP  ( base == top )
3786   //
3787   // case #7. Klass's field reference.
3788   //      LoadKlass
3789   //       | |
3790   //       AddP  ( base == address )
3791   //
3792   // case #8. narrow Klass's field reference.
3793   //      LoadNKlass
3794   //       |
3795   //      DecodeN
3796   //       | |
3797   //       AddP  ( base == address )
3798   //
3799   // case #9. Mixed unsafe access
3800   //    {instance}
3801   //        |
3802   //      CheckCastPP (raw)
3803   //  top   |
3804   //     \  |
3805   //     AddP  ( base == top )
3806   //
3807   Node *base = addp->in(AddPNode::Base);
  if (base->uncast()->is_top()) { // AddP cases #3, #6 and #9.
3809     base = addp->in(AddPNode::Address);
3810     while (base->is_AddP()) {
3811       // Case #6 (unsafe access) may have several chained AddP nodes.
3812       assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
3813       base = base->in(AddPNode::Address);
3814     }
3815     if (base->Opcode() == Op_CheckCastPP &&
3816         base->bottom_type()->isa_rawptr() &&
3817         _igvn->type(base->in(1))->isa_oopptr()) {
3818       base = base->in(1); // Case #9
3819     } else {
3820       Node* uncast_base = base->uncast();
3821       int opcode = uncast_base->Opcode();
3822       assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
3823              opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
3824              (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != nullptr)) ||
3825              is_captured_store_address(addp), "sanity");
3826     }
3827   }
3828   return base;
3829 }
3830 
3831 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
3832   assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
3833   Node* addp2 = addp->raw_out(0);
3834   if (addp->outcnt() == 1 && addp2->is_AddP() &&
3835       addp2->in(AddPNode::Base) == n &&
3836       addp2->in(AddPNode::Address) == addp) {
3837     assert(addp->in(AddPNode::Base) == n, "expecting the same base");
3838     //
    // Find the array's offset to push it on the worklist first and
    // as a result process the array's element offset first (pushed second)
    // to avoid a CastPP for the array's offset.
3842     // Otherwise the inserted CastPP (LocalVar) will point to what
    // the AddP (Field) points to. This would be wrong since
    // the algorithm expects the CastPP to have the same points-to set
    // as the AddP's base CheckCastPP (LocalVar).
3846     //
3847     //    ArrayAllocation
3848     //     |
3849     //    CheckCastPP
3850     //     |
3851     //    memProj (from ArrayAllocation CheckCastPP)
3852     //     |  ||
3853     //     |  ||   Int (element index)
3854     //     |  ||    |   ConI (log(element size))
3855     //     |  ||    |   /
3856     //     |  ||   LShift
3857     //     |  ||  /
3858     //     |  AddP (array's element offset)
3859     //     |  |
3860     //     |  | ConI (array's offset: #12(32-bits) or #24(64-bits))
3861     //     | / /
3862     //     AddP (array's offset)
3863     //      |
3864     //     Load/Store (memory operation on array's element)
3865     //
3866     return addp2;
3867   }
3868   return nullptr;
3869 }
3870 
3871 //
3872 // Adjust the type and inputs of an AddP which computes the
3873 // address of a field of an instance
3874 //
3875 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
3876   PhaseGVN* igvn = _igvn;
3877   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
3878   assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
3879   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
3880   if (t == nullptr) {
3881     // We are computing a raw address for a store captured by an Initialize;
3882     // compute an appropriate address type (cases #3 and #5).
3883     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
3884     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
3885     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
3886     assert(offs != Type::OffsetBot, "offset must be a constant");
3887     if (base_t->isa_aryptr() != nullptr) {
3888       // In the case of a flat inline type array, each field has its
3889       // own slice so we need to extract the field being accessed from
3890       // the address computation.
3891       t = base_t->isa_aryptr()->add_field_offset_and_offset(offs)->is_oopptr();
3892     } else {
3893       t = base_t->add_offset(offs)->is_oopptr();
3894     }
3895   }
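  // A hedged example (hypothetical Valhalla code): for a flat array of a
  // value class such as
  //
  //   value class Point { int x; int y; }
  //
  // accesses to a[i].x and a[i].y share the same element offset but have
  // different field offsets, so each gets its own memory slice via the
  // field offset recorded in the array type.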
3896   int inst_id = base_t->instance_id();
3897   assert(!t->is_known_instance() || t->instance_id() == inst_id,
3898                              "old type must be non-instance or match new type");
3899 
3900   // The type 't' could be a subclass of 'base_t'.
3901   // As a result t->offset() could be larger than base_t's size, which will
3902   // cause a failure in add_offset() with narrow oops since the TypeOopPtr
3903   // constructor verifies the correctness of the offset.
3904   //
3905   // This can happen on a subclass's branch (from type profiling
3906   // inlining) which was not eliminated during parsing since the exactness
3907   // of the allocation type was not propagated to the subclass type check.
3908   //
3909   // Or the type 't' might not be related to 'base_t' at all.
3910   // That can happen when the CHA type differs from the MDO type on a dead path
3911   // (for example, from an instanceof check) which was not collapsed during parsing.
3912   //
3913   // Do nothing for such AddP node and don't process its users since
3914   // this code branch will go away.
3915   //
3916   if (!t->is_known_instance() &&
3917       !base_t->maybe_java_subtype_of(t)) {
3918      return false; // bail out
3919   }
3920   const TypePtr* tinst = base_t->add_offset(t->offset());
3921   if (tinst->isa_aryptr() && t->isa_aryptr()) {
3922     // In the case of a flat inline type array, each field has its
3923     // own slice so we need to keep track of the field being accessed.
3924     tinst = tinst->is_aryptr()->with_field_offset(t->is_aryptr()->field_offset().get());
3925     // Keep array properties (not flat/null-free)
3926     tinst = tinst->is_aryptr()->update_properties(t->is_aryptr());
3927     if (tinst == nullptr) {
3928       return false; // Skip dead path with inconsistent properties
3929     }
3930   }
3931 
3932   // Do NOT remove the next line: ensure a new alias index is allocated
3933   // for the instance type. Note: C++ will not remove it since the call
3934   // has a side effect.
3935   int alias_idx = _compile->get_alias_index(tinst);
3936   igvn->set_type(addp, tinst);
3937   // record the allocation in the node map
3938   set_map(addp, get_map(base->_idx));
3939   // Set addp's Base and Address to 'base'.
3940   Node *abase = addp->in(AddPNode::Base);
3941   Node *adr   = addp->in(AddPNode::Address);
3942   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
3943       adr->in(0)->_idx == (uint)inst_id) {
3944     // Skip AddP cases #3 and #5.
3945   } else {
3946     assert(!abase->is_top(), "sanity"); // AddP case #3
3947     if (abase != base) {
3948       igvn->hash_delete(addp);
3949       addp->set_req(AddPNode::Base, base);
3950       if (abase == adr) {
3951         addp->set_req(AddPNode::Address, base);
3952       } else {
3953         // AddP case #4 (adr is array's element offset AddP node)
3954 #ifdef ASSERT
3955         const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
3956         assert(adr->is_AddP() && atype != nullptr &&
3957                atype->instance_id() == inst_id, "array's element offset should be processed first");
3958 #endif
3959       }
3960       igvn->hash_insert(addp);
3961     }
3962   }
3963   // Put on IGVN worklist since at least addp's type was changed above.
3964   record_for_optimizer(addp);
3965   return true;
3966 }
3967 
3968 //
3969 // Create a new version of orig_phi if necessary. Returns either the newly
3970 // created phi or an existing phi.  Sets create_new to indicate whether a new
3971 // phi was created.  Cache the last newly created phi in the node map.
3972 //
3973 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, bool &new_created) {
3974   Compile *C = _compile;
3975   PhaseGVN* igvn = _igvn;
3976   new_created = false;
3977   int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
3978   // nothing to do if orig_phi is bottom memory or matches alias_idx
3979   if (phi_alias_idx == alias_idx) {
3980     return orig_phi;
3981   }
3982   // Have we recently created a Phi for this alias index?
3983   PhiNode *result = get_map_phi(orig_phi->_idx);
3984   if (result != nullptr && C->get_alias_index(result->adr_type()) == alias_idx) {
3985     return result;
3986   }
3987   // Previous check may fail when the same wide memory Phi was split into Phis
3988   // for different memory slices. Search all Phis for this region.
3989   if (result != nullptr) {
3990     Node* region = orig_phi->in(0);
3991     for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
3992       Node* phi = region->fast_out(i);
3993       if (phi->is_Phi() &&
3994           C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
3995         assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
3996         return phi->as_Phi();
3997       }
3998     }
3999   }
4000   if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) {
4001     if (C->do_escape_analysis() && !C->failing()) {
4002       // Retry compilation without escape analysis.
4003       // If this is the first failure, the sentinel string will "stick"
4004       // to the Compile object, and the C2Compiler will see it and retry.
4005       C->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4006     }
4007     return nullptr;
4008   }
4009   orig_phi_worklist.append_if_missing(orig_phi);
4010   const TypePtr *atype = C->get_adr_type(alias_idx);
4011   result = PhiNode::make(orig_phi->in(0), nullptr, Type::MEMORY, atype);
4012   C->copy_node_notes_to(result, orig_phi);
4013   igvn->set_type(result, result->bottom_type());
4014   record_for_optimizer(result);
4015   set_map(orig_phi, result);
4016   new_created = true;
4017   return result;
4018 }
4019 
4020 //
4021 // Return a new version of Memory Phi "orig_phi" with the inputs having the
4022 // specified alias index.
4023 //
4024 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, uint rec_depth) {
4025   assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
4026   Compile *C = _compile;
4027   PhaseGVN* igvn = _igvn;
4028   bool new_phi_created;
4029   PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
4030   if (!new_phi_created) {
4031     return result;
4032   }
4033   GrowableArray<PhiNode *>  phi_list;
4034   GrowableArray<uint>  cur_input;
4035   PhiNode *phi = orig_phi;
4036   uint idx = 1;
4037   bool finished = false;
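  // The nested loops below are an iterative depth-first traversal:
  // phi_list and cur_input form an explicit stack of (Phi, input index)
  // frames, so deeply nested memory Phis cannot overflow the native stack.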
4038   while (!finished) {
4039     while (idx < phi->req()) {
4040       Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist, rec_depth + 1);
4041       if (mem != nullptr && mem->is_Phi()) {
4042         PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
4043         if (new_phi_created) {
4044           // found a Phi for which we created a new split; push the current
4045           // one on the worklist and begin processing the new one
4046           phi_list.push(phi);
4047           cur_input.push(idx);
4048           phi = mem->as_Phi();
4049           result = newphi;
4050           idx = 1;
4051           continue;
4052         } else {
4053           mem = newphi;
4054         }
4055       }
4056       if (C->failing()) {
4057         return nullptr;
4058       }
4059       result->set_req(idx++, mem);
4060     }
4061 #ifdef ASSERT
4062     // verify that the new Phi has an input for each input of the original
4063     assert(phi->req() == result->req(), "must have same number of inputs.");
4064     assert(result->in(0) != nullptr && result->in(0) == phi->in(0), "regions must match");
4065 #endif
4066     // Verify that the new Phi's inputs correspond one-to-one
4067     // (null for null) with the original Phi's inputs.
4068     for (uint i = 1; i < phi->req(); i++) {
4069       Node* in = result->in(i);
4070       assert((phi->in(i) == nullptr) == (in == nullptr), "inputs must correspond.");
4071     }
4072     // we have finished processing a Phi, see if there are any more to do
4073     finished = (phi_list.length() == 0);
4074     if (!finished) {
4075       phi = phi_list.pop();
4076       idx = cur_input.pop();
4077       PhiNode *prev_result = get_map_phi(phi->_idx);
4078       prev_result->set_req(idx++, result);
4079       result = prev_result;
4080     }
4081   }
4082   return result;
4083 }
4084 
4085 //
4086 // The next methods are derived from methods in MemNode.
4087 //
4088 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
4089   Node *mem = mmem;
4090   // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
4091   // means an array we have not precisely typed yet. Do not do any
4092   // alias analysis with it yet.
4093   if (toop->base() != Type::AnyPtr &&
4094       !(toop->isa_instptr() &&
4095         toop->is_instptr()->instance_klass()->is_java_lang_Object() &&
4096         toop->offset() == Type::OffsetBot)) {
4097     mem = mmem->memory_at(alias_idx);
4098     // Update the input if it is an improvement over what we have now.
4099   }
4100   return mem;
4101 }
4102 
4103 //
4104 // Move memory users to their memory slices.
4105 //
4106 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *>  &orig_phis) {
4107   Compile* C = _compile;
4108   PhaseGVN* igvn = _igvn;
4109   const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();
4110   assert(tp != nullptr, "ptr type");
4111   int alias_idx = C->get_alias_index(tp);
4112   int general_idx = C->get_general_index(alias_idx);
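  // 'alias_idx' is the instance slice that 'n' belongs to; 'general_idx'
  // is the matching general (non-instance) slice from which users such as
  // MergeMem and MemBar nodes may still reference 'n'.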
4113 
4114   // Move users first
4115   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4116     Node* use = n->fast_out(i);
4117     if (use->is_MergeMem()) {
4118       MergeMemNode* mmem = use->as_MergeMem();
4119       assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice");
4120       if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) {
4121         continue; // Nothing to do
4122       }
4123       // Replace previous general reference to mem node.
4124       uint orig_uniq = C->unique();
4125       Node* m = find_inst_mem(n, general_idx, orig_phis);
4126       assert(orig_uniq == C->unique(), "no new nodes");
4127       mmem->set_memory_at(general_idx, m);
4128       --imax;
4129       --i;
4130     } else if (use->is_MemBar()) {
4131       assert(!use->is_Initialize(), "initializing stores should not be moved");
4132       if (use->req() > MemBarNode::Precedent &&
4133           use->in(MemBarNode::Precedent) == n) {
4134         // Don't move related membars.
4135         record_for_optimizer(use);
4136         continue;
4137       }
4138       tp = use->as_MemBar()->adr_type()->isa_ptr();
4139       if ((tp != nullptr && C->get_alias_index(tp) == alias_idx) ||
4140           alias_idx == general_idx) {
4141         continue; // Nothing to do
4142       }
4143       // Move to general memory slice.
4144       uint orig_uniq = C->unique();
4145       Node* m = find_inst_mem(n, general_idx, orig_phis);
4146       assert(orig_uniq == C->unique(), "no new nodes");
4147       igvn->hash_delete(use);
4148       imax -= use->replace_edge(n, m, igvn);
4149       igvn->hash_insert(use);
4150       record_for_optimizer(use);
4151       --i;
4152 #ifdef ASSERT
4153     } else if (use->is_Mem()) {
4154       // Memory nodes should have new memory input.
4155       tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
4156       assert(tp != nullptr, "ptr type");
4157       int idx = C->get_alias_index(tp);
4158       assert(get_map(use->_idx) != nullptr || idx == alias_idx,
4159              "Following memory nodes should have new memory input or be on the same memory slice");
4160     } else if (use->is_Phi()) {
4161       // Phi nodes should be split and moved already.
4162       tp = use->as_Phi()->adr_type()->isa_ptr();
4163       assert(tp != nullptr, "ptr type");
4164       int idx = C->get_alias_index(tp);
4165       assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
4166     } else {
4167       use->dump();
4168       assert(false, "should not be here");
4169 #endif
4170     }
4171   }
4172 }
4173 
4174 //
4175 // Search memory chain of "mem" to find a MemNode whose address
4176 // is the specified alias index.
4177 //
4178 #define FIND_INST_MEM_RECURSION_DEPTH_LIMIT 1000
4179 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *>  &orig_phis, uint rec_depth) {
4180   if (rec_depth > FIND_INST_MEM_RECURSION_DEPTH_LIMIT) {
4181     _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4182     return nullptr;
4183   }
4184   if (orig_mem == nullptr) {
4185     return orig_mem;
4186   }
4187   Compile* C = _compile;
4188   PhaseGVN* igvn = _igvn;
4189   const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
4190   bool is_instance = (toop != nullptr) && toop->is_known_instance();
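  // Only when alias_idx denotes a known unique instance (is_instance) does
  // the walk below step over calls, Initializes, MergeMems and Phis; a
  // search on a general slice only skips stores to known instance slices.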
4191   Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
4192   Node *prev = nullptr;
4193   Node *result = orig_mem;
4194   while (prev != result) {
4195     prev = result;
4196     if (result == start_mem) {
4197       break;  // hit one of our sentinels
4198     }
4199     if (result->is_Mem()) {
4200       const Type *at = igvn->type(result->in(MemNode::Address));
4201       if (at == Type::TOP) {
4202         break; // Dead
4203       }
4204       assert (at->isa_ptr() != nullptr, "pointer type required.");
4205       int idx = C->get_alias_index(at->is_ptr());
4206       if (idx == alias_idx) {
4207         break; // Found
4208       }
4209       if (!is_instance && (at->isa_oopptr() == nullptr ||
4210                            !at->is_oopptr()->is_known_instance())) {
4211         break; // Do not skip store to general memory slice.
4212       }
4213       result = result->in(MemNode::Memory);
4214     }
4215     if (!is_instance) {
4216       continue;  // don't search further for non-instance types
4217     }
4218     // skip over a call which does not affect this memory slice
4219     if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
4220       Node *proj_in = result->in(0);
4221       if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
4222         break;  // hit one of our sentinels
4223       } else if (proj_in->is_Call()) {
4224         // ArrayCopy node processed here as well
4225         CallNode *call = proj_in->as_Call();
4226         if (!call->may_modify(toop, igvn)) {
4227           result = call->in(TypeFunc::Memory);
4228         }
4229       } else if (proj_in->is_Initialize()) {
4230         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
4231         // Stop if this is the initialization for the object instance
4232         // which contains this memory slice; otherwise skip over it.
4233         if (alloc == nullptr || alloc->_idx != (uint)toop->instance_id()) {
4234           result = proj_in->in(TypeFunc::Memory);
4235         }
4236       } else if (proj_in->is_MemBar()) {
4237         // Check if there is an array copy for a clone
4238         // Step over GC barrier when ReduceInitialCardMarks is disabled
4239         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4240         Node* control_proj_ac = bs->step_over_gc_barrier(proj_in->in(0));
4241 
4242         if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) {
4243           // Stop if it is a clone
4244           ArrayCopyNode* ac = control_proj_ac->in(0)->as_ArrayCopy();
4245           if (ac->may_modify(toop, igvn)) {
4246             break;
4247           }
4248         }
4249         result = proj_in->in(TypeFunc::Memory);
4250       }
4251     } else if (result->is_MergeMem()) {
4252       MergeMemNode *mmem = result->as_MergeMem();
4253       result = step_through_mergemem(mmem, alias_idx, toop);
4254       if (result == mmem->base_memory()) {
4255         // Didn't find instance memory, search through general slice recursively.
4256         result = mmem->memory_at(C->get_general_index(alias_idx));
4257         result = find_inst_mem(result, alias_idx, orig_phis, rec_depth + 1);
4258         if (C->failing()) {
4259           return nullptr;
4260         }
4261         mmem->set_memory_at(alias_idx, result);
4262       }
4263     } else if (result->is_Phi() &&
4264                C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
4265       Node *un = result->as_Phi()->unique_input(igvn);
4266       if (un != nullptr) {
4267         orig_phis.append_if_missing(result->as_Phi());
4268         result = un;
4269       } else {
4270         break;
4271       }
4272     } else if (result->is_ClearArray()) {
4273       if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {
4274         // Cannot bypass the initialization of the instance
4275         // we are looking for.
4276         break;
4277       }
4278       // Otherwise skip it (the call updated 'result' value).
4279     } else if (result->Opcode() == Op_SCMemProj) {
4280       Node* mem = result->in(0);
4281       Node* adr = nullptr;
4282       if (mem->is_LoadStore()) {
4283         adr = mem->in(MemNode::Address);
4284       } else {
4285         assert(mem->Opcode() == Op_EncodeISOArray ||
4286                mem->Opcode() == Op_StrCompressedCopy, "sanity");
4287         adr = mem->in(3); // Memory edge corresponds to destination array
4288       }
4289       const Type *at = igvn->type(adr);
4290       if (at != Type::TOP) {
4291         assert(at->isa_ptr() != nullptr, "pointer type required.");
4292         int idx = C->get_alias_index(at->is_ptr());
4293         if (idx == alias_idx) {
4294           // Assert in debug mode
4295           assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
4296           break; // In product mode return SCMemProj node
4297         }
4298       }
4299       result = mem->in(MemNode::Memory);
4300     } else if (result->Opcode() == Op_StrInflatedCopy) {
4301       Node* adr = result->in(3); // Memory edge corresponds to destination array
4302       const Type *at = igvn->type(adr);
4303       if (at != Type::TOP) {
4304         assert(at->isa_ptr() != nullptr, "pointer type required.");
4305         int idx = C->get_alias_index(at->is_ptr());
4306         if (idx == alias_idx) {
4307           // Assert in debug mode
4308           assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
4309           break; // In product mode return SCMemProj node
4310         }
4311       }
4312       result = result->in(MemNode::Memory);
4313     }
4314   }
4315   if (result->is_Phi()) {
4316     PhiNode *mphi = result->as_Phi();
4317     assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
4318     const TypePtr *t = mphi->adr_type();
4319     if (!is_instance) {
4320       // Push all non-instance Phis on the orig_phis worklist to update inputs
4321       // during Phase 4 if needed.
4322       orig_phis.append_if_missing(mphi);
4323     } else if (C->get_alias_index(t) != alias_idx) {
4324       // Create a new Phi with the specified alias index type.
4325       result = split_memory_phi(mphi, alias_idx, orig_phis, rec_depth + 1);
4326     }
4327   }
4328   // The result is either a MemNode, a PhiNode, or an InitializeNode.
4329   return result;
4330 }
4331 
4332 //
4333 //  Convert the types of non-escaped objects to instance types where possible,
4334 //  propagate the new type information through the graph, and update memory
4335 //  edges and MergeMem inputs to reflect the new type.
4336 //
4337 //  We start with allocations (and calls which may be allocations) on alloc_worklist.
4338 //  The processing is done in 4 phases:
4339 //
4340 //  Phase 1:  Process possible allocations from alloc_worklist.  Create instance
4341 //            types for the CheckCastPP for allocations where possible.
4342 //            Propagate the new types through users as follows:
4343 //               casts and Phi:  push users on alloc_worklist
4344 //               AddP:  cast Base and Address inputs to the instance type
4345 //                      push any AddP users on alloc_worklist and push any memnode
4346 //                      users onto memnode_worklist.
4347 //  Phase 2:  Process MemNodes from memnode_worklist. Compute the new address
4348 //            type and search the Memory chain for a store with the appropriate
4349 //            address type.  If a Phi is found, create a new version with
4350 //            the appropriate memory slices from each of the Phi inputs.
4351 //            For stores, process the users as follows:
4352 //               MemNode:  push on memnode_worklist
4353 //               MergeMem: push on mergemem_worklist
4354 //  Phase 3:  Process MergeMem nodes from mergemem_worklist.  Walk each memory
4355 //            slice, moving the first node encountered of each instance type to
4356 //            the input corresponding to its alias index.
4358 //  Phase 4:  Update the inputs of non-instance memory Phis and the Memory input of memnodes.
4359 //
4360 // In the following example, the CheckCastPP nodes are the cast of allocation
4361 // results and the allocation of node 29 is non-escaped and eligible to be an
4362 // instance type.
4363 //
4364 // We start with:
4365 //
4366 //     7 Parm #memory
4367 //    10  ConI  "12"
4368 //    19  CheckCastPP   "Foo"
4369 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
4370 //    29  CheckCastPP   "Foo"
4371 //    30  AddP  _ 29 29 10  Foo+12  alias_index=4
4372 //
4373 //    40  StoreP  25   7  20   ... alias_index=4
4374 //    50  StoreP  35  40  30   ... alias_index=4
4375 //    60  StoreP  45  50  20   ... alias_index=4
4376 //    70  LoadP    _  60  30   ... alias_index=4
4377 //    80  Phi     75  50  60   Memory alias_index=4
4378 //    90  LoadP    _  80  30   ... alias_index=4
4379 //   100  LoadP    _  80  20   ... alias_index=4
4380 //
4381 //
4382 // Phase 1 creates an instance type for node 29 assigning it an instance id of 24
4383 // and creating a new alias index for node 30.  This gives:
4384 //
4385 //     7 Parm #memory
4386 //    10  ConI  "12"
4387 //    19  CheckCastPP   "Foo"
4388 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
4389 //    29  CheckCastPP   "Foo"  iid=24
4390 //    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
4391 //
4392 //    40  StoreP  25   7  20   ... alias_index=4
4393 //    50  StoreP  35  40  30   ... alias_index=6
4394 //    60  StoreP  45  50  20   ... alias_index=4
4395 //    70  LoadP    _  60  30   ... alias_index=6
4396 //    80  Phi     75  50  60   Memory alias_index=4
4397 //    90  LoadP    _  80  30   ... alias_index=6
4398 //   100  LoadP    _  80  20   ... alias_index=4
4399 //
4400 // In phase 2, new memory inputs are computed for the loads and stores,
4401 // and a new version of the phi is created.  In phase 4, the inputs to
4402 // node 80 are updated and then the memory nodes are updated with the
4403 // values computed in phase 2.  This results in:
4404 //
4405 //     7 Parm #memory
4406 //    10  ConI  "12"
4407 //    19  CheckCastPP   "Foo"
4408 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
4409 //    29  CheckCastPP   "Foo"  iid=24
4410 //    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
4411 //
4412 //    40  StoreP  25  7   20   ... alias_index=4
4413 //    50  StoreP  35  7   30   ... alias_index=6
4414 //    60  StoreP  45  40  20   ... alias_index=4
4415 //    70  LoadP    _  50  30   ... alias_index=6
4416 //    80  Phi     75  40  60   Memory alias_index=4
4417 //   120  Phi     75  50  50   Memory alias_index=6
4418 //    90  LoadP    _ 120  30   ... alias_index=6
4419 //   100  LoadP    _  80  20   ... alias_index=4
4420 //
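// As a hedged Java-level sketch of the example above (names illustrative,
// not taken from any real program):
//
//   Foo f1 = getSharedFoo();   // ~node 19: may escape, stays on alias_index=4
//   Foo f2 = new Foo();        // ~node 29: non-escaped, gets iid=24
//   f2.field = b;              // ~node 50: moves to the new slice (alias_index=6)
//   int r = f2.field;          // ~nodes 70/90: load from the new slice
//
// Giving f2's field accesses their own memory slice lets them bypass
// unrelated memory operations on f1 during later optimizations.
//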
4421 void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist,
4422                                          GrowableArray<ArrayCopyNode*> &arraycopy_worklist,
4423                                          GrowableArray<MergeMemNode*> &mergemem_worklist,
4424                                          Unique_Node_List &reducible_merges) {
4425   DEBUG_ONLY(Unique_Node_List reduced_merges;)
4426   GrowableArray<Node *>  memnode_worklist;
4427   GrowableArray<PhiNode *>  orig_phis;
4428   PhaseIterGVN  *igvn = _igvn;
4429   uint new_index_start = (uint) _compile->num_alias_types();
4430   VectorSet visited;
4431   ideal_nodes.clear(); // Reset for use with set_map/get_map.
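  // (A sketch of the bookkeeping: set_map() records each newly mapped ideal
  // node in ideal_nodes, and Phase 4 below walks that list to install the
  // memory inputs computed in Phase 2.)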
4432   uint unique_old = _compile->unique();
4433 
4434   //  Phase 1:  Process possible allocations from alloc_worklist.
4435   //  Create instance types for the CheckCastPP for allocations where possible.
4436   //
4437   // (Note: don't forget to change the order of the second AddP node on
4438   //  the alloc_worklist if the order of the worklist processing is changed,
4439   //  see the comment in find_second_addp().)
4440   //
4441   while (alloc_worklist.length() != 0) {
4442     Node *n = alloc_worklist.pop();
4443     uint ni = n->_idx;
4444     if (n->is_Call()) {
4445       CallNode *alloc = n->as_Call();
4446       // copy escape information to call node
4447       PointsToNode* ptn = ptnode_adr(alloc->_idx);
4448       PointsToNode::EscapeState es = ptn->escape_state();
4449       // We have an allocation or call which returns a Java object;
4450       // see if it is non-escaped.
4451       if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) {
4452         continue;
4453       }
4454       // Find CheckCastPP for the allocate or for the return value of a call
4455       n = alloc->result_cast();
4456       if (n == nullptr) {            // No uses except Initialize node
4457         if (alloc->is_Allocate()) {
4458           // Set the scalar_replaceable flag for allocation
4459           // so it could be eliminated if it has no uses.
4460           alloc->as_Allocate()->_is_scalar_replaceable = true;
4461         }
4462         continue;
4463       }
4464       if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
4465         // We can reach here in the Allocate case if one Initialize is associated with many allocations.
4466         if (alloc->is_Allocate()) {
4467           alloc->as_Allocate()->_is_scalar_replaceable = false;
4468         }
4469         continue;
4470       }
4471 
4472       // The inline code for Object.clone() casts the allocation result to
4473       // java.lang.Object and then to the actual type of the allocated
4474       // object. Detect this case and use the second cast.
4475       // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when
4476       // the allocation result is cast to java.lang.Object and then
4477       // to the actual Array type.
4478       if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
4479           && (alloc->is_AllocateArray() ||
4480               igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeInstKlassPtr::OBJECT)) {
4481         Node *cast2 = nullptr;
4482         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4483           Node *use = n->fast_out(i);
4484           if (use->is_CheckCastPP()) {
4485             cast2 = use;
4486             break;
4487           }
4488         }
4489         if (cast2 != nullptr) {
4490           n = cast2;
4491         } else {
4492           // Not scalar replaceable if the allocation type is statically unknown
4493           // (reflection allocation): the object can't be restored during
4494           // deoptimization without a precise type.
4495           continue;
4496         }
4497       }
4498 
4499       const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
4500       if (t == nullptr) {
4501         continue;  // not a TypeOopPtr
4502       }
4503       if (!t->klass_is_exact()) {
4504         continue; // not a unique type
4505       }
4506       if (alloc->is_Allocate()) {
4507         // Set the scalar_replaceable flag for allocation
4508         // so it could be eliminated.
4509         alloc->as_Allocate()->_is_scalar_replaceable = true;
4510       }
4511       set_escape_state(ptnode_adr(n->_idx), es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); // CheckCastPP escape state
4512       // in order for an object to be scalar-replaceable, it must be:
4513       //   - a direct allocation (not a call returning an object)
4514       //   - non-escaping
4515       //   - eligible to be a unique type
4516       //   - not determined to be ineligible by escape analysis
4517       set_map(alloc, n);
4518       set_map(n, alloc);
4519       const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
4520       igvn->hash_delete(n);
4521       igvn->set_type(n,  tinst);
4522       n->raise_bottom_type(tinst);
4523       igvn->hash_insert(n);
4524       record_for_optimizer(n);
4525       // Allocate an alias index for the header fields. Accesses to
4526       // the header emitted during macro expansion wouldn't have
4527       // correct memory state otherwise.
4528       _compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes()));
4529       _compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes()));
4530       if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {
4531 
4532         // First, put on the worklist all Field edges from the Connection Graph,
4533         // which is more accurate than putting the immediate users from the Ideal Graph.
4534         for (EdgeIterator e(ptn); e.has_next(); e.next()) {
4535           PointsToNode* tgt = e.get();
4536           if (tgt->is_Arraycopy()) {
4537             continue;
4538           }
4539           Node* use = tgt->ideal_node();
4540           assert(tgt->is_Field() && use->is_AddP(),
4541                  "only AddP nodes are Field edges in CG");
4542           if (use->outcnt() > 0) { // Don't process dead nodes
4543             Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
4544             if (addp2 != nullptr) {
4545               assert(alloc->is_AllocateArray(),"array allocation was expected");
4546               alloc_worklist.append_if_missing(addp2);
4547             }
4548             alloc_worklist.append_if_missing(use);
4549           }
4550         }
4551 
4552         // An allocation may have an Initialize which has raw stores. Scan
4553         // the users of the raw allocation result and push AddP users
4554         // on alloc_worklist.
4555         Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms);
4556         assert (raw_result != nullptr, "must have an allocation result");
4557         for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
4558           Node *use = raw_result->fast_out(i);
4559           if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
4560             Node* addp2 = find_second_addp(use, raw_result);
4561             if (addp2 != nullptr) {
4562               assert(alloc->is_AllocateArray(),"array allocation was expected");
4563               alloc_worklist.append_if_missing(addp2);
4564             }
4565             alloc_worklist.append_if_missing(use);
4566           } else if (use->is_MemBar()) {
4567             memnode_worklist.append_if_missing(use);
4568           }
4569         }
4570       }
4571     } else if (n->is_AddP()) {
4572       if (has_reducible_merge_base(n->as_AddP(), reducible_merges)) {
4573         // This AddP will go away when we reduce the Phi
4574         continue;
4575       }
4576       Node* addp_base = get_addp_base(n);
4577       JavaObjectNode* jobj = unique_java_object(addp_base);
4578       if (jobj == nullptr || jobj == phantom_obj) {
4579 #ifdef ASSERT
4580         ptnode_adr(get_addp_base(n)->_idx)->dump();
4581         ptnode_adr(n->_idx)->dump();
4582         assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
4583 #endif
4584         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4585         return;
4586       }
4587       Node *base = get_map(jobj->idx());  // CheckCastPP node
4588       if (!split_AddP(n, base)) continue; // wrong type from dead path
4589     } else if (n->is_Phi() ||
4590                n->is_CheckCastPP() ||
4591                n->is_EncodeP() ||
4592                n->is_DecodeN() ||
4593                (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
4594       if (visited.test_set(n->_idx)) {
4595         assert(n->is_Phi(), "loops only through Phi's");
4596         continue;  // already processed
4597       }
4598       // Reducible Phi's will be removed from the graph after split_unique_types
4599       // finishes. For now we just try to split out the SR inputs of the merge.
4600       Node* parent = n->in(1);
4601       if (reducible_merges.member(n)) {
4602         reduce_phi(n->as_Phi(), alloc_worklist, memnode_worklist);
4603 #ifdef ASSERT
4604         if (VerifyReduceAllocationMerges) {
4605           reduced_merges.push(n);
4606         }
4607 #endif
4608         continue;
4609       } else if (reducible_merges.member(parent)) {
4610         // 'n' is a user of a reducible merge (a Phi). It will be simplified as
4611         // part of reduce_merge.
4612         continue;
4613       }
4614       JavaObjectNode* jobj = unique_java_object(n);
4615       if (jobj == nullptr || jobj == phantom_obj) {
4616 #ifdef ASSERT
4617         ptnode_adr(n->_idx)->dump();
4618         assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
4619 #endif
4620         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4621         return;
4622       } else {
4623         Node *val = get_map(jobj->idx());   // CheckCastPP node
4624         TypeNode *tn = n->as_Type();
4625         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
4626         assert(tinst != nullptr && tinst->is_known_instance() &&
4627                tinst->instance_id() == jobj->idx() , "instance type expected.");
4628 
4629         const Type *tn_type = igvn->type(tn);
4630         const TypeOopPtr *tn_t;
4631         if (tn_type->isa_narrowoop()) {
4632           tn_t = tn_type->make_ptr()->isa_oopptr();
4633         } else {
4634           tn_t = tn_type->isa_oopptr();
4635         }
4636         if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) {
4637           if (tn_t->isa_aryptr()) {
4638             // Keep array properties (not flat/null-free)
4639             tinst = tinst->is_aryptr()->update_properties(tn_t->is_aryptr());
4640             if (tinst == nullptr) {
4641               continue; // Skip dead path with inconsistent properties
4642             }
4643           }
4644           if (tn_type->isa_narrowoop()) {
4645             tn_type = tinst->make_narrowoop();
4646           } else {
4647             tn_type = tinst;
4648           }
4649           igvn->hash_delete(tn);
4650           igvn->set_type(tn, tn_type);
4651           tn->set_type(tn_type);
4652           igvn->hash_insert(tn);
4653           record_for_optimizer(n);
4654         } else {
4655           assert(tn_type == TypePtr::NULL_PTR ||
4656                  (tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t)),
4657                  "unexpected type");
4658           continue; // Skip dead path with different type
4659         }
4660       }
4661     } else {
4662       debug_only(n->dump();)
4663       assert(false, "EA: unexpected node");
4664       continue;
4665     }
4666     // push allocation's users on appropriate worklist
4667     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4668       Node *use = n->fast_out(i);
4669       if (use->is_Mem() && use->in(MemNode::Address) == n) {
4670         // Load/store to instance's field
4671         memnode_worklist.append_if_missing(use);
4672       } else if (use->is_MemBar()) {
4673         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4674           memnode_worklist.append_if_missing(use);
4675         }
4676       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
4677         Node* addp2 = find_second_addp(use, n);
4678         if (addp2 != nullptr) {
4679           alloc_worklist.append_if_missing(addp2);
4680         }
4681         alloc_worklist.append_if_missing(use);
4682       } else if (use->is_Phi() ||
4683                  use->is_CheckCastPP() ||
4684                  use->is_EncodeNarrowPtr() ||
4685                  use->is_DecodeNarrowPtr() ||
4686                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
4687         alloc_worklist.append_if_missing(use);
4688 #ifdef ASSERT
4689       } else if (use->is_Mem()) {
4690         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
4691       } else if (use->is_MergeMem()) {
4692         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4693       } else if (use->is_SafePoint()) {
4694         // Look for MergeMem nodes for calls which reference unique allocation
4695         // (through CheckCastPP nodes) even for debug info.
4696         Node* m = use->in(TypeFunc::Memory);
4697         if (m->is_MergeMem()) {
4698           assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4699         }
4700       } else if (use->Opcode() == Op_EncodeISOArray) {
4701         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4702           // EncodeISOArray overwrites destination array
4703           memnode_worklist.append_if_missing(use);
4704         }
4705       } else if (use->Opcode() == Op_Return) {
4706         // Allocation is referenced by field of returned inline type
4707         assert(_compile->tf()->returns_inline_type_as_fields(), "EA: unexpected reference by ReturnNode");
4708       } else {
4709         uint op = use->Opcode();
4710         if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
4711             (use->in(MemNode::Memory) == n)) {
4712           // They overwrite the memory edge corresponding to the destination array.
4713           memnode_worklist.append_if_missing(use);
4714         } else if (!(op == Op_CmpP || op == Op_Conv2B ||
4715               op == Op_CastP2X ||
4716               op == Op_FastLock || op == Op_AryEq ||
4717               op == Op_StrComp || op == Op_CountPositives ||
4718               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
4719               op == Op_StrEquals || op == Op_VectorizedHashCode ||
4720               op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
4721               op == Op_SubTypeCheck || op == Op_InlineType || op == Op_FlatArrayCheck ||
4722               op == Op_ReinterpretS2HF ||
4723               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
4724           n->dump();
4725           use->dump();
4726           assert(false, "EA: missing allocation reference path");
4727         }
4728 #endif
4729       }
4730     }
4731 
4732   }
4733 
4734 #ifdef ASSERT
4735   if (VerifyReduceAllocationMerges) {
4736     for (uint i = 0; i < reducible_merges.size(); i++) {
4737       Node* phi = reducible_merges.at(i);
4738 
4739       if (!reduced_merges.member(phi)) {
4740         phi->dump(2);
4741         phi->dump(-2);
4742         assert(false, "This reducible merge wasn't reduced.");
4743       }
4744 
4745       // At this point reducible Phis shouldn't have AddP users anymore; only SafePoints or Casts.
4746       for (DUIterator_Fast jmax, j = phi->fast_outs(jmax); j < jmax; j++) {
4747         Node* use = phi->fast_out(j);
4748         if (!use->is_SafePoint() && !use->is_CastPP()) {
4749           phi->dump(2);
4750           phi->dump(-2);
4751           assert(false, "Unexpected user of reducible Phi -> %d:%s:%d", use->_idx, use->Name(), use->outcnt());
4752         }
4753       }
4754     }
4755   }
4756 #endif
4757 
4758   // Go over all ArrayCopy nodes and if one of the inputs has a unique
4759   // type, record it in the ArrayCopy node so we know what memory this
4760   // node uses/modifies.
4761   for (int next = 0; next < arraycopy_worklist.length(); next++) {
4762     ArrayCopyNode* ac = arraycopy_worklist.at(next);
4763     Node* dest = ac->in(ArrayCopyNode::Dest);
4764     if (dest->is_AddP()) {
4765       dest = get_addp_base(dest);
4766     }
4767     JavaObjectNode* jobj = unique_java_object(dest);
4768     if (jobj != nullptr) {
4769       Node *base = get_map(jobj->idx());
4770       if (base != nullptr) {
4771         const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
4772         ac->_dest_type = base_t;
4773       }
4774     }
4775     Node* src = ac->in(ArrayCopyNode::Src);
4776     if (src->is_AddP()) {
4777       src = get_addp_base(src);
4778     }
4779     jobj = unique_java_object(src);
4780     if (jobj != nullptr) {
4781       Node* base = get_map(jobj->idx());
4782       if (base != nullptr) {
4783         const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
4784         ac->_src_type = base_t;
4785       }
4786     }
4787   }
4788 
4789   // New alias types were created in split_AddP().
4790   uint new_index_end = (uint) _compile->num_alias_types();
4791 
4792   //  Phase 2:  Process MemNodes from memnode_worklist. Compute the new address
4793   //            type and new values for the Memory inputs (the Memory inputs are
4794   //            not actually updated until Phase 4).
4795   if (memnode_worklist.length() == 0)
4796     return;  // nothing to do
4797   while (memnode_worklist.length() != 0) {
4798     Node *n = memnode_worklist.pop();
4799     if (visited.test_set(n->_idx)) {
4800       continue;
4801     }
4802     if (n->is_Phi() || n->is_ClearArray()) {
4803       // we don't need to do anything, but the users must be pushed
4804     } else if (n->is_MemBar()) { // Initialize, MemBar nodes
4805       // we don't need to do anything, but the users must be pushed
4806       n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
4807       if (n == nullptr) {
4808         continue;
4809       }
4810     } else if (n->is_CallLeaf() && n->as_CallLeaf()->_name != nullptr &&
4811                strcmp(n->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
4812       n = n->as_CallLeaf()->proj_out(TypeFunc::Memory); // must precede the generic CallLeaf case below
4813     } else if (n->is_CallLeaf()) {
4814       // Runtime calls with narrow memory input (no MergeMem node)
4815       // get the memory projection
4816       n = n->as_Call()->proj_out_or_null(TypeFunc::Memory);
4817       if (n == nullptr) {
4818         continue;
4819       }
4820     } else if (n->Opcode() == Op_StrInflatedCopy) {
4821       // Check direct uses of StrInflatedCopy.
4822       // It is memory type Node - no special SCMemProj node.
4823     } else if (n->Opcode() == Op_StrCompressedCopy ||
4824                n->Opcode() == Op_EncodeISOArray) {
4825       // get the memory projection
4826       n = n->find_out_with(Op_SCMemProj);
4827       assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4828     } else {
4829 #ifdef ASSERT
4830       if (!n->is_Mem()) {
4831         n->dump();
4832       }
4833       assert(n->is_Mem(), "memory node required.");
4834 #endif
4835       Node *addr = n->in(MemNode::Address);
4836       const Type *addr_t = igvn->type(addr);
4837       if (addr_t == Type::TOP) {
4838         continue;
4839       }
4840       assert (addr_t->isa_ptr() != nullptr, "pointer type required.");
4841       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
4842       assert ((uint)alias_idx < new_index_end, "wrong alias index");
4843       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
4844       if (_compile->failing()) {
4845         return;
4846       }
4847       if (mem != n->in(MemNode::Memory)) {
4848         // We delay the memory edge update since we need the old one in
4849         // the MergeMem code below when the instance memory slices are separated.
4850         set_map(n, mem);
4851       }
4852       if (n->is_Load()) {
4853         continue;  // don't push users
4854       } else if (n->is_LoadStore()) {
4855         // get the memory projection
4856         n = n->find_out_with(Op_SCMemProj);
4857         assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4858       }
4859     }
4860     // push users on the appropriate worklist
4861     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4862       Node *use = n->fast_out(i);
4863       if (use->is_Phi() || use->is_ClearArray()) {
4864         memnode_worklist.append_if_missing(use);
4865       } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
4866         memnode_worklist.append_if_missing(use);
4867       } else if (use->is_MemBar() || use->is_CallLeaf()) {
4868         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4869           memnode_worklist.append_if_missing(use);
4870         }
4871 #ifdef ASSERT
4872       } else if (use->is_Mem()) {
4873         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
4874       } else if (use->is_MergeMem()) {
4875         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4876       } else if (use->Opcode() == Op_EncodeISOArray) {
4877         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4878           // EncodeISOArray overwrites destination array
4879           memnode_worklist.append_if_missing(use);
4880         }
4881       } else if (use->is_CallLeaf() && use->as_CallLeaf()->_name != nullptr &&
4882                  strcmp(use->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
4883         // store_unknown_inline overwrites destination array
4884         memnode_worklist.append_if_missing(use);
4885       } else {
4886         uint op = use->Opcode();
4887         if ((use->in(MemNode::Memory) == n) &&
4888             (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
4889           // They overwrite the memory edge corresponding to the destination array.
4890           memnode_worklist.append_if_missing(use);
4891         } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
4892               op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
4893               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
4894               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar || op == Op_FlatArrayCheck)) {
4895           n->dump();
4896           use->dump();
4897           assert(false, "EA: missing memory path");
4898         }
4899 #endif
4900       }
4901     }
4902   }
4903 
4904   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
4905   //            Walk each memory slice moving the first node encountered of each
4906   //            instance type to the input corresponding to its alias index.
4907   uint length = mergemem_worklist.length();
4908   for (uint next = 0; next < length; ++next) {
4909     MergeMemNode* nmm = mergemem_worklist.at(next);
4910     assert(!visited.test_set(nmm->_idx), "should not be visited before");
4911     // Note: we don't want to use MergeMemStream here because we only want to
4912     // scan inputs which exist at the start, not ones we add during processing.
4913     // Note 2: MergeMem may already contain instance memory slices added
4914     // during find_inst_mem() call when memory nodes were processed above.
4915     igvn->hash_delete(nmm);
4916     uint nslices = MIN2(nmm->req(), new_index_start);
4917     for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
4918       Node* mem = nmm->in(i);
4919       Node* cur = nullptr;
4920       if (mem == nullptr || mem->is_top()) {
4921         continue;
4922       }
4923       // First, update mergemem by moving memory nodes to corresponding slices
4924       // if their type became more precise since this mergemem was created.
4925       while (mem->is_Mem()) {
4926         const Type *at = igvn->type(mem->in(MemNode::Address));
4927         if (at != Type::TOP) {
4928           assert (at->isa_ptr() != nullptr, "pointer type required.");
4929           uint idx = (uint)_compile->get_alias_index(at->is_ptr());
4930           if (idx == i) {
4931             if (cur == nullptr) {
4932               cur = mem;
4933             }
4934           } else {
4935             if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
4936               nmm->set_memory_at(idx, mem);
4937             }
4938           }
4939         }
4940         mem = mem->in(MemNode::Memory);
4941       }
4942       nmm->set_memory_at(i, (cur != nullptr) ? cur : mem);
4943       // Find any instance of the current type if we haven't already
4944       // encountered a memory slice of that instance along the memory chain.
4945       for (uint ni = new_index_start; ni < new_index_end; ni++) {
4946         if ((uint)_compile->get_general_index(ni) == i) {
4947           Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
4948           if (nmm->is_empty_memory(m)) {
4949             Node* result = find_inst_mem(mem, ni, orig_phis);
4950             if (_compile->failing()) {
4951               return;
4952             }
4953             nmm->set_memory_at(ni, result);
4954           }
4955         }
4956       }
4957     }
4958     // Find the memory values for the rest of the new instance slices.
4959     for (uint ni = new_index_start; ni < new_index_end; ni++) {
4960       const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
4961       Node* result = step_through_mergemem(nmm, ni, tinst);
4962       if (result == nmm->base_memory()) {
4963         // Didn't find instance memory, search through general slice recursively.
4964         result = nmm->memory_at(_compile->get_general_index(ni));
4965         result = find_inst_mem(result, ni, orig_phis);
4966         if (_compile->failing()) {
4967           return;
4968         }
4969         nmm->set_memory_at(ni, result);
4970       }
4971     }
4972 
4973     // If we have crossed the 3/4 point of the max node limit, it's too
4974     // risky to continue with EA/SR because we might hit the max node limit.
4975     if (_compile->live_nodes() >= _compile->max_node_limit() * 0.75) {
4976       if (_compile->do_reduce_allocation_merges()) {
4977         _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
4978       } else if (_invocation > 0) {
4979         _compile->record_failure(C2Compiler::retry_no_iterative_escape_analysis());
4980       } else {
4981         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
4982       }
4983       return;
4984     }
4985 
4986     igvn->hash_insert(nmm);
4987     record_for_optimizer(nmm);
4988   }
4989 
4990   //  Phase 4:  Update the inputs of non-instance memory Phis and
4991   //            the Memory input of memnodes
4992   // First update the inputs of any non-instance Phi's from
4993   // which we split out an instance Phi.  Note we don't have
4994   // to recursively process Phi's encountered on the input memory
4995   // chains as is done in split_memory_phi() since they will
4996   // also be processed here.
4997   for (int j = 0; j < orig_phis.length(); j++) {
4998     PhiNode *phi = orig_phis.at(j);
4999     int alias_idx = _compile->get_alias_index(phi->adr_type());
5000     igvn->hash_delete(phi);
5001     for (uint i = 1; i < phi->req(); i++) {
5002       Node *mem = phi->in(i);
5003       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
5004       if (_compile->failing()) {
5005         return;
5006       }
5007       if (mem != new_mem) {
5008         phi->set_req(i, new_mem);
5009       }
5010     }
5011     igvn->hash_insert(phi);
5012     record_for_optimizer(phi);
5013   }
5014 
5015   // Update the memory inputs of MemNodes with the value we computed
5016   // in Phase 2 and move the stores' memory users to the corresponding memory slices.
5017   // Disable memory split verification code until the fix for 6984348.
5018   // Currently it produces false negative results since it does not cover all cases.
5019 #if 0 // ifdef ASSERT
5020   visited.Reset();
5021   Node_Stack old_mems(arena, _compile->unique() >> 2);
5022 #endif
5023   for (uint i = 0; i < ideal_nodes.size(); i++) {
5024     Node*    n = ideal_nodes.at(i);
5025     Node* nmem = get_map(n->_idx);
5026     assert(nmem != nullptr, "sanity");
5027     if (n->is_Mem()) {
5028 #if 0 // ifdef ASSERT
5029       Node* old_mem = n->in(MemNode::Memory);
5030       if (!visited.test_set(old_mem->_idx)) {
5031         old_mems.push(old_mem, old_mem->outcnt());
5032       }
5033 #endif
5034       assert(n->in(MemNode::Memory) != nmem, "sanity");
5035       if (!n->is_Load()) {
5036         // Move memory users of a store first.
5037         move_inst_mem(n, orig_phis);
5038       }
5039       // Now update memory input
5040       igvn->hash_delete(n);
5041       n->set_req(MemNode::Memory, nmem);
5042       igvn->hash_insert(n);
5043       record_for_optimizer(n);
5044     } else {
5045       assert(n->is_Allocate() || n->is_CheckCastPP() ||
5046              n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
5047     }
5048   }
5049 #if 0 // ifdef ASSERT
5050   // Verify that memory was split correctly
5051   while (old_mems.is_nonempty()) {
5052     Node* old_mem = old_mems.node();
5053     uint  old_cnt = old_mems.index();
5054     old_mems.pop();
5055     assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
5056   }
5057 #endif
5058 }
5059 
5060 #ifndef PRODUCT
5061 int ConnectionGraph::_no_escape_counter = 0;
5062 int ConnectionGraph::_arg_escape_counter = 0;
5063 int ConnectionGraph::_global_escape_counter = 0;
5064 
5065 static const char *node_type_names[] = {
5066   "UnknownType",
5067   "JavaObject",
5068   "LocalVar",
5069   "Field",
5070   "Arraycopy"
5071 };
5072 
5073 static const char *esc_names[] = {
5074   "UnknownEscape",
5075   "NoEscape",
5076   "ArgEscape",
5077   "GlobalEscape"
5078 };

void PointsToNode::dump_header(bool print_state, outputStream* out) const {
  NodeType nt = node_type();
  out->print("%s(%d) ", node_type_names[(int) nt], _pidx);
  if (print_state) {
    EscapeState es = escape_state();
    EscapeState fields_es = fields_escape_state();
    out->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]);
    if (nt == PointsToNode::JavaObject && !this->scalar_replaceable()) {
      out->print("NSR ");
    }
  }
}
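// dump_header() output looks like, e.g. (illustrative values):
//   JavaObject(12) NoEscape(NoEscape) NSR
// where "NSR" marks a JavaObject that is not scalar replaceable.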

void PointsToNode::dump(bool print_state, outputStream* out, bool newline) const {
  dump_header(print_state, out);
  if (is_Field()) {
    FieldNode* f = (FieldNode*)this;
    if (f->is_oop()) {
      out->print("oop ");
    }
    if (f->offset() > 0) {
      out->print("+%d ", f->offset());
    }
    out->print("(");
    for (BaseIterator i(f); i.has_next(); i.next()) {
      PointsToNode* b = i.get();
      out->print(" %d%s", b->idx(), (b->is_JavaObject() ? "P" : ""));
    }
    out->print(" )");
  }
  out->print("[");
  for (EdgeIterator i(this); i.has_next(); i.next()) {
    PointsToNode* e = i.get();
    out->print(" %d%s%s", e->idx(), (e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : "");
  }
  out->print(" [");
  for (UseIterator i(this); i.has_next(); i.next()) {
    PointsToNode* u = i.get();
    bool is_base = false;
    if (PointsToNode::is_base_use(u)) {
      is_base = true;
      u = PointsToNode::get_use_node(u)->as_Field();
    }
    out->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : "");
  }
  out->print(" ]]  ");
  if (_node == nullptr) {
    out->print("<null>%s", newline ? "\n" : "");
  } else {
    _node->dump(newline ? "\n" : "", false, out);
  }
}
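// A complete dump() line might look like, e.g. (illustrative):
//   LocalVar(4) NoEscape(NoEscape) [ 3P [ 7b ]]  <ideal node dump>
// In the edge list "P" marks a JavaObject, "F" a Field, and "cp" an
// Arraycopy; in the use list "b" marks a use of this node as a field base.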
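// Dumps the part of the connection graph rooted at allocations: only
// JavaObject nodes for Allocate or boxing-method call sites are printed,
// restricted to NoEscape objects unless Verbose is set, together with the
// LocalVars (and, with Verbose, other uses) that reference them.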
void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) {
  bool first = true;
  int ptnodes_length = ptnodes_worklist.length();
  for (int i = 0; i < ptnodes_length; i++) {
    PointsToNode *ptn = ptnodes_worklist.at(i);
    if (ptn == nullptr || !ptn->is_JavaObject()) {
      continue;
    }
    PointsToNode::EscapeState es = ptn->escape_state();
    if ((es != PointsToNode::NoEscape) && !Verbose) {
      continue;
    }
    Node* n = ptn->ideal_node();
    if (n->is_Allocate() || (n->is_CallStaticJava() &&
                             n->as_CallStaticJava()->is_boxing_method())) {
      if (first) {
        tty->cr();
        tty->print("======== Connection graph for ");
        _compile->method()->print_short_name();
        tty->cr();
        tty->print_cr("invocation #%d: %d iterations and %f sec to build connection graph with %d nodes and worklist size %d",
                      _invocation, _build_iterations, _build_time, nodes_size(), ptnodes_worklist.length());
        tty->cr();
        first = false;
      }
      ptn->dump();
      // Print all locals and fields which reference this allocation
      for (UseIterator j(ptn); j.has_next(); j.next()) {
        PointsToNode* use = j.get();
        if (use->is_LocalVar()) {
          use->dump(Verbose);
        } else if (Verbose) {
          use->dump();
        }
      }
      tty->cr();
    }
  }
}
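// Prints the totals gathered by escape_state_statistics(); output is a
// single line, e.g. (illustrative counts):
//   No escape = 120, Arg escape = 35, Global escape = 310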
void ConnectionGraph::print_statistics() {
  tty->print_cr("No escape = %d, Arg escape = %d, Global escape = %d", Atomic::load(&_no_escape_counter), Atomic::load(&_arg_escape_counter), Atomic::load(&_global_escape_counter));
}

void ConnectionGraph::escape_state_statistics(GrowableArray<JavaObjectNode*>& java_objects_worklist) {
  if (!PrintOptoStatistics || (_invocation > 0)) { // Collect data only for the first invocation
    return;
  }
  for (int next = 0; next < java_objects_worklist.length(); ++next) {
    JavaObjectNode* ptn = java_objects_worklist.at(next);
    if (ptn->ideal_node()->is_Allocate()) {
      if (ptn->escape_state() == PointsToNode::NoEscape) {
        Atomic::inc(&ConnectionGraph::_no_escape_counter);
      } else if (ptn->escape_state() == PointsToNode::ArgEscape) {
        Atomic::inc(&ConnectionGraph::_arg_escape_counter);
      } else if (ptn->escape_state() == PointsToNode::GlobalEscape) {
        Atomic::inc(&ConnectionGraph::_global_escape_counter);
      } else {
        assert(false, "Unexpected Escape State");
      }
    }
  }
}

void ConnectionGraph::trace_es_update_helper(PointsToNode* ptn, PointsToNode::EscapeState es, bool fields, const char* reason) const {
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    assert(ptn != nullptr, "should not be null");
    assert(reason != nullptr, "should not be null");
    ptn->dump_header(true);
    PointsToNode::EscapeState new_es = fields ? ptn->escape_state() : es;
    PointsToNode::EscapeState new_fields_es = fields ? es : ptn->fields_escape_state();
    tty->print_cr("-> %s(%s) %s", esc_names[(int)new_es], esc_names[(int)new_fields_es], reason);
  }
}
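// When the TraceEscapeAnalysis compiler directive is enabled, the helper
// above emits lines such as (illustrative):
//   JavaObject(12) NoEscape(NoEscape) -> GlobalEscape(GlobalEscape) escapes as arg to: ...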
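// The trace_*_message() helpers below build the 'reason' strings consumed
// by trace_es_update_helper(); they return nullptr when tracing is disabled.
// The returned strings are resource allocated via stringStream::as_string(),
// so callers are expected to run under a ResourceMark.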
const char* ConnectionGraph::trace_propagate_message(PointsToNode* from) const {
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    stringStream ss;
    ss.print("propagated from: ");
    from->dump(true, &ss, false);
    return ss.as_string();
  } else {
    return nullptr;
  }
}

const char* ConnectionGraph::trace_arg_escape_message(CallNode* call) const {
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    stringStream ss;
    ss.print("escapes as arg to:");
    call->dump("", false, &ss);
    return ss.as_string();
  } else {
    return nullptr;
  }
}

const char* ConnectionGraph::trace_merged_message(PointsToNode* other) const {
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    stringStream ss;
    ss.print("is merged with other object: ");
    other->dump_header(true, &ss);
    return ss.as_string();
  } else {
    return nullptr;
  }
}

#endif
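// Enqueue a modified node and its users for the next round of IGVN so the
// optimizer revisits everything affected by the graph surgery performed
// during escape analysis.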
void ConnectionGraph::record_for_optimizer(Node *n) {
  _igvn->_worklist.push(n);
  _igvn->add_users_to_worklist(n);
}