/*
 * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "opto/c2compiler.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/macro.hpp"
#include "opto/locknode.hpp"
#include "opto/phaseX.hpp"
#include "opto/movenode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/castnode.hpp"
#include "opto/rootnode.hpp"
#include "utilities/macros.hpp"

ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  // If ReduceAllocationMerges is enabled we might call split_through_phi during
  // split_unique_types and that will create additional nodes that need to be
  // pushed to the ConnectionGraph. The code below bumps the initial capacity of
  // _nodes by 10% to account for these additional nodes. If capacity is exceeded
  // the array will be reallocated.
  _nodes(C->comp_arena(), C->do_reduce_allocation_merges() ? C->unique()*1.10 : C->unique(), C->unique(), nullptr),
  _in_worklist(C->comp_arena()),
  _next_pidx(0),
  _collecting(true),
  _verify(false),
  _compile(C),
  _igvn(igvn),
  _invocation(invocation),
  _build_iterations(0),
  _build_time(0.),
  _node_map(C->comp_arena()) {
  // Add unknown java object.
  add_java_object(C->top(), PointsToNode::GlobalEscape);
  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  set_not_scalar_replaceable(phantom_obj NOT_PRODUCT(COMMA "Phantom object"));
  // Add ConP and ConN null oop nodes
  Node* oop_null = igvn->zerocon(T_OBJECT);
  assert(oop_null->_idx < nodes_size(), "should be created already");
  add_java_object(oop_null, PointsToNode::NoEscape);
  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  set_not_scalar_replaceable(null_obj NOT_PRODUCT(COMMA "Null object"));
  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    assert(noop_null->_idx < nodes_size(), "should be created already");
    map_ideal_node(noop_null, null_obj);
  }
}

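// Illustrative sketch only (hypothetical Java shapes, not from any real test):
// the candidates that has_candidates() below looks for correspond, roughly, to
// source patterns such as
//
//   Point p = new Point(x, y);             // Allocate macro node
//   synchronized (list) { list.add(x); }   // Lock whose object is not a Parm/Con
//   Integer boxed = Integer.valueOf(x);    // boxing CallStaticJava
//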
bool ConnectionGraph::has_candidates(Compile *C) {
  // EA brings benefits only when the code has allocations and/or locks which
  // are represented by ideal Macro nodes.
  int cnt = C->macro_count();
  for (int i = 0; i < cnt; i++) {
    Node *n = C->macro_node(i);
    if (n->is_Allocate()) {
      return true;
    }
    if (n->is_Lock()) {
      Node* obj = n->as_Lock()->obj_node()->uncast();
      if (!(obj->is_Parm() || obj->is_Con())) {
        return true;
      }
    }
    if (n->is_CallStaticJava() &&
        n->as_CallStaticJava()->is_boxing_method()) {
      return true;
    }
  }
  return false;
}

void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]);
  ResourceMark rm;

  // Add ConP and ConN null oop nodes before ConnectionGraph construction
  // to create space for them in ConnectionGraph::_nodes[].
  Node* oop_null = igvn->zerocon(T_OBJECT);
  Node* noop_null = igvn->zerocon(T_NARROWOOP);
  int invocation = 0;
  if (C->congraph() != nullptr) {
    invocation = C->congraph()->_invocation + 1;
  }
  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn, invocation);
  // Perform escape analysis
  if (congraph->compute_escape()) {
    // There are non escaping objects.
    C->set_congraph(congraph);
  }
  // Cleanup.
  if (oop_null->outcnt() == 0) {
    igvn->hash_delete(oop_null);
  }
  if (noop_null->outcnt() == 0) {
    igvn->hash_delete(noop_null);
  }
}

bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;

  // Worklists used by EA.
  Unique_Node_List delayed_worklist;
  Unique_Node_List reducible_merges;
  GrowableArray<Node*> alloc_worklist;
  GrowableArray<Node*> ptr_cmp_worklist;
  GrowableArray<MemBarStoreStoreNode*> storestore_worklist;
  GrowableArray<ArrayCopyNode*>  arraycopy_worklist;
  GrowableArray<PointsToNode*>   ptnodes_worklist;
  GrowableArray<JavaObjectNode*> java_objects_worklist;
  GrowableArray<JavaObjectNode*> non_escaped_allocs_worklist;
  GrowableArray<FieldNode*>      oop_fields_worklist;
  GrowableArray<SafePointNode*>  sfn_worklist;
  GrowableArray<MergeMemNode*>   mergemem_worklist;
  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )

  { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);

  // 1. Populate Connection Graph (CG) with PointsTo nodes.
  ideal_nodes.map(C->live_nodes(), nullptr);  // preallocate space
  // Initialize worklist
  if (C->root() != nullptr) {
    ideal_nodes.push(C->root());
  }
  // Processed ideal nodes are unique on ideal_nodes list
  // but several ideal nodes are mapped to the phantom_obj.
  // To avoid duplicated entries on the following worklists
  // add the phantom_obj only once to them.
  ptnodes_worklist.append(phantom_obj);
  java_objects_worklist.append(phantom_obj);
  for (uint next = 0; next < ideal_nodes.size(); ++next) {
    Node* n = ideal_nodes.at(next);
    if ((n->Opcode() == Op_LoadX || n->Opcode() == Op_StoreX) &&
        !n->in(MemNode::Address)->is_AddP() &&
        _igvn->type(n->in(MemNode::Address))->isa_oopptr()) {
      // A load/store at the mark word address is at offset 0 so it has no AddP, which confuses EA.
      Node* addp = new AddPNode(n->in(MemNode::Address), n->in(MemNode::Address), _igvn->MakeConX(0));
      _igvn->register_new_node_with_optimizer(addp);
      _igvn->replace_input_of(n, MemNode::Address, addp);
      ideal_nodes.push(addp);
      _nodes.at_put_grow(addp->_idx, nullptr, nullptr);
    }
    // Create PointsTo nodes and add them to Connection Graph. Called
    // only once per ideal node since ideal_nodes is Unique_Node list.
    add_node_to_connection_graph(n, &delayed_worklist);
    PointsToNode* ptn = ptnode_adr(n->_idx);
    if (ptn != nullptr && ptn != phantom_obj) {
      ptnodes_worklist.append(ptn);
      if (ptn->is_JavaObject()) {
        java_objects_worklist.append(ptn->as_JavaObject());
        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
          // Only the results of allocations and java static calls are interesting.
          non_escaped_allocs_worklist.append(ptn->as_JavaObject());
        }
      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        oop_fields_worklist.append(ptn->as_Field());
      }
    }
    // Collect some interesting nodes for further use.
    switch (n->Opcode()) {
      case Op_MergeMem:
        // Collect all MergeMem nodes to add memory slices for
        // scalar replaceable objects in split_unique_types().
        mergemem_worklist.append(n->as_MergeMem());
        break;
      case Op_CmpP:
      case Op_CmpN:
        // Collect compare pointers nodes.
        if (OptimizePtrCompare) {
          ptr_cmp_worklist.append(n);
        }
        break;
      case Op_MemBarStoreStore:
        // Collect all MemBarStoreStore nodes so that depending on the
        // escape status of the associated Allocate node some of them
        // may be eliminated.
        if (!UseStoreStoreForCtor || n->req() > MemBarNode::Precedent) {
          storestore_worklist.append(n->as_MemBarStoreStore());
        }
        break;
      case Op_MemBarRelease:
        if (n->req() > MemBarNode::Precedent) {
          record_for_optimizer(n);
        }
        break;
#ifdef ASSERT
      case Op_AddP:
        // Collect address nodes for graph verification.
        addp_worklist.append(n);
        break;
#endif
      case Op_ArrayCopy:
        // Keep a list of ArrayCopy nodes so that if one of their inputs is
        // non-escaping, we can record a unique type.
        arraycopy_worklist.append(n->as_ArrayCopy());
        break;
      default:
        // not interested now, ignore...
        break;
    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);   // Get user
      ideal_nodes.push(m);
    }
    if (n->is_SafePoint()) {
      sfn_worklist.append(n->as_SafePoint());
    }
  }

#ifndef PRODUCT
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    tty->print("+++++ Initial worklist for ");
    _compile->method()->print_name();
    tty->print_cr(" (ea_inv=%d)", _invocation);
    for (int i = 0; i < ptnodes_worklist.length(); i++) {
      PointsToNode* ptn = ptnodes_worklist.at(i);
      ptn->dump();
    }
    tty->print_cr("+++++ Calculating escape states and scalar replaceability");
  }
#endif

  if (non_escaped_allocs_worklist.length() == 0) {
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false; // Nothing to do.
  }
  // Add final simple edges to graph.
  while (delayed_worklist.size() > 0) {
    Node* n = delayed_worklist.pop();
    add_final_edges(n);
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that no new simple edges could be created and that all
    // local vars have edges.
    _verify = true;
    int ptnodes_length = ptnodes_worklist.length();
    for (int next = 0; next < ptnodes_length; ++next) {
      PointsToNode* ptn = ptnodes_worklist.at(next);
      add_final_edges(ptn->ideal_node());
      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
        ptn->dump();
        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
      }
    }
    _verify = false;
  }
#endif
  // The bytecode analyzer BCEscapeAnalyzer, used for Call node
  // processing, calls into CI to resolve symbols (types, fields, methods)
  // referenced in bytecode. During symbol resolution the VM may throw
  // an exception which CI cleans up and converts to a compilation failure.
  if (C->failing()) {
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  // 2. Finish Graph construction by propagating references to all
  //    java objects through graph.
  if (!complete_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                                 java_objects_worklist, oop_fields_worklist)) {
    // All objects escaped or hit time or iterations limits.
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  // 3. Adjust scalar_replaceable state of nonescaping objects and push
  //    scalar replaceable allocations on alloc_worklist for processing
  //    in split_unique_types().
  GrowableArray<JavaObjectNode*> jobj_worklist;
  int non_escaped_length = non_escaped_allocs_worklist.length();
  bool found_nsr_alloc = false;
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
    bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      n->as_Allocate()->_is_non_escaping = noescape;
    }
    if (noescape && ptn->scalar_replaceable()) {
      adjust_scalar_replaceable_state(ptn, reducible_merges);
      if (ptn->scalar_replaceable()) {
        jobj_worklist.push(ptn);
      } else {
        found_nsr_alloc = true;
      }
    }
  }

  // Propagate NSR (Not Scalar Replaceable) state.
  if (found_nsr_alloc) {
    find_scalar_replaceable_allocs(jobj_worklist, reducible_merges);
  }

  // alloc_worklist will be processed in reverse push order.
  // Therefore the reducible Phis will be processed last, which is what we
  // want because by then the scalarizable inputs of the merge will already
  // have a unique instance type.
  for (uint i = 0; i < reducible_merges.size(); i++) {
    Node* n = reducible_merges.at(i);
    alloc_worklist.append(n);
  }

  for (int next = 0; next < jobj_worklist.length(); ++next) {
    JavaObjectNode* jobj = jobj_worklist.at(next);
    if (jobj->scalar_replaceable()) {
      alloc_worklist.append(jobj->ideal_node());
    }
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that graph is complete - no new edges could be added or needed.
    verify_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                            java_objects_worklist, addp_worklist);
  }
  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
         null_obj->edge_count() == 0 &&
         !null_obj->arraycopy_src() &&
         !null_obj->arraycopy_dst(), "sanity");
#endif

  _collecting = false;

  } // TracePhase t3("connectionGraph")

  // 4. Optimize ideal graph based on EA information.
  bool has_non_escaping_obj = (non_escaped_allocs_worklist.length() > 0);
  if (has_non_escaping_obj) {
    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
  }

#ifndef PRODUCT
  if (PrintEscapeAnalysis) {
    dump(ptnodes_worklist); // Dump ConnectionGraph
  }
#endif

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    int alloc_length = alloc_worklist.length();
    for (int next = 0; next < alloc_length; ++next) {
      Node* n = alloc_worklist.at(next);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
    }
  }

  if (VerifyReduceAllocationMerges) {
    for (uint i = 0; i < reducible_merges.size(); i++) {
      Node* n = reducible_merges.at(i);
      if (!can_reduce_phi(n->as_Phi())) {
        TraceReduceAllocationMerges = true;
        n->dump(2);
        n->dump(-2);
        assert(can_reduce_phi(n->as_Phi()), "Sanity: previous reducible Phi is no longer reducible before SUT.");
      }
    }
  }
#endif

  // 5. Separate memory graph for scalar replaceable allocations.
  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
  if (has_scalar_replaceable_candidates && EliminateAllocations) {
    assert(C->do_aliasing(), "Aliasing should be enabled");
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist, arraycopy_worklist, mergemem_worklist, reducible_merges);
    if (C->failing()) {
      NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
      return false;
    }
    C->print_method(PHASE_AFTER_EA, 2);

#ifdef ASSERT
  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if (!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if (!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    }
    tty->cr();
#endif
  }

  // 6. Reduce allocation merges used as debug information. This is done after
  // split_unique_types because the methods used to create SafePointScalarObject
  // need to traverse the memory graph to find values for object fields. We also
  // set to null the scalarized inputs of reducible Phis so that the Allocates
  // they point to can later be scalar replaced.
  bool delay = _igvn->delay_transform();
  _igvn->set_delay_transform(true);
  for (uint i = 0; i < reducible_merges.size(); i++) {
    Node* n = reducible_merges.at(i);
    if (n->outcnt() > 0) {
      if (!reduce_phi_on_safepoints(n->as_Phi())) {
        NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
        C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
        return false;
      }

      // Now we set the scalar replaceable inputs of ophi to null, which is
      // the last piece that would prevent it from being scalar replaceable.
      reset_scalar_replaceable_entries(n->as_Phi());
    }
  }
  _igvn->set_delay_transform(delay);

  // Annotate at safepoints if they have <= ArgEscape objects in their scope and at
  // java calls if they pass ArgEscape objects as parameters.
  if (has_non_escaping_obj &&
      (C->env()->should_retain_local_variables() ||
       C->env()->jvmti_can_get_owned_monitor_info() ||
       C->env()->jvmti_can_walk_any_space() ||
       DeoptimizeObjectsALot)) {
    int sfn_length = sfn_worklist.length();
    for (int next = 0; next < sfn_length; next++) {
      SafePointNode* sfn = sfn_worklist.at(next);
      sfn->set_has_ea_local_in_scope(has_ea_local_in_scope(sfn));
      if (sfn->is_CallJava()) {
        CallJavaNode* call = sfn->as_CallJava();
        call->set_arg_escape(has_arg_escape(call));
      }
    }
  }

  NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
  return has_non_escaping_obj;
}

// Check if it's profitable to reduce the Phi passed as parameter.  Returns true
// if at least one scalar replaceable allocation participates in the merge.
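// For instance (hypothetical source, names are illustrative):
//
//   Point p = cond ? new Point(0, 0)  // scalar replaceable Allocate input
//                  : GLOBAL_POINT;    // NSR input that escaped elsewhere
//
// is profitable to reduce because the 'new Point(0, 0)' can be eliminated
// once the merge is untangled, even though the other input is NSR.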
bool ConnectionGraph::can_reduce_phi_check_inputs(PhiNode* ophi) const {
  bool found_sr_allocate = false;

  for (uint i = 1; i < ophi->req(); i++) {
    JavaObjectNode* ptn = unique_java_object(ophi->in(i));
    if (ptn != nullptr && ptn->scalar_replaceable()) {
      AllocateNode* alloc = ptn->ideal_node()->as_Allocate();

      // Don't handle arrays.
      if (alloc->Opcode() != Op_Allocate) {
        assert(alloc->Opcode() == Op_AllocateArray, "Unexpected type of allocation.");
        continue;
      }

      if (PhaseMacroExpand::can_eliminate_allocation(_igvn, alloc, nullptr)) {
        found_sr_allocate = true;
      } else {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("%dth input of Phi %d is SR but can't be eliminated.", i, ophi->_idx);)
        ptn->set_scalar_replaceable(false);
      }
    }
  }

  NOT_PRODUCT(if (TraceReduceAllocationMerges && !found_sr_allocate) tty->print_cr("Can NOT reduce Phi %d on invocation %d. No SR Allocate as input.", ophi->_idx, _invocation);)
  return found_sr_allocate;
}

// We can reduce the Cmp if it's a comparison between the Phi and a constant.
// We require the 'other' input to be a constant so that the Cmp can be moved
// around safely.
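// For example (hypothetical source): a null check on the merge such as
//
//   Point p = cond ? new Point(0, 0) : GLOBAL_POINT;
//   if (p == null) { ... }  // CmpP(Phi, ConP#null) with a single Bool user
//
// satisfies these constraints, while a comparison against another non-constant
// pointer does not.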
bool ConnectionGraph::can_reduce_cmp(Node* n, Node* cmp) const {
  assert(cmp->Opcode() == Op_CmpP || cmp->Opcode() == Op_CmpN, "not expected node: %s", cmp->Name());
  Node* left = cmp->in(1);
  Node* right = cmp->in(2);

  return (left == n || right == n) &&
         (left->is_Con() || right->is_Con()) &&
         cmp->outcnt() == 1;
}

// We are going to check if any of the SafePointScalarMerge entries
// in the SafePoint reference the Phi that we are checking.
bool ConnectionGraph::has_been_reduced(PhiNode* n, SafePointNode* sfpt) const {
  JVMState *jvms = sfpt->jvms();

  for (uint i = jvms->debug_start(); i < jvms->debug_end(); i++) {
    Node* sfpt_in = sfpt->in(i);
    if (sfpt_in->is_SafePointScalarMerge()) {
      SafePointScalarMergeNode* smerge = sfpt_in->as_SafePointScalarMerge();
      Node* nsr_ptr = sfpt->in(smerge->merge_pointer_idx(jvms));
      if (nsr_ptr == n) {
        return true;
      }
    }
  }

  return false;
}

// Check if we are able to untangle the merge. The following patterns are
// supported:
//  - Phi -> SafePoints
//  - Phi -> CmpP/N
//  - Phi -> AddP -> Load
//  - Phi -> CastPP -> SafePoints
//  - Phi -> CastPP -> AddP -> Load
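//
// For example (hypothetical source), a null-checked field read such as
//
//   Point p = cond ? new Point(0, 0) : GLOBAL_POINT;
//   int x = (p != null) ? p.x : 0;
//
// produces Phi -> CmpP/N for the null check and Phi -> CastPP -> AddP -> Load
// for the 'p.x' access, both of which are supported patterns.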
bool ConnectionGraph::can_reduce_check_users(Node* n, uint nesting) const {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* use = n->fast_out(i);

    if (use->is_SafePoint()) {
      if (use->is_Call() && use->as_Call()->has_non_debug_use(n)) {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Call has non_debug_use().", n->_idx, _invocation);)
        return false;
      } else if (has_been_reduced(n->is_Phi() ? n->as_Phi() : n->as_CastPP()->in(1)->as_Phi(), use->as_SafePoint())) {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. It has already been reduced.", n->_idx, _invocation);)
        return false;
      }
    } else if (use->is_AddP()) {
      Node* addp = use;
      for (DUIterator_Fast jmax, j = addp->fast_outs(jmax); j < jmax; j++) {
        Node* use_use = addp->fast_out(j);
        const Type* load_type = _igvn->type(use_use);

        if (!use_use->is_Load() || !use_use->as_Load()->can_split_through_phi_base(_igvn)) {
          NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. AddP user isn't a [splittable] Load(): %s", n->_idx, _invocation, use_use->Name());)
          return false;
        } else if (load_type->isa_narrowklass() || load_type->isa_klassptr()) {
          NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. [Narrow] Klass Load: %s", n->_idx, _invocation, use_use->Name());)
          return false;
        }
      }
    } else if (nesting > 0) {
      NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Unsupported user %s at nesting level %d.", n->_idx, _invocation, use->Name(), nesting);)
      return false;
    } else if (use->is_CastPP()) {
      const Type* cast_t = _igvn->type(use);
      if (cast_t == nullptr || cast_t->make_ptr()->isa_instptr() == nullptr) {
#ifndef PRODUCT
        if (TraceReduceAllocationMerges) {
          tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP is not to an instance.", n->_idx, _invocation);
          use->dump();
        }
#endif
        return false;
      }

      bool is_trivial_control = use->in(0) == nullptr || use->in(0) == n->in(0);
      if (!is_trivial_control) {
        // If it's not a trivial control then we check if we can reduce the
        // CmpP/N used by the If controlling the cast.
        if (use->in(0)->is_IfTrue() || use->in(0)->is_IfFalse()) {
          Node* iff = use->in(0)->in(0);
          // We may have an OpaqueNotNull node between If and Bool nodes. But we could also have a subclass of IfNode,
          // for example, an OuterStripMinedLoopEnd or a Parse Predicate. Bail out in all these cases.
          bool can_reduce = (iff->Opcode() == Op_If) && iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp();
          if (can_reduce) {
            Node* iff_cmp = iff->in(1)->in(1);
            int opc = iff_cmp->Opcode();
            can_reduce = (opc == Op_CmpP || opc == Op_CmpN) && can_reduce_cmp(n, iff_cmp);
          }
          if (!can_reduce) {
#ifndef PRODUCT
            if (TraceReduceAllocationMerges) {
              tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP %d doesn't have simple control.", n->_idx, _invocation, use->_idx);
              n->dump(5);
            }
#endif
            return false;
          }
        }
      }

      if (!can_reduce_check_users(use, nesting+1)) {
        return false;
      }
    } else if (use->Opcode() == Op_CmpP || use->Opcode() == Op_CmpN) {
      if (!can_reduce_cmp(n, use)) {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. CmpP/N %d isn't reducible.", n->_idx, _invocation, use->_idx);)
        return false;
      }
    } else {
      NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. One of the uses is: %d %s", n->_idx, _invocation, use->_idx, use->Name());)
      return false;
    }
  }

  return true;
}

// Returns true if: 1) It's profitable to reduce the merge, and 2) The Phi is
// only used in certain code shapes. Check the comments in
// 'can_reduce_phi_check_inputs' and 'can_reduce_check_users' for more
// details.
bool ConnectionGraph::can_reduce_phi(PhiNode* ophi) const {
  // If there was an error attempting to reduce allocation merges for this
  // method we might have disabled the compilation and be retrying with RAM
  // disabled.
  if (!_compile->do_reduce_allocation_merges() || ophi->region()->Opcode() != Op_Region) {
    return false;
  }

  const Type* phi_t = _igvn->type(ophi);
  if (phi_t == nullptr ||
      phi_t->make_ptr() == nullptr ||
      phi_t->make_ptr()->isa_aryptr() != nullptr) {
    return false;
  }

  if (!can_reduce_phi_check_inputs(ophi) || !can_reduce_check_users(ophi, /* nesting: */ 0)) {
    return false;
  }

  NOT_PRODUCT(if (TraceReduceAllocationMerges) { tty->print_cr("Can reduce Phi %d during invocation %d: ", ophi->_idx, _invocation); })
  return true;
}

// This method will return a CmpP/N that we need to use on the If controlling a
// CastPP after it was split. This method is only called on bases that are
// nullable; therefore we always need a controlling If for the split CastPP.
//
// 'curr_ctrl' is the control of the CastPP that we want to split through phi.
// If the CastPP currently doesn't have a control then the CmpP/N will be
// against the NULL constant, otherwise it will be against the constant input of
// the existing CmpP/N. It's guaranteed that there will be a CmpP/N in the latter
// case because we have constraints on it and because the CastPP has a control
// input.
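//
// For example (illustrative): if the CastPP was controlled by the IfTrue of
// 'if (phi != GLOBAL_POINT)' then the returned node is CmpP(base, GLOBAL_POINT);
// if the CastPP had no control input the returned node is CmpP(base, NULL).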
Node* ConnectionGraph::specialize_cmp(Node* base, Node* curr_ctrl) {
  const Type* t = base->bottom_type();
  Node* con = nullptr;

  if (curr_ctrl == nullptr || curr_ctrl->is_Region()) {
    con = _igvn->zerocon(t->basic_type());
  } else {
    // can_reduce_check_users() verified graph: true/false -> if -> bool -> cmp
    assert(curr_ctrl->in(0)->Opcode() == Op_If, "unexpected node %s", curr_ctrl->in(0)->Name());
    Node* bol = curr_ctrl->in(0)->in(1);
    assert(bol->is_Bool(), "unexpected node %s", bol->Name());
    Node* curr_cmp = bol->in(1);
    assert(curr_cmp->Opcode() == Op_CmpP || curr_cmp->Opcode() == Op_CmpN, "unexpected node %s", curr_cmp->Name());
    con = curr_cmp->in(1)->is_Con() ? curr_cmp->in(1) : curr_cmp->in(2);
  }

  return CmpNode::make(base, con, t->basic_type());
}

// This method 'specializes' the CastPP passed as parameter to the base passed
// as parameter. Note that the existing CastPP input is a Phi. "Specialize"
// means that the CastPP now will be specific for a given base instead of a Phi.
// An If-Then-Else-Region block is inserted to control the CastPP. The control
// of the CastPP is a copy of the current one (if there is one) or a check
// against NULL.
//
// Before:
//
//    C1     C2  ... Cn
//     \      |      /
//      \     |     /
//       \    |    /
//        \   |   /
//         \  |  /
//          \ | /
//           \|/
//          Region     B1      B2  ... Bn
//            |          \      |      /
//            |           \     |     /
//            |            \    |    /
//            |             \   |   /
//            |              \  |  /
//            |               \ | /
//            ---------------> Phi
//                              |
//                      X       |
//                      |       |
//                      |       |
//                      ------> CastPP
//
// After (only partial illustration; base = B2, current_control = C2):
//
//                      C2
//                      |
//                      If
//                     / \
//                    /   \
//                   T     F
//                  /\     /
//                 /  \   /
//                /    \ /
//      C1    CastPP   Reg        Cn
//       |              |          |
//       |              |          |
//       |              |          |
//       -------------- | ----------
//                    | | |
//                    Region
//
Node* ConnectionGraph::specialize_castpp(Node* castpp, Node* base, Node* current_control) {
  Node* control_successor  = current_control->unique_ctrl_out();
  Node* cmp                = _igvn->transform(specialize_cmp(base, castpp->in(0)));
  Node* bol                = _igvn->transform(new BoolNode(cmp, BoolTest::ne));
  IfNode* if_ne            = _igvn->transform(new IfNode(current_control, bol, PROB_MIN, COUNT_UNKNOWN))->as_If();
  Node* not_eq_control     = _igvn->transform(new IfTrueNode(if_ne));
  Node* yes_eq_control     = _igvn->transform(new IfFalseNode(if_ne));
  Node* end_region         = _igvn->transform(new RegionNode(3));

  // Insert the new if-else-region block into the graph
  end_region->set_req(1, not_eq_control);
  end_region->set_req(2, yes_eq_control);
  control_successor->replace_edge(current_control, end_region, _igvn);

  _igvn->_worklist.push(current_control);
  _igvn->_worklist.push(control_successor);

  return _igvn->transform(ConstraintCastNode::make_cast_for_type(not_eq_control, base, _igvn->type(castpp), ConstraintCastNode::UnconditionalDependency, nullptr));
}

Node* ConnectionGraph::split_castpp_load_through_phi(Node* curr_addp, Node* curr_load, Node* region, GrowableArray<Node*>* bases_for_loads, GrowableArray<Node *>  &alloc_worklist) {
  const Type* load_type = _igvn->type(curr_load);
  Node* nsr_value = _igvn->zerocon(load_type->basic_type());
  Node* memory = curr_load->in(MemNode::Memory);

  // The data_phi merging the loads needs to be nullable if
  // we are loading pointers.
  if (load_type->make_ptr() != nullptr) {
    if (load_type->isa_narrowoop()) {
      load_type = load_type->meet(TypeNarrowOop::NULL_PTR);
    } else if (load_type->isa_ptr()) {
      load_type = load_type->meet(TypePtr::NULL_PTR);
    } else {
      assert(false, "Unexpected load ptr type.");
    }
  }

  Node* data_phi = PhiNode::make(region, nsr_value, load_type);

  for (int i = 1; i < bases_for_loads->length(); i++) {
    Node* base = bases_for_loads->at(i);
    Node* cmp_region = nullptr;
    if (base != nullptr) {
      if (base->is_CFG()) { // means that we added a CastPP as child of this CFG node
        cmp_region = base->unique_ctrl_out_or_null();
        assert(cmp_region != nullptr, "There should be.");
        base = base->find_out_with(Op_CastPP);
      }

      Node* addr = _igvn->transform(new AddPNode(base, base, curr_addp->in(AddPNode::Offset)));
      Node* mem = (memory->is_Phi() && (memory->in(0) == region)) ? memory->in(i) : memory;
      Node* load = curr_load->clone();
      load->set_req(0, nullptr);
      load->set_req(1, mem);
      load->set_req(2, addr);

      if (cmp_region != nullptr) { // see comment on previous if
        Node* intermediate_phi = PhiNode::make(cmp_region, nsr_value, load_type);
        intermediate_phi->set_req(1, _igvn->transform(load));
        load = intermediate_phi;
      }

      data_phi->set_req(i, _igvn->transform(load));
    } else {
      // Just use the default, which is already in phi
    }
  }

  // Takes care of updating CG and split_unique_types worklists due
  // to cloned AddP->Load.
  updates_after_load_split(data_phi, curr_load, alloc_worklist);

  return _igvn->transform(data_phi);
}

// This method only reduces CastPP field loads; SafePoints are handled
// separately. The idea here is basically to clone the CastPP and place copies
// on each input of the Phi, including non-scalar replaceable inputs.
// Experimentation shows that the resulting IR graph is simpler that way than if
// we just split the cast through scalar-replaceable inputs.
//
// The reduction process requires that CastPP's control be one of:
//  1) no control,
//  2) the same region as Ophi, or
//  3) an IfTrue/IfFalse coming from a CmpP/N between Ophi and a constant.
//
// After splitting the CastPP we'll put it under an If-Then-Else-Region control
// flow. If the CastPP originally had an IfTrue/False control input then we'll
// use a similar CmpP/N to control the new If-Then-Else-Region. Otherwise, we'll
// just use a CmpP/N against the NULL constant.
//
// The If-Then-Else-Region isn't always needed. For instance, if the input to the
// split cast was not nullable (or if it was the NULL constant) then we don't
// need (and shouldn't) use a CastPP at all.
//
// After the casts are split we'll split the AddP->Loads through the Phi and
// connect them to the just split CastPPs.
//
// Before (CastPP control is same as Phi):
//
//          Region     Allocate   Null    Call
//            |             \      |      /
//            |              \     |     /
//            |               \    |    /
//            |                \   |   /
//            |                 \  |  /
//            |                  \ | /
//            ------------------> Phi            # Oop Phi
//            |                    |
//            |                    |
//            |                    |
//            |                    |
//            ----------------> CastPP
//                                 |
//                               AddP
//                                 |
//                               Load
//
// After (Very much simplified):
//
//                         Call  NULL
//                            \  /
//                            CmpP
//                             |
//                           Bool#NE
//                             |
//                             If
//                            / \
//                           T   F
//                          / \ /
//                         /   R
//                     CastPP  |
//                       |     |
//                     AddP    |
//                       |     |
//                     Load    |
//                         \   |   0
//            Allocate      \  |  /
//                \          \ | /
//               AddP         Phi
//                  \         /
//                 Load      /
//                    \  0  /
//                     \ | /
//                      \|/
//                      Phi        # "Field" Phi
//
void ConnectionGraph::reduce_phi_on_castpp_field_load(Node* curr_castpp, GrowableArray<Node *>  &alloc_worklist, GrowableArray<Node *>  &memnode_worklist) {
  Node* ophi = curr_castpp->in(1);
  assert(ophi->is_Phi(), "Expected this to be a Phi node.");

  // Identify which base should be used for AddP->Load later when splitting the
  // CastPP->Loads through ophi. Three kinds of values may be stored in this
  // array, depending on the nullability status of the corresponding input in
  // ophi.
  //
  //  - nullptr:    Meaning that the base is actually the NULL constant and therefore
  //                we won't try to load from it.
  //
  //  - CFG Node:   Meaning that the base is a CastPP that was specialized for
  //                this input of Ophi. I.e., we added an If->Then->Else-Region
  //                that will 'activate' the CastPP only when the input is not Null.
  //
  //  - Other Node: Meaning that the base is not nullable and therefore we'll try
  //                to load directly from it.
  GrowableArray<Node*> bases_for_loads(ophi->req(), ophi->req(), nullptr);

  for (uint i = 1; i < ophi->req(); i++) {
    Node* base = ophi->in(i);
    const Type* base_t = _igvn->type(base);

    if (base_t->maybe_null()) {
      if (base->is_Con()) {
        // Nothing to do as bases_for_loads[i] is already nullptr
      } else {
        Node* new_castpp = specialize_castpp(curr_castpp, base, ophi->in(0)->in(i));
        bases_for_loads.at_put(i, new_castpp->in(0)); // Use the ctrl of the new node just as a flag
      }
    } else {
      bases_for_loads.at_put(i, base);
    }
  }

  // Now let's split the CastPP->Loads through the Phi
  for (int i = curr_castpp->outcnt()-1; i >= 0;) {
    Node* use = curr_castpp->raw_out(i);
    if (use->is_AddP()) {
      for (int j = use->outcnt()-1; j >= 0;) {
        Node* use_use = use->raw_out(j);
        assert(use_use->is_Load(), "Expected this to be a Load node.");

        // We can't make an unconditional load from a nullable input. The
        // 'split_castpp_load_through_phi' method will add an
        // 'If-Then-Else-Region' around nullable bases and only load from them
        // when the input is not null.
        Node* phi = split_castpp_load_through_phi(use, use_use, ophi->in(0), &bases_for_loads, alloc_worklist);
        _igvn->replace_node(use_use, phi);

        --j;
        j = MIN2(j, (int)use->outcnt()-1);
      }

      _igvn->remove_dead_node(use);
    }
    --i;
    i = MIN2(i, (int)curr_castpp->outcnt()-1);
  }
}

// This method splits a given CmpP/N through the Phi used in one of its inputs.
// As a result we convert a comparison with a pointer to a comparison with an
// integer.
// The only requirement is that one of the inputs of the CmpP/N must be a Phi
// while the other must be a constant.
// The splitting process is basically just cloning the CmpP/N above the input
// Phi.  However, some (most) of the cloned CmpP/Ns won't be required because we
// can prove at compile time the result of the comparison.
//
// Before:
//
//             in1    in2 ... inN
//              \      |      /
//               \     |     /
//                \    |    /
//                 \   |   /
//                  \  |  /
//                   \ | /
//                    Phi
//                     |   Other
//                     |    /
//                     |   /
//                     |  /
//                    CmpP/N
//
// After:
//
//        in1  Other   in2 Other  inN  Other
//         |    |      |   |      |    |
//         \    |      |   |      |    |
//          \  /       |   /      |    /
//          CmpP/N    CmpP/N     CmpP/N
//          Bool      Bool       Bool
//            \        |        /
//             \       |       /
//              \      |      /
//               \     |     /
//                \    |    /
//                 \   |   /
//                  \  |  /
//                   \ | /
//                    Phi
//                     |
//                     |   Zero
//                     |    /
//                     |   /
//                     |  /
//                     CmpI
//
//
void ConnectionGraph::reduce_phi_on_cmp(Node* cmp) {
  Node* ophi = cmp->in(1)->is_Con() ? cmp->in(2) : cmp->in(1);
  assert(ophi->is_Phi(), "Expected this to be a Phi node.");

  Node* other = cmp->in(1)->is_Con() ? cmp->in(1) : cmp->in(2);
  Node* zero = _igvn->intcon(0);
  BoolTest::mask mask = cmp->unique_out()->as_Bool()->_test._test;

  // This Phi will merge the result of the Cmps split through the Phi
  Node* res_phi  = _igvn->transform(PhiNode::make(ophi->in(0), zero, TypeInt::INT));

  for (uint i = 1; i < ophi->req(); i++) {
    Node* ophi_input = ophi->in(i);
    Node* res_phi_input = nullptr;

    const TypeInt* tcmp = optimize_ptr_compare(ophi_input, other);
    if (tcmp->singleton()) {
      res_phi_input = _igvn->makecon(tcmp);
    } else {
      Node* ncmp = _igvn->transform(cmp->clone());
      ncmp->set_req(1, ophi_input);
      ncmp->set_req(2, other);
      Node* bol = _igvn->transform(new BoolNode(ncmp, mask));
      res_phi_input = bol->as_Bool()->as_int_value(_igvn);
    }

    res_phi->set_req(i, res_phi_input);
  }

  Node* new_cmp = _igvn->transform(new CmpINode(res_phi, zero));
  _igvn->replace_node(cmp, new_cmp);
}

// Push the newly created AddP on alloc_worklist and patch
// the connection graph. Note that the changes in the CG below
// won't affect the ES of objects since the new nodes have the
// same status as the old ones.
void ConnectionGraph::updates_after_load_split(Node* data_phi, Node* previous_load, GrowableArray<Node *>  &alloc_worklist) {
  assert(data_phi != nullptr, "Output of split_through_phi is null.");
  assert(data_phi != previous_load, "Output of split_through_phi is same as input.");
  assert(data_phi->is_Phi(), "Output of split_through_phi isn't a Phi.");

  if (data_phi == nullptr || !data_phi->is_Phi()) {
    // Make this a retry?
    return;
  }

  Node* previous_addp = previous_load->in(MemNode::Address);
  FieldNode* fn = ptnode_adr(previous_addp->_idx)->as_Field();
  for (uint i = 1; i < data_phi->req(); i++) {
    Node* new_load = data_phi->in(i);

    if (new_load->is_Phi()) {
      // new_load is currently the "intermediate_phi" from a specialized
      // CastPP.
      new_load = new_load->in(1);
    }

    // "new_load" might actually be a constant, parameter, etc.
    if (new_load->is_Load()) {
      Node* new_addp = new_load->in(MemNode::Address);
      Node* base = get_addp_base(new_addp);

      // The base might not be something that we can create a unique
      // type for. If that's the case we are done with that input.
      PointsToNode* jobj_ptn = unique_java_object(base);
      if (jobj_ptn == nullptr || !jobj_ptn->scalar_replaceable()) {
        continue;
      }

      // Push to alloc_worklist since the base has a unique type
      alloc_worklist.append_if_missing(new_addp);

      // Now let's add the node to the connection graph
      _nodes.at_grow(new_addp->_idx, nullptr);
      add_field(new_addp, fn->escape_state(), fn->offset());
      add_base(ptnode_adr(new_addp->_idx)->as_Field(), ptnode_adr(base->_idx));

      // If the load doesn't load an object then it won't be
      // part of the connection graph
      PointsToNode* curr_load_ptn = ptnode_adr(previous_load->_idx);
      if (curr_load_ptn != nullptr) {
        _nodes.at_grow(new_load->_idx, nullptr);
        add_local_var(new_load, curr_load_ptn->escape_state());
        add_edge(ptnode_adr(new_load->_idx), ptnode_adr(new_addp->_idx)->as_Field());
      }
    }
  }
}

void ConnectionGraph::reduce_phi_on_field_access(Node* previous_addp, GrowableArray<Node *>  &alloc_worklist) {
  // We'll pass this to 'split_through_phi' so that it'll do the split even
  // though the load doesn't have a unique instance type.
  bool ignore_missing_instance_id = true;

  // All AddPs are present in the connection graph
  FieldNode* fn = ptnode_adr(previous_addp->_idx)->as_Field();

  // Iterate over AddP looking for a Load
  for (int k = previous_addp->outcnt()-1; k >= 0;) {
    Node* previous_load = previous_addp->raw_out(k);
    if (previous_load->is_Load()) {
      Node* data_phi = previous_load->as_Load()->split_through_phi(_igvn, ignore_missing_instance_id);

      // Takes care of updating CG and split_unique_types worklists due to cloned
      // AddP->Load.
      updates_after_load_split(data_phi, previous_load, alloc_worklist);

      _igvn->replace_node(previous_load, data_phi);
    }
    --k;
    k = MIN2(k, (int)previous_addp->outcnt()-1);
  }

  // Remove the old AddP from the processing list because it's dead now
  assert(previous_addp->outcnt() == 0, "AddP should be dead now.");
  alloc_worklist.remove_if_existing(previous_addp);
}

// Create a 'selector' Phi based on the inputs of 'ophi'. If index 'i' of the
// selector is:
//    -> a '-1' constant, the i'th input of the original Phi is NSR.
//    -> an 'x' constant >= 0, the i'th input of the original Phi will be SR and
//       the info about the scalarized object will be at index x of ObjectMergeValue::possible_objects
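//
// For example (illustrative): for ophi = Phi(Region, alloc1, nsr, alloc2) where
// alloc1 and alloc2 are scalar replaceable, the selector is
// Phi(Region, 0, -1, 1) and ObjectMergeValue::possible_objects holds the
// descriptions of alloc1 (index 0) and alloc2 (index 1).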
PhiNode* ConnectionGraph::create_selector(PhiNode* ophi) const {
  Node* minus_one = _igvn->register_new_node_with_optimizer(ConINode::make(-1));
  Node* selector  = _igvn->register_new_node_with_optimizer(PhiNode::make(ophi->region(), minus_one, TypeInt::INT));
  uint number_of_sr_objects = 0;
  for (uint i = 1; i < ophi->req(); i++) {
    Node* base = ophi->in(i);
    JavaObjectNode* ptn = unique_java_object(base);

    if (ptn != nullptr && ptn->scalar_replaceable()) {
      Node* sr_obj_idx = _igvn->register_new_node_with_optimizer(ConINode::make(number_of_sr_objects));
      selector->set_req(i, sr_obj_idx);
      number_of_sr_objects++;
    }
  }

  return selector->as_Phi();
}

// Returns true if the AddP node 'n' has at least one base that is a reducible
// merge. If the base is a CastPP/CheckCastPP then the input of the cast is
// checked instead.
bool ConnectionGraph::has_reducible_merge_base(AddPNode* n, Unique_Node_List &reducible_merges) {
  PointsToNode* ptn = ptnode_adr(n->_idx);
  if (ptn == nullptr || !ptn->is_Field() || ptn->as_Field()->base_count() < 2) {
    return false;
  }

  for (BaseIterator i(ptn->as_Field()); i.has_next(); i.next()) {
    Node* base = i.get()->ideal_node();

    if (reducible_merges.member(base)) {
      return true;
    }

    if (base->is_CastPP() || base->is_CheckCastPP()) {
      base = base->in(1);
      if (reducible_merges.member(base)) {
        return true;
      }
    }
  }

  return false;
}

// This method will call its helper method to reduce SafePoint nodes that use
// 'ophi' or a casted version of 'ophi'. All SafePoint nodes using the same
// "version" of Phi use the same debug information (regarding the Phi).
// Therefore, we collect all such safepoints and patch them all at once.
//
// The safepoints using the Phi node have to be processed before safepoints of
// CastPP nodes. The reason is, when reducing a CastPP we add a reference (the
// NSR merge pointer) to the input of the CastPP (i.e., the Phi) in the
// safepoint. If we process CastPP's safepoints before Phi's safepoints the
// algorithm that processes Phi's safepoints will think that the added Phi
// reference is a regular reference.
bool ConnectionGraph::reduce_phi_on_safepoints(PhiNode* ophi) {
  PhiNode* selector = create_selector(ophi);
  Unique_Node_List safepoints;
  Unique_Node_List casts;

  // Just collect the users of the Phis for later processing
  // in the needed order.
  for (uint i = 0; i < ophi->outcnt(); i++) {
    Node* use = ophi->raw_out(i);
    if (use->is_SafePoint()) {
      safepoints.push(use);
    } else if (use->is_CastPP()) {
      casts.push(use);
    } else {
      assert(use->outcnt() == 0, "Only CastPP & SafePoint users should be left.");
    }
  }

  // Need to process safepoints using the Phi first
  if (!reduce_phi_on_safepoints_helper(ophi, nullptr, selector, safepoints)) {
    return false;
  }

  // Now process CastPP->safepoints
  for (uint i = 0; i < casts.size(); i++) {
    Node* cast = casts.at(i);
    Unique_Node_List cast_sfpts;

    for (DUIterator_Fast jmax, j = cast->fast_outs(jmax); j < jmax; j++) {
      Node* use_use = cast->fast_out(j);
      if (use_use->is_SafePoint()) {
        cast_sfpts.push(use_use);
      } else {
        assert(use_use->outcnt() == 0, "Only SafePoint users should be left.");
      }
    }

    if (!reduce_phi_on_safepoints_helper(ophi, cast, selector, cast_sfpts)) {
      return false;
    }
  }

  return true;
}

// This method will create a SafePointScalarMergeNode for each SafePoint in
// 'safepoints'. It will then iterate over the inputs of 'ophi' and create a
// SafePointScalarObjectNode for each scalar replaceable input. Each
// SafePointScalarMergeNode may describe multiple scalar replaced objects -
// check the detailed description in the SafePointScalarMergeNode class header.
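//
// Illustrative sketch (indices are hypothetical): for ophi = Phi(R, alloc1, nsr, alloc2)
// each safepoint in 'safepoints' ends up with debug info roughly like
//
//   in(merge_pointer_idx)     : nsr_merge_pointer (ophi, or a casted copy of it)
//   in(merge_pointer_idx + 1) : selector, e.g. Phi(R, 0, -1, 1)
//   SafePointScalarMerge      : { SafePointScalarObject(alloc1), SafePointScalarObject(alloc2) }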
1221 bool ConnectionGraph::reduce_phi_on_safepoints_helper(Node* ophi, Node* cast, Node* selector, Unique_Node_List& safepoints) {
1222   PhaseMacroExpand mexp(*_igvn);
1223   Node* original_sfpt_parent =  cast != nullptr ? cast : ophi;
1224   const TypeOopPtr* merge_t = _igvn->type(original_sfpt_parent)->make_oopptr();
1225 
1226   Node* nsr_merge_pointer = ophi;
1227   if (cast != nullptr) {
1228     const Type* new_t = merge_t->meet(TypePtr::NULL_PTR);
1229     nsr_merge_pointer = _igvn->transform(ConstraintCastNode::make_cast_for_type(cast->in(0), cast->in(1), new_t, ConstraintCastNode::RegularDependency, nullptr));
1230   }
1231 
1232   for (uint spi = 0; spi < safepoints.size(); spi++) {
1233     SafePointNode* sfpt = safepoints.at(spi)->as_SafePoint();
1234     JVMState *jvms      = sfpt->jvms();
1235     uint merge_idx      = (sfpt->req() - jvms->scloff());
1236     int debug_start     = jvms->debug_start();
1237 
1238     SafePointScalarMergeNode* smerge = new SafePointScalarMergeNode(merge_t, merge_idx);
1239     smerge->init_req(0, _compile->root());
1240     _igvn->register_new_node_with_optimizer(smerge);
1241 
1242     // The next two inputs are:
1243     //  (1) A copy of the original pointer to NSR objects.
1244     //  (2) A selector, used to decide if we need to rematerialize an object
1245     //      or use the pointer to a NSR object.
1246     // See more details of these fields in the declaration of SafePointScalarMergeNode
1247     sfpt->add_req(nsr_merge_pointer);
1248     sfpt->add_req(selector);
1249 
1250     for (uint i = 1; i < ophi->req(); i++) {
1251       Node* base = ophi->in(i);
1252       JavaObjectNode* ptn = unique_java_object(base);
1253 
1254       // If the base is not scalar replaceable we don't need to register information about
1255       // it at this time.
1256       if (ptn == nullptr || !ptn->scalar_replaceable()) {
1257         continue;
1258       }
1259 
1260       AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
1261       Unique_Node_List value_worklist;
1262 #ifdef ASSERT
1263       const Type* res_type = alloc->result_cast()->bottom_type();
1264       if (res_type->is_inlinetypeptr() && !Compile::current()->has_circular_inline_type()) {
1265         PhiNode* phi = ophi->as_Phi();
1266         assert(!ophi->as_Phi()->can_push_inline_types_down(_igvn), "missed earlier scalarization opportunity");
1267       }
1268 #endif
1269       SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt, &value_worklist);
1270       if (sobj == nullptr) {
1271         _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
1272         return false;
1273       }
1274 
1275       // Now make a pass over the debug information replacing any references
1276       // to the allocated object with "sobj"
1277       Node* ccpp = alloc->result_cast();
1278       sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);
1279 
1280       // Register the scalarized object as a candidate for reallocation
1281       smerge->add_req(sobj);
1282 
1283       // Scalarize inline types that were added to the safepoint.
1284       // Don't allow linking a constant oop (if available) for flat array elements
1285       // because Deoptimization::reassign_flat_array_elements needs field values.
1286       const bool allow_oop = !merge_t->is_flat();
1287       for (uint j = 0; j < value_worklist.size(); ++j) {
1288         InlineTypeNode* vt = value_worklist.at(j)->as_InlineType();
1289         vt->make_scalar_in_safepoints(_igvn, allow_oop);
1290       }
1291     }
1292 
    // Replace debug information references to "original_sfpt_parent" in "sfpt" with references to "smerge".
1294     sfpt->replace_edges_in_range(original_sfpt_parent, smerge, debug_start, jvms->debug_end(), _igvn);
1295 
    // The call to 'replace_edges_in_range' above might have removed the
    // reference to ophi that we need at _merge_pointer_idx. The line below
    // makes sure the reference is maintained.
1299     sfpt->set_req(smerge->merge_pointer_idx(jvms), nsr_merge_pointer);
1300     _igvn->_worklist.push(sfpt);
1301   }
1302 
1303   return true;
1304 }
1305 
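// Rewrite the users of a reducible Phi so that the merged allocations can
// later be scalar replaced: CastPP users first, then AddP (field access) and
// CmpP users. SafePoint users are handled separately by
// reduce_phi_on_safepoints_helper().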
1306 void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node *>  &alloc_worklist, GrowableArray<Node *>  &memnode_worklist) {
1307   bool delay = _igvn->delay_transform();
1308   _igvn->set_delay_transform(true);
1309   _igvn->hash_delete(ophi);
1310 
  // Copy all users first because some will be removed and others won't.
  // Ophi may also acquire new users as part of Cast reduction.
1313   // CastPPs also need to be processed before CmpPs.
1314   Unique_Node_List castpps;
1315   Unique_Node_List others;
1316   for (DUIterator_Fast imax, i = ophi->fast_outs(imax); i < imax; i++) {
1317     Node* use = ophi->fast_out(i);
1318 
1319     if (use->is_CastPP()) {
1320       castpps.push(use);
1321     } else if (use->is_AddP() || use->is_Cmp()) {
1322       others.push(use);
1323     } else if (use->is_SafePoint()) {
1324       // processed later
1325     } else {
1326       assert(use->is_SafePoint(), "Unexpected user of reducible Phi %d -> %d:%s:%d", ophi->_idx, use->_idx, use->Name(), use->outcnt());
1327     }
1328   }
1329 
1330   // CastPPs need to be processed before Cmps because during the process of
1331   // splitting CastPPs we make reference to the inputs of the Cmp that is used
1332   // by the If controlling the CastPP.
1333   for (uint i = 0; i < castpps.size(); i++) {
1334     reduce_phi_on_castpp_field_load(castpps.at(i), alloc_worklist, memnode_worklist);
1335   }
1336 
1337   for (uint i = 0; i < others.size(); i++) {
1338     Node* use = others.at(i);
1339 
1340     if (use->is_AddP()) {
1341       reduce_phi_on_field_access(use, alloc_worklist);
    } else if (use->is_Cmp()) {
1343       reduce_phi_on_cmp(use);
1344     }
1345   }
1346 
1347   _igvn->set_delay_transform(delay);
1348 }
1349 
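// Once a Phi has been reduced, its scalar replaceable inputs no longer need
// to be kept alive as real pointers: replace them with null in a fresh Phi
// and widen the types of dependent casts (and of the Phi itself) to include
// null.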
1350 void ConnectionGraph::reset_scalar_replaceable_entries(PhiNode* ophi) {
1351   Node* null_ptr            = _igvn->makecon(TypePtr::NULL_PTR);
1352   const TypeOopPtr* merge_t = _igvn->type(ophi)->make_oopptr();
1353   const Type* new_t         = merge_t->meet(TypePtr::NULL_PTR);
1354   Node* new_phi             = _igvn->register_new_node_with_optimizer(PhiNode::make(ophi->region(), null_ptr, new_t));
1355 
1356   for (uint i = 1; i < ophi->req(); i++) {
1357     Node* base          = ophi->in(i);
1358     JavaObjectNode* ptn = unique_java_object(base);
1359 
1360     if (ptn != nullptr && ptn->scalar_replaceable()) {
1361       new_phi->set_req(i, null_ptr);
1362     } else {
1363       new_phi->set_req(i, ophi->in(i));
1364     }
1365   }
1366 
1367   for (int i = ophi->outcnt()-1; i >= 0;) {
1368     Node* out = ophi->raw_out(i);
1369 
1370     if (out->is_ConstraintCast()) {
1371       const Type* out_t = _igvn->type(out)->make_ptr();
1372       const Type* out_new_t = out_t->meet(TypePtr::NULL_PTR);
1373       bool change = out_new_t != out_t;
1374 
1375       for (int j = out->outcnt()-1; change && j >= 0; --j) {
1376         Node* out2 = out->raw_out(j);
1377         if (!out2->is_SafePoint()) {
1378           change = false;
1379           break;
1380         }
1381       }
1382 
1383       if (change) {
1384         Node* new_cast = ConstraintCastNode::make_cast_for_type(out->in(0), out->in(1), out_new_t, ConstraintCastNode::StrongDependency, nullptr);
1385         _igvn->replace_node(out, new_cast);
1386         _igvn->register_new_node_with_optimizer(new_cast);
1387       }
1388     }
1389 
1390     --i;
1391     i = MIN2(i, (int)ophi->outcnt()-1);
1392   }
1393 
1394   _igvn->replace_node(ophi, new_phi);
1395 }
1396 
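// Verify the shape of reduced allocation merges left in the graph: inputs of
// a SafePointScalarMerge must be null/top or SafePointScalarObject, and
// SafePointScalarMerge nodes may only be used, unnested, by safepoints.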
1397 void ConnectionGraph::verify_ram_nodes(Compile* C, Node* root) {
1398   if (!C->do_reduce_allocation_merges()) return;
1399 
1400   Unique_Node_List ideal_nodes;
1401   ideal_nodes.map(C->live_nodes(), nullptr);  // preallocate space
1402   ideal_nodes.push(root);
1403 
1404   for (uint next = 0; next < ideal_nodes.size(); ++next) {
1405     Node* n = ideal_nodes.at(next);
1406 
1407     if (n->is_SafePointScalarMerge()) {
1408       SafePointScalarMergeNode* merge = n->as_SafePointScalarMerge();
1409 
1410       // Validate inputs of merge
1411       for (uint i = 1; i < merge->req(); i++) {
1412         if (merge->in(i) != nullptr && !merge->in(i)->is_top() && !merge->in(i)->is_SafePointScalarObject()) {
1413           assert(false, "SafePointScalarMerge inputs should be null/top or SafePointScalarObject.");
1414           C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
1415         }
1416       }
1417 
1418       // Validate users of merge
1419       for (DUIterator_Fast imax, i = merge->fast_outs(imax); i < imax; i++) {
1420         Node* sfpt = merge->fast_out(i);
1421         if (sfpt->is_SafePoint()) {
1422           int merge_idx = merge->merge_pointer_idx(sfpt->as_SafePoint()->jvms());
1423 
1424           if (sfpt->in(merge_idx) != nullptr && sfpt->in(merge_idx)->is_SafePointScalarMerge()) {
1425             assert(false, "SafePointScalarMerge nodes can't be nested.");
1426             C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
1427           }
1428         } else {
1429           assert(false, "Only safepoints can use SafePointScalarMerge nodes.");
1430           C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
1431         }
1432       }
1433     }
1434 
1435     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1436       Node* m = n->fast_out(i);
1437       ideal_nodes.push(m);
1438     }
1439   }
1440 }
1441 
1442 // Returns true if there is an object in the scope of sfn that does not escape globally.
1443 bool ConnectionGraph::has_ea_local_in_scope(SafePointNode* sfn) {
1444   Compile* C = _compile;
1445   for (JVMState* jvms = sfn->jvms(); jvms != nullptr; jvms = jvms->caller()) {
1446     if (C->env()->should_retain_local_variables() || C->env()->jvmti_can_walk_any_space() ||
1447         DeoptimizeObjectsALot) {
      // JVMTI agents can access locals. Must provide info about local objects at runtime.
1449       int num_locs = jvms->loc_size();
1450       for (int idx = 0; idx < num_locs; idx++) {
1451         Node* l = sfn->local(jvms, idx);
1452         if (not_global_escape(l)) {
1453           return true;
1454         }
1455       }
1456     }
1457     if (C->env()->jvmti_can_get_owned_monitor_info() ||
1458         C->env()->jvmti_can_walk_any_space() || DeoptimizeObjectsALot) {
      // JVMTI agents can read monitors. Must provide info about locked objects at runtime.
1460       int num_mon = jvms->nof_monitors();
1461       for (int idx = 0; idx < num_mon; idx++) {
1462         Node* m = sfn->monitor_obj(jvms, idx);
1463         if (m != nullptr && not_global_escape(m)) {
1464           return true;
1465         }
1466       }
1467     }
1468   }
1469   return false;
1470 }
1471 
1472 // Returns true if at least one of the arguments to the call is an object
1473 // that does not escape globally.
1474 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
1475   if (call->method() != nullptr) {
1476     uint max_idx = TypeFunc::Parms + call->method()->arg_size();
1477     for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
1478       Node* p = call->in(idx);
1479       if (not_global_escape(p)) {
1480         return true;
1481       }
1482     }
1483   } else {
1484     const char* name = call->as_CallStaticJava()->_name;
1485     assert(name != nullptr, "no name");
1486     // no arg escapes through uncommon traps
1487     if (strcmp(name, "uncommon_trap") != 0) {
1488       // process_call_arguments() assumes that all arguments escape globally
1489       const TypeTuple* d = call->tf()->domain_sig();
1490       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1491         const Type* at = d->field_at(i);
1492         if (at->isa_oopptr() != nullptr) {
1493           return true;
1494         }
1495       }
1496     }
1497   }
1498   return false;
1499 }
1500 
1501 
1502 
1503 // Utility function for nodes that load an object
1504 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
1505   // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1506   // ThreadLocal has RawPtr type.
1507   const Type* t = _igvn->type(n);
1508   if (t->make_ptr() != nullptr) {
1509     Node* adr = n->in(MemNode::Address);
1510 #ifdef ASSERT
1511     if (!adr->is_AddP()) {
1512       assert(_igvn->type(adr)->isa_rawptr(), "sanity");
1513     } else {
1514       assert((ptnode_adr(adr->_idx) == nullptr ||
1515               ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
1516     }
1517 #endif
1518     add_local_var_and_edge(n, PointsToNode::NoEscape,
1519                            adr, delayed_worklist);
1520   }
1521 }
1522 
1523 // Populate Connection Graph with PointsTo nodes and create simple
1524 // connection graph edges.
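// Nodes whose edges can't be created yet (e.g. some of their inputs have not
// been processed) are pushed on 'delayed_worklist' and finished later by
// add_final_edges().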
1525 void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
1526   assert(!_verify, "this method should not be called for verification");
1527   PhaseGVN* igvn = _igvn;
1528   uint n_idx = n->_idx;
1529   PointsToNode* n_ptn = ptnode_adr(n_idx);
1530   if (n_ptn != nullptr) {
1531     return; // No need to redefine PointsTo node during first iteration.
1532   }
1533   int opcode = n->Opcode();
1534   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);
1535   if (gc_handled) {
1536     return; // Ignore node if already handled by GC.
1537   }
1538 
1539   if (n->is_Call()) {
1540     // Arguments to allocation and locking don't escape.
1541     if (n->is_AbstractLock()) {
1542       // Put Lock and Unlock nodes on IGVN worklist to process them during
1543       // first IGVN optimization when escape information is still available.
1544       record_for_optimizer(n);
1545     } else if (n->is_Allocate()) {
1546       add_call_node(n->as_Call());
1547       record_for_optimizer(n);
1548     } else {
1549       if (n->is_CallStaticJava()) {
1550         const char* name = n->as_CallStaticJava()->_name;
1551         if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
1552           return; // Skip uncommon traps
1553         }
1554       }
1555       // Don't mark as processed since call's arguments have to be processed.
1556       delayed_worklist->push(n);
1557       // Check if a call returns an object.
1558       if ((n->as_Call()->returns_pointer() &&
1559            n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
1560           (n->is_CallStaticJava() &&
1561            n->as_CallStaticJava()->is_boxing_method())) {
1562         add_call_node(n->as_Call());
1563       } else if (n->as_Call()->tf()->returns_inline_type_as_fields()) {
1564         bool returns_oop = false;
1565         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && !returns_oop; i++) {
1566           ProjNode* pn = n->fast_out(i)->as_Proj();
1567           if (pn->_con >= TypeFunc::Parms && pn->bottom_type()->isa_ptr()) {
1568             returns_oop = true;
1569           }
1570         }
1571         if (returns_oop) {
1572           add_call_node(n->as_Call());
1573         }
1574       }
1575     }
1576     return;
1577   }
1578   // Put this check here to process call arguments since some call nodes
1579   // point to phantom_obj.
1580   if (n_ptn == phantom_obj || n_ptn == null_obj) {
1581     return; // Skip predefined nodes.
1582   }
1583   switch (opcode) {
1584     case Op_AddP: {
1585       Node* base = get_addp_base(n);
1586       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1587       // Field nodes are created for all field types. They are used in
1588       // adjust_scalar_replaceable_state() and split_unique_types().
1589       // Note, non-oop fields will have only base edges in Connection
1590       // Graph because such fields are not used for oop loads and stores.
1591       int offset = address_offset(n, igvn);
1592       add_field(n, PointsToNode::NoEscape, offset);
1593       if (ptn_base == nullptr) {
1594         delayed_worklist->push(n); // Process it later.
1595       } else {
1596         n_ptn = ptnode_adr(n_idx);
1597         add_base(n_ptn->as_Field(), ptn_base);
1598       }
1599       break;
1600     }
1601     case Op_CastX2P: {
1602       map_ideal_node(n, phantom_obj);
1603       break;
1604     }
1605     case Op_InlineType:
1606     case Op_CastPP:
1607     case Op_CheckCastPP:
1608     case Op_EncodeP:
1609     case Op_DecodeN:
1610     case Op_EncodePKlass:
1611     case Op_DecodeNKlass: {
1612       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
1613       break;
1614     }
1615     case Op_CMoveP: {
1616       add_local_var(n, PointsToNode::NoEscape);
      // Do not add edges during the first iteration because some inputs
      // may not be defined yet.
1619       delayed_worklist->push(n);
1620       break;
1621     }
1622     case Op_ConP:
1623     case Op_ConN:
1624     case Op_ConNKlass: {
1625       // assume all oop constants globally escape except for null
1626       PointsToNode::EscapeState es;
1627       const Type* t = igvn->type(n);
1628       if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
1629         es = PointsToNode::NoEscape;
1630       } else {
1631         es = PointsToNode::GlobalEscape;
1632       }
1633       PointsToNode* ptn_con = add_java_object(n, es);
1634       set_not_scalar_replaceable(ptn_con NOT_PRODUCT(COMMA "Constant pointer"));
1635       break;
1636     }
1637     case Op_CreateEx: {
1638       // assume that all exception objects globally escape
1639       map_ideal_node(n, phantom_obj);
1640       break;
1641     }
1642     case Op_LoadKlass:
1643     case Op_LoadNKlass: {
1644       // Unknown class is loaded
1645       map_ideal_node(n, phantom_obj);
1646       break;
1647     }
1648     case Op_LoadP:
1649     case Op_LoadN: {
1650       add_objload_to_connection_graph(n, delayed_worklist);
1651       break;
1652     }
1653     case Op_Parm: {
1654       map_ideal_node(n, phantom_obj);
1655       break;
1656     }
1657     case Op_PartialSubtypeCheck: {
      // Produces Null or notNull and is used only in CmpP, so
      // phantom_obj can be used.
1660       map_ideal_node(n, phantom_obj); // Result is unknown
1661       break;
1662     }
1663     case Op_Phi: {
1664       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1665       // ThreadLocal has RawPtr type.
1666       const Type* t = n->as_Phi()->type();
1667       if (t->make_ptr() != nullptr) {
1668         add_local_var(n, PointsToNode::NoEscape);
        // Do not add edges during the first iteration because some inputs
        // may not be defined yet.
1671         delayed_worklist->push(n);
1672       }
1673       break;
1674     }
1675     case Op_Proj: {
1676       // we are only interested in the oop result projection from a call
1677       if (n->as_Proj()->_con >= TypeFunc::Parms && n->in(0)->is_Call() &&
1678           (n->in(0)->as_Call()->returns_pointer() || n->bottom_type()->isa_ptr())) {
1679         assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
1680                n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
1681         add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1682       }
1683       break;
1684     }
1685     case Op_Rethrow: // Exception object escapes
1686     case Op_Return: {
1687       if (n->req() > TypeFunc::Parms &&
1688           igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
1689         // Treat Return value as LocalVar with GlobalEscape escape state.
1690         add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
1691       }
1692       break;
1693     }
1694     case Op_CompareAndExchangeP:
1695     case Op_CompareAndExchangeN:
1696     case Op_GetAndSetP:
1697     case Op_GetAndSetN: {
1698       add_objload_to_connection_graph(n, delayed_worklist);
1699       // fall-through
1700     }
1701     case Op_StoreP:
1702     case Op_StoreN:
1703     case Op_StoreNKlass:
1704     case Op_WeakCompareAndSwapP:
1705     case Op_WeakCompareAndSwapN:
1706     case Op_CompareAndSwapP:
1707     case Op_CompareAndSwapN: {
1708       add_to_congraph_unsafe_access(n, opcode, delayed_worklist);
1709       break;
1710     }
1711     case Op_AryEq:
1712     case Op_CountPositives:
1713     case Op_StrComp:
1714     case Op_StrEquals:
1715     case Op_StrIndexOf:
1716     case Op_StrIndexOfChar:
1717     case Op_StrInflatedCopy:
1718     case Op_StrCompressedCopy:
1719     case Op_VectorizedHashCode:
1720     case Op_EncodeISOArray: {
1721       add_local_var(n, PointsToNode::ArgEscape);
1722       delayed_worklist->push(n); // Process it later.
1723       break;
1724     }
1725     case Op_ThreadLocal: {
1726       PointsToNode* ptn_thr = add_java_object(n, PointsToNode::ArgEscape);
1727       set_not_scalar_replaceable(ptn_thr NOT_PRODUCT(COMMA "Constant pointer"));
1728       break;
1729     }
1730     case Op_Blackhole: {
1731       // All blackhole pointer arguments are globally escaping.
1732       // Only do this if there is at least one pointer argument.
      // Do not add edges during the first iteration because some inputs
      // may not be defined yet; defer to the final step.
1735       for (uint i = 0; i < n->req(); i++) {
1736         Node* in = n->in(i);
1737         if (in != nullptr) {
1738           const Type* at = _igvn->type(in);
1739           if (!at->isa_ptr()) continue;
1740 
1741           add_local_var(n, PointsToNode::GlobalEscape);
1742           delayed_worklist->push(n);
1743           break;
1744         }
1745       }
1746       break;
1747     }
1748     default:
1749       ; // Do nothing for nodes not related to EA.
1750   }
1751   return;
1752 }
1753 
1754 // Add final simple edges to graph.
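// This mirrors the switch in add_node_to_connection_graph(), but runs once
// all PointsTo nodes exist, so edges to input nodes can be added right away.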
1755 void ConnectionGraph::add_final_edges(Node *n) {
1756   PointsToNode* n_ptn = ptnode_adr(n->_idx);
1757 #ifdef ASSERT
1758   if (_verify && n_ptn->is_JavaObject())
1759     return; // This method does not change graph for JavaObject.
1760 #endif
1761 
1762   if (n->is_Call()) {
1763     process_call_arguments(n->as_Call());
1764     return;
1765   }
1766   assert(n->is_Store() || n->is_LoadStore() ||
1767          ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
1768          "node should be registered already");
1769   int opcode = n->Opcode();
1770   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
1771   if (gc_handled) {
1772     return; // Ignore node if already handled by GC.
1773   }
1774   switch (opcode) {
1775     case Op_AddP: {
1776       Node* base = get_addp_base(n);
1777       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1778       assert(ptn_base != nullptr, "field's base should be registered");
1779       add_base(n_ptn->as_Field(), ptn_base);
1780       break;
1781     }
1782     case Op_InlineType:
1783     case Op_CastPP:
1784     case Op_CheckCastPP:
1785     case Op_EncodeP:
1786     case Op_DecodeN:
1787     case Op_EncodePKlass:
1788     case Op_DecodeNKlass: {
1789       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
1790       break;
1791     }
1792     case Op_CMoveP: {
1793       for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
1794         Node* in = n->in(i);
1795         if (in == nullptr) {
1796           continue;  // ignore null
1797         }
1798         Node* uncast_in = in->uncast();
1799         if (uncast_in->is_top() || uncast_in == n) {
1800           continue;  // ignore top or inputs which go back this node
1801         }
1802         PointsToNode* ptn = ptnode_adr(in->_idx);
1803         assert(ptn != nullptr, "node should be registered");
1804         add_edge(n_ptn, ptn);
1805       }
1806       break;
1807     }
1808     case Op_LoadP:
1809     case Op_LoadN: {
1810       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1811       // ThreadLocal has RawPtr type.
1812       assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1813       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1814       break;
1815     }
1816     case Op_Phi: {
1817       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1818       // ThreadLocal has RawPtr type.
1819       assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
1820       for (uint i = 1; i < n->req(); i++) {
1821         Node* in = n->in(i);
1822         if (in == nullptr) {
1823           continue;  // ignore null
1824         }
1825         Node* uncast_in = in->uncast();
1826         if (uncast_in->is_top() || uncast_in == n) {
1827           continue;  // ignore top or inputs which go back this node
1828         }
1829         PointsToNode* ptn = ptnode_adr(in->_idx);
1830         assert(ptn != nullptr, "node should be registered");
1831         add_edge(n_ptn, ptn);
1832       }
1833       break;
1834     }
1835     case Op_Proj: {
1836       // we are only interested in the oop result projection from a call
1837       assert((n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->as_Call()->returns_pointer()) ||
1838              n->in(0)->as_Call()->tf()->returns_inline_type_as_fields(), "what kind of oop return is it?");
1839       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
1840       break;
1841     }
1842     case Op_Rethrow: // Exception object escapes
1843     case Op_Return: {
1844       assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
1845              "Unexpected node type");
1846       // Treat Return value as LocalVar with GlobalEscape escape state.
1847       add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
1848       break;
1849     }
1850     case Op_CompareAndExchangeP:
1851     case Op_CompareAndExchangeN:
1852     case Op_GetAndSetP:
1853     case Op_GetAndSetN:{
1854       assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1855       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1856       // fall-through
1857     }
1858     case Op_CompareAndSwapP:
1859     case Op_CompareAndSwapN:
1860     case Op_WeakCompareAndSwapP:
1861     case Op_WeakCompareAndSwapN:
1862     case Op_StoreP:
1863     case Op_StoreN:
1864     case Op_StoreNKlass:{
1865       add_final_edges_unsafe_access(n, opcode);
1866       break;
1867     }
1868     case Op_VectorizedHashCode:
1869     case Op_AryEq:
1870     case Op_CountPositives:
1871     case Op_StrComp:
1872     case Op_StrEquals:
1873     case Op_StrIndexOf:
1874     case Op_StrIndexOfChar:
1875     case Op_StrInflatedCopy:
1876     case Op_StrCompressedCopy:
1877     case Op_EncodeISOArray: {
1878       // char[]/byte[] arrays passed to string intrinsic do not escape but
1879       // they are not scalar replaceable. Adjust escape state for them.
1880       // Start from in(2) edge since in(1) is memory edge.
1881       for (uint i = 2; i < n->req(); i++) {
1882         Node* adr = n->in(i);
1883         const Type* at = _igvn->type(adr);
1884         if (!adr->is_top() && at->isa_ptr()) {
1885           assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
1886                  at->isa_ptr() != nullptr, "expecting a pointer");
1887           if (adr->is_AddP()) {
1888             adr = get_addp_base(adr);
1889           }
1890           PointsToNode* ptn = ptnode_adr(adr->_idx);
1891           assert(ptn != nullptr, "node should be registered");
1892           add_edge(n_ptn, ptn);
1893         }
1894       }
1895       break;
1896     }
1897     case Op_Blackhole: {
1898       // All blackhole pointer arguments are globally escaping.
1899       for (uint i = 0; i < n->req(); i++) {
1900         Node* in = n->in(i);
1901         if (in != nullptr) {
1902           const Type* at = _igvn->type(in);
1903           if (!at->isa_ptr()) continue;
1904 
1905           if (in->is_AddP()) {
1906             in = get_addp_base(in);
1907           }
1908 
1909           PointsToNode* ptn = ptnode_adr(in->_idx);
1910           assert(ptn != nullptr, "should be defined already");
1911           set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "blackhole"));
1912           add_edge(n_ptn, ptn);
1913         }
1914       }
1915       break;
1916     }
1917     default: {
1918       // This method should be called only for EA specific nodes which may
1919       // miss some edges when they were created.
1920 #ifdef ASSERT
1921       n->dump(1);
1922 #endif
1923       guarantee(false, "unknown node");
1924     }
1925   }
1926   return;
1927 }
1928 
1929 void ConnectionGraph::add_to_congraph_unsafe_access(Node* n, uint opcode, Unique_Node_List* delayed_worklist) {
1930   Node* adr = n->in(MemNode::Address);
1931   const Type* adr_type = _igvn->type(adr);
1932   adr_type = adr_type->make_ptr();
1933   if (adr_type == nullptr) {
1934     return; // skip dead nodes
1935   }
1936   if (adr_type->isa_oopptr()
1937       || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
1938           && adr_type == TypeRawPtr::NOTNULL
1939           && is_captured_store_address(adr))) {
1940     delayed_worklist->push(n); // Process it later.
1941 #ifdef ASSERT
    assert(adr->is_AddP(), "expecting an AddP");
1943     if (adr_type == TypeRawPtr::NOTNULL) {
1944       // Verify a raw address for a store captured by Initialize node.
1945       int offs = (int) _igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
1946       assert(offs != Type::OffsetBot, "offset must be a constant");
1947     }
1948 #endif
1949   } else {
    // Ignore the copy of the displaced header to the BoxNode (OSR compilation).
1951     if (adr->is_BoxLock()) {
1952       return;
1953     }
1954     // Stored value escapes in unsafe access.
1955     if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
1956       delayed_worklist->push(n); // Process unsafe access later.
1957       return;
1958     }
1959 #ifdef ASSERT
1960     n->dump(1);
1961     assert(false, "not unsafe");
1962 #endif
1963   }
1964 }
1965 
1966 bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) {
1967   Node* adr = n->in(MemNode::Address);
1968   const Type *adr_type = _igvn->type(adr);
1969   adr_type = adr_type->make_ptr();
1970 #ifdef ASSERT
1971   if (adr_type == nullptr) {
1972     n->dump(1);
1973     assert(adr_type != nullptr, "dead node should not be on list");
1974     return true;
1975   }
1976 #endif
1977 
1978   if (adr_type->isa_oopptr()
1979       || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
1980            && adr_type == TypeRawPtr::NOTNULL
1981            && is_captured_store_address(adr))) {
1982     // Point Address to Value
1983     PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
1984     assert(adr_ptn != nullptr &&
1985            adr_ptn->as_Field()->is_oop(), "node should be registered");
1986     Node* val = n->in(MemNode::ValueIn);
1987     PointsToNode* ptn = ptnode_adr(val->_idx);
1988     assert(ptn != nullptr, "node should be registered");
1989     add_edge(adr_ptn, ptn);
1990     return true;
1991   } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
1992     // Stored value escapes in unsafe access.
1993     Node* val = n->in(MemNode::ValueIn);
1994     PointsToNode* ptn = ptnode_adr(val->_idx);
1995     assert(ptn != nullptr, "node should be registered");
1996     set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
1997     // Add edge to object for unsafe access with offset.
1998     PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
1999     assert(adr_ptn != nullptr, "node should be registered");
2000     if (adr_ptn->is_Field()) {
2001       assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
2002       add_edge(adr_ptn, ptn);
2003     }
2004     return true;
2005   }
2006 #ifdef ASSERT
2007   n->dump(1);
2008   assert(false, "not unsafe");
2009 #endif
2010   return false;
2011 }
2012 
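// Create a PointsTo node for a call that returns an oop and classify its
// initial escape state based on what is known about the callee.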
2013 void ConnectionGraph::add_call_node(CallNode* call) {
2014   assert(call->returns_pointer() || call->tf()->returns_inline_type_as_fields(), "only for call which returns pointer");
2015   uint call_idx = call->_idx;
2016   if (call->is_Allocate()) {
2017     Node* k = call->in(AllocateNode::KlassNode);
2018     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
    assert(kt != nullptr, "TypeKlassPtr required.");
2020     PointsToNode::EscapeState es = PointsToNode::NoEscape;
2021     bool scalar_replaceable = true;
2022     NOT_PRODUCT(const char* nsr_reason = "");
2023     if (call->is_AllocateArray()) {
2024       if (!kt->isa_aryklassptr()) { // StressReflectiveCode
2025         es = PointsToNode::GlobalEscape;
2026       } else {
2027         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
2028         if (length < 0) {
2029           // Not scalar replaceable if the length is not constant.
2030           scalar_replaceable = false;
2031           NOT_PRODUCT(nsr_reason = "has a non-constant length");
2032         } else if (length > EliminateAllocationArraySizeLimit) {
2033           // Not scalar replaceable if the length is too big.
2034           scalar_replaceable = false;
2035           NOT_PRODUCT(nsr_reason = "has a length that is too big");
2036         }
2037       }
2038     } else {  // Allocate instance
2039       if (!kt->isa_instklassptr()) { // StressReflectiveCode
2040         es = PointsToNode::GlobalEscape;
2041       } else {
2042         const TypeInstKlassPtr* ikt = kt->is_instklassptr();
2043         ciInstanceKlass* ik = ikt->klass_is_exact() ? ikt->exact_klass()->as_instance_klass() : ikt->instance_klass();
2044         if (ik->is_subclass_of(_compile->env()->Thread_klass()) ||
2045             ik->is_subclass_of(_compile->env()->Reference_klass()) ||
2046             !ik->can_be_instantiated() ||
2047             ik->has_finalizer()) {
2048           es = PointsToNode::GlobalEscape;
2049         } else {
2050           int nfields = ik->as_instance_klass()->nof_nonstatic_fields();
2051           if (nfields > EliminateAllocationFieldsLimit) {
2052             // Not scalar replaceable if there are too many fields.
2053             scalar_replaceable = false;
2054             NOT_PRODUCT(nsr_reason = "has too many fields");
2055           }
2056         }
2057       }
2058     }
2059     add_java_object(call, es);
2060     PointsToNode* ptn = ptnode_adr(call_idx);
2061     if (!scalar_replaceable && ptn->scalar_replaceable()) {
2062       set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA nsr_reason));
2063     }
2064   } else if (call->is_CallStaticJava()) {
2065     // Call nodes could be different types:
2066     //
2067     // 1. CallDynamicJavaNode (what happened during call is unknown):
2068     //
2069     //    - mapped to GlobalEscape JavaObject node if oop is returned;
2070     //
2071     //    - all oop arguments are escaping globally;
2072     //
2073     // 2. CallStaticJavaNode (execute bytecode analysis if possible):
2074     //
2075     //    - the same as CallDynamicJavaNode if can't do bytecode analysis;
2076     //
2077     //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
2078     //    - mapped to NoEscape JavaObject node if non-escaping object allocated
2079     //      during call is returned;
2080     //    - mapped to ArgEscape LocalVar node pointed to object arguments
2081     //      which are returned and does not escape during call;
2082     //
2083     //    - oop arguments escaping status is defined by bytecode analysis;
2084     //
2085     // For a static call, we know exactly what method is being called.
2086     // Use bytecode estimator to record whether the call's return value escapes.
2087     ciMethod* meth = call->as_CallJava()->method();
2088     if (meth == nullptr) {
2089       const char* name = call->as_CallStaticJava()->_name;
2090       assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0 ||
2091              strncmp(name, "C2 Runtime load_unknown_inline", 30) == 0, "TODO: add failed case check");
2092       // Returns a newly allocated non-escaped object.
2093       add_java_object(call, PointsToNode::NoEscape);
      set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multianewarray"));
2095     } else if (meth->is_boxing_method()) {
2096       // Returns boxing object
2097       PointsToNode::EscapeState es;
2098       vmIntrinsics::ID intr = meth->intrinsic_id();
2099       if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
2100         // It does not escape if object is always allocated.
2101         es = PointsToNode::NoEscape;
2102       } else {
2103         // It escapes globally if object could be loaded from cache.
2104         es = PointsToNode::GlobalEscape;
2105       }
2106       add_java_object(call, es);
2107       if (es == PointsToNode::GlobalEscape) {
2108         set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache"));
2109       }
2110     } else {
2111       BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
2112       call_analyzer->copy_dependencies(_compile->dependencies());
2113       if (call_analyzer->is_return_allocated()) {
        // Returns a newly allocated non-escaped object; simply
        // update dependency information.
        // Mark it as NoEscape so that objects referenced by
        // its fields will be marked as NoEscape at least.
2118         add_java_object(call, PointsToNode::NoEscape);
2119         set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
2120       } else {
2121         // Determine whether any arguments are returned.
2122         const TypeTuple* d = call->tf()->domain_cc();
2123         bool ret_arg = false;
2124         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2125           if (d->field_at(i)->isa_ptr() != nullptr &&
2126               call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
2127             ret_arg = true;
2128             break;
2129           }
2130         }
2131         if (ret_arg) {
2132           add_local_var(call, PointsToNode::ArgEscape);
2133         } else {
2134           // Returns unknown object.
2135           map_ideal_node(call, phantom_obj);
2136         }
2137       }
2138     }
2139   } else {
    // Another type of call; assume the worst case:
    // the returned value is unknown and globally escapes.
2142     assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
2143     map_ideal_node(call, phantom_obj);
2144   }
2145 }
2146 
2147 void ConnectionGraph::process_call_arguments(CallNode *call) {
2148     bool is_arraycopy = false;
2149     switch (call->Opcode()) {
2150 #ifdef ASSERT
2151     case Op_Allocate:
2152     case Op_AllocateArray:
2153     case Op_Lock:
2154     case Op_Unlock:
2155       assert(false, "should be done already");
2156       break;
2157 #endif
2158     case Op_ArrayCopy:
2159     case Op_CallLeafNoFP:
2160       // Most array copies are ArrayCopy nodes at this point but there
2161       // are still a few direct calls to the copy subroutines (See
2162       // PhaseStringOpts::copy_string())
2163       is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
2164         call->as_CallLeaf()->is_call_to_arraycopystub();
2165       // fall through
2166     case Op_CallLeafVector:
2167     case Op_CallLeaf: {
      // Stub calls: objects do not escape but they are not scalar replaceable.
2169       // Adjust escape state for outgoing arguments.
2170       const TypeTuple * d = call->tf()->domain_sig();
2171       bool src_has_oops = false;
2172       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2173         const Type* at = d->field_at(i);
2174         Node *arg = call->in(i);
2175         if (arg == nullptr) {
2176           continue;
2177         }
2178         const Type *aat = _igvn->type(arg);
2179         if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
2180           continue;
2181         }
2182         if (arg->is_AddP()) {
2183           //
2184           // The inline_native_clone() case when the arraycopy stub is called
2185           // after the allocation before Initialize and CheckCastPP nodes.
2186           // Or normal arraycopy for object arrays case.
2187           //
2188           // Set AddP's base (Allocate) as not scalar replaceable since
2189           // pointer to the base (with offset) is passed as argument.
2190           //
2191           arg = get_addp_base(arg);
2192         }
2193         PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2194         assert(arg_ptn != nullptr, "should be registered");
2195         PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
2196         if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
          assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                 aat->isa_ptr() != nullptr, "expecting a Ptr");
2199           bool arg_has_oops = aat->isa_oopptr() &&
2200                               (aat->isa_instptr() ||
2201                                (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)) ||
2202                                (aat->isa_aryptr() && aat->isa_aryptr()->elem() != nullptr &&
2203                                                                aat->isa_aryptr()->is_flat() &&
2204                                                                aat->isa_aryptr()->elem()->inline_klass()->contains_oops()));
2205           if (i == TypeFunc::Parms) {
2206             src_has_oops = arg_has_oops;
2207           }
2208           //
2209           // src or dst could be j.l.Object when other is basic type array:
2210           //
2211           //   arraycopy(char[],0,Object*,0,size);
2212           //   arraycopy(Object*,0,char[],0,size);
2213           //
2214           // Don't add edges in such cases.
2215           //
2216           bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
2217                                        arg_has_oops && (i > TypeFunc::Parms);
2218 #ifdef ASSERT
2219           if (!(is_arraycopy ||
2220                 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
2221                 (call->as_CallLeaf()->_name != nullptr &&
2222                  (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
2223                   strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
2224                   strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
2225                   strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
2226                   strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
2227                   strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
2228                   strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
2229                   strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_encryptAESCrypt") == 0 ||
2230                   strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_decryptAESCrypt") == 0 ||
2231                   strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
2232                   strcmp(call->as_CallLeaf()->_name, "galoisCounterMode_AESCrypt") == 0 ||
2233                   strcmp(call->as_CallLeaf()->_name, "poly1305_processBlocks") == 0 ||
2234                   strcmp(call->as_CallLeaf()->_name, "intpoly_montgomeryMult_P256") == 0 ||
2235                   strcmp(call->as_CallLeaf()->_name, "intpoly_assign") == 0 ||
2236                   strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
2237                   strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 ||
2238                   strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
2239                   strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
2240                   strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
2241                   strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
2242                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
2243                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
2244                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
2245                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
2246                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
2247                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
2248                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
2249                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
2250                   strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
2251                   strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
2252                   strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
2253                   strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
2254                   strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
2255                   strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
2256                   strcmp(call->as_CallLeaf()->_name, "load_unknown_inline") == 0 ||
2257                   strcmp(call->as_CallLeaf()->_name, "store_unknown_inline") == 0 ||
2258                   strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
2259                   strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
2260                   strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
2261                   strcmp(call->as_CallLeaf()->_name, "stringIndexOf") == 0 ||
2262                   strcmp(call->as_CallLeaf()->_name, "arraysort_stub") == 0 ||
2263                   strcmp(call->as_CallLeaf()->_name, "array_partition_stub") == 0 ||
2264                   strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0 ||
2265                   strcmp(call->as_CallLeaf()->_name, "unsafe_setmemory") == 0)
2266                  ))) {
2267             call->dump();
2268             fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
2269           }
2270 #endif
2271           // Always process arraycopy's destination object since
2272           // we need to add all possible edges to references in
2273           // source object.
2274           if (arg_esc >= PointsToNode::ArgEscape &&
2275               !arg_is_arraycopy_dest) {
2276             continue;
2277           }
2278           PointsToNode::EscapeState es = PointsToNode::ArgEscape;
2279           if (call->is_ArrayCopy()) {
2280             ArrayCopyNode* ac = call->as_ArrayCopy();
2281             if (ac->is_clonebasic() ||
2282                 ac->is_arraycopy_validated() ||
2283                 ac->is_copyof_validated() ||
2284                 ac->is_copyofrange_validated()) {
2285               es = PointsToNode::NoEscape;
2286             }
2287           }
2288           set_escape_state(arg_ptn, es NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2289           if (arg_is_arraycopy_dest) {
2290             Node* src = call->in(TypeFunc::Parms);
2291             if (src->is_AddP()) {
2292               src = get_addp_base(src);
2293             }
2294             PointsToNode* src_ptn = ptnode_adr(src->_idx);
2295             assert(src_ptn != nullptr, "should be registered");
2296             if (arg_ptn != src_ptn) {
2297               // Special arraycopy edge:
2298               // A destination object's field can't have the source object
              // as base since objects' escape states are not related.
2300               // Only escape state of destination object's fields affects
2301               // escape state of fields in source object.
2302               add_arraycopy(call, es, src_ptn, arg_ptn);
2303             }
2304           }
2305         }
2306       }
2307       break;
2308     }
2309     case Op_CallStaticJava: {
2310       // For a static call, we know exactly what method is being called.
      // Use bytecode estimator to record the call's escape effects
2312 #ifdef ASSERT
2313       const char* name = call->as_CallStaticJava()->_name;
2314       assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
2315 #endif
2316       ciMethod* meth = call->as_CallJava()->method();
2317       if ((meth != nullptr) && meth->is_boxing_method()) {
2318         break; // Boxing methods do not modify any oops.
2319       }
      BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
2321       // fall-through if not a Java method or no analyzer information
2322       if (call_analyzer != nullptr) {
2323         PointsToNode* call_ptn = ptnode_adr(call->_idx);
2324         const TypeTuple* d = call->tf()->domain_cc();
2325         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2326           const Type* at = d->field_at(i);
2327           int k = i - TypeFunc::Parms;
2328           Node* arg = call->in(i);
2329           PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2330           if (at->isa_ptr() != nullptr &&
2331               call_analyzer->is_arg_returned(k)) {
2332             // The call returns arguments.
2333             if (call_ptn != nullptr) { // Is call's result used?
2334               assert(call_ptn->is_LocalVar(), "node should be registered");
2335               assert(arg_ptn != nullptr, "node should be registered");
2336               add_edge(call_ptn, arg_ptn);
2337             }
2338           }
2339           if (at->isa_oopptr() != nullptr &&
2340               arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
2341             if (!call_analyzer->is_arg_stack(k)) {
2342               // The argument global escapes
2343               set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2344             } else {
2345               set_escape_state(arg_ptn, PointsToNode::ArgEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2346               if (!call_analyzer->is_arg_local(k)) {
2347                 // The argument itself doesn't escape, but any fields might
2348                 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2349               }
2350             }
2351           }
2352         }
2353         if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
2354           // The call returns arguments.
2355           assert(call_ptn->edge_count() > 0, "sanity");
2356           if (!call_analyzer->is_return_local()) {
2357             // Returns also unknown object.
2358             add_edge(call_ptn, phantom_obj);
2359           }
2360         }
2361         break;
2362       }
2363     }
    default: {
      // Fall through here if it is not a Java method, no analyzer information
      // is available, or it is some other type of call; assume the worst case:
      // all arguments globally escape.
2368       const TypeTuple* d = call->tf()->domain_cc();
2369       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2370         const Type* at = d->field_at(i);
2371         if (at->isa_oopptr() != nullptr) {
2372           Node* arg = call->in(i);
2373           if (arg->is_AddP()) {
2374             arg = get_addp_base(arg);
2375           }
2376           assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
2377           set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2378         }
2379       }
2380     }
2381   }
2382 }
2383 
2384 
2385 // Finish Graph construction.
2386 bool ConnectionGraph::complete_connection_graph(
2387                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
2388                          GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
2389                          GrowableArray<JavaObjectNode*>& java_objects_worklist,
2390                          GrowableArray<FieldNode*>&      oop_fields_worklist) {
  // Normally only 1-3 passes are needed to build the Connection Graph,
  // depending on graph complexity. We observed 8 passes in jvm2008
  // compiler.compiler. Set the limit to 20 to catch a situation when
  // something went wrong and bail out of Escape Analysis.
  // Also limit the build time to 20 sec (60 in a debug VM); see the
  // EscapeAnalysisTimeout flag.
2396 #define GRAPH_BUILD_ITER_LIMIT 20
2397 
  // Propagate GlobalEscape and ArgEscape escape states and check that
  // we still have non-escaping objects. The method pushes Field nodes
  // which reference phantom_object onto _worklist.
2401   if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
2402     return false; // Nothing to do.
2403   }
2404   // Now propagate references to all JavaObject nodes.
2405   int java_objects_length = java_objects_worklist.length();
2406   elapsedTimer build_time;
2407   build_time.start();
2408   elapsedTimer time;
2409   bool timeout = false;
2410   int new_edges = 1;
2411   int iterations = 0;
2412   do {
2413     while ((new_edges > 0) &&
2414            (iterations++ < GRAPH_BUILD_ITER_LIMIT)) {
2415       double start_time = time.seconds();
2416       time.start();
2417       new_edges = 0;
2418       // Propagate references to phantom_object for nodes pushed on _worklist
2419       // by find_non_escaped_objects() and find_field_value().
2420       new_edges += add_java_object_edges(phantom_obj, false);
2421       for (int next = 0; next < java_objects_length; ++next) {
2422         JavaObjectNode* ptn = java_objects_worklist.at(next);
2423         new_edges += add_java_object_edges(ptn, true);
2424 
2425 #define SAMPLE_SIZE 4
2426         if ((next % SAMPLE_SIZE) == 0) {
          // Every SAMPLE_SIZE iterations, estimate how much time it will
          // take to complete graph construction.
2429           time.stop();
          // Poll for requests from the shutdown mechanism to quiesce the
          // compiler because Connection Graph construction may take a long time.
2432           CompileBroker::maybe_block();
2433           double stop_time = time.seconds();
2434           double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
2435           double time_until_end = time_per_iter * (double)(java_objects_length - next);
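          // For example, with SAMPLE_SIZE == 4, if the last sample took
          // 0.2 sec then time_per_iter is 0.05 sec; with 1000 objects left
          // the estimated remaining time is 50 sec. We bail out once elapsed
          // plus estimated time crosses EscapeAnalysisTimeout.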
2436           if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
2437             timeout = true;
2438             break; // Timeout
2439           }
2440           start_time = stop_time;
2441           time.start();
2442         }
2443 #undef SAMPLE_SIZE
2444 
2445       }
2446       if (timeout) break;
2447       if (new_edges > 0) {
2448         // Update escape states on each iteration if graph was updated.
2449         if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
2450           return false; // Nothing to do.
2451         }
2452       }
2453       time.stop();
2454       if (time.seconds() >= EscapeAnalysisTimeout) {
2455         timeout = true;
2456         break;
2457       }
2458     }
2459     if ((iterations < GRAPH_BUILD_ITER_LIMIT) && !timeout) {
2460       time.start();
2461       // Find fields which have unknown value.
2462       int fields_length = oop_fields_worklist.length();
2463       for (int next = 0; next < fields_length; next++) {
2464         FieldNode* field = oop_fields_worklist.at(next);
2465         if (field->edge_count() == 0) {
2466           new_edges += find_field_value(field);
          // This code may have added new edges to phantom_object.
          // Another cycle is needed to propagate references to phantom_object.
2469         }
2470       }
2471       time.stop();
2472       if (time.seconds() >= EscapeAnalysisTimeout) {
2473         timeout = true;
2474         break;
2475       }
2476     } else {
2477       new_edges = 0; // Bailout
2478     }
2479   } while (new_edges > 0);
2480 
2481   build_time.stop();
2482   _build_time = build_time.seconds();
2483   _build_iterations = iterations;
2484 
2485   // Bailout if passed limits.
2486   if ((iterations >= GRAPH_BUILD_ITER_LIMIT) || timeout) {
2487     Compile* C = _compile;
2488     if (C->log() != nullptr) {
2489       C->log()->begin_elem("connectionGraph_bailout reason='reached ");
2490       C->log()->text("%s", timeout ? "time" : "iterations");
2491       C->log()->end_elem(" limit'");
2492     }
2493     assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build during invocation %d (%f sec, %d iterations) with %d nodes and worklist size %d",
2494            _invocation, _build_time, _build_iterations, nodes_size(), ptnodes_worklist.length());
2495     // Possible infinite build_connection_graph loop,
2496     // bailout (no changes to ideal graph were made).
2497     return false;
2498   }
2499 
2500 #undef GRAPH_BUILD_ITER_LIMIT
2501 
2502   // Find fields initialized by null for non-escaping Allocations.
2503   int non_escaped_length = non_escaped_allocs_worklist.length();
2504   for (int next = 0; next < non_escaped_length; next++) {
2505     JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
2506     PointsToNode::EscapeState es = ptn->escape_state();
2507     assert(es <= PointsToNode::ArgEscape, "sanity");
2508     if (es == PointsToNode::NoEscape) {
2509       if (find_init_values_null(ptn, _igvn) > 0) {
        // Adding references to the null object does not change escape states
        // since it does not escape. Also, no fields are added to the null object.
2512         add_java_object_edges(null_obj, false);
2513       }
2514     }
2515     Node* n = ptn->ideal_node();
2516     if (n->is_Allocate()) {
      // The object allocated by this Allocate node will never be
      // seen by another thread. Mark it so that when it is
      // expanded no MemBarStoreStore is added.
2520       InitializeNode* ini = n->as_Allocate()->initialization();
2521       if (ini != nullptr)
2522         ini->set_does_not_escape();
2523     }
2524   }
2525   return true; // Finished graph construction.
2526 }
2527 
2528 // Propagate GlobalEscape and ArgEscape escape states to all nodes
2529 // and check that we still have non-escaping java objects.
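// Escape states only grow along the lattice NoEscape < ArgEscape <
// GlobalEscape, so the worklist propagation below reaches a fixed point.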
2530 bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
2531                                                GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist) {
2532   GrowableArray<PointsToNode*> escape_worklist;
2533   // First, put all nodes with GlobalEscape and ArgEscape states on worklist.
2534   int ptnodes_length = ptnodes_worklist.length();
2535   for (int next = 0; next < ptnodes_length; ++next) {
2536     PointsToNode* ptn = ptnodes_worklist.at(next);
2537     if (ptn->escape_state() >= PointsToNode::ArgEscape ||
2538         ptn->fields_escape_state() >= PointsToNode::ArgEscape) {
2539       escape_worklist.push(ptn);
2540     }
2541   }
2542   // Set escape states to referenced nodes (edges list).
2543   while (escape_worklist.length() > 0) {
2544     PointsToNode* ptn = escape_worklist.pop();
2545     PointsToNode::EscapeState es  = ptn->escape_state();
2546     PointsToNode::EscapeState field_es = ptn->fields_escape_state();
2547     if (ptn->is_Field() && ptn->as_Field()->is_oop() &&
2548         es >= PointsToNode::ArgEscape) {
2549       // GlobalEscape or ArgEscape state of a field means it has an unknown value.
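           // For example (illustrative only): after a non-inlined call 'foo(p)',
           // the callee may have overwritten 'p.f', so the field must also point
           // to the unknown object (phantom_obj).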
2550       if (add_edge(ptn, phantom_obj)) {
2551         // New edge was added
2552         add_field_uses_to_worklist(ptn->as_Field());
2553       }
2554     }
2555     for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2556       PointsToNode* e = i.get();
2557       if (e->is_Arraycopy()) {
2558         assert(ptn->arraycopy_dst(), "sanity");
2559         // Propagate only fields escape state through arraycopy edge.
2560         if (e->fields_escape_state() < field_es) {
2561           set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2562           escape_worklist.push(e);
2563         }
2564       } else if (es >= field_es) {
2565         // fields_escape_state is also set to 'es' if it is less than 'es'.
2566         if (e->escape_state() < es) {
2567           set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2568           escape_worklist.push(e);
2569         }
2570       } else {
2571         // Propagate field escape state.
2572         bool es_changed = false;
2573         if (e->fields_escape_state() < field_es) {
2574           set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2575           es_changed = true;
2576         }
2577         if ((e->escape_state() < field_es) &&
2578             e->is_Field() && ptn->is_JavaObject() &&
2579             e->as_Field()->is_oop()) {
2580           // Change escape state of referenced fields.
2581           set_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2582           es_changed = true;
2583         } else if (e->escape_state() < es) {
2584           set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2585           es_changed = true;
2586         }
2587         if (es_changed) {
2588           escape_worklist.push(e);
2589         }
2590       }
2591     }
2592   }
2593   // Remove escaped objects from non_escaped list.
2594   for (int next = non_escaped_allocs_worklist.length()-1; next >= 0 ; --next) {
2595     JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
2596     if (ptn->escape_state() >= PointsToNode::GlobalEscape) {
2597       non_escaped_allocs_worklist.delete_at(next);
2598     }
2599     if (ptn->escape_state() == PointsToNode::NoEscape) {
2600       // Find fields in non-escaped allocations which have unknown value.
2601       find_init_values_phantom(ptn);
2602     }
2603   }
2604   return (non_escaped_allocs_worklist.length() > 0);
2605 }
2606 
2607 // Add all references to JavaObject node by walking over all uses.
2608 int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {
2609   int new_edges = 0;
2610   if (populate_worklist) {
2611     // Populate _worklist by uses of jobj's uses.
2612     for (UseIterator i(jobj); i.has_next(); i.next()) {
2613       PointsToNode* use = i.get();
2614       if (use->is_Arraycopy()) {
2615         continue;
2616       }
2617       add_uses_to_worklist(use);
2618       if (use->is_Field() && use->as_Field()->is_oop()) {
2619         // Put on worklist all field's uses (loads) and
2620         // related field nodes (same base and offset).
2621         add_field_uses_to_worklist(use->as_Field());
2622       }
2623     }
2624   }
2625   for (int l = 0; l < _worklist.length(); l++) {
2626     PointsToNode* use = _worklist.at(l);
2627     if (PointsToNode::is_base_use(use)) {
2628       // Add reference from jobj to field and from field to jobj (field's base).
2629       use = PointsToNode::get_use_node(use)->as_Field();
2630       if (add_base(use->as_Field(), jobj)) {
2631         new_edges++;
2632       }
2633       continue;
2634     }
2635     assert(!use->is_JavaObject(), "sanity");
2636     if (use->is_Arraycopy()) {
2637       if (jobj == null_obj) { // null object does not have field edges
2638         continue;
2639       }
2640       // Add edge from Arraycopy node to arraycopy's source java object
2641       if (add_edge(use, jobj)) {
2642         jobj->set_arraycopy_src();
2643         new_edges++;
2644       }
2645       // and stop here.
2646       continue;
2647     }
2648     if (!add_edge(use, jobj)) {
2649       continue; // No new edge added, there was such edge already.
2650     }
2651     new_edges++;
2652     if (use->is_LocalVar()) {
2653       add_uses_to_worklist(use);
2654       if (use->arraycopy_dst()) {
2655         for (EdgeIterator i(use); i.has_next(); i.next()) {
2656           PointsToNode* e = i.get();
2657           if (e->is_Arraycopy()) {
2658             if (jobj == null_obj) { // null object does not have field edges
2659               continue;
2660             }
2661             // Add edge from arraycopy's destination java object to Arraycopy node.
2662             if (add_edge(jobj, e)) {
2663               new_edges++;
2664               jobj->set_arraycopy_dst();
2665             }
2666           }
2667         }
2668       }
2669     } else {
2670       // A new edge was added to a value stored in a field.
2671       // Put on worklist all field's uses (loads) and
2672       // related field nodes (same base and offset).
2673       add_field_uses_to_worklist(use->as_Field());
2674     }
2675   }
2676   _worklist.clear();
2677   _in_worklist.reset();
2678   return new_edges;
2679 }
2680 
2681 // Put on worklist all related field nodes.
2682 void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
2683   assert(field->is_oop(), "sanity");
2684   int offset = field->offset();
2685   add_uses_to_worklist(field);
2686   // Loop over all bases of this field and push on worklist Field nodes
2687   // with the same offset and base (since they may reference the same field).
2688   for (BaseIterator i(field); i.has_next(); i.next()) {
2689     PointsToNode* base = i.get();
2690     add_fields_to_worklist(field, base);
2691     // Check if the base was source object of arraycopy and go over arraycopy's
2692     // destination objects since values stored to a field of source object are
2693     // accessible by uses (loads) of fields of destination objects.
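         // For example (illustrative only):
         //
         //    System.arraycopy(src, 0, dst, 0, n);
         //    Object o = dst[0]; // may observe a value stored into src[0]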
2694     if (base->arraycopy_src()) {
2695       for (UseIterator j(base); j.has_next(); j.next()) {
2696         PointsToNode* arycp = j.get();
2697         if (arycp->is_Arraycopy()) {
2698           for (UseIterator k(arycp); k.has_next(); k.next()) {
2699             PointsToNode* abase = k.get();
2700             if (abase->arraycopy_dst() && abase != base) {
2701               // Look for the same arraycopy reference.
2702               add_fields_to_worklist(field, abase);
2703             }
2704           }
2705         }
2706       }
2707     }
2708   }
2709 }
2710 
2711 // Put on worklist all related field nodes.
2712 void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
2713   int offset = field->offset();
2714   if (base->is_LocalVar()) {
2715     for (UseIterator j(base); j.has_next(); j.next()) {
2716       PointsToNode* f = j.get();
2717       if (PointsToNode::is_base_use(f)) { // Field
2718         f = PointsToNode::get_use_node(f);
2719         if (f == field || !f->as_Field()->is_oop()) {
2720           continue;
2721         }
2722         int offs = f->as_Field()->offset();
2723         if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
2724           add_to_worklist(f);
2725         }
2726       }
2727     }
2728   } else {
2729     assert(base->is_JavaObject(), "sanity");
2730     if (// Skip phantom_object since it is only used to indicate that
2731         // this field's content globally escapes.
2732         (base != phantom_obj) &&
2733         // null object node does not have fields.
2734         (base != null_obj)) {
2735       for (EdgeIterator i(base); i.has_next(); i.next()) {
2736         PointsToNode* f = i.get();
2737         // Skip arraycopy edge since store to destination object field
2738         // does not update value in source object field.
2739         if (f->is_Arraycopy()) {
2740           assert(base->arraycopy_dst(), "sanity");
2741           continue;
2742         }
2743         if (f == field || !f->as_Field()->is_oop()) {
2744           continue;
2745         }
2746         int offs = f->as_Field()->offset();
2747         if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
2748           add_to_worklist(f);
2749         }
2750       }
2751     }
2752   }
2753 }
2754 
2755 // Find fields which have unknown value.
2756 int ConnectionGraph::find_field_value(FieldNode* field) {
2757   // Escaped fields should have init value already.
2758   assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
2759   int new_edges = 0;
2760   for (BaseIterator i(field); i.has_next(); i.next()) {
2761     PointsToNode* base = i.get();
2762     if (base->is_JavaObject()) {
2763       // Skip Allocate's fields which will be processed later.
2764       if (base->ideal_node()->is_Allocate()) {
2765         return 0;
2766       }
2767       assert(base == null_obj, "only null ptr base expected here");
2768     }
2769   }
2770   if (add_edge(field, phantom_obj)) {
2771     // New edge was added
2772     new_edges++;
2773     add_field_uses_to_worklist(field);
2774   }
2775   return new_edges;
2776 }
2777 
2778 // Find fields initializing values for allocations.
2779 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
2780   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2781   PointsToNode* init_val = phantom_obj;
2782   Node* alloc = pta->ideal_node();
2783 
2784   // Do nothing for Allocate nodes since their field values are
2785   // "known" unless they are initialized by arraycopy/clone.
2786   if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
2787     if (alloc->as_Allocate()->in(AllocateNode::DefaultValue) != nullptr) {
2788       // Non-flat inline type arrays are initialized with
2789       // the default value instead of null. Handle them here.
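           // For example (a hypothetical Valhalla sketch; 'MyValue' is
           // illustrative): the elements of a null-free 'MyValue[]' array
           // start out as the default value rather than null.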
2790       init_val = ptnode_adr(alloc->as_Allocate()->in(AllocateNode::DefaultValue)->_idx);
2791       assert(init_val != nullptr, "default value should be registered");
2792     } else {
2793       return 0;
2794     }
2795   }
2796   // A non-escaped allocation returned from a Java or runtime call has unknown values in its fields.
2797   assert(pta->arraycopy_dst() || alloc->is_CallStaticJava() || init_val != phantom_obj, "sanity");
2798 #ifdef ASSERT
2799   if (alloc->is_CallStaticJava() && alloc->as_CallStaticJava()->method() == nullptr) {
2800     const char* name = alloc->as_CallStaticJava()->_name;
2801     assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0 ||
2802            strncmp(name, "C2 Runtime load_unknown_inline", 30) == 0, "sanity");
2803   }
2804 #endif
2806   int new_edges = 0;
2807   for (EdgeIterator i(pta); i.has_next(); i.next()) {
2808     PointsToNode* field = i.get();
2809     if (field->is_Field() && field->as_Field()->is_oop()) {
2810       if (add_edge(field, init_val)) {
2811         // New edge was added
2812         new_edges++;
2813         add_field_uses_to_worklist(field->as_Field());
2814       }
2815     }
2816   }
2817   return new_edges;
2818 }
2819 
2820 // Find fields initializing values for allocations.
2821 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
2822   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2823   Node* alloc = pta->ideal_node();
2824   // Do nothing for Call nodes since their field values are unknown.
2825   if (!alloc->is_Allocate() || alloc->as_Allocate()->in(AllocateNode::DefaultValue) != nullptr) {
2826     return 0;
2827   }
2828   InitializeNode* ini = alloc->as_Allocate()->initialization();
2829   bool visited_bottom_offset = false;
2830   GrowableArray<int> offsets_worklist;
2831   int new_edges = 0;
2832 
2833   // Check if an oop field's initializing value is recorded and add
2834   // a corresponding null value if it is not recorded.
2835   // The Connection Graph does not record a default initialization by null
2836   // captured by an Initialize node.
2837   //
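       // For example (illustrative only; the 'next' field is hypothetical):
       //
       //    Point p = new Point(); // p.next defaults to null, but no captured
       //                           // store for 'next' is recorded by Initialize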
2838   for (EdgeIterator i(pta); i.has_next(); i.next()) {
2839     PointsToNode* field = i.get(); // Field (AddP)
2840     if (!field->is_Field() || !field->as_Field()->is_oop()) {
2841       continue; // Not oop field
2842     }
2843     int offset = field->as_Field()->offset();
2844     if (offset == Type::OffsetBot) {
2845       if (!visited_bottom_offset) {
2846         // OffsetBot is used to reference an array's element;
2847         // always add a reference to null to all Field nodes since we don't
2848         // know which element is referenced.
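             // For example (illustrative only):
             //
             //    a[i] = o; // 'i' is not a constant, so the element is unknown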
2849         if (add_edge(field, null_obj)) {
2850           // New edge was added
2851           new_edges++;
2852           add_field_uses_to_worklist(field->as_Field());
2853           visited_bottom_offset = true;
2854         }
2855       }
2856     } else {
2857       // Check only oop fields.
2858       const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
2859       if (adr_type->isa_rawptr()) {
2860 #ifdef ASSERT
2861         // Raw pointers are used for initializing stores, so skip them
2862         // since they should be recorded already.
2863         Node* base = get_addp_base(field->ideal_node());
2864         assert(adr_type->isa_rawptr() && is_captured_store_address(field->ideal_node()), "unexpected pointer type");
2865 #endif
2866         continue;
2867       }
2868       if (!offsets_worklist.contains(offset)) {
2869         offsets_worklist.append(offset);
2870         Node* value = nullptr;
2871         if (ini != nullptr) {
2872           // StoreP::memory_type() == T_ADDRESS
2873           BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
2874           Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
2875           // Make sure the initializing store has the same type as this AddP.
2876           // This AddP may reference a non-existing field because it is on a
2877           // dead branch of a bimorphic call which is not eliminated yet.
2878           if (store != nullptr && store->is_Store() &&
2879               store->as_Store()->memory_type() == ft) {
2880             value = store->in(MemNode::ValueIn);
2881 #ifdef ASSERT
2882             if (VerifyConnectionGraph) {
2883               // Verify that AddP already points to all objects the value points to.
2884               PointsToNode* val = ptnode_adr(value->_idx);
2885               assert((val != nullptr), "should be processed already");
2886               PointsToNode* missed_obj = nullptr;
2887               if (val->is_JavaObject()) {
2888                 if (!field->points_to(val->as_JavaObject())) {
2889                   missed_obj = val;
2890                 }
2891               } else {
2892                 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
2893                   tty->print_cr("----------init store has invalid value -----");
2894                   store->dump();
2895                   val->dump();
2896                   assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
2897                 }
2898                 for (EdgeIterator j(val); j.has_next(); j.next()) {
2899                   PointsToNode* obj = j.get();
2900                   if (obj->is_JavaObject()) {
2901                     if (!field->points_to(obj->as_JavaObject())) {
2902                       missed_obj = obj;
2903                       break;
2904                     }
2905                   }
2906                 }
2907               }
2908               if (missed_obj != nullptr) {
2909                 tty->print_cr("----------field---------------------------------");
2910                 field->dump();
2911                 tty->print_cr("----------missed reference to object------------");
2912                 missed_obj->dump();
2913                 tty->print_cr("----------object referenced by init store-------");
2914                 store->dump();
2915                 val->dump();
2916                 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
2917               }
2918             }
2919 #endif
2920           } else {
2921             // There could be initializing stores which follow allocation.
2922             // For example, a volatile field store is not collected
2923             // by the Initialize node.
2924             //
2925             // Need to check for dependent loads to separate such stores from
2926             // stores which follow loads. For now, add the initial value null so
2927             // that the compare pointers optimization works correctly.
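                 // An illustrative sketch:
                 //
                 //    class A { volatile Object f; A(Object o) { f = o; } }
                 //
                 // The volatile store to 'f' is not captured by Initialize, so
                 // 'f' conservatively gets the initial value null here.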
2928           }
2929         }
2930         if (value == nullptr) {
2931           // A field's initializing value was not recorded. Add null.
2932           if (add_edge(field, null_obj)) {
2933             // New edge was added
2934             new_edges++;
2935             add_field_uses_to_worklist(field->as_Field());
2936           }
2937         }
2938       }
2939     }
2940   }
2941   return new_edges;
2942 }
2943 
2944 // Adjust scalar_replaceable state after Connection Graph is built.
2945 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj, Unique_Node_List &reducible_merges) {
2946   // A Phi 'x' is a _candidate_ to be reducible if 'can_reduce_phi(x)'
2947   // returns true. If one of the constraints in this method sets 'jobj' to NSR
2948   // then the candidate Phi is discarded. If the Phi has another SR 'jobj' as
2949   // input, 'adjust_scalar_replaceable_state' will eventually be called with
2950   // that other object and the Phi will become a reducible Phi.
2951   // There could be multiple merges involving the same jobj.
2952   Unique_Node_List candidates;
2953 
2954   // Search for non-escaping objects which are not scalar replaceable
2955   // and mark them to propagate the state to referenced objects.
2956 
2957   for (UseIterator i(jobj); i.has_next(); i.next()) {
2958     PointsToNode* use = i.get();
2959     if (use->is_Arraycopy()) {
2960       continue;
2961     }
2962     if (use->is_Field()) {
2963       FieldNode* field = use->as_Field();
2964       assert(field->is_oop() && field->scalar_replaceable(), "sanity");
2965       // 1. An object is not scalar replaceable if the field into which it is
2966       // stored has unknown offset (stored into unknown element of an array).
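           // For example (illustrative only):
           //
           //    a[i] = obj; // 'i' unknown => 'obj' stored at an unknown offset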
2967       if (field->offset() == Type::OffsetBot) {
2968         set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored at unknown offset"));
2969         return;
2970       }
2971       for (BaseIterator i(field); i.has_next(); i.next()) {
2972         PointsToNode* base = i.get();
2973         // 2. An object is not scalar replaceable if the field into which it is
2974         // stored has multiple bases one of which is null.
2975         if ((base == null_obj) && (field->base_count() > 1)) {
2976           set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with potentially null base"));
2977           return;
2978         }
2979         // 2.5. An object is not scalar replaceable if the field into which it is
2980         // stored has NSR base.
2981         if (!base->scalar_replaceable()) {
2982           set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base"));
2983           return;
2984         }
2985       }
2986     }
2987     assert(use->is_Field() || use->is_LocalVar(), "sanity");
2988     // 3. An object is not scalar replaceable if it is merged with other objects
2989     // and we can't remove the merge.
2990     for (EdgeIterator j(use); j.has_next(); j.next()) {
2991       PointsToNode* ptn = j.get();
2992       if (ptn->is_JavaObject() && ptn != jobj) {
2993         Node* use_n = use->ideal_node();
2994 
2995         // These other local vars may point to multiple objects through a Phi
2996         // In this case we skip them and see if we can reduce the Phi.
2997         if (use_n->is_CastPP() || use_n->is_CheckCastPP()) {
2998           use_n = use_n->in(1);
2999         }
3000 
3001         // If it's already a candidate or confirmed reducible merge we can skip verification
3002         if (candidates.member(use_n) || reducible_merges.member(use_n)) {
3003           continue;
3004         }
3005 
3006         if (use_n->is_Phi() && can_reduce_phi(use_n->as_Phi())) {
3007           candidates.push(use_n);
3008         } else {
3009           // Mark all objects as NSR if we can't remove the merge
3010           set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA trace_merged_message(ptn)));
3011           set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA trace_merged_message(jobj)));
3012         }
3013       }
3014     }
3015     if (!jobj->scalar_replaceable()) {
3016       return;
3017     }
3018   }
3019 
3020   for (EdgeIterator j(jobj); j.has_next(); j.next()) {
3021     if (j.get()->is_Arraycopy()) {
3022       continue;
3023     }
3024 
3025     // Non-escaping object node should point only to field nodes.
3026     FieldNode* field = j.get()->as_Field();
3027     int offset = field->as_Field()->offset();
3028 
3029     // 4. An object is not scalar replaceable if it has a field with unknown
3030     // offset (array's element is accessed in loop).
3031     if (offset == Type::OffsetBot) {
3032       set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "has field with unknown offset"));
3033       return;
3034     }
3035     // 5. Currently an object is not scalar replaceable if a LoadStore node
3036     // accesses its field since the field value is unknown after it.
3037     //
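         // For example (illustrative only): an atomic update such as a VarHandle
         // compareAndSet on one of the object's fields is parsed as a LoadStore
         // node and keeps the object from being scalar replaced.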
3038     Node* n = field->ideal_node();
3039 
3040     // Test for an unsafe access that was parsed as maybe off heap
3041     // (with a CheckCastPP to raw memory).
3042     assert(n->is_AddP(), "expect an address computation");
3043     if (n->in(AddPNode::Base)->is_top() &&
3044         n->in(AddPNode::Address)->Opcode() == Op_CheckCastPP) {
3045       assert(n->in(AddPNode::Address)->bottom_type()->isa_rawptr(), "raw address so raw cast expected");
3046       assert(_igvn->type(n->in(AddPNode::Address)->in(1))->isa_oopptr(), "cast pattern at unsafe access expected");
3047       set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used as base of mixed unsafe access"));
3048       return;
3049     }
3050 
3051     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
3052       Node* u = n->fast_out(i);
3053       if (u->is_LoadStore() || (u->is_Mem() && u->as_Mem()->is_mismatched_access())) {
3054         set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used in LoadStore or mismatched access"));
3055         return;
3056       }
3057     }
3058 
3059     // 6. Or the address may point to more than one object. This may produce
3060     // a false positive result (set not scalar replaceable)
3061     // since the flow-insensitive escape analysis can't separate
3062     // the case when stores overwrite the field's value from the case
3063     // when stores happen on different control branches.
3064     //
3065     // Note: it will disable scalar replacement in some cases:
3066     //
3067     //    Point p[] = new Point[1];
3068     //    p[0] = new Point(); // Will not be scalar replaced
3069     //
3070     // but it will save us from incorrect optimizations in cases like the following:
3071     //
3072     //    Point p[] = new Point[1];
3073     //    if ( x ) p[0] = new Point(); // Will not be scalar replaced
3074     //
3075     if (field->base_count() > 1 && candidates.size() == 0) {
3076       if (has_non_reducible_merge(field, reducible_merges)) {
3077         for (BaseIterator i(field); i.has_next(); i.next()) {
3078           PointsToNode* base = i.get();
3079           // Don't take into account LocalVar nodes which
3080           // may point to only one object which should be also
3081           // this field's base by now.
3082           if (base->is_JavaObject() && base != jobj) {
3083             // Mark all bases.
3084             set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "may point to more than one object"));
3085             set_not_scalar_replaceable(base NOT_PRODUCT(COMMA "may point to more than one object"));
3086           }
3087         }
3088 
3089         if (!jobj->scalar_replaceable()) {
3090           return;
3091         }
3092       }
3093     }
3094   }
3095 
3096   // The candidate is truly a reducible merge only if none of the other
3097   // constraints ruled it as NSR. There could be multiple merges involving the
3098   // same jobj.
3099   assert(jobj->scalar_replaceable(), "sanity");
3100   for (uint i = 0; i < candidates.size(); i++ ) {
3101     Node* candidate = candidates.at(i);
3102     reducible_merges.push(candidate);
3103   }
3104 }
3105 
3106 bool ConnectionGraph::has_non_reducible_merge(FieldNode* field, Unique_Node_List& reducible_merges) {
3107   for (BaseIterator i(field); i.has_next(); i.next()) {
3108     Node* base = i.get()->ideal_node();
3109     if (base->is_Phi() && !reducible_merges.member(base)) {
3110       return true;
3111     }
3112   }
3113   return false;
3114 }
3115 
3116 void ConnectionGraph::revisit_reducible_phi_status(JavaObjectNode* jobj, Unique_Node_List& reducible_merges) {
3117   assert(jobj != nullptr && !jobj->scalar_replaceable(), "jobj should be set as NSR before calling this function.");
3118 
3119   // Look for 'phis' that refer to 'jobj' as the last
3120   // remaining scalar replaceable input.
3121   uint reducible_merges_cnt = reducible_merges.size();
3122   for (uint i = 0; i < reducible_merges_cnt; i++) {
3123     Node* phi = reducible_merges.at(i);
3124 
3125     // This 'Phi' will be 'good' if it still points to
3126     // at least one scalar replaceable object. Note that 'jobj'
3127     // was/should be marked as NSR before calling this function.
3128     bool good_phi = false;
3129 
3130     for (uint j = 1; j < phi->req(); j++) {
3131       JavaObjectNode* phi_in_obj = unique_java_object(phi->in(j));
3132       if (phi_in_obj != nullptr && phi_in_obj->scalar_replaceable()) {
3133         good_phi = true;
3134         break;
3135       }
3136     }
3137 
3138     if (!good_phi) {
3139       NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Phi %d became non-reducible after node %d became NSR.", phi->_idx, jobj->ideal_node()->_idx);)
3140       reducible_merges.remove(i);
3141 
3142       // Decrement the index because the 'remove' call above actually
3143       // moves the last entry of the list to position 'i'.
3144       i--;
3145 
3146       reducible_merges_cnt--;
3147     }
3148   }
3149 }
3150 
3151 // Propagate NSR (Not scalar replaceable) state.
3152 void ConnectionGraph::find_scalar_replaceable_allocs(GrowableArray<JavaObjectNode*>& jobj_worklist, Unique_Node_List &reducible_merges) {
3153   int jobj_length = jobj_worklist.length();
3154   bool found_nsr_alloc = true;
3155   while (found_nsr_alloc) {
3156     found_nsr_alloc = false;
3157     for (int next = 0; next < jobj_length; ++next) {
3158       JavaObjectNode* jobj = jobj_worklist.at(next);
3159       for (UseIterator i(jobj); (jobj->scalar_replaceable() && i.has_next()); i.next()) {
3160         PointsToNode* use = i.get();
3161         if (use->is_Field()) {
3162           FieldNode* field = use->as_Field();
3163           assert(field->is_oop() && field->scalar_replaceable(), "sanity");
3164           assert(field->offset() != Type::OffsetBot, "sanity");
3165           for (BaseIterator i(field); i.has_next(); i.next()) {
3166             PointsToNode* base = i.get();
3167             // An object is not scalar replaceable if the field into which
3168             // it is stored has NSR base.
3169             if ((base != null_obj) && !base->scalar_replaceable()) {
3170               set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base"));
3171               // Any merge that had only 'jobj' as scalar-replaceable will now be non-reducible,
3172               // because there is no point in reducing a Phi that won't improve the number of SR
3173               // objects.
3174               revisit_reducible_phi_status(jobj, reducible_merges);
3175               found_nsr_alloc = true;
3176               break;
3177             }
3178           }
3179         }
3180       }
3181     }
3182   }
3183 }
3184 
3185 #ifdef ASSERT
3186 void ConnectionGraph::verify_connection_graph(
3187                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
3188                          GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
3189                          GrowableArray<JavaObjectNode*>& java_objects_worklist,
3190                          GrowableArray<Node*>& addp_worklist) {
3191   // Verify that graph is complete - no new edges could be added.
3192   int java_objects_length = java_objects_worklist.length();
3193   int non_escaped_length  = non_escaped_allocs_worklist.length();
3194   int new_edges = 0;
3195   for (int next = 0; next < java_objects_length; ++next) {
3196     JavaObjectNode* ptn = java_objects_worklist.at(next);
3197     new_edges += add_java_object_edges(ptn, true);
3198   }
3199   assert(new_edges == 0, "graph was not complete");
3200   // Verify that escape state is final.
3201   int length = non_escaped_allocs_worklist.length();
3202   find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist);
3203   assert((non_escaped_length == non_escaped_allocs_worklist.length()) &&
3204          (non_escaped_length == length) &&
3205          (_worklist.length() == 0), "escape state was not final");
3206 
3207   // Verify fields information.
3208   int addp_length = addp_worklist.length();
3209   for (int next = 0; next < addp_length; ++next ) {
3210     Node* n = addp_worklist.at(next);
3211     FieldNode* field = ptnode_adr(n->_idx)->as_Field();
3212     if (field->is_oop()) {
3213       // Verify that field has all bases
3214       Node* base = get_addp_base(n);
3215       PointsToNode* ptn = ptnode_adr(base->_idx);
3216       if (ptn->is_JavaObject()) {
3217         assert(field->has_base(ptn->as_JavaObject()), "sanity");
3218       } else {
3219         assert(ptn->is_LocalVar(), "sanity");
3220         for (EdgeIterator i(ptn); i.has_next(); i.next()) {
3221           PointsToNode* e = i.get();
3222           if (e->is_JavaObject()) {
3223             assert(field->has_base(e->as_JavaObject()), "sanity");
3224           }
3225         }
3226       }
3227       // Verify that all fields have initializing values.
3228       if (field->edge_count() == 0) {
3229         tty->print_cr("----------field does not have references----------");
3230         field->dump();
3231         for (BaseIterator i(field); i.has_next(); i.next()) {
3232           PointsToNode* base = i.get();
3233           tty->print_cr("----------field has next base---------------------");
3234           base->dump();
3235           if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) {
3236             tty->print_cr("----------base has fields-------------------------");
3237             for (EdgeIterator j(base); j.has_next(); j.next()) {
3238               j.get()->dump();
3239             }
3240             tty->print_cr("----------base has references---------------------");
3241             for (UseIterator j(base); j.has_next(); j.next()) {
3242               j.get()->dump();
3243             }
3244           }
3245         }
3246         for (UseIterator i(field); i.has_next(); i.next()) {
3247           i.get()->dump();
3248         }
3249         assert(field->edge_count() > 0, "sanity");
3250       }
3251     }
3252   }
3253 }
3254 #endif
3255 
3256 // Optimize ideal graph.
3257 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
3258                                            GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
3259   Compile* C = _compile;
3260   PhaseIterGVN* igvn = _igvn;
3261   if (EliminateLocks) {
3262     // Mark locks before changing ideal graph.
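       // For example (illustrative only): a lock on a non-escaping object can
       // never be contended by another thread, so it can be eliminated:
       //
       //    Object lock = new Object(); // does not escape
       //    synchronized (lock) { ... } // Lock/Unlock marked non_esc_obj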
3263     int cnt = C->macro_count();
3264     for (int i = 0; i < cnt; i++) {
3265       Node *n = C->macro_node(i);
3266       if (n->is_AbstractLock()) { // Lock and Unlock nodes
3267         AbstractLockNode* alock = n->as_AbstractLock();
3268         if (!alock->is_non_esc_obj()) {
3269           const Type* obj_type = igvn->type(alock->obj_node());
3270           if (can_eliminate_lock(alock) && !obj_type->is_inlinetypeptr()) {
3271             assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
3272             // The lock could be marked eliminated by lock coarsening
3273             // code during the first IGVN before EA. Replace the coarsened
3274             // marking to eliminate all associated locks/unlocks.
3275 #ifdef ASSERT
3276             alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
3277 #endif
3278             alock->set_non_esc_obj();
3279           }
3280         }
3281       }
3282     }
3283   }
3284 
3285   if (OptimizePtrCompare) {
3286     for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
3287       Node *n = ptr_cmp_worklist.at(i);
3288       assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
3289       const TypeInt* tcmp = optimize_ptr_compare(n->in(1), n->in(2));
3290       if (tcmp->singleton()) {
3291         Node* cmp = igvn->makecon(tcmp);
3292 #ifndef PRODUCT
3293         if (PrintOptimizePtrCompare) {
3294           tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
3295           if (Verbose) {
3296             n->dump(1);
3297           }
3298         }
3299 #endif
3300         igvn->replace_node(n, cmp);
3301       }
3302     }
3303   }
3304 
3305   // For MemBarStoreStore nodes added in library_call.cpp, check
3306   // escape status of associated AllocateNode and optimize out
3307   // MemBarStoreStore node if the allocated object never escapes.
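     // For example (illustrative only): inline_native_clone() emits a
     // MemBarStoreStore to publish the copy safely; if the clone never
     // escapes, the StoreStore ordering is unnecessary:
     //
     //    int[] copy = src.clone(); // non-escaping => barrier relaxed/removed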
3308   for (int i = 0; i < storestore_worklist.length(); i++) {
3309     Node* storestore = storestore_worklist.at(i);
3310     Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
3311     if (alloc->is_Allocate() && not_global_escape(alloc)) {
3312       if (alloc->in(AllocateNode::InlineType) != nullptr) {
3313         // Non-escaping inline type buffer allocations don't require a membar
3314         storestore->as_MemBar()->remove(_igvn);
3315       } else {
3316         MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
3317         mb->init_req(TypeFunc::Memory,  storestore->in(TypeFunc::Memory));
3318         mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
3319         igvn->register_new_node_with_optimizer(mb);
3320         igvn->replace_node(storestore, mb);
3321       }
3322     }
3323   }
3324 }
3325 
3326 // Optimize object pointer comparisons.
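     // For example (illustrative only): a pointer compare against a fresh
     // non-escaping allocation can fold to a constant:
     //
     //    Point p = new Point();  // does not escape
     //    if (p == q) { ... }     // 'q' cannot point to 'p' -> NE (never equal)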
3327 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* left, Node* right) {
3328   assert(OptimizePtrCompare, "sanity");
3329   const TypeInt* EQ = TypeInt::CC_EQ;   // [0] == ZERO
3330   const TypeInt* NE = TypeInt::CC_GT;   // [1] == ONE
3331   const TypeInt* UNKNOWN = TypeInt::CC; // [-1, 0, 1]
3332 
3333   PointsToNode* ptn1 = ptnode_adr(left->_idx);
3334   PointsToNode* ptn2 = ptnode_adr(right->_idx);
3335   JavaObjectNode* jobj1 = unique_java_object(left);
3336   JavaObjectNode* jobj2 = unique_java_object(right);
3337 
3338   // The use of this method during allocation merge reduction may cause 'left'
3339   // or 'right' to be something (e.g., a Phi) that isn't in the connection graph
3340   // or that doesn't reference a unique java object.
3341   if (ptn1 == nullptr || ptn2 == nullptr ||
3342       jobj1 == nullptr || jobj2 == nullptr) {
3343     return UNKNOWN;
3344   }
3345 
3346   assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
3347   assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
3348 
3349   // Check simple cases first.
3350   if (jobj1 != nullptr) {
3351     if (jobj1->escape_state() == PointsToNode::NoEscape) {
3352       if (jobj1 == jobj2) {
3353         // Comparing the same not escaping object.
3354         return EQ;
3355       }
3356       Node* obj = jobj1->ideal_node();
3357       // Comparing not escaping allocation.
3358       if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
3359           !ptn2->points_to(jobj1)) {
3360         return NE; // This includes nullness check.
3361       }
3362     }
3363   }
3364   if (jobj2 != nullptr) {
3365     if (jobj2->escape_state() == PointsToNode::NoEscape) {
3366       Node* obj = jobj2->ideal_node();
3367       // Comparing not escaping allocation.
3368       if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
3369           !ptn1->points_to(jobj2)) {
3370         return NE; // This includes nullness check.
3371       }
3372     }
3373   }
3374   if (jobj1 != nullptr && jobj1 != phantom_obj &&
3375       jobj2 != nullptr && jobj2 != phantom_obj &&
3376       jobj1->ideal_node()->is_Con() &&
3377       jobj2->ideal_node()->is_Con()) {
3378     // Klass or String constants comparison. Need to be careful with
3379     // compressed pointers - compare types of ConN and ConP instead of nodes.
3380     const Type* t1 = jobj1->ideal_node()->get_ptr_type();
3381     const Type* t2 = jobj2->ideal_node()->get_ptr_type();
3382     if (t1->make_ptr() == t2->make_ptr()) {
3383       return EQ;
3384     } else {
3385       return NE;
3386     }
3387   }
3388   if (ptn1->meet(ptn2)) {
3389     return UNKNOWN; // Sets are not disjoint
3390   }
3391 
3392   // Sets are disjoint.
3393   bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj);
3394   bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj);
3395   bool set1_has_null_ptr    = ptn1->points_to(null_obj);
3396   bool set2_has_null_ptr    = ptn2->points_to(null_obj);
3397   if ((set1_has_unknown_ptr && set2_has_null_ptr) ||
3398       (set2_has_unknown_ptr && set1_has_null_ptr)) {
3399     // Check nullness of unknown object.
3400     return UNKNOWN;
3401   }
3402 
3403   // Disjointness by itself is not sufficient since
3404   // alias analysis is not complete for escaped objects.
3405   // Disjoint sets are definitely unrelated only when
3406   // at least one set has only not escaping allocations.
3407   if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
3408     if (ptn1->non_escaping_allocation()) {
3409       return NE;
3410     }
3411   }
3412   if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
3413     if (ptn2->non_escaping_allocation()) {
3414       return NE;
3415     }
3416   }
3417   return UNKNOWN;
3418 }
3419 
3420 // Connection Graph construction functions.
3421 
3422 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) {
3423   PointsToNode* ptadr = _nodes.at(n->_idx);
3424   if (ptadr != nullptr) {
3425     assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity");
3426     return;
3427   }
3428   Compile* C = _compile;
3429   ptadr = new (C->comp_arena()) LocalVarNode(this, n, es);
3430   map_ideal_node(n, ptadr);
3431 }
3432 
3433 PointsToNode* ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {
3434   PointsToNode* ptadr = _nodes.at(n->_idx);
3435   if (ptadr != nullptr) {
3436     assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity");
3437     return ptadr;
3438   }
3439   Compile* C = _compile;
3440   ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es);
3441   map_ideal_node(n, ptadr);
3442   return ptadr;
3443 }
3444 
3445 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) {
3446   PointsToNode* ptadr = _nodes.at(n->_idx);
3447   if (ptadr != nullptr) {
3448     assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity");
3449     return;
3450   }
3451   bool unsafe = false;
3452   bool is_oop = is_oop_field(n, offset, &unsafe);
3453   if (unsafe) {
3454     es = PointsToNode::GlobalEscape;
3455   }
3456   Compile* C = _compile;
3457   FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop);
3458   map_ideal_node(n, field);
3459 }
3460 
3461 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,
3462                                     PointsToNode* src, PointsToNode* dst) {
3463   assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
3464   assert((src != null_obj) && (dst != null_obj), "not for ConP null");
3465   PointsToNode* ptadr = _nodes.at(n->_idx);
3466   if (ptadr != nullptr) {
3467     assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
3468     return;
3469   }
3470   Compile* C = _compile;
3471   ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
3472   map_ideal_node(n, ptadr);
3473   // Add edge from arraycopy node to source object.
3474   (void)add_edge(ptadr, src);
3475   src->set_arraycopy_src();
3476   // Add edge from destination object to arraycopy node.
3477   (void)add_edge(dst, ptadr);
3478   dst->set_arraycopy_dst();
3479 }
3480 
3481 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
3482   const Type* adr_type = n->as_AddP()->bottom_type();
3483   int field_offset = adr_type->isa_aryptr() ? adr_type->isa_aryptr()->field_offset().get() : Type::OffsetBot;
3484   BasicType bt = T_INT;
3485   if (offset == Type::OffsetBot && field_offset == Type::OffsetBot) {
3486     // Check only oop fields.
3487     if (!adr_type->isa_aryptr() ||
3488         adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
3489         adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) {
3490       // OffsetBot is used to reference array's element. Ignore first AddP.
3491       if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) {
3492         bt = T_OBJECT;
3493       }
3494     }
3495   } else if (offset != oopDesc::klass_offset_in_bytes()) {
3496     if (adr_type->isa_instptr()) {
3497       ciField* field = _compile->alias_type(adr_type->is_ptr())->field();
3498       if (field != nullptr) {
3499         bt = field->layout_type();
3500       } else {
3501         // Check for unsafe oop field access
3502         if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3503             n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3504             n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3505             BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
3506           bt = T_OBJECT;
3507           (*unsafe) = true;
3508         }
3509       }
3510     } else if (adr_type->isa_aryptr()) {
3511       if (offset == arrayOopDesc::length_offset_in_bytes()) {
3512         // Ignore array length load.
3513       } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) {
3514         // Ignore first AddP.
3515       } else {
3516         const Type* elemtype = adr_type->is_aryptr()->elem();
3517         if (adr_type->is_aryptr()->is_flat() && field_offset != Type::OffsetBot) {
3518           ciInlineKlass* vk = elemtype->inline_klass();
3519           field_offset += vk->first_field_offset();
3520           bt = vk->get_field_by_offset(field_offset, false)->layout_type();
3521         } else {
3522           bt = elemtype->array_element_basic_type();
3523         }
3524       }
3525     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
3526       // Allocation initialization, ThreadLocal field access, unsafe access
3527       if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3528           n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3529           n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3530           BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
3531         bt = T_OBJECT;
3532       }
3533     }
3534   }
3535   // Note: T_NARROWOOP is not classed as a real reference type
3536   return (is_reference_type(bt) || bt == T_NARROWOOP);
3537 }
3538 
3539 // Returns the unique java object pointed to by 'n', or null if there is none.
3540 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const {
3541   // If the node was created after the escape computation we can't answer.
3542   uint idx = n->_idx;
3543   if (idx >= nodes_size()) {
3544     return nullptr;
3545   }
3546   PointsToNode* ptn = ptnode_adr(idx);
3547   if (ptn == nullptr) {
3548     return nullptr;
3549   }
3550   if (ptn->is_JavaObject()) {
3551     return ptn->as_JavaObject();
3552   }
3553   assert(ptn->is_LocalVar(), "sanity");
3554   // Check all java objects it points to.
3555   JavaObjectNode* jobj = nullptr;
3556   for (EdgeIterator i(ptn); i.has_next(); i.next()) {
3557     PointsToNode* e = i.get();
3558     if (e->is_JavaObject()) {
3559       if (jobj == nullptr) {
3560         jobj = e->as_JavaObject();
3561       } else if (jobj != e) {
3562         return nullptr;
3563       }
3564     }
3565   }
3566   return jobj;
3567 }
3568 
3569 // Return true if this node points only to non-escaping allocations.
3570 bool PointsToNode::non_escaping_allocation() {
3571   if (is_JavaObject()) {
3572     Node* n = ideal_node();
3573     if (n->is_Allocate() || n->is_CallStaticJava()) {
3574       return (escape_state() == PointsToNode::NoEscape);
3575     } else {
3576       return false;
3577     }
3578   }
3579   assert(is_LocalVar(), "sanity");
3580   // Check all java objects it points to.
3581   for (EdgeIterator i(this); i.has_next(); i.next()) {
3582     PointsToNode* e = i.get();
3583     if (e->is_JavaObject()) {
3584       Node* n = e->ideal_node();
3585       if ((e->escape_state() != PointsToNode::NoEscape) ||
3586           !(n->is_Allocate() || n->is_CallStaticJava())) {
3587         return false;
3588       }
3589     }
3590   }
3591   return true;
3592 }
3593 
3594 // Return true if we know the node does not escape globally.
3595 bool ConnectionGraph::not_global_escape(Node *n) {
3596   assert(!_collecting, "should not call during graph construction");
3597   // If the node was created after the escape computation we can't answer.
3598   uint idx = n->_idx;
3599   if (idx >= nodes_size()) {
3600     return false;
3601   }
3602   PointsToNode* ptn = ptnode_adr(idx);
3603   if (ptn == nullptr) {
3604     return false; // not in congraph (e.g. ConI)
3605   }
3606   PointsToNode::EscapeState es = ptn->escape_state();
3607   // If we have already computed a value, return it.
3608   if (es >= PointsToNode::GlobalEscape) {
3609     return false;
3610   }
3611   if (ptn->is_JavaObject()) {
3612     return true; // (es < PointsToNode::GlobalEscape);
3613   }
3614   assert(ptn->is_LocalVar(), "sanity");
3615   // Check all java objects it points to.
3616   for (EdgeIterator i(ptn); i.has_next(); i.next()) {
3617     if (i.get()->escape_state() >= PointsToNode::GlobalEscape) {
3618       return false;
3619     }
3620   }
3621   return true;
3622 }
3623 
3624 // Return true if locked object does not escape globally
3625 // and locked code region (identified by BoxLockNode) is balanced:
3626 // all compiled code paths have corresponding Lock/Unlock pairs.
3627 bool ConnectionGraph::can_eliminate_lock(AbstractLockNode* alock) {
3628   if (alock->is_balanced() && not_global_escape(alock->obj_node())) {
3629     if (EliminateNestedLocks) {
3630       // We can mark whole locking region as Local only when only
3631       // one object is used for locking.
3632       alock->box_node()->as_BoxLock()->set_local();
3633     }
3634     return true;
3635   }
3636   return false;
3637 }
3638 
3639 // Helper functions
3640 
3641 // Return true if this node is the specified node or points to it.
3642 bool PointsToNode::points_to(JavaObjectNode* ptn) const {
3643   if (is_JavaObject()) {
3644     return (this == ptn);
3645   }
3646   assert(is_LocalVar() || is_Field(), "sanity");
3647   for (EdgeIterator i(this); i.has_next(); i.next()) {
3648     if (i.get() == ptn) {
3649       return true;
3650     }
3651   }
3652   return false;
3653 }
3654 
3655 // Return true if the points-to sets of the two nodes intersect.
3656 bool PointsToNode::meet(PointsToNode* ptn) {
3657   if (this == ptn) {
3658     return true;
3659   } else if (ptn->is_JavaObject()) {
3660     return this->points_to(ptn->as_JavaObject());
3661   } else if (this->is_JavaObject()) {
3662     return ptn->points_to(this->as_JavaObject());
3663   }
3664   assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
3665   int ptn_count =  ptn->edge_count();
3666   for (EdgeIterator i(this); i.has_next(); i.next()) {
3667     PointsToNode* this_e = i.get();
3668     for (int j = 0; j < ptn_count; j++) {
3669       if (this_e == ptn->edge(j)) {
3670         return true;
3671       }
3672     }
3673   }
3674   return false;
3675 }
3676 
3677 #ifdef ASSERT
3678 // Return true if one of this field's bases is the given java object.
3679 bool FieldNode::has_base(JavaObjectNode* jobj) const {
3680   for (BaseIterator i(this); i.has_next(); i.next()) {
3681     if (i.get() == jobj) {
3682       return true;
3683     }
3684   }
3685   return false;
3686 }
3687 #endif
3688 
3689 bool ConnectionGraph::is_captured_store_address(Node* addp) {
3690   // Handle simple case first.
3691   assert(_igvn->type(addp)->isa_oopptr() == nullptr, "should be raw access");
3692   if (addp->in(AddPNode::Address)->is_Proj() && addp->in(AddPNode::Address)->in(0)->is_Allocate()) {
3693     return true;
3694   } else if (addp->in(AddPNode::Address)->is_Phi()) {
3695     for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
3696       Node* addp_use = addp->fast_out(i);
3697       if (addp_use->is_Store()) {
3698         for (DUIterator_Fast jmax, j = addp_use->fast_outs(jmax); j < jmax; j++) {
3699           if (addp_use->fast_out(j)->is_Initialize()) {
3700             return true;
3701           }
3702         }
3703       }
3704     }
3705   }
3706   return false;
3707 }
3708 
3709 int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) {
3710   const Type *adr_type = phase->type(adr);
3711   if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) {
3712     // We are computing a raw address for a store captured by an Initialize;
3713     // compute an appropriate address type. AddP cases #3 and #5 (see below).
3714     int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
3715     assert(offs != Type::OffsetBot ||
3716            adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
3717            "offset must be a constant or it is initialization of array");
3718     return offs;
3719   }
3720   return adr_type->is_ptr()->flat_offset();
3721 }
3722 
3723 Node* ConnectionGraph::get_addp_base(Node *addp) {
3724   assert(addp->is_AddP(), "must be AddP");
3725   //
3726   // AddP cases for Base and Address inputs:
3727   // case #1. Direct object's field reference:
3728   //     Allocate
3729   //       |
3730   //     Proj #5 ( oop result )
3731   //       |
3732   //     CheckCastPP (cast to instance type)
3733   //      | |
3734   //     AddP  ( base == address )
3735   //
3736   // case #2. Indirect object's field reference:
3737   //      Phi
3738   //       |
3739   //     CastPP (cast to instance type)
3740   //      | |
3741   //     AddP  ( base == address )
3742   //
3743   // case #3. Raw object's field reference for Initialize node:
3744   //      Allocate
3745   //        |
3746   //      Proj #5 ( oop result )
3747   //  top   |
3748   //     \  |
3749   //     AddP  ( base == top )
3750   //
3751   // case #4. Array's element reference:
3752   //   {CheckCastPP | CastPP}
3753   //     |  | |
3754   //     |  AddP ( array's element offset )
3755   //     |  |
3756   //     AddP ( array's offset )
3757   //
3758   // case #5. Raw object's field reference for arraycopy stub call:
3759   //          The inline_native_clone() case when the arraycopy stub is called
3760   //          after the allocation before Initialize and CheckCastPP nodes.
3761   //      Allocate
3762   //        |
3763   //      Proj #5 ( oop result )
3764   //       | |
3765   //       AddP  ( base == address )
3766   //
3767   // case #6. Constant Pool, ThreadLocal, CastX2P or
3768   //          Raw object's field reference:
3769   //      {ConP, ThreadLocal, CastX2P, raw Load}
3770   //  top   |
3771   //     \  |
3772   //     AddP  ( base == top )
3773   //
3774   // case #7. Klass's field reference.
3775   //      LoadKlass
3776   //       | |
3777   //       AddP  ( base == address )
3778   //
3779   // case #8. narrow Klass's field reference.
3780   //      LoadNKlass
3781   //       |
3782   //      DecodeN
3783   //       | |
3784   //       AddP  ( base == address )
3785   //
3786   // case #9. Mixed unsafe access
3787   //    {instance}
3788   //        |
3789   //      CheckCastPP (raw)
3790   //  top   |
3791   //     \  |
3792   //     AddP  ( base == top )
3793   //
3794   Node *base = addp->in(AddPNode::Base);
3795   if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9.
3796     base = addp->in(AddPNode::Address);
3797     while (base->is_AddP()) {
3798       // Case #6 (unsafe access) may have several chained AddP nodes.
3799       assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
3800       base = base->in(AddPNode::Address);
3801     }
3802     if (base->Opcode() == Op_CheckCastPP &&
3803         base->bottom_type()->isa_rawptr() &&
3804         _igvn->type(base->in(1))->isa_oopptr()) {
3805       base = base->in(1); // Case #9
3806     } else {
3807       Node* uncast_base = base->uncast();
3808       int opcode = uncast_base->Opcode();
3809       assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
3810              opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
3811              (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != nullptr)) ||
3812              is_captured_store_address(addp), "sanity");
3813     }
3814   }
3815   return base;
3816 }
3817 
3818 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
3819   assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
3820   Node* addp2 = addp->raw_out(0);
3821   if (addp->outcnt() == 1 && addp2->is_AddP() &&
3822       addp2->in(AddPNode::Base) == n &&
3823       addp2->in(AddPNode::Address) == addp) {
3824     assert(addp->in(AddPNode::Base) == n, "expecting the same base");
3825     //
3826     // Find the array's offset to push it on the worklist first and
3827     // as a result process the array's element offset first (pushed second)
3828     // to avoid a CastPP for the array's offset.
3829     // Otherwise the inserted CastPP (LocalVar) will point to what
3830     // the AddP (Field) points to, which would be wrong since
3831     // the algorithm expects the CastPP to point to the same objects
3832     // as the AddP's base CheckCastPP (LocalVar).
3833     //
3834     //    ArrayAllocation
3835     //     |
3836     //    CheckCastPP
3837     //     |
3838     //    memProj (from ArrayAllocation CheckCastPP)
3839     //     |  ||
3840     //     |  ||   Int (element index)
3841     //     |  ||    |   ConI (log(element size))
3842     //     |  ||    |   /
3843     //     |  ||   LShift
3844     //     |  ||  /
3845     //     |  AddP (array's element offset)
3846     //     |  |
3847     //     |  | ConI (array's offset: #12(32-bits) or #24(64-bits))
3848     //     | / /
3849     //     AddP (array's offset)
3850     //      |
3851     //     Load/Store (memory operation on array's element)
3852     //
3853     return addp2;
3854   }
3855   return nullptr;
3856 }
3857 
3858 //
3859 // Adjust the type and inputs of an AddP which computes the
3860 // address of a field of an instance
3861 //
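     // For example (illustrative notation): for a unique instance the AddP type
     // is narrowed from a generic 'Point+12' to an instance-specific
     // 'Point+12, iid=#N', giving the instance its own alias class (memory slice).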
3862 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
3863   PhaseGVN* igvn = _igvn;
3864   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
3865   assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
3866   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
3867   if (t == nullptr) {
    // We are computing a raw address for a store captured by an Initialize;
    // compute an appropriate address type (cases #3 and #5).
3870     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
3871     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
3872     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
3873     assert(offs != Type::OffsetBot, "offset must be a constant");
3874     if (base_t->isa_aryptr() != nullptr) {
3875       // In the case of a flat inline type array, each field has its
3876       // own slice so we need to extract the field being accessed from
3877       // the address computation
3878       t = base_t->isa_aryptr()->add_field_offset_and_offset(offs)->is_oopptr();
3879     } else {
3880       t = base_t->add_offset(offs)->is_oopptr();
3881     }
3882   }
3883   int inst_id = base_t->instance_id();
3884   assert(!t->is_known_instance() || t->instance_id() == inst_id,
3885                              "old type must be non-instance or match new type");
3886 
  // The type 't' could be a subclass of 'base_t'.
  // As a result t->offset() could be larger than base_t's size, which would
  // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
  // constructor verifies the correctness of the offset.
  //
  // This can happen on a subclass's branch (from type profiling
  // inlining) which was not eliminated during parsing because the exactness
  // of the allocation type was not propagated to the subclass type check.
  //
  // Or the type 't' could be unrelated to 'base_t' altogether.
  // This can happen when the CHA type differs from the MDO type on a dead path
  // (for example, from an instanceof check) which is not collapsed during parsing.
3899   //
3900   // Do nothing for such AddP node and don't process its users since
3901   // this code branch will go away.
3902   //
3903   if (!t->is_known_instance() &&
3904       !base_t->maybe_java_subtype_of(t)) {
3905      return false; // bail out
3906   }
3907   const TypePtr* tinst = base_t->add_offset(t->offset());
3908   if (tinst->isa_aryptr() && t->isa_aryptr()) {
3909     // In the case of a flat inline type array, each field has its
3910     // own slice so we need to keep track of the field being accessed.
3911     tinst = tinst->is_aryptr()->with_field_offset(t->is_aryptr()->field_offset().get());
3912     // Keep array properties (not flat/null-free)
3913     tinst = tinst->is_aryptr()->update_properties(t->is_aryptr());
3914     if (tinst == nullptr) {
3915       return false; // Skip dead path with inconsistent properties
3916     }
3917   }
3918 
  // Do NOT remove the next line: it ensures that a new alias index is
  // allocated for the instance type. Note: the C++ compiler will not remove
  // the call since it has a side effect.
3922   int alias_idx = _compile->get_alias_index(tinst);
3923   igvn->set_type(addp, tinst);
3924   // record the allocation in the node map
3925   set_map(addp, get_map(base->_idx));
3926   // Set addp's Base and Address to 'base'.
3927   Node *abase = addp->in(AddPNode::Base);
3928   Node *adr   = addp->in(AddPNode::Address);
3929   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
3930       adr->in(0)->_idx == (uint)inst_id) {
3931     // Skip AddP cases #3 and #5.
3932   } else {
3933     assert(!abase->is_top(), "sanity"); // AddP case #3
3934     if (abase != base) {
3935       igvn->hash_delete(addp);
3936       addp->set_req(AddPNode::Base, base);
3937       if (abase == adr) {
3938         addp->set_req(AddPNode::Address, base);
3939       } else {
3940         // AddP case #4 (adr is array's element offset AddP node)
3941 #ifdef ASSERT
3942         const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
3943         assert(adr->is_AddP() && atype != nullptr &&
3944                atype->instance_id() == inst_id, "array's element offset should be processed first");
3945 #endif
3946       }
3947       igvn->hash_insert(addp);
3948     }
3949   }
3950   // Put on IGVN worklist since at least addp's type was changed above.
3951   record_for_optimizer(addp);
3952   return true;
3953 }
3954 
3955 //
3956 // Create a new version of orig_phi if necessary. Returns either the newly
3957 // created phi or an existing phi.  Sets create_new to indicate whether a new
3958 // phi was created.  Cache the last newly created phi in the node map.
3959 //
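// Returns nullptr, after recording a compilation failure (retry without escape
// analysis), if creating another Phi would bring the compilation too close to
// the max node limit; callers must check for a nullptr result.
//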
3960 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, bool &new_created) {
3961   Compile *C = _compile;
3962   PhaseGVN* igvn = _igvn;
3963   new_created = false;
3964   int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
3965   // nothing to do if orig_phi is bottom memory or matches alias_idx
3966   if (phi_alias_idx == alias_idx) {
3967     return orig_phi;
3968   }
3969   // Have we recently created a Phi for this alias index?
3970   PhiNode *result = get_map_phi(orig_phi->_idx);
3971   if (result != nullptr && C->get_alias_index(result->adr_type()) == alias_idx) {
3972     return result;
3973   }
3974   // Previous check may fail when the same wide memory Phi was split into Phis
3975   // for different memory slices. Search all Phis for this region.
3976   if (result != nullptr) {
3977     Node* region = orig_phi->in(0);
3978     for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
3979       Node* phi = region->fast_out(i);
3980       if (phi->is_Phi() &&
3981           C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
3982         assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
3983         return phi->as_Phi();
3984       }
3985     }
3986   }
3987   if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) {
    if (C->do_escape_analysis() && !C->failing()) {
3989       // Retry compilation without escape analysis.
3990       // If this is the first failure, the sentinel string will "stick"
3991       // to the Compile object, and the C2Compiler will see it and retry.
3992       C->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
3993     }
3994     return nullptr;
3995   }
3996   orig_phi_worklist.append_if_missing(orig_phi);
3997   const TypePtr *atype = C->get_adr_type(alias_idx);
3998   result = PhiNode::make(orig_phi->in(0), nullptr, Type::MEMORY, atype);
3999   C->copy_node_notes_to(result, orig_phi);
4000   igvn->set_type(result, result->bottom_type());
4001   record_for_optimizer(result);
4002   set_map(orig_phi, result);
4003   new_created = true;
4004   return result;
4005 }
4006 
4007 //
4008 // Return a new version of Memory Phi "orig_phi" with the inputs having the
4009 // specified alias index.
4010 //
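// Splitting one Phi may require splitting other Phis reachable through its
// inputs. Instead of recursing, the loop below keeps an explicit stack:
// 'phi_list' and 'cur_input' record the Phi being processed and the input
// index at which to resume once the nested Phi is finished.
//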
4011 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, uint rec_depth) {
4012   assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
4013   Compile *C = _compile;
4014   PhaseGVN* igvn = _igvn;
4015   bool new_phi_created;
4016   PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
4017   if (!new_phi_created) {
4018     return result;
4019   }
4020   GrowableArray<PhiNode *>  phi_list;
4021   GrowableArray<uint>  cur_input;
4022   PhiNode *phi = orig_phi;
4023   uint idx = 1;
4024   bool finished = false;
  while (!finished) {
4026     while (idx < phi->req()) {
4027       Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist, rec_depth + 1);
4028       if (mem != nullptr && mem->is_Phi()) {
4029         PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
4030         if (new_phi_created) {
          // Found a phi for which we created a new split; push the current
          // one on the worklist and begin processing the new one.
4033           phi_list.push(phi);
4034           cur_input.push(idx);
4035           phi = mem->as_Phi();
4036           result = newphi;
4037           idx = 1;
4038           continue;
4039         } else {
4040           mem = newphi;
4041         }
4042       }
4043       if (C->failing()) {
4044         return nullptr;
4045       }
4046       result->set_req(idx++, mem);
4047     }
4048 #ifdef ASSERT
4049     // verify that the new Phi has an input for each input of the original
4050     assert( phi->req() == result->req(), "must have same number of inputs.");
4051     assert( result->in(0) != nullptr && result->in(0) == phi->in(0), "regions must match");
4052 #endif
    // Verify that the new phi's inputs correspond to the original phi's
    // inputs (an input is null in one exactly when it is null in the other).
4055     for (uint i = 1; i < phi->req(); i++) {
4056       Node* in = result->in(i);
4057       assert((phi->in(i) == nullptr) == (in == nullptr), "inputs must correspond.");
4058     }
4059     // we have finished processing a Phi, see if there are any more to do
    finished = (phi_list.length() == 0);
4061     if (!finished) {
4062       phi = phi_list.pop();
4063       idx = cur_input.pop();
4064       PhiNode *prev_result = get_map_phi(phi->_idx);
4065       prev_result->set_req(idx++, result);
4066       result = prev_result;
4067     }
4068   }
4069   return result;
4070 }
4071 
4072 //
4073 // The next methods are derived from methods in MemNode.
4074 //
4075 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
4076   Node *mem = mmem;
4077   // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
4078   // means an array I have not precisely typed yet.  Do not do any
4079   // alias stuff with it any time soon.
4080   if (toop->base() != Type::AnyPtr &&
4081       !(toop->isa_instptr() &&
4082         toop->is_instptr()->instance_klass()->is_java_lang_Object() &&
4083         toop->offset() == Type::OffsetBot)) {
4084     mem = mmem->memory_at(alias_idx);
4085     // Update input if it is progress over what we have now
4086   }
4087   return mem;
4088 }
4089 
4090 //
4091 // Move memory users to their memory slices.
4092 //
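// Here 'n' is a store that now resides on an instance memory slice. Users
// that still reference it through the general slice (MergeMem inputs,
// MemBars) are redirected to the general-slice memory state computed by
// find_inst_mem().
//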
4093 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *>  &orig_phis) {
4094   Compile* C = _compile;
4095   PhaseGVN* igvn = _igvn;
4096   const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();
4097   assert(tp != nullptr, "ptr type");
4098   int alias_idx = C->get_alias_index(tp);
4099   int general_idx = C->get_general_index(alias_idx);
4100 
4101   // Move users first
4102   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4103     Node* use = n->fast_out(i);
4104     if (use->is_MergeMem()) {
4105       MergeMemNode* mmem = use->as_MergeMem();
4106       assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice");
4107       if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) {
4108         continue; // Nothing to do
4109       }
4110       // Replace previous general reference to mem node.
4111       uint orig_uniq = C->unique();
4112       Node* m = find_inst_mem(n, general_idx, orig_phis);
4113       assert(orig_uniq == C->unique(), "no new nodes");
4114       mmem->set_memory_at(general_idx, m);
4115       --imax;
4116       --i;
4117     } else if (use->is_MemBar()) {
4118       assert(!use->is_Initialize(), "initializing stores should not be moved");
4119       if (use->req() > MemBarNode::Precedent &&
4120           use->in(MemBarNode::Precedent) == n) {
4121         // Don't move related membars.
4122         record_for_optimizer(use);
4123         continue;
4124       }
4125       tp = use->as_MemBar()->adr_type()->isa_ptr();
4126       if ((tp != nullptr && C->get_alias_index(tp) == alias_idx) ||
4127           alias_idx == general_idx) {
4128         continue; // Nothing to do
4129       }
4130       // Move to general memory slice.
4131       uint orig_uniq = C->unique();
4132       Node* m = find_inst_mem(n, general_idx, orig_phis);
4133       assert(orig_uniq == C->unique(), "no new nodes");
4134       igvn->hash_delete(use);
4135       imax -= use->replace_edge(n, m, igvn);
4136       igvn->hash_insert(use);
4137       record_for_optimizer(use);
4138       --i;
4139 #ifdef ASSERT
4140     } else if (use->is_Mem()) {
4141       // Memory nodes should have new memory input.
4142       tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
4143       assert(tp != nullptr, "ptr type");
4144       int idx = C->get_alias_index(tp);
4145       assert(get_map(use->_idx) != nullptr || idx == alias_idx,
4146              "Following memory nodes should have new memory input or be on the same memory slice");
4147     } else if (use->is_Phi()) {
4148       // Phi nodes should be split and moved already.
4149       tp = use->as_Phi()->adr_type()->isa_ptr();
4150       assert(tp != nullptr, "ptr type");
4151       int idx = C->get_alias_index(tp);
4152       assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
4153     } else {
4154       use->dump();
4155       assert(false, "should not be here");
4156 #endif
4157     }
4158   }
4159 }
4160 
4161 //
4162 // Search memory chain of "mem" to find a MemNode whose address
4163 // is the specified alias index.
4164 //
4165 #define FIND_INST_MEM_RECURSION_DEPTH_LIMIT 1000
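// The recursion depth of find_inst_mem() is bounded so that deeply chained
// memory graphs cannot overflow the native stack; when the limit is exceeded
// the compilation is abandoned and retried without (iterative) escape analysis.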
4166 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *>  &orig_phis, uint rec_depth) {
4167   if (rec_depth > FIND_INST_MEM_RECURSION_DEPTH_LIMIT) {
4168     _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4169     return nullptr;
4170   }
4171   if (orig_mem == nullptr) {
4172     return orig_mem;
4173   }
4174   Compile* C = _compile;
4175   PhaseGVN* igvn = _igvn;
4176   const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
4177   bool is_instance = (toop != nullptr) && toop->is_known_instance();
4178   Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
4179   Node *prev = nullptr;
4180   Node *result = orig_mem;
4181   while (prev != result) {
4182     prev = result;
4183     if (result == start_mem) {
4184       break;  // hit one of our sentinels
4185     }
4186     if (result->is_Mem()) {
4187       const Type *at = igvn->type(result->in(MemNode::Address));
4188       if (at == Type::TOP) {
4189         break; // Dead
4190       }
4191       assert (at->isa_ptr() != nullptr, "pointer type required.");
4192       int idx = C->get_alias_index(at->is_ptr());
4193       if (idx == alias_idx) {
4194         break; // Found
4195       }
4196       if (!is_instance && (at->isa_oopptr() == nullptr ||
4197                            !at->is_oopptr()->is_known_instance())) {
4198         break; // Do not skip store to general memory slice.
4199       }
4200       result = result->in(MemNode::Memory);
4201     }
4202     if (!is_instance) {
4203       continue;  // don't search further for non-instance types
4204     }
4205     // skip over a call which does not affect this memory slice
4206     if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
4207       Node *proj_in = result->in(0);
4208       if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
4209         break;  // hit one of our sentinels
4210       } else if (proj_in->is_Call()) {
4211         // ArrayCopy node processed here as well
4212         CallNode *call = proj_in->as_Call();
4213         if (!call->may_modify(toop, igvn)) {
4214           result = call->in(TypeFunc::Memory);
4215         }
4216       } else if (proj_in->is_Initialize()) {
4217         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
        // Stop if this is the initialization for the object instance which
        // contains this memory slice, otherwise skip over it.
4220         if (alloc == nullptr || alloc->_idx != (uint)toop->instance_id()) {
4221           result = proj_in->in(TypeFunc::Memory);
4222         }
4223       } else if (proj_in->is_MemBar()) {
4224         // Check if there is an array copy for a clone
4225         // Step over GC barrier when ReduceInitialCardMarks is disabled
4226         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4227         Node* control_proj_ac = bs->step_over_gc_barrier(proj_in->in(0));
4228 
4229         if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) {
4230           // Stop if it is a clone
4231           ArrayCopyNode* ac = control_proj_ac->in(0)->as_ArrayCopy();
4232           if (ac->may_modify(toop, igvn)) {
4233             break;
4234           }
4235         }
4236         result = proj_in->in(TypeFunc::Memory);
4237       }
4238     } else if (result->is_MergeMem()) {
4239       MergeMemNode *mmem = result->as_MergeMem();
4240       result = step_through_mergemem(mmem, alias_idx, toop);
4241       if (result == mmem->base_memory()) {
4242         // Didn't find instance memory, search through general slice recursively.
4243         result = mmem->memory_at(C->get_general_index(alias_idx));
4244         result = find_inst_mem(result, alias_idx, orig_phis, rec_depth + 1);
4245         if (C->failing()) {
4246           return nullptr;
4247         }
4248         mmem->set_memory_at(alias_idx, result);
4249       }
4250     } else if (result->is_Phi() &&
4251                C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
4252       Node *un = result->as_Phi()->unique_input(igvn);
4253       if (un != nullptr) {
4254         orig_phis.append_if_missing(result->as_Phi());
4255         result = un;
4256       } else {
4257         break;
4258       }
4259     } else if (result->is_ClearArray()) {
4260       if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {
4261         // Can not bypass initialization of the instance
4262         // we are looking for.
4263         break;
4264       }
4265       // Otherwise skip it (the call updated 'result' value).
4266     } else if (result->Opcode() == Op_SCMemProj) {
4267       Node* mem = result->in(0);
4268       Node* adr = nullptr;
4269       if (mem->is_LoadStore()) {
4270         adr = mem->in(MemNode::Address);
4271       } else {
4272         assert(mem->Opcode() == Op_EncodeISOArray ||
4273                mem->Opcode() == Op_StrCompressedCopy, "sanity");
4274         adr = mem->in(3); // Memory edge corresponds to destination array
4275       }
4276       const Type *at = igvn->type(adr);
4277       if (at != Type::TOP) {
4278         assert(at->isa_ptr() != nullptr, "pointer type required.");
4279         int idx = C->get_alias_index(at->is_ptr());
4280         if (idx == alias_idx) {
4281           // Assert in debug mode
4282           assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
4283           break; // In product mode return SCMemProj node
4284         }
4285       }
4286       result = mem->in(MemNode::Memory);
4287     } else if (result->Opcode() == Op_StrInflatedCopy) {
4288       Node* adr = result->in(3); // Memory edge corresponds to destination array
4289       const Type *at = igvn->type(adr);
4290       if (at != Type::TOP) {
4291         assert(at->isa_ptr() != nullptr, "pointer type required.");
4292         int idx = C->get_alias_index(at->is_ptr());
4293         if (idx == alias_idx) {
4294           // Assert in debug mode
4295           assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
4296           break; // In product mode return SCMemProj node
4297         }
4298       }
4299       result = result->in(MemNode::Memory);
4300     }
4301   }
4302   if (result->is_Phi()) {
4303     PhiNode *mphi = result->as_Phi();
4304     assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
4305     const TypePtr *t = mphi->adr_type();
4306     if (!is_instance) {
4307       // Push all non-instance Phis on the orig_phis worklist to update inputs
4308       // during Phase 4 if needed.
4309       orig_phis.append_if_missing(mphi);
4310     } else if (C->get_alias_index(t) != alias_idx) {
4311       // Create a new Phi with the specified alias index type.
4312       result = split_memory_phi(mphi, alias_idx, orig_phis, rec_depth + 1);
4313     }
4314   }
  // The result is either a MemNode, a PhiNode or an InitializeNode.
4316   return result;
4317 }
4318 
4319 //
4320 //  Convert the types of non-escaped object to instance types where possible,
4321 //  propagate the new type information through the graph, and update memory
4322 //  edges and MergeMem inputs to reflect the new type.
4323 //
//  We start with allocations (and calls which may be allocations) on alloc_worklist.
4325 //  The processing is done in 4 phases:
4326 //
4327 //  Phase 1:  Process possible allocations from alloc_worklist.  Create instance
4328 //            types for the CheckCastPP for allocations where possible.
4329 //            Propagate the new types through users as follows:
4330 //               casts and Phi:  push users on alloc_worklist
4331 //               AddP:  cast Base and Address inputs to the instance type
4332 //                      push any AddP users on alloc_worklist and push any memnode
4333 //                      users onto memnode_worklist.
//  Phase 2:  Process MemNodes from memnode_worklist. Compute a new address type
//            and search the Memory chain for a store with the appropriate
//            address type.  If a Phi is found, create a new version with
//            the appropriate memory slices from each of the Phi inputs.
//            For stores, process the users as follows:
//               MemNode:  push on memnode_worklist
//               MergeMem: push on mergemem_worklist
//  Phase 3:  Process MergeMem nodes from mergemem_worklist.  Walk each memory
//            slice, moving the first node encountered of each instance type to
//            the input corresponding to its alias index.
4345 //  Phase 4:  Update the inputs of non-instance memory Phis and the Memory input of memnodes.
4346 //
4347 // In the following example, the CheckCastPP nodes are the cast of allocation
4348 // results and the allocation of node 29 is non-escaped and eligible to be an
4349 // instance type.
4350 //
4351 // We start with:
4352 //
4353 //     7 Parm #memory
4354 //    10  ConI  "12"
4355 //    19  CheckCastPP   "Foo"
4356 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
4357 //    29  CheckCastPP   "Foo"
4358 //    30  AddP  _ 29 29 10  Foo+12  alias_index=4
4359 //
4360 //    40  StoreP  25   7  20   ... alias_index=4
4361 //    50  StoreP  35  40  30   ... alias_index=4
4362 //    60  StoreP  45  50  20   ... alias_index=4
4363 //    70  LoadP    _  60  30   ... alias_index=4
4364 //    80  Phi     75  50  60   Memory alias_index=4
4365 //    90  LoadP    _  80  30   ... alias_index=4
4366 //   100  LoadP    _  80  20   ... alias_index=4
4367 //
4368 //
4369 // Phase 1 creates an instance type for node 29 assigning it an instance id of 24
4370 // and creating a new alias index for node 30.  This gives:
4371 //
4372 //     7 Parm #memory
4373 //    10  ConI  "12"
4374 //    19  CheckCastPP   "Foo"
4375 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
4376 //    29  CheckCastPP   "Foo"  iid=24
4377 //    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
4378 //
4379 //    40  StoreP  25   7  20   ... alias_index=4
4380 //    50  StoreP  35  40  30   ... alias_index=6
4381 //    60  StoreP  45  50  20   ... alias_index=4
4382 //    70  LoadP    _  60  30   ... alias_index=6
4383 //    80  Phi     75  50  60   Memory alias_index=4
4384 //    90  LoadP    _  80  30   ... alias_index=6
4385 //   100  LoadP    _  80  20   ... alias_index=4
4386 //
// In phase 2, new memory inputs are computed for the loads and stores,
// and a new version of the phi is created.  In phase 4, the inputs to
4389 // node 80 are updated and then the memory nodes are updated with the
4390 // values computed in phase 2.  This results in:
4391 //
4392 //     7 Parm #memory
4393 //    10  ConI  "12"
4394 //    19  CheckCastPP   "Foo"
4395 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
4396 //    29  CheckCastPP   "Foo"  iid=24
4397 //    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
4398 //
4399 //    40  StoreP  25  7   20   ... alias_index=4
4400 //    50  StoreP  35  7   30   ... alias_index=6
4401 //    60  StoreP  45  40  20   ... alias_index=4
4402 //    70  LoadP    _  50  30   ... alias_index=6
4403 //    80  Phi     75  40  60   Memory alias_index=4
4404 //   120  Phi     75  50  50   Memory alias_index=6
4405 //    90  LoadP    _ 120  30   ... alias_index=6
4406 //   100  LoadP    _  80  20   ... alias_index=4
4407 //
4408 void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist,
4409                                          GrowableArray<ArrayCopyNode*> &arraycopy_worklist,
4410                                          GrowableArray<MergeMemNode*> &mergemem_worklist,
4411                                          Unique_Node_List &reducible_merges) {
4412   DEBUG_ONLY(Unique_Node_List reduced_merges;)
4413   GrowableArray<Node *>  memnode_worklist;
4414   GrowableArray<PhiNode *>  orig_phis;
4415   PhaseIterGVN  *igvn = _igvn;
4416   uint new_index_start = (uint) _compile->num_alias_types();
4417   VectorSet visited;
4418   ideal_nodes.clear(); // Reset for use with set_map/get_map.
4419   uint unique_old = _compile->unique();
4420 
4421   //  Phase 1:  Process possible allocations from alloc_worklist.
4422   //  Create instance types for the CheckCastPP for allocations where possible.
4423   //
4424   // (Note: don't forget to change the order of the second AddP node on
4425   //  the alloc_worklist if the order of the worklist processing is changed,
4426   //  see the comment in find_second_addp().)
4427   //
4428   while (alloc_worklist.length() != 0) {
4429     Node *n = alloc_worklist.pop();
4430     uint ni = n->_idx;
4431     if (n->is_Call()) {
4432       CallNode *alloc = n->as_Call();
4433       // copy escape information to call node
4434       PointsToNode* ptn = ptnode_adr(alloc->_idx);
4435       PointsToNode::EscapeState es = ptn->escape_state();
4436       // We have an allocation or call which returns a Java object,
4437       // see if it is non-escaped.
4438       if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) {
4439         continue;
4440       }
4441       // Find CheckCastPP for the allocate or for the return value of a call
4442       n = alloc->result_cast();
4443       if (n == nullptr) {            // No uses except Initialize node
4444         if (alloc->is_Allocate()) {
4445           // Set the scalar_replaceable flag for allocation
4446           // so it could be eliminated if it has no uses.
4447           alloc->as_Allocate()->_is_scalar_replaceable = true;
4448         }
4449         continue;
4450       }
4451       if (!n->is_CheckCastPP()) { // not unique CheckCastPP.
        // We can reach here in the allocate case if one Initialize is associated with many allocations.
4453         if (alloc->is_Allocate()) {
4454           alloc->as_Allocate()->_is_scalar_replaceable = false;
4455         }
4456         continue;
4457       }
4458 
4459       // The inline code for Object.clone() casts the allocation result to
4460       // java.lang.Object and then to the actual type of the allocated
4461       // object. Detect this case and use the second cast.
4462       // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when
4463       // the allocation result is cast to java.lang.Object and then
4464       // to the actual Array type.
4465       if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
4466           && (alloc->is_AllocateArray() ||
4467               igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeInstKlassPtr::OBJECT)) {
4468         Node *cast2 = nullptr;
4469         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4470           Node *use = n->fast_out(i);
4471           if (use->is_CheckCastPP()) {
4472             cast2 = use;
4473             break;
4474           }
4475         }
4476         if (cast2 != nullptr) {
4477           n = cast2;
4478         } else {
          // Non-scalar replaceable if the allocation type is unknown statically
          // (reflection allocation): the object can't be restored during
          // deoptimization without a precise type.
4482           continue;
4483         }
4484       }
4485 
4486       const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
4487       if (t == nullptr) {
4488         continue;  // not a TypeOopPtr
4489       }
4490       if (!t->klass_is_exact()) {
        continue; // not a unique type
4492       }
4493       if (alloc->is_Allocate()) {
4494         // Set the scalar_replaceable flag for allocation
4495         // so it could be eliminated.
4496         alloc->as_Allocate()->_is_scalar_replaceable = true;
4497       }
4498       set_escape_state(ptnode_adr(n->_idx), es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); // CheckCastPP escape state
4499       // in order for an object to be scalar-replaceable, it must be:
4500       //   - a direct allocation (not a call returning an object)
4501       //   - non-escaping
4502       //   - eligible to be a unique type
4503       //   - not determined to be ineligible by escape analysis
4504       set_map(alloc, n);
4505       set_map(n, alloc);
4506       const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
4507       igvn->hash_delete(n);
4508       igvn->set_type(n,  tinst);
4509       n->raise_bottom_type(tinst);
4510       igvn->hash_insert(n);
4511       record_for_optimizer(n);
4512       // Allocate an alias index for the header fields. Accesses to
4513       // the header emitted during macro expansion wouldn't have
4514       // correct memory state otherwise.
4515       _compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes()));
4516       _compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes()));
4517       if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {
4518 
        // First, put on the worklist all Field edges from the Connection Graph,
        // which is more accurate than pushing the immediate users from the Ideal Graph.
4521         for (EdgeIterator e(ptn); e.has_next(); e.next()) {
4522           PointsToNode* tgt = e.get();
4523           if (tgt->is_Arraycopy()) {
4524             continue;
4525           }
4526           Node* use = tgt->ideal_node();
4527           assert(tgt->is_Field() && use->is_AddP(),
4528                  "only AddP nodes are Field edges in CG");
4529           if (use->outcnt() > 0) { // Don't process dead nodes
4530             Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
4531             if (addp2 != nullptr) {
4532               assert(alloc->is_AllocateArray(),"array allocation was expected");
4533               alloc_worklist.append_if_missing(addp2);
4534             }
4535             alloc_worklist.append_if_missing(use);
4536           }
4537         }
4538 
4539         // An allocation may have an Initialize which has raw stores. Scan
4540         // the users of the raw allocation result and push AddP users
4541         // on alloc_worklist.
4542         Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms);
4543         assert (raw_result != nullptr, "must have an allocation result");
4544         for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
4545           Node *use = raw_result->fast_out(i);
4546           if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
4547             Node* addp2 = find_second_addp(use, raw_result);
4548             if (addp2 != nullptr) {
4549               assert(alloc->is_AllocateArray(),"array allocation was expected");
4550               alloc_worklist.append_if_missing(addp2);
4551             }
4552             alloc_worklist.append_if_missing(use);
4553           } else if (use->is_MemBar()) {
4554             memnode_worklist.append_if_missing(use);
4555           }
4556         }
4557       }
4558     } else if (n->is_AddP()) {
4559       if (has_reducible_merge_base(n->as_AddP(), reducible_merges)) {
        // This AddP will go away when we reduce the Phi
4561         continue;
4562       }
4563       Node* addp_base = get_addp_base(n);
4564       JavaObjectNode* jobj = unique_java_object(addp_base);
4565       if (jobj == nullptr || jobj == phantom_obj) {
4566 #ifdef ASSERT
4567         ptnode_adr(get_addp_base(n)->_idx)->dump();
4568         ptnode_adr(n->_idx)->dump();
4569         assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
4570 #endif
4571         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4572         return;
4573       }
4574       Node *base = get_map(jobj->idx());  // CheckCastPP node
4575       if (!split_AddP(n, base)) continue; // wrong type from dead path
4576     } else if (n->is_Phi() ||
4577                n->is_CheckCastPP() ||
4578                n->is_EncodeP() ||
4579                n->is_DecodeN() ||
4580                (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
4581       if (visited.test_set(n->_idx)) {
4582         assert(n->is_Phi(), "loops only through Phi's");
4583         continue;  // already processed
4584       }
      // Reducible Phis will be removed from the graph after split_unique_types
      // finishes. For now we just try to split out the SR inputs of the merge.
4587       Node* parent = n->in(1);
4588       if (reducible_merges.member(n)) {
4589         reduce_phi(n->as_Phi(), alloc_worklist, memnode_worklist);
4590 #ifdef ASSERT
4591         if (VerifyReduceAllocationMerges) {
4592           reduced_merges.push(n);
4593         }
4594 #endif
4595         continue;
4596       } else if (reducible_merges.member(parent)) {
        // 'n' is a user of a reducible merge (a Phi). It will be simplified as
        // part of the merge reduction.
4599         continue;
4600       }
4601       JavaObjectNode* jobj = unique_java_object(n);
4602       if (jobj == nullptr || jobj == phantom_obj) {
4603 #ifdef ASSERT
4604         ptnode_adr(n->_idx)->dump();
4605         assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
4606 #endif
4607         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4608         return;
4609       } else {
4610         Node *val = get_map(jobj->idx());   // CheckCastPP node
4611         TypeNode *tn = n->as_Type();
4612         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
4613         assert(tinst != nullptr && tinst->is_known_instance() &&
4614                tinst->instance_id() == jobj->idx() , "instance type expected.");
4615 
4616         const Type *tn_type = igvn->type(tn);
4617         const TypeOopPtr *tn_t;
4618         if (tn_type->isa_narrowoop()) {
4619           tn_t = tn_type->make_ptr()->isa_oopptr();
4620         } else {
4621           tn_t = tn_type->isa_oopptr();
4622         }
4623         if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) {
4624           if (tn_t->isa_aryptr()) {
4625             // Keep array properties (not flat/null-free)
4626             tinst = tinst->is_aryptr()->update_properties(tn_t->is_aryptr());
4627             if (tinst == nullptr) {
4628               continue; // Skip dead path with inconsistent properties
4629             }
4630           }
4631           if (tn_type->isa_narrowoop()) {
4632             tn_type = tinst->make_narrowoop();
4633           } else {
4634             tn_type = tinst;
4635           }
4636           igvn->hash_delete(tn);
4637           igvn->set_type(tn, tn_type);
4638           tn->set_type(tn_type);
4639           igvn->hash_insert(tn);
4640           record_for_optimizer(n);
4641         } else {
4642           assert(tn_type == TypePtr::NULL_PTR ||
4643                  (tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t)),
4644                  "unexpected type");
4645           continue; // Skip dead path with different type
4646         }
4647       }
4648     } else {
4649       debug_only(n->dump();)
4650       assert(false, "EA: unexpected node");
4651       continue;
4652     }
4653     // push allocation's users on appropriate worklist
4654     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4655       Node *use = n->fast_out(i);
4656       if (use->is_Mem() && use->in(MemNode::Address) == n) {
4657         // Load/store to instance's field
4658         memnode_worklist.append_if_missing(use);
4659       } else if (use->is_MemBar()) {
4660         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4661           memnode_worklist.append_if_missing(use);
4662         }
4663       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
4664         Node* addp2 = find_second_addp(use, n);
4665         if (addp2 != nullptr) {
4666           alloc_worklist.append_if_missing(addp2);
4667         }
4668         alloc_worklist.append_if_missing(use);
4669       } else if (use->is_Phi() ||
4670                  use->is_CheckCastPP() ||
4671                  use->is_EncodeNarrowPtr() ||
4672                  use->is_DecodeNarrowPtr() ||
4673                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
4674         alloc_worklist.append_if_missing(use);
4675 #ifdef ASSERT
4676       } else if (use->is_Mem()) {
4677         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
4678       } else if (use->is_MergeMem()) {
4679         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4680       } else if (use->is_SafePoint()) {
4681         // Look for MergeMem nodes for calls which reference unique allocation
4682         // (through CheckCastPP nodes) even for debug info.
4683         Node* m = use->in(TypeFunc::Memory);
4684         if (m->is_MergeMem()) {
4685           assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4686         }
4687       } else if (use->Opcode() == Op_EncodeISOArray) {
4688         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4689           // EncodeISOArray overwrites destination array
4690           memnode_worklist.append_if_missing(use);
4691         }
4692       } else if (use->Opcode() == Op_Return) {
4693         // Allocation is referenced by field of returned inline type
4694         assert(_compile->tf()->returns_inline_type_as_fields(), "EA: unexpected reference by ReturnNode");
4695       } else {
4696         uint op = use->Opcode();
4697         if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
4698             (use->in(MemNode::Memory) == n)) {
          // They overwrite the memory edge corresponding to the destination array.
4700           memnode_worklist.append_if_missing(use);
4701         } else if (!(op == Op_CmpP || op == Op_Conv2B ||
4702               op == Op_CastP2X ||
4703               op == Op_FastLock || op == Op_AryEq ||
4704               op == Op_StrComp || op == Op_CountPositives ||
4705               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
4706               op == Op_StrEquals || op == Op_VectorizedHashCode ||
4707               op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
4708               op == Op_SubTypeCheck || op == Op_InlineType || op == Op_FlatArrayCheck ||
4709               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
4710           n->dump();
4711           use->dump();
4712           assert(false, "EA: missing allocation reference path");
4713         }
4714 #endif
4715       }
4716     }
4717 
4718   }
4719 
4720 #ifdef ASSERT
4721   if (VerifyReduceAllocationMerges) {
4722     for (uint i = 0; i < reducible_merges.size(); i++) {
4723       Node* phi = reducible_merges.at(i);
4724 
4725       if (!reduced_merges.member(phi)) {
4726         phi->dump(2);
4727         phi->dump(-2);
4728         assert(false, "This reducible merge wasn't reduced.");
4729       }
4730 
4731       // At this point reducible Phis shouldn't have AddP users anymore; only SafePoints or Casts.
4732       for (DUIterator_Fast jmax, j = phi->fast_outs(jmax); j < jmax; j++) {
4733         Node* use = phi->fast_out(j);
4734         if (!use->is_SafePoint() && !use->is_CastPP()) {
4735           phi->dump(2);
4736           phi->dump(-2);
4737           assert(false, "Unexpected user of reducible Phi -> %d:%s:%d", use->_idx, use->Name(), use->outcnt());
4738         }
4739       }
4740     }
4741   }
4742 #endif
4743 
4744   // Go over all ArrayCopy nodes and if one of the inputs has a unique
4745   // type, record it in the ArrayCopy node so we know what memory this
// node uses/modifies.
4747   for (int next = 0; next < arraycopy_worklist.length(); next++) {
4748     ArrayCopyNode* ac = arraycopy_worklist.at(next);
4749     Node* dest = ac->in(ArrayCopyNode::Dest);
4750     if (dest->is_AddP()) {
4751       dest = get_addp_base(dest);
4752     }
4753     JavaObjectNode* jobj = unique_java_object(dest);
4754     if (jobj != nullptr) {
4755       Node *base = get_map(jobj->idx());
4756       if (base != nullptr) {
4757         const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
4758         ac->_dest_type = base_t;
4759       }
4760     }
4761     Node* src = ac->in(ArrayCopyNode::Src);
4762     if (src->is_AddP()) {
4763       src = get_addp_base(src);
4764     }
4765     jobj = unique_java_object(src);
4766     if (jobj != nullptr) {
4767       Node* base = get_map(jobj->idx());
4768       if (base != nullptr) {
4769         const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
4770         ac->_src_type = base_t;
4771       }
4772     }
4773   }
4774 
4775   // New alias types were created in split_AddP().
4776   uint new_index_end = (uint) _compile->num_alias_types();
4777 
  //  Phase 2:  Process MemNodes from memnode_worklist. Compute a new address
  //            type and new values for the Memory inputs (the Memory inputs
  //            are not actually updated until phase 4).
  if (memnode_worklist.length() == 0) {
    return;  // nothing to do
  }
4783   while (memnode_worklist.length() != 0) {
4784     Node *n = memnode_worklist.pop();
4785     if (visited.test_set(n->_idx)) {
4786       continue;
4787     }
4788     if (n->is_Phi() || n->is_ClearArray()) {
4789       // we don't need to do anything, but the users must be pushed
4790     } else if (n->is_MemBar()) { // Initialize, MemBar nodes
4791       // we don't need to do anything, but the users must be pushed
4792       n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
4793       if (n == nullptr) {
4794         continue;
4795       }
    } else if (n->is_CallLeaf() && n->as_CallLeaf()->_name != nullptr &&
               strcmp(n->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
      // store_unknown_inline also has a narrow memory projection. This check
      // must precede the generic CallLeaf case below, which would otherwise
      // shadow it and make this branch unreachable.
      n = n->as_CallLeaf()->proj_out(TypeFunc::Memory);
    } else if (n->is_CallLeaf()) {
      // Runtime calls with narrow memory input (no MergeMem node)
      // get the memory projection
      n = n->as_Call()->proj_out_or_null(TypeFunc::Memory);
      if (n == nullptr) {
        continue;
      }
    } else if (n->Opcode() == Op_StrCompressedCopy ||
               n->Opcode() == Op_EncodeISOArray) {
      // get the memory projection
      n = n->find_out_with(Op_SCMemProj);
      assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4811     } else {
4812       assert(n->is_Mem(), "memory node required.");
4813       Node *addr = n->in(MemNode::Address);
4814       const Type *addr_t = igvn->type(addr);
4815       if (addr_t == Type::TOP) {
4816         continue;
4817       }
4818       assert (addr_t->isa_ptr() != nullptr, "pointer type required.");
4819       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
4820       assert ((uint)alias_idx < new_index_end, "wrong alias index");
4821       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
4822       if (_compile->failing()) {
4823         return;
4824       }
4825       if (mem != n->in(MemNode::Memory)) {
4826         // We delay the memory edge update since we need old one in
4827         // MergeMem code below when instances memory slices are separated.
4828         set_map(n, mem);
4829       }
4830       if (n->is_Load()) {
4831         continue;  // don't push users
4832       } else if (n->is_LoadStore()) {
4833         // get the memory projection
4834         n = n->find_out_with(Op_SCMemProj);
4835         assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4836       }
4837     }
4838     // push user on appropriate worklist
4839     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4840       Node *use = n->fast_out(i);
4841       if (use->is_Phi() || use->is_ClearArray()) {
4842         memnode_worklist.append_if_missing(use);
4843       } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
4844         memnode_worklist.append_if_missing(use);
4845       } else if (use->is_MemBar() || use->is_CallLeaf()) {
4846         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4847           memnode_worklist.append_if_missing(use);
4848         }
4849 #ifdef ASSERT
4850       } else if (use->is_Mem()) {
4851         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
4852       } else if (use->is_MergeMem()) {
4853         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4854       } else if (use->Opcode() == Op_EncodeISOArray) {
4855         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4856           // EncodeISOArray overwrites destination array
4857           memnode_worklist.append_if_missing(use);
4858         }
4859       } else if (use->is_CallLeaf() && use->as_CallLeaf()->_name != nullptr &&
4860                  strcmp(use->as_CallLeaf()->_name, "store_unknown_inline") == 0) {
4861         // store_unknown_inline overwrites destination array
4862         memnode_worklist.append_if_missing(use);
4863       } else {
4864         uint op = use->Opcode();
4865         if ((use->in(MemNode::Memory) == n) &&
4866             (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
          // They overwrite the memory edge corresponding to the destination array.
4868           memnode_worklist.append_if_missing(use);
4869         } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
4870               op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
4871               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
4872               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar || op == Op_FlatArrayCheck)) {
4873           n->dump();
4874           use->dump();
4875           assert(false, "EA: missing memory path");
4876         }
4877 #endif
4878       }
4879     }
4880   }
4881 
4882   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
4883   //            Walk each memory slice moving the first node encountered of each
4884   //            instance type to the input corresponding to its alias index.
4885   uint length = mergemem_worklist.length();
  for (uint next = 0; next < length; ++next) {
4887     MergeMemNode* nmm = mergemem_worklist.at(next);
4888     assert(!visited.test_set(nmm->_idx), "should not be visited before");
4889     // Note: we don't want to use MergeMemStream here because we only want to
4890     // scan inputs which exist at the start, not ones we add during processing.
    // Note 2: MergeMem may already contain instance memory slices added
    // during the find_inst_mem() call when memory nodes were processed above.
4893     igvn->hash_delete(nmm);
4894     uint nslices = MIN2(nmm->req(), new_index_start);
4895     for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
4896       Node* mem = nmm->in(i);
4897       Node* cur = nullptr;
4898       if (mem == nullptr || mem->is_top()) {
4899         continue;
4900       }
4901       // First, update mergemem by moving memory nodes to corresponding slices
4902       // if their type became more precise since this mergemem was created.
4903       while (mem->is_Mem()) {
4904         const Type *at = igvn->type(mem->in(MemNode::Address));
4905         if (at != Type::TOP) {
4906           assert (at->isa_ptr() != nullptr, "pointer type required.");
4907           uint idx = (uint)_compile->get_alias_index(at->is_ptr());
4908           if (idx == i) {
4909             if (cur == nullptr) {
4910               cur = mem;
4911             }
4912           } else {
4913             if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
4914               nmm->set_memory_at(idx, mem);
4915             }
4916           }
4917         }
4918         mem = mem->in(MemNode::Memory);
4919       }
4920       nmm->set_memory_at(i, (cur != nullptr) ? cur : mem);
      // Find any instance of the current type if we haven't already
      // encountered a memory slice of the instance along the memory chain.
4923       for (uint ni = new_index_start; ni < new_index_end; ni++) {
      if ((uint)_compile->get_general_index(ni) == i) {
4925           Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
4926           if (nmm->is_empty_memory(m)) {
4927             Node* result = find_inst_mem(mem, ni, orig_phis);
4928             if (_compile->failing()) {
4929               return;
4930             }
4931             nmm->set_memory_at(ni, result);
4932           }
4933         }
4934       }
4935     }
    // Find the memory values for the rest of the instances.
4937     for (uint ni = new_index_start; ni < new_index_end; ni++) {
4938       const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
4939       Node* result = step_through_mergemem(nmm, ni, tinst);
4940       if (result == nmm->base_memory()) {
4941         // Didn't find instance memory, search through general slice recursively.
4942         result = nmm->memory_at(_compile->get_general_index(ni));
4943         result = find_inst_mem(result, ni, orig_phis);
4944         if (_compile->failing()) {
4945           return;
4946         }
4947         nmm->set_memory_at(ni, result);
4948       }
4949     }
4950 
    // If we have crossed 3/4 of the max node limit, it's too risky
    // to continue with EA/SR because we might hit the limit.
4953     if (_compile->live_nodes() >= _compile->max_node_limit() * 0.75) {
4954       if (_compile->do_reduce_allocation_merges()) {
4955         _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
4956       } else if (_invocation > 0) {
4957         _compile->record_failure(C2Compiler::retry_no_iterative_escape_analysis());
4958       } else {
4959         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
4960       }
4961       return;
4962     }
4963 
4964     igvn->hash_insert(nmm);
4965     record_for_optimizer(nmm);
4966   }
4967 
4968   //  Phase 4:  Update the inputs of non-instance memory Phis and
4969   //            the Memory input of memnodes
4970   // First update the inputs of any non-instance Phi's from
4971   // which we split out an instance Phi.  Note we don't have
4972   // to recursively process Phi's encountered on the input memory
4973   // chains as is done in split_memory_phi() since they will
4974   // also be processed here.
4975   for (int j = 0; j < orig_phis.length(); j++) {
4976     PhiNode *phi = orig_phis.at(j);
4977     int alias_idx = _compile->get_alias_index(phi->adr_type());
4978     igvn->hash_delete(phi);
4979     for (uint i = 1; i < phi->req(); i++) {
4980       Node *mem = phi->in(i);
4981       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
4982       if (_compile->failing()) {
4983         return;
4984       }
4985       if (mem != new_mem) {
4986         phi->set_req(i, new_mem);
4987       }
4988     }
4989     igvn->hash_insert(phi);
4990     record_for_optimizer(phi);
4991   }
4992 
  // Update the memory inputs of MemNodes with the values we computed
  // in Phase 2 and move stores' memory users to the corresponding memory slices.
4995   // Disable memory split verification code until the fix for 6984348.
4996   // Currently it produces false negative results since it does not cover all cases.
4997 #if 0 // ifdef ASSERT
4998   visited.Reset();
4999   Node_Stack old_mems(arena, _compile->unique() >> 2);
5000 #endif
5001   for (uint i = 0; i < ideal_nodes.size(); i++) {
5002     Node*    n = ideal_nodes.at(i);
5003     Node* nmem = get_map(n->_idx);
5004     assert(nmem != nullptr, "sanity");
5005     if (n->is_Mem()) {
5006 #if 0 // ifdef ASSERT
5007       Node* old_mem = n->in(MemNode::Memory);
5008       if (!visited.test_set(old_mem->_idx)) {
5009         old_mems.push(old_mem, old_mem->outcnt());
5010       }
5011 #endif
5012       assert(n->in(MemNode::Memory) != nmem, "sanity");
5013       if (!n->is_Load()) {
5014         // Move memory users of a store first.
5015         move_inst_mem(n, orig_phis);
5016       }
5017       // Now update memory input
5018       igvn->hash_delete(n);
5019       n->set_req(MemNode::Memory, nmem);
5020       igvn->hash_insert(n);
5021       record_for_optimizer(n);
5022     } else {
5023       assert(n->is_Allocate() || n->is_CheckCastPP() ||
5024              n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
5025     }
5026   }
5027 #if 0 // ifdef ASSERT
5028   // Verify that memory was split correctly
5029   while (old_mems.is_nonempty()) {
5030     Node* old_mem = old_mems.node();
5031     uint  old_cnt = old_mems.index();
5032     old_mems.pop();
5033     assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
5034   }
5035 #endif
5036 }
5037 
5038 #ifndef PRODUCT
5039 int ConnectionGraph::_no_escape_counter = 0;
5040 int ConnectionGraph::_arg_escape_counter = 0;
5041 int ConnectionGraph::_global_escape_counter = 0;
5042 
5043 static const char *node_type_names[] = {
5044   "UnknownType",
5045   "JavaObject",
5046   "LocalVar",
5047   "Field",
5048   "Arraycopy"
5049 };
5050 
5051 static const char *esc_names[] = {
5052   "UnknownEscape",
5053   "NoEscape",
5054   "ArgEscape",
5055   "GlobalEscape"
5056 };
5057 
5058 void PointsToNode::dump_header(bool print_state, outputStream* out) const {
5059   NodeType nt = node_type();
5060   out->print("%s(%d) ", node_type_names[(int) nt], _pidx);
5061   if (print_state) {
5062     EscapeState es = escape_state();
5063     EscapeState fields_es = fields_escape_state();
5064     out->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]);
5065     if (nt == PointsToNode::JavaObject && !this->scalar_replaceable()) {
5066       out->print("NSR ");
5067     }
5068   }
5069 }
5070 
void PointsToNode::dump(bool print_state, outputStream* out, bool newline) const {
  dump_header(print_state, out);
  if (is_Field()) {
    FieldNode* f = (FieldNode*)this;
    if (f->is_oop()) {
      out->print("oop ");
    }
    if (f->offset() > 0) {
      out->print("+%d ", f->offset());
    }
    out->print("(");
    for (BaseIterator i(f); i.has_next(); i.next()) {
      PointsToNode* b = i.get();
      out->print(" %d%s", b->idx(), (b->is_JavaObject() ? "P" : ""));
    }
    out->print(" )");
  }
  out->print("[");
  for (EdgeIterator i(this); i.has_next(); i.next()) {
    PointsToNode* e = i.get();
    out->print(" %d%s%s", e->idx(), (e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : "");
  }
  out->print(" [");
  for (UseIterator i(this); i.has_next(); i.next()) {
    PointsToNode* u = i.get();
    bool is_base = false;
    if (PointsToNode::is_base_use(u)) {
      is_base = true;
      u = PointsToNode::get_use_node(u)->as_Field();
    }
    out->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : "");
  }
  out->print(" ]]  ");
  if (_node == nullptr) {
    out->print("<null>%s", newline ? "\n" : "");
  } else {
    _node->dump(newline ? "\n" : "", false, out);
  }
}

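// Dump the JavaObject nodes created for allocations and boxing calls along
// with the LocalVars that point to them. In Verbose mode, escaping objects
// and their other uses are printed as well.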
void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) {
  bool first = true;
  int ptnodes_length = ptnodes_worklist.length();
  for (int i = 0; i < ptnodes_length; i++) {
    PointsToNode* ptn = ptnodes_worklist.at(i);
    if (ptn == nullptr || !ptn->is_JavaObject()) {
      continue;
    }
    PointsToNode::EscapeState es = ptn->escape_state();
    if ((es != PointsToNode::NoEscape) && !Verbose) {
      continue;
    }
    Node* n = ptn->ideal_node();
    if (n->is_Allocate() || (n->is_CallStaticJava() &&
                             n->as_CallStaticJava()->is_boxing_method())) {
      if (first) {
        tty->cr();
        tty->print("======== Connection graph for ");
        _compile->method()->print_short_name();
        tty->cr();
        tty->print_cr("invocation #%d: %d iterations and %f sec to build connection graph with %d nodes and worklist size %d",
                      _invocation, _build_iterations, _build_time, nodes_size(), ptnodes_worklist.length());
        tty->cr();
        first = false;
      }
      ptn->dump();
      // Print all locals and fields which reference this allocation
      for (UseIterator j(ptn); j.has_next(); j.next()) {
        PointsToNode* use = j.get();
        if (use->is_LocalVar()) {
          use->dump(Verbose);
        } else if (Verbose) {
          use->dump();
        }
      }
      tty->cr();
    }
  }
}

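// Print the global escape-state counters. They are filled in by
// escape_state_statistics() when +PrintOptoStatistics is enabled.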
void ConnectionGraph::print_statistics() {
  tty->print_cr("No escape = %d, Arg escape = %d, Global escape = %d",
                Atomic::load(&_no_escape_counter),
                Atomic::load(&_arg_escape_counter),
                Atomic::load(&_global_escape_counter));
}

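// Bucket every allocation by its final escape state and update the global
// counters. Data is collected only for the first EA invocation of a
// compilation.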
void ConnectionGraph::escape_state_statistics(GrowableArray<JavaObjectNode*>& java_objects_worklist) {
  if (!PrintOptoStatistics || (_invocation > 0)) { // Collect data only for the first invocation
    return;
  }
  for (int next = 0; next < java_objects_worklist.length(); ++next) {
    JavaObjectNode* ptn = java_objects_worklist.at(next);
    if (ptn->ideal_node()->is_Allocate()) {
      if (ptn->escape_state() == PointsToNode::NoEscape) {
        Atomic::inc(&ConnectionGraph::_no_escape_counter);
      } else if (ptn->escape_state() == PointsToNode::ArgEscape) {
        Atomic::inc(&ConnectionGraph::_arg_escape_counter);
      } else if (ptn->escape_state() == PointsToNode::GlobalEscape) {
        Atomic::inc(&ConnectionGraph::_global_escape_counter);
      } else {
        assert(false, "Unexpected Escape State");
      }
    }
  }
}

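// When the TraceEscapeAnalysis compile directive is active, print a node's
// escape state transition: the old state, the new (object or fields) state
// and the reason for the update.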
void ConnectionGraph::trace_es_update_helper(PointsToNode* ptn, PointsToNode::EscapeState es, bool fields, const char* reason) const {
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    assert(ptn != nullptr, "should not be null");
    assert(reason != nullptr, "should not be null");
    ptn->dump_header(true);
    PointsToNode::EscapeState new_es = fields ? ptn->escape_state() : es;
    PointsToNode::EscapeState new_fields_es = fields ? es : ptn->fields_escape_state();
    tty->print_cr("-> %s(%s) %s", esc_names[(int)new_es], esc_names[(int)new_fields_es], reason);
  }
}

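// The trace_*_message() helpers below build resource-allocated strings that
// presumably serve as the "reason" argument for escape state updates; they
// return nullptr when tracing is disabled.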
const char* ConnectionGraph::trace_propagate_message(PointsToNode* from) const {
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    stringStream ss;
    ss.print("propagated from: ");
    from->dump(true, &ss, false);
    return ss.as_string();
  } else {
    return nullptr;
  }
}

const char* ConnectionGraph::trace_arg_escape_message(CallNode* call) const {
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    stringStream ss;
    ss.print("escapes as arg to:");
    call->dump("", false, &ss);
    return ss.as_string();
  } else {
    return nullptr;
  }
}

const char* ConnectionGraph::trace_merged_message(PointsToNode* other) const {
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    stringStream ss;
    ss.print("is merged with other object: ");
    other->dump_header(true, &ss);
    return ss.as_string();
  } else {
    return nullptr;
  }
}

#endif

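// Enqueue a node and all of its users on the IGVN worklist so the optimizer
// revisits them.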
void ConnectionGraph::record_for_optimizer(Node *n) {
  _igvn->_worklist.push(n);
  _igvn->add_users_to_worklist(n);
}