/*
 * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "opto/c2compiler.hpp"
#include "opto/arraycopynode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/escape.hpp"
#include "opto/macro.hpp"
#include "opto/locknode.hpp"
#include "opto/phaseX.hpp"
#include "opto/movenode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/castnode.hpp"
#include "opto/rootnode.hpp"
#include "utilities/macros.hpp"

ConnectionGraph::ConnectionGraph(Compile * C, PhaseIterGVN *igvn, int invocation) :
  // If ReduceAllocationMerges is enabled we might call split_through_phi during
  // split_unique_types and that will create additional nodes that need to be
  // pushed to the ConnectionGraph. The code below bumps the initial capacity of
  // _nodes by 10% to account for these additional nodes. If capacity is exceeded
  // the array will be reallocated.
  _nodes(C->comp_arena(), C->do_reduce_allocation_merges() ? C->unique()*1.10 : C->unique(), C->unique(), nullptr),
  _in_worklist(C->comp_arena()),
  _next_pidx(0),
  _collecting(true),
  _verify(false),
  _compile(C),
  _igvn(igvn),
  _invocation(invocation),
  _build_iterations(0),
  _build_time(0.),
  _node_map(C->comp_arena()) {
  // Add unknown java object.
  add_java_object(C->top(), PointsToNode::GlobalEscape);
  phantom_obj = ptnode_adr(C->top()->_idx)->as_JavaObject();
  set_not_scalar_replaceable(phantom_obj NOT_PRODUCT(COMMA "Phantom object"));
  // Add ConP and ConN null oop nodes
  Node* oop_null = igvn->zerocon(T_OBJECT);
  assert(oop_null->_idx < nodes_size(), "should be created already");
  add_java_object(oop_null, PointsToNode::NoEscape);
  null_obj = ptnode_adr(oop_null->_idx)->as_JavaObject();
  set_not_scalar_replaceable(null_obj NOT_PRODUCT(COMMA "Null object"));
  if (UseCompressedOops) {
    Node* noop_null = igvn->zerocon(T_NARROWOOP);
    assert(noop_null->_idx < nodes_size(), "should be created already");
    map_ideal_node(noop_null, null_obj);
  }
}

bool ConnectionGraph::has_candidates(Compile *C) {
  // EA brings benefits only when the code has allocations and/or locks which
  // are represented by ideal Macro nodes.
  int cnt = C->macro_count();
  for (int i = 0; i < cnt; i++) {
    Node *n = C->macro_node(i);
    if (n->is_Allocate()) {
      return true;
    }
    if (n->is_Lock()) {
      Node* obj = n->as_Lock()->obj_node()->uncast();
      if (!(obj->is_Parm() || obj->is_Con())) {
        return true;
      }
    }
    if (n->is_CallStaticJava() &&
        n->as_CallStaticJava()->is_boxing_method()) {
      return true;
    }
  }
  return false;
}

void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
  Compile::TracePhase tp("escapeAnalysis", &Phase::timers[Phase::_t_escapeAnalysis]);
  ResourceMark rm;

  // Add ConP and ConN null oop nodes before ConnectionGraph construction
  // to create space for them in ConnectionGraph::_nodes[].
  Node* oop_null = igvn->zerocon(T_OBJECT);
  Node* noop_null = igvn->zerocon(T_NARROWOOP);
  int invocation = 0;
  if (C->congraph() != nullptr) {
    invocation = C->congraph()->_invocation + 1;
  }
  ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn, invocation);
  // Perform escape analysis
  if (congraph->compute_escape()) {
    // There are non escaping objects.
    C->set_congraph(congraph);
  }
  // Cleanup.
  if (oop_null->outcnt() == 0) {
    igvn->hash_delete(oop_null);
  }
  if (noop_null->outcnt() == 0) {
    igvn->hash_delete(noop_null);
  }
}

bool ConnectionGraph::compute_escape() {
  Compile* C = _compile;
  PhaseGVN* igvn = _igvn;

  // Worklists used by EA.
  Unique_Node_List delayed_worklist;
  Unique_Node_List reducible_merges;
  GrowableArray<Node*> alloc_worklist;
  GrowableArray<Node*> ptr_cmp_worklist;
  GrowableArray<MemBarStoreStoreNode*> storestore_worklist;
  GrowableArray<ArrayCopyNode*>  arraycopy_worklist;
  GrowableArray<PointsToNode*>   ptnodes_worklist;
  GrowableArray<JavaObjectNode*> java_objects_worklist;
  GrowableArray<JavaObjectNode*> non_escaped_allocs_worklist;
  GrowableArray<FieldNode*>      oop_fields_worklist;
  GrowableArray<SafePointNode*>  sfn_worklist;
  GrowableArray<MergeMemNode*>   mergemem_worklist;
  DEBUG_ONLY( GrowableArray<Node*> addp_worklist; )

  { Compile::TracePhase tp("connectionGraph", &Phase::timers[Phase::_t_connectionGraph]);

  // 1. Populate Connection Graph (CG) with PointsTo nodes.
  ideal_nodes.map(C->live_nodes(), nullptr);  // preallocate space
  // Initialize worklist
  if (C->root() != nullptr) {
    ideal_nodes.push(C->root());
  }
  // Processed ideal nodes are unique on ideal_nodes list
  // but several ideal nodes are mapped to the phantom_obj.
  // To avoid duplicated entries on the following worklists
  // add the phantom_obj only once to them.
  ptnodes_worklist.append(phantom_obj);
  java_objects_worklist.append(phantom_obj);
  for (uint next = 0; next < ideal_nodes.size(); ++next) {
    Node* n = ideal_nodes.at(next);
    // Create PointsTo nodes and add them to Connection Graph. Called
    // only once per ideal node since ideal_nodes is Unique_Node list.
    add_node_to_connection_graph(n, &delayed_worklist);
    PointsToNode* ptn = ptnode_adr(n->_idx);
    if (ptn != nullptr && ptn != phantom_obj) {
      ptnodes_worklist.append(ptn);
      if (ptn->is_JavaObject()) {
        java_objects_worklist.append(ptn->as_JavaObject());
        if ((n->is_Allocate() || n->is_CallStaticJava()) &&
            (ptn->escape_state() < PointsToNode::GlobalEscape)) {
          // Only the results of allocations and java static calls are interesting.
          non_escaped_allocs_worklist.append(ptn->as_JavaObject());
        }
      } else if (ptn->is_Field() && ptn->as_Field()->is_oop()) {
        oop_fields_worklist.append(ptn->as_Field());
      }
    }
    // Collect some interesting nodes for further use.
    switch (n->Opcode()) {
      case Op_MergeMem:
        // Collect all MergeMem nodes to add memory slices for
        // scalar replaceable objects in split_unique_types().
        mergemem_worklist.append(n->as_MergeMem());
        break;
      case Op_CmpP:
      case Op_CmpN:
        // Collect compare pointers nodes.
        if (OptimizePtrCompare) {
          ptr_cmp_worklist.append(n);
        }
        break;
      case Op_MemBarStoreStore:
        // Collect all MemBarStoreStore nodes so that, depending on the
        // escape status of the associated Allocate node, some of them
        // may be eliminated.
        if (!UseStoreStoreForCtor || n->req() > MemBarNode::Precedent) {
          storestore_worklist.append(n->as_MemBarStoreStore());
        }
        break;
      case Op_MemBarRelease:
        if (n->req() > MemBarNode::Precedent) {
          record_for_optimizer(n);
        }
        break;
#ifdef ASSERT
      case Op_AddP:
        // Collect address nodes for graph verification.
        addp_worklist.append(n);
        break;
#endif
      case Op_ArrayCopy:
        // Keep a list of ArrayCopy nodes so that if one of its inputs is
        // non-escaping, we can record a unique type.
        arraycopy_worklist.append(n->as_ArrayCopy());
        break;
      default:
        // not interested now, ignore...
        break;
    }
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* m = n->fast_out(i);   // Get user
      ideal_nodes.push(m);
    }
    if (n->is_SafePoint()) {
      sfn_worklist.append(n->as_SafePoint());
    }
  }

#ifndef PRODUCT
  if (_compile->directive()->TraceEscapeAnalysisOption) {
    tty->print("+++++ Initial worklist for ");
    _compile->method()->print_name();
    tty->print_cr(" (ea_inv=%d)", _invocation);
    for (int i = 0; i < ptnodes_worklist.length(); i++) {
      PointsToNode* ptn = ptnodes_worklist.at(i);
      ptn->dump();
    }
    tty->print_cr("+++++ Calculating escape states and scalar replaceability");
  }
#endif

  if (non_escaped_allocs_worklist.length() == 0) {
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false; // Nothing to do.
  }
  // Add final simple edges to graph.
  while (delayed_worklist.size() > 0) {
    Node* n = delayed_worklist.pop();
    add_final_edges(n);
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that no new simple edges could be created and that all
    // local vars have edges.
    _verify = true;
    int ptnodes_length = ptnodes_worklist.length();
    for (int next = 0; next < ptnodes_length; ++next) {
      PointsToNode* ptn = ptnodes_worklist.at(next);
      add_final_edges(ptn->ideal_node());
      if (ptn->is_LocalVar() && ptn->edge_count() == 0) {
        ptn->dump();
        assert(ptn->as_LocalVar()->edge_count() > 0, "sanity");
      }
    }
    _verify = false;
  }
#endif
  // The bytecode analyzer BCEscapeAnalyzer, used for Call node processing,
  // makes calls into CI to resolve symbols (types, fields, methods)
  // referenced in bytecode. During symbol resolution the VM may throw
  // an exception, which CI cleans up and converts to a compilation failure.
  if (C->failing()) {
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  // 2. Finish Graph construction by propagating references to all
  //    java objects through graph.
  if (!complete_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                                 java_objects_worklist, oop_fields_worklist)) {
    // All objects escaped, or we hit the time or iteration limits.
    _collecting = false;
    NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
    return false;
  }

  // 3. Adjust scalar_replaceable state of nonescaping objects and push
  //    scalar replaceable allocations on alloc_worklist for processing
  //    in split_unique_types().
  GrowableArray<JavaObjectNode*> jobj_worklist;
  int non_escaped_length = non_escaped_allocs_worklist.length();
  bool found_nsr_alloc = false;
  for (int next = 0; next < non_escaped_length; next++) {
    JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
    bool noescape = (ptn->escape_state() == PointsToNode::NoEscape);
    Node* n = ptn->ideal_node();
    if (n->is_Allocate()) {
      n->as_Allocate()->_is_non_escaping = noescape;
    }
    if (noescape && ptn->scalar_replaceable()) {
      adjust_scalar_replaceable_state(ptn, reducible_merges);
      if (ptn->scalar_replaceable()) {
        jobj_worklist.push(ptn);
      } else {
        found_nsr_alloc = true;
      }
    }
  }

  // Propagate NSR (Not Scalar Replaceable) state.
  if (found_nsr_alloc) {
    find_scalar_replaceable_allocs(jobj_worklist, reducible_merges);
  }

  // alloc_worklist will be processed in reverse push order.
  // Therefore the reducible Phis will be processed last, and that's what we
  // want because by then the scalarizable inputs of the merge will already
  // have a unique instance type.
  for (uint i = 0; i < reducible_merges.size(); i++) {
    Node* n = reducible_merges.at(i);
    alloc_worklist.append(n);
  }

  for (int next = 0; next < jobj_worklist.length(); ++next) {
    JavaObjectNode* jobj = jobj_worklist.at(next);
    if (jobj->scalar_replaceable()) {
      alloc_worklist.append(jobj->ideal_node());
    }
  }

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    // Verify that graph is complete - no new edges could be added or needed.
    verify_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
                            java_objects_worklist, addp_worklist);
  }
  assert(C->unique() == nodes_size(), "no new ideal nodes should be added during ConnectionGraph build");
  assert(null_obj->escape_state() == PointsToNode::NoEscape &&
         null_obj->edge_count() == 0 &&
         !null_obj->arraycopy_src() &&
         !null_obj->arraycopy_dst(), "sanity");
#endif

  _collecting = false;

  } // TracePhase t3("connectionGraph")

  // 4. Optimize ideal graph based on EA information.
  bool has_non_escaping_obj = (non_escaped_allocs_worklist.length() > 0);
  if (has_non_escaping_obj) {
    optimize_ideal_graph(ptr_cmp_worklist, storestore_worklist);
  }

#ifndef PRODUCT
  if (PrintEscapeAnalysis) {
    dump(ptnodes_worklist); // Dump ConnectionGraph
  }
#endif

#ifdef ASSERT
  if (VerifyConnectionGraph) {
    int alloc_length = alloc_worklist.length();
    for (int next = 0; next < alloc_length; ++next) {
      Node* n = alloc_worklist.at(next);
      PointsToNode* ptn = ptnode_adr(n->_idx);
      assert(ptn->escape_state() == PointsToNode::NoEscape && ptn->scalar_replaceable(), "sanity");
    }
  }

  if (VerifyReduceAllocationMerges) {
    for (uint i = 0; i < reducible_merges.size(); i++) {
      Node* n = reducible_merges.at(i);
      if (!can_reduce_phi(n->as_Phi())) {
        TraceReduceAllocationMerges = true;
        n->dump(2);
        n->dump(-2);
        assert(can_reduce_phi(n->as_Phi()), "Sanity: previous reducible Phi is no longer reducible before SUT.");
      }
    }
  }
#endif

  // 5. Separate memory graph for scalar replaceable allocations.
  bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
  if (has_scalar_replaceable_candidates && EliminateAllocations) {
    assert(C->do_aliasing(), "Aliasing should be enabled");
    // Now use the escape information to create unique types for
    // scalar replaceable objects.
    split_unique_types(alloc_worklist, arraycopy_worklist, mergemem_worklist, reducible_merges);
    if (C->failing()) {
      NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
      return false;
    }
    C->print_method(PHASE_AFTER_EA, 2);

#ifdef ASSERT
  } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
    tty->print("=== No allocations eliminated for ");
    C->method()->print_short_name();
    if (!EliminateAllocations) {
      tty->print(" since EliminateAllocations is off ===");
    } else if (!has_scalar_replaceable_candidates) {
      tty->print(" since there are no scalar replaceable candidates ===");
    }
    tty->cr();
#endif
  }

  // 6. Reduce allocation merges used as debug information. This is done after
  // split_unique_types because the methods used to create SafePointScalarObject
  // need to traverse the memory graph to find values for object fields. We also
  // set to null the scalarized inputs of reducible Phis so that the Allocate
  // nodes that they point to can later be scalar replaced.
  bool delay = _igvn->delay_transform();
  _igvn->set_delay_transform(true);
  for (uint i = 0; i < reducible_merges.size(); i++) {
    Node* n = reducible_merges.at(i);
    if (n->outcnt() > 0) {
      if (!reduce_phi_on_safepoints(n->as_Phi())) {
        NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
        C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
        return false;
      }

      // Now we set the scalar replaceable inputs of ophi to null, which is
      // the last piece that would prevent it from being scalar replaceable.
      reset_scalar_replaceable_entries(n->as_Phi());
    }
  }
  _igvn->set_delay_transform(delay);

  // Annotate safepoints if they have <= ArgEscape objects in their scope, and
  // annotate java calls if they pass ArgEscape objects as parameters.
  if (has_non_escaping_obj &&
      (C->env()->should_retain_local_variables() ||
       C->env()->jvmti_can_get_owned_monitor_info() ||
       C->env()->jvmti_can_walk_any_space() ||
       DeoptimizeObjectsALot)) {
    int sfn_length = sfn_worklist.length();
    for (int next = 0; next < sfn_length; next++) {
      SafePointNode* sfn = sfn_worklist.at(next);
      sfn->set_has_ea_local_in_scope(has_ea_local_in_scope(sfn));
      if (sfn->is_CallJava()) {
        CallJavaNode* call = sfn->as_CallJava();
        call->set_arg_escape(has_arg_escape(call));
      }
    }
  }

  NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
  return has_non_escaping_obj;
}

// Check if it's profitable to reduce the Phi passed as a parameter. Returns true
// if at least one scalar replaceable allocation participates in the merge.
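//
// For illustration only, a hypothetical Java shape that produces such a
// merge, where the 'new Point()' allocation is scalar replaceable but the
// other input is not:
//
//   Point p = cond ? new Point() : someExternalPoint;
//   return p.x;
//
// Reducing the resulting Phi is profitable only because of the
// 'new Point()' input.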
bool ConnectionGraph::can_reduce_phi_check_inputs(PhiNode* ophi) const {
  bool found_sr_allocate = false;

  for (uint i = 1; i < ophi->req(); i++) {
    JavaObjectNode* ptn = unique_java_object(ophi->in(i));
    if (ptn != nullptr && ptn->scalar_replaceable()) {
      AllocateNode* alloc = ptn->ideal_node()->as_Allocate();

      // Don't handle arrays.
      if (alloc->Opcode() != Op_Allocate) {
        assert(alloc->Opcode() == Op_AllocateArray, "Unexpected type of allocation.");
        continue;
      }

      if (PhaseMacroExpand::can_eliminate_allocation(_igvn, alloc, nullptr)) {
        found_sr_allocate = true;
      } else {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("%dth input of Phi %d is SR but can't be eliminated.", i, ophi->_idx);)
        ptn->set_scalar_replaceable(false);
      }
    }
  }

  NOT_PRODUCT(if (TraceReduceAllocationMerges && !found_sr_allocate) tty->print_cr("Can NOT reduce Phi %d on invocation %d. No SR Allocate as input.", ophi->_idx, _invocation);)
  return found_sr_allocate;
}

// We can reduce the Cmp if it's a comparison between the Phi and a constant.
// I require the 'other' input to be a constant so that I can move the Cmp
// around safely.
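//
// For example (hypothetical Java shape), with 'p' being the merge below,
// 'p == null' yields a CmpP(Phi, NULL) with a single Bool user, which is
// reducible; a comparison against another non-constant pointer is not:
//
//   Point p = cond ? new Point() : other;
//   if (p == null) { ... }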
bool ConnectionGraph::can_reduce_cmp(Node* n, Node* cmp) const {
  assert(cmp->Opcode() == Op_CmpP || cmp->Opcode() == Op_CmpN, "not expected node: %s", cmp->Name());
  Node* left = cmp->in(1);
  Node* right = cmp->in(2);

  return (left == n || right == n) &&
         (left->is_Con() || right->is_Con()) &&
         cmp->outcnt() == 1;
}

// We are going to check if any of the SafePointScalarMerge entries
// in the SafePoint reference the Phi that we are checking.
bool ConnectionGraph::has_been_reduced(PhiNode* n, SafePointNode* sfpt) const {
  JVMState *jvms = sfpt->jvms();

  for (uint i = jvms->debug_start(); i < jvms->debug_end(); i++) {
    Node* sfpt_in = sfpt->in(i);
    if (sfpt_in->is_SafePointScalarMerge()) {
      SafePointScalarMergeNode* smerge = sfpt_in->as_SafePointScalarMerge();
      Node* nsr_ptr = sfpt->in(smerge->merge_pointer_idx(jvms));
      if (nsr_ptr == n) {
        return true;
      }
    }
  }

  return false;
}

// Check if we are able to untangle the merge. The following patterns are
// supported:
//  - Phi -> SafePoints
//  - Phi -> CmpP/N
//  - Phi -> AddP -> Load
//  - Phi -> CastPP -> SafePoints
//  - Phi -> CastPP -> AddP -> Load
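//
// For instance, the pattern "Phi -> CastPP -> AddP -> Load" may arise from a
// hypothetical Java shape like the one below, where the null check on the
// merge introduces the CastPP and the field read becomes AddP -> Load:
//
//   Point p = cond ? new Point() : other;
//   if (p != null) {
//     return p.x;
//   }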
bool ConnectionGraph::can_reduce_check_users(Node* n, uint nesting) const {
  for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
    Node* use = n->fast_out(i);

    if (use->is_SafePoint()) {
      if (use->is_Call() && use->as_Call()->has_non_debug_use(n)) {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Call has non_debug_use().", n->_idx, _invocation);)
        return false;
      } else if (has_been_reduced(n->is_Phi() ? n->as_Phi() : n->as_CastPP()->in(1)->as_Phi(), use->as_SafePoint())) {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. It has already been reduced.", n->_idx, _invocation);)
        return false;
      }
    } else if (use->is_AddP()) {
      Node* addp = use;
      for (DUIterator_Fast jmax, j = addp->fast_outs(jmax); j < jmax; j++) {
        Node* use_use = addp->fast_out(j);
        const Type* load_type = _igvn->type(use_use);

        if (!use_use->is_Load() || !use_use->as_Load()->can_split_through_phi_base(_igvn)) {
          NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. AddP user isn't a [splittable] Load(): %s", n->_idx, _invocation, use_use->Name());)
          return false;
        } else if (load_type->isa_narrowklass() || load_type->isa_klassptr()) {
          NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. [Narrow] Klass Load: %s", n->_idx, _invocation, use_use->Name());)
          return false;
        }
      }
    } else if (nesting > 0) {
      NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. Unsupported user %s at nesting level %d.", n->_idx, _invocation, use->Name(), nesting);)
      return false;
    } else if (use->is_CastPP()) {
      const Type* cast_t = _igvn->type(use);
      if (cast_t == nullptr || cast_t->make_ptr()->isa_instptr() == nullptr) {
#ifndef PRODUCT
        if (TraceReduceAllocationMerges) {
          tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP is not to an instance.", n->_idx, _invocation);
          use->dump();
        }
#endif
        return false;
      }

      bool is_trivial_control = use->in(0) == nullptr || use->in(0) == n->in(0);
      if (!is_trivial_control) {
        // If it's not a trivial control then we check if we can reduce the
        // CmpP/N used by the If controlling the cast.
        if (use->in(0)->is_IfTrue() || use->in(0)->is_IfFalse()) {
          Node* iff = use->in(0)->in(0);
          // We may have an OpaqueNotNull node between If and Bool nodes. But we could also have a subclass of IfNode,
          // for example, an OuterStripMinedLoopEnd or a Parse Predicate. Bail out in all these cases.
          bool can_reduce = (iff->Opcode() == Op_If) && iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp();
          if (can_reduce) {
            Node* iff_cmp = iff->in(1)->in(1);
            int opc = iff_cmp->Opcode();
            can_reduce = (opc == Op_CmpP || opc == Op_CmpN) && can_reduce_cmp(n, iff_cmp);
          }
          if (!can_reduce) {
#ifndef PRODUCT
            if (TraceReduceAllocationMerges) {
              tty->print_cr("Can NOT reduce Phi %d on invocation %d. CastPP %d doesn't have simple control.", n->_idx, _invocation, use->_idx);
              n->dump(5);
            }
#endif
            return false;
          }
        }
      }

      if (!can_reduce_check_users(use, nesting+1)) {
        return false;
      }
    } else if (use->Opcode() == Op_CmpP || use->Opcode() == Op_CmpN) {
      if (!can_reduce_cmp(n, use)) {
        NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. CmpP/N %d isn't reducible.", n->_idx, _invocation, use->_idx);)
        return false;
      }
    } else {
      NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Can NOT reduce Phi %d on invocation %d. One of the uses is: %d %s", n->_idx, _invocation, use->_idx, use->Name());)
      return false;
    }
  }

  return true;
}

// Returns true if: 1) It's profitable to reduce the merge, and 2) The Phi is
// only used in certain code shapes. Check comments in
// 'can_reduce_phi_check_inputs' and 'can_reduce_check_users' for more
// details.
bool ConnectionGraph::can_reduce_phi(PhiNode* ophi) const {
  // If there was an error attempting to reduce allocation merges for this
  // method we might have disabled the compilation and be retrying with RAM
  // disabled.
  if (!_compile->do_reduce_allocation_merges() || ophi->region()->Opcode() != Op_Region) {
    return false;
  }

  const Type* phi_t = _igvn->type(ophi);
  if (phi_t == nullptr ||
      phi_t->make_ptr() == nullptr ||
      phi_t->make_ptr()->isa_aryptr() != nullptr) {
    return false;
  }

  if (!can_reduce_phi_check_inputs(ophi) || !can_reduce_check_users(ophi, /* nesting: */ 0)) {
    return false;
  }

  NOT_PRODUCT(if (TraceReduceAllocationMerges) { tty->print_cr("Can reduce Phi %d during invocation %d: ", ophi->_idx, _invocation); })
  return true;
}

// This method will return a CmpP/N that we need to use on the If controlling a
// CastPP after it was split. This method is only called on bases that are
// nullable, therefore we always need a controlling If for the split CastPP.
//
// 'curr_ctrl' is the control of the CastPP that we want to split through phi.
// If the CastPP currently doesn't have a control then the CmpP/N will be
// against the NULL constant, otherwise it will be against the constant input of
// the existing CmpP/N. It's guaranteed that there will be a CmpP/N in the latter
// case because we have constraints on it and because the CastPP has a control
// input.
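//
// Sketch of the two cases: with no current control we return
// CmpP/N(base, NULL); with an existing If(Bool(CmpP/N(phi, con))) control we
// return CmpP/N(base, con), so the new check mirrors the existing one.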
Node* ConnectionGraph::specialize_cmp(Node* base, Node* curr_ctrl) {
  const Type* t = base->bottom_type();
  Node* con = nullptr;

  if (curr_ctrl == nullptr || curr_ctrl->is_Region()) {
    con = _igvn->zerocon(t->basic_type());
  } else {
    // can_reduce_check_users() verified graph: true/false -> if -> bool -> cmp
    assert(curr_ctrl->in(0)->Opcode() == Op_If, "unexpected node %s", curr_ctrl->in(0)->Name());
    Node* bol = curr_ctrl->in(0)->in(1);
    assert(bol->is_Bool(), "unexpected node %s", bol->Name());
    Node* curr_cmp = bol->in(1);
    assert(curr_cmp->Opcode() == Op_CmpP || curr_cmp->Opcode() == Op_CmpN, "unexpected node %s", curr_cmp->Name());
    con = curr_cmp->in(1)->is_Con() ? curr_cmp->in(1) : curr_cmp->in(2);
  }

  return CmpNode::make(base, con, t->basic_type());
}

// This method 'specializes' the CastPP passed as parameter to the base passed
// as parameter. Note that the existing CastPP input is a Phi. "Specialize"
// means that the CastPP now will be specific for a given base instead of a Phi.
// An If-Then-Else-Region block is inserted to control the CastPP. The control
// of the CastPP is a copy of the current one (if there is one) or a check
// against NULL.
//
// Before:
//
//    C1     C2  ... Cn
//     \      |      /
//      \     |     /
//       \    |    /
//        \   |   /
//         \  |  /
//          \ | /
//           \|/
//          Region     B1      B2  ... Bn
//            |          \      |      /
//            |           \     |     /
//            |            \    |    /
//            |             \   |   /
//            |              \  |  /
//            |               \ | /
//            ---------------> Phi
//                              |
//                      X       |
//                      |       |
//                      |       |
//                      ------> CastPP
//
// After (only partial illustration; base = B2, current_control = C2):
//
//                      C2
//                      |
//                      If
//                     / \
//                    /   \
//                   T     F
//                  /\     /
//                 /  \   /
//                /    \ /
//      C1    CastPP   Reg        Cn
//       |              |          |
//       |              |          |
//       |              |          |
//       -------------- | ----------
//                    | | |
//                    Region
//
Node* ConnectionGraph::specialize_castpp(Node* castpp, Node* base, Node* current_control) {
  Node* control_successor  = current_control->unique_ctrl_out();
  Node* cmp                = _igvn->transform(specialize_cmp(base, castpp->in(0)));
  Node* bol                = _igvn->transform(new BoolNode(cmp, BoolTest::ne));
  IfNode* if_ne            = _igvn->transform(new IfNode(current_control, bol, PROB_MIN, COUNT_UNKNOWN))->as_If();
  Node* not_eq_control     = _igvn->transform(new IfTrueNode(if_ne));
  Node* yes_eq_control     = _igvn->transform(new IfFalseNode(if_ne));
  Node* end_region         = _igvn->transform(new RegionNode(3));

  // Insert the new if-else-region block into the graph
  end_region->set_req(1, not_eq_control);
  end_region->set_req(2, yes_eq_control);
  control_successor->replace_edge(current_control, end_region, _igvn);

  _igvn->_worklist.push(current_control);
  _igvn->_worklist.push(control_successor);

  return _igvn->transform(ConstraintCastNode::make_cast_for_type(not_eq_control, base, _igvn->type(castpp), ConstraintCastNode::UnconditionalDependency, nullptr));
}

Node* ConnectionGraph::split_castpp_load_through_phi(Node* curr_addp, Node* curr_load, Node* region, GrowableArray<Node*>* bases_for_loads, GrowableArray<Node *>  &alloc_worklist) {
  const Type* load_type = _igvn->type(curr_load);
  Node* nsr_value = _igvn->zerocon(load_type->basic_type());
  Node* memory = curr_load->in(MemNode::Memory);

  // The data_phi merging the loads needs to be nullable if
  // we are loading pointers.
  if (load_type->make_ptr() != nullptr) {
    if (load_type->isa_narrowoop()) {
      load_type = load_type->meet(TypeNarrowOop::NULL_PTR);
    } else if (load_type->isa_ptr()) {
      load_type = load_type->meet(TypePtr::NULL_PTR);
    } else {
      assert(false, "Unexpected load ptr type.");
    }
  }

  Node* data_phi = PhiNode::make(region, nsr_value, load_type);

  for (int i = 1; i < bases_for_loads->length(); i++) {
    Node* base = bases_for_loads->at(i);
    Node* cmp_region = nullptr;
    if (base != nullptr) {
      if (base->is_CFG()) { // means that we added a CastPP as child of this CFG node
        cmp_region = base->unique_ctrl_out_or_null();
        assert(cmp_region != nullptr, "There should be.");
        base = base->find_out_with(Op_CastPP);
      }

      Node* addr = _igvn->transform(new AddPNode(base, base, curr_addp->in(AddPNode::Offset)));
      Node* mem = (memory->is_Phi() && (memory->in(0) == region)) ? memory->in(i) : memory;
      Node* load = curr_load->clone();
      load->set_req(0, nullptr);
      load->set_req(1, mem);
      load->set_req(2, addr);

      if (cmp_region != nullptr) { // see comment on previous if
        Node* intermediate_phi = PhiNode::make(cmp_region, nsr_value, load_type);
        intermediate_phi->set_req(1, _igvn->transform(load));
        load = intermediate_phi;
      }

      data_phi->set_req(i, _igvn->transform(load));
    } else {
      // Just use the default, which is already in phi
    }
  }

  // Takes care of updating CG and split_unique_types worklists due
  // to cloned AddP->Load.
  updates_after_load_split(data_phi, curr_load, alloc_worklist);

  return _igvn->transform(data_phi);
}

// This method only reduces CastPP field loads; SafePoints are handled
// separately. The idea here is basically to clone the CastPP and place copies
// on each input of the Phi, including non-scalar replaceable inputs.
// Experimentation shows that the resulting IR graph is simpler that way than if
// we just split the cast through scalar-replaceable inputs.
//
// The reduction process requires that CastPP's control be one of:
//  1) no control,
//  2) the same region as Ophi, or
//  3) an IfTrue/IfFalse coming from a CmpP/N between Ophi and a constant.
//
// After splitting the CastPP we'll put it under an If-Then-Else-Region control
// flow. If the CastPP originally had an IfTrue/False control input then we'll
// use a similar CmpP/N to control the new If-Then-Else-Region. Otherwise, we'll
// just use a CmpP/N against the NULL constant.
//
// The If-Then-Else-Region isn't always needed. For instance, if the input to
// the split cast was not nullable (or if it was the NULL constant) then we
// don't need to (and shouldn't) use a CastPP at all.
//
// After the casts are split we'll split the AddP->Loads through the Phi and
// connect them to the just-split CastPPs.
//
// Before (CastPP control is same as Phi):
//
//          Region     Allocate   Null    Call
//            |             \      |      /
//            |              \     |     /
//            |               \    |    /
//            |                \   |   /
//            |                 \  |  /
//            |                  \ | /
//            ------------------> Phi            # Oop Phi
//            |                    |
//            |                    |
//            |                    |
//            |                    |
//            ----------------> CastPP
//                                 |
//                               AddP
//                                 |
//                               Load
//
// After (Very much simplified):
//
//                         Call  NULL
//                            \  /
//                            CmpP
//                             |
//                           Bool#NE
//                             |
//                             If
//                            / \
//                           T   F
//                          / \ /
//                         /   R
//                     CastPP  |
//                       |     |
//                     AddP    |
//                       |     |
//                     Load    |
//                         \   |   0
//            Allocate      \  |  /
//                \          \ | /
//               AddP         Phi
//                  \         /
//                 Load      /
//                    \  0  /
//                     \ | /
//                      \|/
//                      Phi        # "Field" Phi
//
void ConnectionGraph::reduce_phi_on_castpp_field_load(Node* curr_castpp, GrowableArray<Node *>  &alloc_worklist, GrowableArray<Node *>  &memnode_worklist) {
  Node* ophi = curr_castpp->in(1);
  assert(ophi->is_Phi(), "Expected this to be a Phi node.");

  // Identify which base should be used for AddP->Load later when splitting the
  // CastPP->Loads through ophi. Three kinds of values may be stored in this
  // array, depending on the nullability status of the corresponding input in
  // ophi.
  //
  //  - nullptr:    Meaning that the base is actually the NULL constant and therefore
  //                we won't try to load from it.
  //
  //  - CFG Node:   Meaning that the base is a CastPP that was specialized for
  //                this input of Ophi. I.e., we added an If->Then->Else-Region
  //                that will 'activate' the CastPP only when the input is not Null.
  //
  //  - Other Node: Meaning that the base is not nullable and therefore we'll try
  //                to load directly from it.
  GrowableArray<Node*> bases_for_loads(ophi->req(), ophi->req(), nullptr);

  for (uint i = 1; i < ophi->req(); i++) {
    Node* base = ophi->in(i);
    const Type* base_t = _igvn->type(base);

    if (base_t->maybe_null()) {
      if (base->is_Con()) {
        // Nothing to do as bases_for_loads[i] is already nullptr
      } else {
        Node* new_castpp = specialize_castpp(curr_castpp, base, ophi->in(0)->in(i));
        bases_for_loads.at_put(i, new_castpp->in(0)); // Use the ctrl of the new node just as a flag
      }
    } else {
      bases_for_loads.at_put(i, base);
    }
  }

  // Now let's split the CastPP->Loads through the Phi
  for (int i = curr_castpp->outcnt()-1; i >= 0;) {
    Node* use = curr_castpp->raw_out(i);
    if (use->is_AddP()) {
      for (int j = use->outcnt()-1; j >= 0;) {
        Node* use_use = use->raw_out(j);
        assert(use_use->is_Load(), "Expected this to be a Load node.");

        // We can't make an unconditional load from a nullable input. The
        // 'split_castpp_load_through_phi' method will add an
        // 'If-Then-Else-Region' around nullable bases and only load from them
        // when the input is not null.
        Node* phi = split_castpp_load_through_phi(use, use_use, ophi->in(0), &bases_for_loads, alloc_worklist);
        _igvn->replace_node(use_use, phi);

        --j;
        j = MIN2(j, (int)use->outcnt()-1);
      }

      _igvn->remove_dead_node(use);
    }
    --i;
    i = MIN2(i, (int)curr_castpp->outcnt()-1);
  }
}

// This method splits a given CmpP/N through the Phi used in one of its inputs.
// As a result we convert a comparison with a pointer into a comparison with an
// integer.
// The only requirement is that one of the inputs of the CmpP/N must be a Phi
// while the other must be a constant.
// The splitting process is basically just cloning the CmpP/N above the input
// Phi.  However, some (most) of the cloned CmpP/Ns won't be required because we
// can prove at compile time the result of the comparison.
//
// Before:
//
//             in1    in2 ... inN
//              \      |      /
//               \     |     /
//                \    |    /
//                 \   |   /
//                  \  |  /
//                   \ | /
//                    Phi
//                     |   Other
//                     |    /
//                     |   /
//                     |  /
//                    CmpP/N
//
// After:
//
//        in1  Other   in2 Other  inN  Other
//         |    |      |   |      |    |
//         \    |      |   |      |    |
//          \  /       |   /      |    /
//          CmpP/N    CmpP/N     CmpP/N
//          Bool      Bool       Bool
//            \        |        /
//             \       |       /
//              \      |      /
//               \     |     /
//                \    |    /
//                 \   |   /
//                  \  |  /
//                   \ | /
//                    Phi
//                     |
//                     |   Zero
//                     |    /
//                     |   /
//                     |  /
//                     CmpI
//
//
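// For example (hypothetical Java shape):
//
//   Point p = cond ? new Point() : other;
//   boolean b = (p == null);
//
// After the split, "new Point() == null" folds to a constant where
// optimize_ptr_compare() can prove the result, while the "other == null" side
// keeps a CmpP/N; the new Phi then merges plain integers instead of pointers.
//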
void ConnectionGraph::reduce_phi_on_cmp(Node* cmp) {
  Node* ophi = cmp->in(1)->is_Con() ? cmp->in(2) : cmp->in(1);
  assert(ophi->is_Phi(), "Expected this to be a Phi node.");

  Node* other = cmp->in(1)->is_Con() ? cmp->in(1) : cmp->in(2);
  Node* zero = _igvn->intcon(0);
  BoolTest::mask mask = cmp->unique_out()->as_Bool()->_test._test;

  // This Phi will merge the result of the Cmps split through the Phi
  Node* res_phi  = _igvn->transform(PhiNode::make(ophi->in(0), zero, TypeInt::INT));

  for (uint i = 1; i < ophi->req(); i++) {
    Node* ophi_input = ophi->in(i);
    Node* res_phi_input = nullptr;

    const TypeInt* tcmp = optimize_ptr_compare(ophi_input, other);
    if (tcmp->singleton()) {
      res_phi_input = _igvn->makecon(tcmp);
    } else {
      Node* ncmp = _igvn->transform(cmp->clone());
      ncmp->set_req(1, ophi_input);
      ncmp->set_req(2, other);
      Node* bol = _igvn->transform(new BoolNode(ncmp, mask));
      res_phi_input = bol->as_Bool()->as_int_value(_igvn);
    }

    res_phi->set_req(i, res_phi_input);
  }

  Node* new_cmp = _igvn->transform(new CmpINode(res_phi, zero));
  _igvn->replace_node(cmp, new_cmp);
}

// Push the newly created AddP on alloc_worklist and patch
// the connection graph. Note that the changes in the CG below
// won't affect the ES of objects since the new nodes have the
// same status as the old ones.
void ConnectionGraph::updates_after_load_split(Node* data_phi, Node* previous_load, GrowableArray<Node *>  &alloc_worklist) {
  assert(data_phi != nullptr, "Output of split_through_phi is null.");
  assert(data_phi != previous_load, "Output of split_through_phi is same as input.");
  assert(data_phi->is_Phi(), "Output of split_through_phi isn't a Phi.");

  if (data_phi == nullptr || !data_phi->is_Phi()) {
    // Make this a retry?
    return;
  }

  Node* previous_addp = previous_load->in(MemNode::Address);
  FieldNode* fn = ptnode_adr(previous_addp->_idx)->as_Field();
  for (uint i = 1; i < data_phi->req(); i++) {
    Node* new_load = data_phi->in(i);

    if (new_load->is_Phi()) {
      // new_load is currently the "intermediate_phi" from a specialized
      // CastPP.
      new_load = new_load->in(1);
    }

    // "new_load" might actually be a constant, parameter, etc.
    if (new_load->is_Load()) {
      Node* new_addp = new_load->in(MemNode::Address);
      Node* base = get_addp_base(new_addp);

      // The base might not be something that we can create a unique
      // type for. If that's the case we are done with that input.
      PointsToNode* jobj_ptn = unique_java_object(base);
      if (jobj_ptn == nullptr || !jobj_ptn->scalar_replaceable()) {
        continue;
      }

      // Push to alloc_worklist since the base has a unique type
      alloc_worklist.append_if_missing(new_addp);

      // Now let's add the node to the connection graph
      _nodes.at_grow(new_addp->_idx, nullptr);
      add_field(new_addp, fn->escape_state(), fn->offset());
      add_base(ptnode_adr(new_addp->_idx)->as_Field(), ptnode_adr(base->_idx));

      // If the load doesn't load an object then it won't be
      // part of the connection graph
      PointsToNode* curr_load_ptn = ptnode_adr(previous_load->_idx);
      if (curr_load_ptn != nullptr) {
        _nodes.at_grow(new_load->_idx, nullptr);
        add_local_var(new_load, curr_load_ptn->escape_state());
        add_edge(ptnode_adr(new_load->_idx), ptnode_adr(new_addp->_idx)->as_Field());
      }
    }
  }
}

void ConnectionGraph::reduce_phi_on_field_access(Node* previous_addp, GrowableArray<Node *>  &alloc_worklist) {
  // We'll pass this to 'split_through_phi' so that it'll do the split even
  // though the load doesn't have a unique instance type.
  bool ignore_missing_instance_id = true;

  // All AddPs are present in the connection graph
  FieldNode* fn = ptnode_adr(previous_addp->_idx)->as_Field();

  // Iterate over AddP looking for a Load
  for (int k = previous_addp->outcnt()-1; k >= 0;) {
    Node* previous_load = previous_addp->raw_out(k);
    if (previous_load->is_Load()) {
      Node* data_phi = previous_load->as_Load()->split_through_phi(_igvn, ignore_missing_instance_id);

      // Takes care of updating CG and split_unique_types worklists due to cloned
      // AddP->Load.
      updates_after_load_split(data_phi, previous_load, alloc_worklist);

      _igvn->replace_node(previous_load, data_phi);
    }
    --k;
    k = MIN2(k, (int)previous_addp->outcnt()-1);
  }

  // Remove the old AddP from the processing list because it's dead now
  assert(previous_addp->outcnt() == 0, "AddP should be dead now.");
  alloc_worklist.remove_if_existing(previous_addp);
}

// Create a 'selector' Phi based on the inputs of 'ophi'. If index 'i' of the
// selector is:
//    -> a '-1' constant, the i'th input of the original Phi is NSR.
//    -> an 'x' constant >= 0, the i'th input of the original Phi will be SR and
//       the info about the scalarized object will be at index x of ObjectMergeValue::possible_objects
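//
// For example, for a hypothetical merge
//    ophi     = Phi(Region, Alloc1, CallResult, Alloc2)  // Alloc1/Alloc2 SR
// the selector created is
//    selector = Phi(Region, 0, -1, 1)
// i.e., Alloc1's description goes to index 0 and Alloc2's to index 1 of
// ObjectMergeValue::possible_objects, while the call result is NSR.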
PhiNode* ConnectionGraph::create_selector(PhiNode* ophi) const {
  Node* minus_one = _igvn->register_new_node_with_optimizer(ConINode::make(-1));
  Node* selector  = _igvn->register_new_node_with_optimizer(PhiNode::make(ophi->region(), minus_one, TypeInt::INT));
  uint number_of_sr_objects = 0;
  for (uint i = 1; i < ophi->req(); i++) {
    Node* base = ophi->in(i);
    JavaObjectNode* ptn = unique_java_object(base);

    if (ptn != nullptr && ptn->scalar_replaceable()) {
      Node* sr_obj_idx = _igvn->register_new_node_with_optimizer(ConINode::make(number_of_sr_objects));
      selector->set_req(i, sr_obj_idx);
      number_of_sr_objects++;
    }
  }

  return selector->as_Phi();
}

// Returns true if the AddP node 'n' has at least one base that is a reducible
// merge. If the base is a CastPP/CheckCastPP then the input of the cast is
// checked instead.
bool ConnectionGraph::has_reducible_merge_base(AddPNode* n, Unique_Node_List &reducible_merges) {
  PointsToNode* ptn = ptnode_adr(n->_idx);
  if (ptn == nullptr || !ptn->is_Field() || ptn->as_Field()->base_count() < 2) {
    return false;
  }

  for (BaseIterator i(ptn->as_Field()); i.has_next(); i.next()) {
    Node* base = i.get()->ideal_node();

    if (reducible_merges.member(base)) {
      return true;
    }

    if (base->is_CastPP() || base->is_CheckCastPP()) {
      base = base->in(1);
      if (reducible_merges.member(base)) {
        return true;
      }
    }
  }

  return false;
}

// This method will call its helper method to reduce SafePoint nodes that use
// 'ophi' or a casted version of 'ophi'. All SafePoint nodes using the same
// "version" of Phi use the same debug information (regarding the Phi).
// Therefore, I collect all safepoints and patch them all at once.
//
// The safepoints using the Phi node have to be processed before safepoints of
// CastPP nodes. The reason is, when reducing a CastPP we add a reference (the
// NSR merge pointer) to the input of the CastPP (i.e., the Phi) in the
// safepoint. If we process CastPP's safepoints before Phi's safepoints, the
// algorithm that processes Phi's safepoints will think that the added Phi
// reference is a regular reference.
bool ConnectionGraph::reduce_phi_on_safepoints(PhiNode* ophi) {
  PhiNode* selector = create_selector(ophi);
  Unique_Node_List safepoints;
  Unique_Node_List casts;

  // Just collect the users of the Phis for later processing
  // in the needed order.
  for (uint i = 0; i < ophi->outcnt(); i++) {
    Node* use = ophi->raw_out(i);
    if (use->is_SafePoint()) {
      safepoints.push(use);
    } else if (use->is_CastPP()) {
      casts.push(use);
    } else {
      assert(use->outcnt() == 0, "Only CastPP & SafePoint users should be left.");
    }
  }

  // Need to process safepoints using the Phi first
  if (!reduce_phi_on_safepoints_helper(ophi, nullptr, selector, safepoints)) {
    return false;
  }

  // Now process CastPP->safepoints
  for (uint i = 0; i < casts.size(); i++) {
    Node* cast = casts.at(i);
    Unique_Node_List cast_sfpts;

    for (DUIterator_Fast jmax, j = cast->fast_outs(jmax); j < jmax; j++) {
      Node* use_use = cast->fast_out(j);
      if (use_use->is_SafePoint()) {
        cast_sfpts.push(use_use);
      } else {
        assert(use_use->outcnt() == 0, "Only SafePoint users should be left.");
      }
    }

    if (!reduce_phi_on_safepoints_helper(ophi, cast, selector, cast_sfpts)) {
      return false;
    }
  }

  return true;
}

// This method will create a SafePointScalarMergeNode for each SafePoint in
// 'safepoints'. It will then iterate on the inputs of 'ophi' and create a
// SafePointScalarObjectNode for each scalar replaceable input. Each
// SafePointScalarMergeNode may describe multiple scalar replaced objects -
// check the detailed description in the SafePointScalarMergeNode class header.
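//
// A sketch of the resulting debug info layout for one reduced merge: the
// SafePoint gains two extra inputs (the NSR merge pointer and the selector),
// and the SafePointScalarMerge node references one SafePointScalarObject per
// scalar replaceable input of the Phi.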
bool ConnectionGraph::reduce_phi_on_safepoints_helper(Node* ophi, Node* cast, Node* selector, Unique_Node_List& safepoints) {
  PhaseMacroExpand mexp(*_igvn);
  Node* original_sfpt_parent = cast != nullptr ? cast : ophi;
  const TypeOopPtr* merge_t = _igvn->type(original_sfpt_parent)->make_oopptr();

  Node* nsr_merge_pointer = ophi;
  if (cast != nullptr) {
    const Type* new_t = merge_t->meet(TypePtr::NULL_PTR);
    nsr_merge_pointer = _igvn->transform(ConstraintCastNode::make_cast_for_type(cast->in(0), cast->in(1), new_t, ConstraintCastNode::RegularDependency, nullptr));
  }

  for (uint spi = 0; spi < safepoints.size(); spi++) {
    SafePointNode* sfpt = safepoints.at(spi)->as_SafePoint();
    JVMState *jvms      = sfpt->jvms();
    uint merge_idx      = (sfpt->req() - jvms->scloff());
    int debug_start     = jvms->debug_start();

    SafePointScalarMergeNode* smerge = new SafePointScalarMergeNode(merge_t, merge_idx);
    smerge->init_req(0, _compile->root());
    _igvn->register_new_node_with_optimizer(smerge);

    // The next two inputs are:
    //  (1) A copy of the original pointer to NSR objects.
    //  (2) A selector, used to decide if we need to rematerialize an object
    //      or use the pointer to a NSR object.
    // See more details of these fields in the declaration of SafePointScalarMergeNode
    sfpt->add_req(nsr_merge_pointer);
    sfpt->add_req(selector);

    for (uint i = 1; i < ophi->req(); i++) {
      Node* base = ophi->in(i);
      JavaObjectNode* ptn = unique_java_object(base);

      // If the base is not scalar replaceable we don't need to register information about
      // it at this time.
      if (ptn == nullptr || !ptn->scalar_replaceable()) {
        continue;
      }

      AllocateNode* alloc = ptn->ideal_node()->as_Allocate();
      SafePointScalarObjectNode* sobj = mexp.create_scalarized_object_description(alloc, sfpt);
      if (sobj == nullptr) {
        return false;
      }

      // Now make a pass over the debug information replacing any references
      // to the allocated object with "sobj"
      Node* ccpp = alloc->result_cast();
      sfpt->replace_edges_in_range(ccpp, sobj, debug_start, jvms->debug_end(), _igvn);

      // Register the scalarized object as a candidate for reallocation
      smerge->add_req(sobj);
    }

    // Replaces debug information references to "original_sfpt_parent" in "sfpt" with references to "smerge"
    sfpt->replace_edges_in_range(original_sfpt_parent, smerge, debug_start, jvms->debug_end(), _igvn);

    // The call to 'replace_edges_in_range' above might have removed the
    // reference to ophi that we need at _merge_pointer_idx. The line below makes
    // sure the reference is maintained.
    sfpt->set_req(smerge->merge_pointer_idx(jvms), nsr_merge_pointer);
    _igvn->_worklist.push(sfpt);
  }

  return true;
}
1275 
1276 void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray<Node *>  &alloc_worklist, GrowableArray<Node *>  &memnode_worklist) {
1277   bool delay = _igvn->delay_transform();
1278   _igvn->set_delay_transform(true);
1279   _igvn->hash_delete(ophi);
1280 
1281   // Copying all users first because some will be removed and others won't.
1282   // Ophi also may acquire some new users as part of Cast reduction.
1283   // CastPPs also need to be processed before CmpPs.
1284   Unique_Node_List castpps;
1285   Unique_Node_List others;
1286   for (DUIterator_Fast imax, i = ophi->fast_outs(imax); i < imax; i++) {
1287     Node* use = ophi->fast_out(i);
1288 
1289     if (use->is_CastPP()) {
1290       castpps.push(use);
1291     } else if (use->is_AddP() || use->is_Cmp()) {
1292       others.push(use);
1293     } else if (use->is_SafePoint()) {
1294       // processed later
1295     } else {
1296       assert(use->is_SafePoint(), "Unexpected user of reducible Phi %d -> %d:%s:%d", ophi->_idx, use->_idx, use->Name(), use->outcnt());
1297     }
1298   }
1299 
1300   // CastPPs need to be processed before Cmps because during the process of
1301   // splitting CastPPs we make reference to the inputs of the Cmp that is used
1302   // by the If controlling the CastPP.
1303   for (uint i = 0; i < castpps.size(); i++) {
1304     reduce_phi_on_castpp_field_load(castpps.at(i), alloc_worklist, memnode_worklist);
1305   }
1306 
1307   for (uint i = 0; i < others.size(); i++) {
1308     Node* use = others.at(i);
1309 
1310     if (use->is_AddP()) {
1311       reduce_phi_on_field_access(use, alloc_worklist);
    } else if (use->is_Cmp()) {
1313       reduce_phi_on_cmp(use);
1314     }
1315   }
1316 
1317   _igvn->set_delay_transform(delay);
1318 }
1319 
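// Replace 'ophi' with a new Phi in which every input that points to a scalar
// replaceable object is replaced by null. Since the new Phi (and any
// ConstraintCast whose only users are safepoints) may now produce null, their
// types are widened to include TypePtr::NULL_PTR.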
1320 void ConnectionGraph::reset_scalar_replaceable_entries(PhiNode* ophi) {
1321   Node* null_ptr            = _igvn->makecon(TypePtr::NULL_PTR);
1322   const TypeOopPtr* merge_t = _igvn->type(ophi)->make_oopptr();
1323   const Type* new_t         = merge_t->meet(TypePtr::NULL_PTR);
1324   Node* new_phi             = _igvn->register_new_node_with_optimizer(PhiNode::make(ophi->region(), null_ptr, new_t));
1325 
1326   for (uint i = 1; i < ophi->req(); i++) {
1327     Node* base          = ophi->in(i);
1328     JavaObjectNode* ptn = unique_java_object(base);
1329 
1330     if (ptn != nullptr && ptn->scalar_replaceable()) {
1331       new_phi->set_req(i, null_ptr);
1332     } else {
1333       new_phi->set_req(i, ophi->in(i));
1334     }
1335   }
1336 
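  // Walk ophi's remaining users backwards; the index is re-clamped after each
  // step because replace_node() may remove more than one user at a time.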
1337   for (int i = ophi->outcnt()-1; i >= 0;) {
1338     Node* out = ophi->raw_out(i);
1339 
1340     if (out->is_ConstraintCast()) {
1341       const Type* out_t = _igvn->type(out)->make_ptr();
1342       const Type* out_new_t = out_t->meet(TypePtr::NULL_PTR);
1343       bool change = out_new_t != out_t;
1344 
1345       for (int j = out->outcnt()-1; change && j >= 0; --j) {
1346         Node* out2 = out->raw_out(j);
1347         if (!out2->is_SafePoint()) {
1348           change = false;
1349           break;
1350         }
1351       }
1352 
1353       if (change) {
1354         Node* new_cast = ConstraintCastNode::make_cast_for_type(out->in(0), out->in(1), out_new_t, ConstraintCastNode::StrongDependency, nullptr);
1355         _igvn->replace_node(out, new_cast);
1356         _igvn->register_new_node_with_optimizer(new_cast);
1357       }
1358     }
1359 
1360     --i;
1361     i = MIN2(i, (int)ophi->outcnt()-1);
1362   }
1363 
1364   _igvn->replace_node(ophi, new_phi);
1365 }
1366 
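// Verify the invariants of the SafePointScalarMerge nodes created by reduce
// allocation merges and reachable from 'root': their inputs (other than
// control) must be null/top or SafePointScalarObject nodes, only safepoints
// may use a merge, and merges must not be nested. On a violation the
// compilation is retried without reducing allocation merges.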
1367 void ConnectionGraph::verify_ram_nodes(Compile* C, Node* root) {
1368   if (!C->do_reduce_allocation_merges()) return;
1369 
1370   Unique_Node_List ideal_nodes;
1371   ideal_nodes.map(C->live_nodes(), nullptr);  // preallocate space
1372   ideal_nodes.push(root);
1373 
1374   for (uint next = 0; next < ideal_nodes.size(); ++next) {
1375     Node* n = ideal_nodes.at(next);
1376 
1377     if (n->is_SafePointScalarMerge()) {
1378       SafePointScalarMergeNode* merge = n->as_SafePointScalarMerge();
1379 
1380       // Validate inputs of merge
1381       for (uint i = 1; i < merge->req(); i++) {
1382         if (merge->in(i) != nullptr && !merge->in(i)->is_top() && !merge->in(i)->is_SafePointScalarObject()) {
1383           assert(false, "SafePointScalarMerge inputs should be null/top or SafePointScalarObject.");
1384           C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
1385         }
1386       }
1387 
1388       // Validate users of merge
1389       for (DUIterator_Fast imax, i = merge->fast_outs(imax); i < imax; i++) {
1390         Node* sfpt = merge->fast_out(i);
1391         if (sfpt->is_SafePoint()) {
1392           int merge_idx = merge->merge_pointer_idx(sfpt->as_SafePoint()->jvms());
1393 
1394           if (sfpt->in(merge_idx) != nullptr && sfpt->in(merge_idx)->is_SafePointScalarMerge()) {
1395             assert(false, "SafePointScalarMerge nodes can't be nested.");
1396             C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
1397           }
1398         } else {
1399           assert(false, "Only safepoints can use SafePointScalarMerge nodes.");
1400           C->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
1401         }
1402       }
1403     }
1404 
1405     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1406       Node* m = n->fast_out(i);
1407       ideal_nodes.push(m);
1408     }
1409   }
1410 }
1411 
1412 // Returns true if there is an object in the scope of sfn that does not escape globally.
1413 bool ConnectionGraph::has_ea_local_in_scope(SafePointNode* sfn) {
1414   Compile* C = _compile;
1415   for (JVMState* jvms = sfn->jvms(); jvms != nullptr; jvms = jvms->caller()) {
1416     if (C->env()->should_retain_local_variables() || C->env()->jvmti_can_walk_any_space() ||
1417         DeoptimizeObjectsALot) {
1418       // Jvmti agents can access locals. Must provide info about local objects at runtime.
1419       int num_locs = jvms->loc_size();
1420       for (int idx = 0; idx < num_locs; idx++) {
1421         Node* l = sfn->local(jvms, idx);
1422         if (not_global_escape(l)) {
1423           return true;
1424         }
1425       }
1426     }
1427     if (C->env()->jvmti_can_get_owned_monitor_info() ||
1428         C->env()->jvmti_can_walk_any_space() || DeoptimizeObjectsALot) {
1429       // Jvmti agents can read monitors. Must provide info about locked objects at runtime.
1430       int num_mon = jvms->nof_monitors();
1431       for (int idx = 0; idx < num_mon; idx++) {
1432         Node* m = sfn->monitor_obj(jvms, idx);
1433         if (m != nullptr && not_global_escape(m)) {
1434           return true;
1435         }
1436       }
1437     }
1438   }
1439   return false;
1440 }
1441 
1442 // Returns true if at least one of the arguments to the call is an object
1443 // that does not escape globally.
1444 bool ConnectionGraph::has_arg_escape(CallJavaNode* call) {
1445   if (call->method() != nullptr) {
1446     uint max_idx = TypeFunc::Parms + call->method()->arg_size();
1447     for (uint idx = TypeFunc::Parms; idx < max_idx; idx++) {
1448       Node* p = call->in(idx);
1449       if (not_global_escape(p)) {
1450         return true;
1451       }
1452     }
1453   } else {
1454     const char* name = call->as_CallStaticJava()->_name;
1455     assert(name != nullptr, "no name");
1456     // no arg escapes through uncommon traps
1457     if (strcmp(name, "uncommon_trap") != 0) {
1458       // process_call_arguments() assumes that all arguments escape globally
1459       const TypeTuple* d = call->tf()->domain();
1460       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
1461         const Type* at = d->field_at(i);
1462         if (at->isa_oopptr() != nullptr) {
1463           return true;
1464         }
1465       }
1466     }
1467   }
1468   return false;
1469 }
1470 
1471 
1472 
1473 // Utility function for nodes that load an object
1474 void ConnectionGraph::add_objload_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
1475   // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1476   // ThreadLocal has RawPtr type.
1477   const Type* t = _igvn->type(n);
1478   if (t->make_ptr() != nullptr) {
1479     Node* adr = n->in(MemNode::Address);
1480 #ifdef ASSERT
1481     if (!adr->is_AddP()) {
1482       assert(_igvn->type(adr)->isa_rawptr(), "sanity");
1483     } else {
1484       assert((ptnode_adr(adr->_idx) == nullptr ||
1485               ptnode_adr(adr->_idx)->as_Field()->is_oop()), "sanity");
1486     }
1487 #endif
1488     add_local_var_and_edge(n, PointsToNode::NoEscape,
1489                            adr, delayed_worklist);
1490   }
1491 }
1492 
1493 // Populate Connection Graph with PointsTo nodes and create simple
1494 // connection graph edges.
1495 void ConnectionGraph::add_node_to_connection_graph(Node *n, Unique_Node_List *delayed_worklist) {
1496   assert(!_verify, "this method should not be called for verification");
1497   PhaseGVN* igvn = _igvn;
1498   uint n_idx = n->_idx;
1499   PointsToNode* n_ptn = ptnode_adr(n_idx);
1500   if (n_ptn != nullptr) {
1501     return; // No need to redefine PointsTo node during first iteration.
1502   }
1503   int opcode = n->Opcode();
1504   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_to_con_graph(this, igvn, delayed_worklist, n, opcode);
1505   if (gc_handled) {
1506     return; // Ignore node if already handled by GC.
1507   }
1508 
1509   if (n->is_Call()) {
1510     // Arguments to allocation and locking don't escape.
1511     if (n->is_AbstractLock()) {
1512       // Put Lock and Unlock nodes on IGVN worklist to process them during
1513       // first IGVN optimization when escape information is still available.
1514       record_for_optimizer(n);
1515     } else if (n->is_Allocate()) {
1516       add_call_node(n->as_Call());
1517       record_for_optimizer(n);
1518     } else {
1519       if (n->is_CallStaticJava()) {
1520         const char* name = n->as_CallStaticJava()->_name;
1521         if (name != nullptr && strcmp(name, "uncommon_trap") == 0) {
1522           return; // Skip uncommon traps
1523         }
1524       }
1525       // Don't mark as processed since call's arguments have to be processed.
1526       delayed_worklist->push(n);
1527       // Check if a call returns an object.
1528       if ((n->as_Call()->returns_pointer() &&
1529            n->as_Call()->proj_out_or_null(TypeFunc::Parms) != nullptr) ||
1530           (n->is_CallStaticJava() &&
1531            n->as_CallStaticJava()->is_boxing_method())) {
1532         add_call_node(n->as_Call());
1533       }
1534     }
1535     return;
1536   }
1537   // Put this check here to process call arguments since some call nodes
1538   // point to phantom_obj.
1539   if (n_ptn == phantom_obj || n_ptn == null_obj) {
1540     return; // Skip predefined nodes.
1541   }
1542   switch (opcode) {
1543     case Op_AddP: {
1544       Node* base = get_addp_base(n);
1545       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1546       // Field nodes are created for all field types. They are used in
1547       // adjust_scalar_replaceable_state() and split_unique_types().
1548       // Note, non-oop fields will have only base edges in Connection
1549       // Graph because such fields are not used for oop loads and stores.
1550       int offset = address_offset(n, igvn);
1551       add_field(n, PointsToNode::NoEscape, offset);
1552       if (ptn_base == nullptr) {
1553         delayed_worklist->push(n); // Process it later.
1554       } else {
1555         n_ptn = ptnode_adr(n_idx);
1556         add_base(n_ptn->as_Field(), ptn_base);
1557       }
1558       break;
1559     }
1560     case Op_CastX2P: {
1561       map_ideal_node(n, phantom_obj);
1562       break;
1563     }
1564     case Op_CastPP:
1565     case Op_CheckCastPP:
1566     case Op_EncodeP:
1567     case Op_DecodeN:
1568     case Op_EncodePKlass:
1569     case Op_DecodeNKlass: {
1570       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist);
1571       break;
1572     }
1573     case Op_CMoveP: {
1574       add_local_var(n, PointsToNode::NoEscape);
      // Do not add edges during the first iteration because some inputs
      // may not be defined yet.
1577       delayed_worklist->push(n);
1578       break;
1579     }
1580     case Op_ConP:
1581     case Op_ConN:
1582     case Op_ConNKlass: {
1583       // assume all oop constants globally escape except for null
1584       PointsToNode::EscapeState es;
1585       const Type* t = igvn->type(n);
1586       if (t == TypePtr::NULL_PTR || t == TypeNarrowOop::NULL_PTR) {
1587         es = PointsToNode::NoEscape;
1588       } else {
1589         es = PointsToNode::GlobalEscape;
1590       }
1591       PointsToNode* ptn_con = add_java_object(n, es);
1592       set_not_scalar_replaceable(ptn_con NOT_PRODUCT(COMMA "Constant pointer"));
1593       break;
1594     }
1595     case Op_CreateEx: {
1596       // assume that all exception objects globally escape
1597       map_ideal_node(n, phantom_obj);
1598       break;
1599     }
1600     case Op_LoadKlass:
1601     case Op_LoadNKlass: {
1602       // Unknown class is loaded
1603       map_ideal_node(n, phantom_obj);
1604       break;
1605     }
1606     case Op_LoadP:
1607     case Op_LoadN: {
1608       add_objload_to_connection_graph(n, delayed_worklist);
1609       break;
1610     }
1611     case Op_Parm: {
1612       map_ideal_node(n, phantom_obj);
1613       break;
1614     }
1615     case Op_PartialSubtypeCheck: {
      // Produces null or notNull and is used only in CmpP, so
      // phantom_obj can be used.
1618       map_ideal_node(n, phantom_obj); // Result is unknown
1619       break;
1620     }
1621     case Op_Phi: {
1622       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1623       // ThreadLocal has RawPtr type.
1624       const Type* t = n->as_Phi()->type();
1625       if (t->make_ptr() != nullptr) {
1626         add_local_var(n, PointsToNode::NoEscape);
        // Do not add edges during the first iteration because some inputs
        // may not be defined yet.
1629         delayed_worklist->push(n);
1630       }
1631       break;
1632     }
1633     case Op_Proj: {
1634       // we are only interested in the oop result projection from a call
1635       if (n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
1636           n->in(0)->as_Call()->returns_pointer()) {
1637         add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), delayed_worklist);
1638       }
1639       break;
1640     }
1641     case Op_Rethrow: // Exception object escapes
1642     case Op_Return: {
1643       if (n->req() > TypeFunc::Parms &&
1644           igvn->type(n->in(TypeFunc::Parms))->isa_oopptr()) {
1645         // Treat Return value as LocalVar with GlobalEscape escape state.
1646         add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), delayed_worklist);
1647       }
1648       break;
1649     }
1650     case Op_CompareAndExchangeP:
1651     case Op_CompareAndExchangeN:
1652     case Op_GetAndSetP:
1653     case Op_GetAndSetN: {
1654       add_objload_to_connection_graph(n, delayed_worklist);
1655       // fall-through
1656     }
1657     case Op_StoreP:
1658     case Op_StoreN:
1659     case Op_StoreNKlass:
1660     case Op_WeakCompareAndSwapP:
1661     case Op_WeakCompareAndSwapN:
1662     case Op_CompareAndSwapP:
1663     case Op_CompareAndSwapN: {
1664       add_to_congraph_unsafe_access(n, opcode, delayed_worklist);
1665       break;
1666     }
1667     case Op_AryEq:
1668     case Op_CountPositives:
1669     case Op_StrComp:
1670     case Op_StrEquals:
1671     case Op_StrIndexOf:
1672     case Op_StrIndexOfChar:
1673     case Op_StrInflatedCopy:
1674     case Op_StrCompressedCopy:
1675     case Op_VectorizedHashCode:
1676     case Op_EncodeISOArray: {
1677       add_local_var(n, PointsToNode::ArgEscape);
1678       delayed_worklist->push(n); // Process it later.
1679       break;
1680     }
1681     case Op_ThreadLocal: {
1682       PointsToNode* ptn_thr = add_java_object(n, PointsToNode::ArgEscape);
      set_not_scalar_replaceable(ptn_thr NOT_PRODUCT(COMMA "ThreadLocal pointer"));
1684       break;
1685     }
1686     case Op_Blackhole: {
      // All blackhole pointer arguments are globally escaping.
      // Only do this if there is at least one pointer argument.
      // Do not add edges during the first iteration because some inputs may
      // not be defined yet; defer them to the final step.
1691       for (uint i = 0; i < n->req(); i++) {
1692         Node* in = n->in(i);
1693         if (in != nullptr) {
1694           const Type* at = _igvn->type(in);
1695           if (!at->isa_ptr()) continue;
1696 
1697           add_local_var(n, PointsToNode::GlobalEscape);
1698           delayed_worklist->push(n);
1699           break;
1700         }
1701       }
1702       break;
1703     }
1704     default:
1705       ; // Do nothing for nodes not related to EA.
1706   }
1707   return;
1708 }
1709 
1710 // Add final simple edges to graph.
1711 void ConnectionGraph::add_final_edges(Node *n) {
1712   PointsToNode* n_ptn = ptnode_adr(n->_idx);
1713 #ifdef ASSERT
  if (_verify && n_ptn->is_JavaObject()) {
    return; // This method does not change the graph for JavaObject.
  }
1716 #endif
1717 
1718   if (n->is_Call()) {
1719     process_call_arguments(n->as_Call());
1720     return;
1721   }
1722   assert(n->is_Store() || n->is_LoadStore() ||
1723          ((n_ptn != nullptr) && (n_ptn->ideal_node() != nullptr)),
1724          "node should be registered already");
1725   int opcode = n->Opcode();
1726   bool gc_handled = BarrierSet::barrier_set()->barrier_set_c2()->escape_add_final_edges(this, _igvn, n, opcode);
1727   if (gc_handled) {
1728     return; // Ignore node if already handled by GC.
1729   }
1730   switch (opcode) {
1731     case Op_AddP: {
1732       Node* base = get_addp_base(n);
1733       PointsToNode* ptn_base = ptnode_adr(base->_idx);
1734       assert(ptn_base != nullptr, "field's base should be registered");
1735       add_base(n_ptn->as_Field(), ptn_base);
1736       break;
1737     }
1738     case Op_CastPP:
1739     case Op_CheckCastPP:
1740     case Op_EncodeP:
1741     case Op_DecodeN:
1742     case Op_EncodePKlass:
1743     case Op_DecodeNKlass: {
1744       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), nullptr);
1745       break;
1746     }
1747     case Op_CMoveP: {
1748       for (uint i = CMoveNode::IfFalse; i < n->req(); i++) {
1749         Node* in = n->in(i);
1750         if (in == nullptr) {
1751           continue;  // ignore null
1752         }
1753         Node* uncast_in = in->uncast();
1754         if (uncast_in->is_top() || uncast_in == n) {
          continue;  // ignore top or inputs which go back to this node
1756         }
1757         PointsToNode* ptn = ptnode_adr(in->_idx);
1758         assert(ptn != nullptr, "node should be registered");
1759         add_edge(n_ptn, ptn);
1760       }
1761       break;
1762     }
1763     case Op_LoadP:
1764     case Op_LoadN: {
1765       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1766       // ThreadLocal has RawPtr type.
1767       assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1768       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1769       break;
1770     }
1771     case Op_Phi: {
1772       // Using isa_ptr() instead of isa_oopptr() for LoadP and Phi because
1773       // ThreadLocal has RawPtr type.
1774       assert(n->as_Phi()->type()->make_ptr() != nullptr, "Unexpected node type");
1775       for (uint i = 1; i < n->req(); i++) {
1776         Node* in = n->in(i);
1777         if (in == nullptr) {
1778           continue;  // ignore null
1779         }
1780         Node* uncast_in = in->uncast();
1781         if (uncast_in->is_top() || uncast_in == n) {
          continue;  // ignore top or inputs which go back to this node
1783         }
1784         PointsToNode* ptn = ptnode_adr(in->_idx);
1785         assert(ptn != nullptr, "node should be registered");
1786         add_edge(n_ptn, ptn);
1787       }
1788       break;
1789     }
1790     case Op_Proj: {
1791       // we are only interested in the oop result projection from a call
1792       assert(n->as_Proj()->_con == TypeFunc::Parms && n->in(0)->is_Call() &&
1793              n->in(0)->as_Call()->returns_pointer(), "Unexpected node type");
1794       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0), nullptr);
1795       break;
1796     }
1797     case Op_Rethrow: // Exception object escapes
1798     case Op_Return: {
1799       assert(n->req() > TypeFunc::Parms && _igvn->type(n->in(TypeFunc::Parms))->isa_oopptr(),
1800              "Unexpected node type");
1801       // Treat Return value as LocalVar with GlobalEscape escape state.
1802       add_local_var_and_edge(n, PointsToNode::GlobalEscape, n->in(TypeFunc::Parms), nullptr);
1803       break;
1804     }
1805     case Op_CompareAndExchangeP:
1806     case Op_CompareAndExchangeN:
1807     case Op_GetAndSetP:
1808     case Op_GetAndSetN:{
1809       assert(_igvn->type(n)->make_ptr() != nullptr, "Unexpected node type");
1810       add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), nullptr);
1811       // fall-through
1812     }
1813     case Op_CompareAndSwapP:
1814     case Op_CompareAndSwapN:
1815     case Op_WeakCompareAndSwapP:
1816     case Op_WeakCompareAndSwapN:
1817     case Op_StoreP:
1818     case Op_StoreN:
1819     case Op_StoreNKlass:{
1820       add_final_edges_unsafe_access(n, opcode);
1821       break;
1822     }
1823     case Op_VectorizedHashCode:
1824     case Op_AryEq:
1825     case Op_CountPositives:
1826     case Op_StrComp:
1827     case Op_StrEquals:
1828     case Op_StrIndexOf:
1829     case Op_StrIndexOfChar:
1830     case Op_StrInflatedCopy:
1831     case Op_StrCompressedCopy:
1832     case Op_EncodeISOArray: {
      // char[]/byte[] arrays passed to string intrinsics do not escape but
      // they are not scalar replaceable. Adjust the escape state for them.
      // Start from the in(2) edge since in(1) is the memory edge.
1836       for (uint i = 2; i < n->req(); i++) {
1837         Node* adr = n->in(i);
1838         const Type* at = _igvn->type(adr);
1839         if (!adr->is_top() && at->isa_ptr()) {
1840           assert(at == Type::TOP || at == TypePtr::NULL_PTR ||
1841                  at->isa_ptr() != nullptr, "expecting a pointer");
1842           if (adr->is_AddP()) {
1843             adr = get_addp_base(adr);
1844           }
1845           PointsToNode* ptn = ptnode_adr(adr->_idx);
1846           assert(ptn != nullptr, "node should be registered");
1847           add_edge(n_ptn, ptn);
1848         }
1849       }
1850       break;
1851     }
1852     case Op_Blackhole: {
1853       // All blackhole pointer arguments are globally escaping.
1854       for (uint i = 0; i < n->req(); i++) {
1855         Node* in = n->in(i);
1856         if (in != nullptr) {
1857           const Type* at = _igvn->type(in);
1858           if (!at->isa_ptr()) continue;
1859 
1860           if (in->is_AddP()) {
1861             in = get_addp_base(in);
1862           }
1863 
1864           PointsToNode* ptn = ptnode_adr(in->_idx);
1865           assert(ptn != nullptr, "should be defined already");
1866           set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "blackhole"));
1867           add_edge(n_ptn, ptn);
1868         }
1869       }
1870       break;
1871     }
1872     default: {
1873       // This method should be called only for EA specific nodes which may
1874       // miss some edges when they were created.
1875 #ifdef ASSERT
1876       n->dump(1);
1877 #endif
1878       guarantee(false, "unknown node");
1879     }
1880   }
1881   return;
1882 }
1883 
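// First pass over an unsafe/raw memory access (Store*, CompareAndSwap*,
// GetAndSet*, ...): if the address is an oop field, or a raw store captured
// by an Initialize node, push the node on the delayed worklist so that its
// edges are added later by add_final_edges_unsafe_access().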
1884 void ConnectionGraph::add_to_congraph_unsafe_access(Node* n, uint opcode, Unique_Node_List* delayed_worklist) {
1885   Node* adr = n->in(MemNode::Address);
1886   const Type* adr_type = _igvn->type(adr);
1887   adr_type = adr_type->make_ptr();
1888   if (adr_type == nullptr) {
1889     return; // skip dead nodes
1890   }
1891   if (adr_type->isa_oopptr()
1892       || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
1893           && adr_type == TypeRawPtr::NOTNULL
1894           && is_captured_store_address(adr))) {
1895     delayed_worklist->push(n); // Process it later.
1896 #ifdef ASSERT
    assert(adr->is_AddP(), "expecting an AddP");
1898     if (adr_type == TypeRawPtr::NOTNULL) {
1899       // Verify a raw address for a store captured by Initialize node.
1900       int offs = (int) _igvn->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
1901       assert(offs != Type::OffsetBot, "offset must be a constant");
1902     }
1903 #endif
1904   } else {
    // Ignore the copy of the displaced header to the BoxNode (OSR compilation).
1906     if (adr->is_BoxLock()) {
1907       return;
1908     }
1909     // Stored value escapes in unsafe access.
1910     if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
1911       delayed_worklist->push(n); // Process unsafe access later.
1912       return;
1913     }
1914 #ifdef ASSERT
1915     n->dump(1);
1916     assert(false, "not unsafe");
1917 #endif
1918   }
1919 }
1920 
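// Second pass over an unsafe/raw memory access: add an edge from the address
// field to the stored value or, for a store through a raw pointer, mark the
// stored value as globally escaping. Returns true if the access was handled.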
1921 bool ConnectionGraph::add_final_edges_unsafe_access(Node* n, uint opcode) {
1922   Node* adr = n->in(MemNode::Address);
1923   const Type *adr_type = _igvn->type(adr);
1924   adr_type = adr_type->make_ptr();
1925 #ifdef ASSERT
1926   if (adr_type == nullptr) {
1927     n->dump(1);
1928     assert(adr_type != nullptr, "dead node should not be on list");
1929     return true;
1930   }
1931 #endif
1932 
1933   if (adr_type->isa_oopptr()
1934       || ((opcode == Op_StoreP || opcode == Op_StoreN || opcode == Op_StoreNKlass)
1935            && adr_type == TypeRawPtr::NOTNULL
1936            && is_captured_store_address(adr))) {
1937     // Point Address to Value
1938     PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
1939     assert(adr_ptn != nullptr &&
1940            adr_ptn->as_Field()->is_oop(), "node should be registered");
1941     Node* val = n->in(MemNode::ValueIn);
1942     PointsToNode* ptn = ptnode_adr(val->_idx);
1943     assert(ptn != nullptr, "node should be registered");
1944     add_edge(adr_ptn, ptn);
1945     return true;
1946   } else if ((opcode == Op_StoreP) && adr_type->isa_rawptr()) {
1947     // Stored value escapes in unsafe access.
1948     Node* val = n->in(MemNode::ValueIn);
1949     PointsToNode* ptn = ptnode_adr(val->_idx);
1950     assert(ptn != nullptr, "node should be registered");
1951     set_escape_state(ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA "stored at raw address"));
1952     // Add edge to object for unsafe access with offset.
1953     PointsToNode* adr_ptn = ptnode_adr(adr->_idx);
1954     assert(adr_ptn != nullptr, "node should be registered");
1955     if (adr_ptn->is_Field()) {
1956       assert(adr_ptn->as_Field()->is_oop(), "should be oop field");
1957       add_edge(adr_ptn, ptn);
1958     }
1959     return true;
1960   }
1961 #ifdef ASSERT
1962   n->dump(1);
1963   assert(false, "not unsafe");
1964 #endif
1965   return false;
1966 }
1967 
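// Create a PointsTo node for a call that returns a pointer. For allocations
// the escape state and scalar replaceability are derived from the allocated
// type; for Java calls they come from the bytecode escape analyzer when one
// is available; any other call is treated conservatively (GlobalEscape).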
1968 void ConnectionGraph::add_call_node(CallNode* call) {
1969   assert(call->returns_pointer(), "only for call which returns pointer");
1970   uint call_idx = call->_idx;
1971   if (call->is_Allocate()) {
1972     Node* k = call->in(AllocateNode::KlassNode);
1973     const TypeKlassPtr* kt = k->bottom_type()->isa_klassptr();
  assert(kt != nullptr, "TypeKlassPtr required.");
1975     PointsToNode::EscapeState es = PointsToNode::NoEscape;
1976     bool scalar_replaceable = true;
1977     NOT_PRODUCT(const char* nsr_reason = "");
1978     if (call->is_AllocateArray()) {
1979       if (!kt->isa_aryklassptr()) { // StressReflectiveCode
1980         es = PointsToNode::GlobalEscape;
1981       } else {
1982         int length = call->in(AllocateNode::ALength)->find_int_con(-1);
1983         if (length < 0) {
1984           // Not scalar replaceable if the length is not constant.
1985           scalar_replaceable = false;
1986           NOT_PRODUCT(nsr_reason = "has a non-constant length");
1987         } else if (length > EliminateAllocationArraySizeLimit) {
1988           // Not scalar replaceable if the length is too big.
1989           scalar_replaceable = false;
1990           NOT_PRODUCT(nsr_reason = "has a length that is too big");
1991         }
1992       }
1993     } else {  // Allocate instance
1994       if (!kt->isa_instklassptr()) { // StressReflectiveCode
1995         es = PointsToNode::GlobalEscape;
1996       } else {
1997         const TypeInstKlassPtr* ikt = kt->is_instklassptr();
1998         ciInstanceKlass* ik = ikt->klass_is_exact() ? ikt->exact_klass()->as_instance_klass() : ikt->instance_klass();
1999         if (ik->is_subclass_of(_compile->env()->Thread_klass()) ||
2000             ik->is_subclass_of(_compile->env()->Reference_klass()) ||
2001             !ik->can_be_instantiated() ||
2002             ik->has_finalizer()) {
2003           es = PointsToNode::GlobalEscape;
2004         } else {
2005           int nfields = ik->as_instance_klass()->nof_nonstatic_fields();
2006           if (nfields > EliminateAllocationFieldsLimit) {
2007             // Not scalar replaceable if there are too many fields.
2008             scalar_replaceable = false;
2009             NOT_PRODUCT(nsr_reason = "has too many fields");
2010           }
2011         }
2012       }
2013     }
2014     add_java_object(call, es);
2015     PointsToNode* ptn = ptnode_adr(call_idx);
2016     if (!scalar_replaceable && ptn->scalar_replaceable()) {
2017       set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA nsr_reason));
2018     }
2019   } else if (call->is_CallStaticJava()) {
2020     // Call nodes could be different types:
2021     //
2022     // 1. CallDynamicJavaNode (what happened during call is unknown):
2023     //
2024     //    - mapped to GlobalEscape JavaObject node if oop is returned;
2025     //
2026     //    - all oop arguments are escaping globally;
2027     //
2028     // 2. CallStaticJavaNode (execute bytecode analysis if possible):
2029     //
    //    - the same as CallDynamicJavaNode if bytecode analysis is not possible;
2031     //
2032     //    - mapped to GlobalEscape JavaObject node if unknown oop is returned;
2033     //    - mapped to NoEscape JavaObject node if non-escaping object allocated
2034     //      during call is returned;
    //    - mapped to ArgEscape LocalVar node pointing to object arguments
    //      which are returned and do not escape during the call;
2037     //
2038     //    - oop arguments escaping status is defined by bytecode analysis;
2039     //
2040     // For a static call, we know exactly what method is being called.
2041     // Use bytecode estimator to record whether the call's return value escapes.
2042     ciMethod* meth = call->as_CallJava()->method();
2043     if (meth == nullptr) {
2044       const char* name = call->as_CallStaticJava()->_name;
2045       assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0, "TODO: add failed case check");
2046       // Returns a newly allocated non-escaped object.
2047       add_java_object(call, PointsToNode::NoEscape);
      set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of multianewarray"));
2049     } else if (meth->is_boxing_method()) {
2050       // Returns boxing object
2051       PointsToNode::EscapeState es;
2052       vmIntrinsics::ID intr = meth->intrinsic_id();
2053       if (intr == vmIntrinsics::_floatValue || intr == vmIntrinsics::_doubleValue) {
        // It does not escape if the object is always newly allocated.
2055         es = PointsToNode::NoEscape;
2056       } else {
        // It escapes globally if the object could be loaded from the boxing cache.
2058         es = PointsToNode::GlobalEscape;
2059       }
2060       add_java_object(call, es);
2061       if (es == PointsToNode::GlobalEscape) {
2062         set_not_scalar_replaceable(ptnode_adr(call->_idx) NOT_PRODUCT(COMMA "object can be loaded from boxing cache"));
2063       }
2064     } else {
2065       BCEscapeAnalyzer* call_analyzer = meth->get_bcea();
2066       call_analyzer->copy_dependencies(_compile->dependencies());
2067       if (call_analyzer->is_return_allocated()) {
        // Returns a newly allocated, non-escaping object; simply
        // update dependency information.
        // Mark it as NoEscape so that objects referenced by
        // its fields will be marked as NoEscape at least.
2072         add_java_object(call, PointsToNode::NoEscape);
2073         set_not_scalar_replaceable(ptnode_adr(call_idx) NOT_PRODUCT(COMMA "is result of call"));
2074       } else {
2075         // Determine whether any arguments are returned.
2076         const TypeTuple* d = call->tf()->domain();
2077         bool ret_arg = false;
2078         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2079           if (d->field_at(i)->isa_ptr() != nullptr &&
2080               call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
2081             ret_arg = true;
2082             break;
2083           }
2084         }
2085         if (ret_arg) {
2086           add_local_var(call, PointsToNode::ArgEscape);
2087         } else {
2088           // Returns unknown object.
2089           map_ideal_node(call, phantom_obj);
2090         }
2091       }
2092     }
2093   } else {
    // Another type of call; assume the worst case:
    // the returned value is unknown and globally escapes.
2096     assert(call->Opcode() == Op_CallDynamicJava, "add failed case check");
2097     map_ideal_node(call, phantom_obj);
2098   }
2099 }
2100 
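// Set the escape state of every pointer argument of 'call'. Arguments of
// arraycopy and leaf stubs are marked ArgEscape (or NoEscape for validated
// arraycopy variants); for Java calls the per-argument state comes from the
// bytecode escape analyzer; for all other calls every oop argument is assumed
// to escape globally.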
2101 void ConnectionGraph::process_call_arguments(CallNode *call) {
2102     bool is_arraycopy = false;
2103     switch (call->Opcode()) {
2104 #ifdef ASSERT
2105     case Op_Allocate:
2106     case Op_AllocateArray:
2107     case Op_Lock:
2108     case Op_Unlock:
2109       assert(false, "should be done already");
2110       break;
2111 #endif
2112     case Op_ArrayCopy:
2113     case Op_CallLeafNoFP:
2114       // Most array copies are ArrayCopy nodes at this point but there
2115       // are still a few direct calls to the copy subroutines (See
2116       // PhaseStringOpts::copy_string())
2117       is_arraycopy = (call->Opcode() == Op_ArrayCopy) ||
2118         call->as_CallLeaf()->is_call_to_arraycopystub();
2119       // fall through
2120     case Op_CallLeafVector:
2121     case Op_CallLeaf: {
      // Stub calls: objects do not escape but they are not scalar replaceable.
      // Adjust the escape state for outgoing arguments.
2124       const TypeTuple * d = call->tf()->domain();
2125       bool src_has_oops = false;
2126       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2127         const Type* at = d->field_at(i);
2128         Node *arg = call->in(i);
2129         if (arg == nullptr) {
2130           continue;
2131         }
2132         const Type *aat = _igvn->type(arg);
2133         if (arg->is_top() || !at->isa_ptr() || !aat->isa_ptr()) {
2134           continue;
2135         }
2136         if (arg->is_AddP()) {
2137           //
2138           // The inline_native_clone() case when the arraycopy stub is called
2139           // after the allocation before Initialize and CheckCastPP nodes.
2140           // Or normal arraycopy for object arrays case.
2141           //
2142           // Set AddP's base (Allocate) as not scalar replaceable since
2143           // pointer to the base (with offset) is passed as argument.
2144           //
2145           arg = get_addp_base(arg);
2146         }
2147         PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2148         assert(arg_ptn != nullptr, "should be registered");
2149         PointsToNode::EscapeState arg_esc = arg_ptn->escape_state();
2150         if (is_arraycopy || arg_esc < PointsToNode::ArgEscape) {
          assert(aat == Type::TOP || aat == TypePtr::NULL_PTR ||
                 aat->isa_ptr() != nullptr, "expecting a pointer");
2153           bool arg_has_oops = aat->isa_oopptr() &&
2154                               (aat->isa_instptr() ||
2155                                (aat->isa_aryptr() && (aat->isa_aryptr()->elem() == Type::BOTTOM || aat->isa_aryptr()->elem()->make_oopptr() != nullptr)));
2156           if (i == TypeFunc::Parms) {
2157             src_has_oops = arg_has_oops;
2158           }
2159           //
2160           // src or dst could be j.l.Object when other is basic type array:
2161           //
2162           //   arraycopy(char[],0,Object*,0,size);
2163           //   arraycopy(Object*,0,char[],0,size);
2164           //
2165           // Don't add edges in such cases.
2166           //
2167           bool arg_is_arraycopy_dest = src_has_oops && is_arraycopy &&
2168                                        arg_has_oops && (i > TypeFunc::Parms);
2169 #ifdef ASSERT
2170           if (!(is_arraycopy ||
2171                 BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(call) ||
2172                 (call->as_CallLeaf()->_name != nullptr &&
2173                  (strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 ||
2174                   strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32C") == 0 ||
2175                   strcmp(call->as_CallLeaf()->_name, "updateBytesAdler32") == 0 ||
2176                   strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 ||
2177                   strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 ||
2178                   strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
2179                   strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_decryptAESCrypt") == 0 ||
2180                   strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_encryptAESCrypt") == 0 ||
2181                   strcmp(call->as_CallLeaf()->_name, "electronicCodeBook_decryptAESCrypt") == 0 ||
2182                   strcmp(call->as_CallLeaf()->_name, "counterMode_AESCrypt") == 0 ||
2183                   strcmp(call->as_CallLeaf()->_name, "galoisCounterMode_AESCrypt") == 0 ||
2184                   strcmp(call->as_CallLeaf()->_name, "poly1305_processBlocks") == 0 ||
2185                   strcmp(call->as_CallLeaf()->_name, "intpoly_montgomeryMult_P256") == 0 ||
2186                   strcmp(call->as_CallLeaf()->_name, "intpoly_assign") == 0 ||
2187                   strcmp(call->as_CallLeaf()->_name, "ghash_processBlocks") == 0 ||
2188                   strcmp(call->as_CallLeaf()->_name, "chacha20Block") == 0 ||
2189                   strcmp(call->as_CallLeaf()->_name, "encodeBlock") == 0 ||
2190                   strcmp(call->as_CallLeaf()->_name, "decodeBlock") == 0 ||
2191                   strcmp(call->as_CallLeaf()->_name, "md5_implCompress") == 0 ||
2192                   strcmp(call->as_CallLeaf()->_name, "md5_implCompressMB") == 0 ||
2193                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompress") == 0 ||
2194                   strcmp(call->as_CallLeaf()->_name, "sha1_implCompressMB") == 0 ||
2195                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompress") == 0 ||
2196                   strcmp(call->as_CallLeaf()->_name, "sha256_implCompressMB") == 0 ||
2197                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompress") == 0 ||
2198                   strcmp(call->as_CallLeaf()->_name, "sha512_implCompressMB") == 0 ||
2199                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompress") == 0 ||
2200                   strcmp(call->as_CallLeaf()->_name, "sha3_implCompressMB") == 0 ||
2201                   strcmp(call->as_CallLeaf()->_name, "multiplyToLen") == 0 ||
2202                   strcmp(call->as_CallLeaf()->_name, "squareToLen") == 0 ||
2203                   strcmp(call->as_CallLeaf()->_name, "mulAdd") == 0 ||
2204                   strcmp(call->as_CallLeaf()->_name, "montgomery_multiply") == 0 ||
2205                   strcmp(call->as_CallLeaf()->_name, "montgomery_square") == 0 ||
2206                   strcmp(call->as_CallLeaf()->_name, "bigIntegerRightShiftWorker") == 0 ||
2207                   strcmp(call->as_CallLeaf()->_name, "bigIntegerLeftShiftWorker") == 0 ||
2208                   strcmp(call->as_CallLeaf()->_name, "vectorizedMismatch") == 0 ||
2209                   strcmp(call->as_CallLeaf()->_name, "stringIndexOf") == 0 ||
2210                   strcmp(call->as_CallLeaf()->_name, "arraysort_stub") == 0 ||
2211                   strcmp(call->as_CallLeaf()->_name, "array_partition_stub") == 0 ||
2212                   strcmp(call->as_CallLeaf()->_name, "get_class_id_intrinsic") == 0 ||
2213                   strcmp(call->as_CallLeaf()->_name, "unsafe_setmemory") == 0)
2214                  ))) {
2215             call->dump();
2216             fatal("EA unexpected CallLeaf %s", call->as_CallLeaf()->_name);
2217           }
2218 #endif
2219           // Always process arraycopy's destination object since
2220           // we need to add all possible edges to references in
2221           // source object.
2222           if (arg_esc >= PointsToNode::ArgEscape &&
2223               !arg_is_arraycopy_dest) {
2224             continue;
2225           }
2226           PointsToNode::EscapeState es = PointsToNode::ArgEscape;
2227           if (call->is_ArrayCopy()) {
2228             ArrayCopyNode* ac = call->as_ArrayCopy();
2229             if (ac->is_clonebasic() ||
2230                 ac->is_arraycopy_validated() ||
2231                 ac->is_copyof_validated() ||
2232                 ac->is_copyofrange_validated()) {
2233               es = PointsToNode::NoEscape;
2234             }
2235           }
2236           set_escape_state(arg_ptn, es NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2237           if (arg_is_arraycopy_dest) {
2238             Node* src = call->in(TypeFunc::Parms);
2239             if (src->is_AddP()) {
2240               src = get_addp_base(src);
2241             }
2242             PointsToNode* src_ptn = ptnode_adr(src->_idx);
2243             assert(src_ptn != nullptr, "should be registered");
2244             if (arg_ptn != src_ptn) {
              // Special arraycopy edge:
              // A destination object's field can't have the source object
              // as its base, since the two objects' escape states are not
              // related. Only the escape state of the destination object's
              // fields affects the escape state of the fields in the source
              // object.
2250               add_arraycopy(call, es, src_ptn, arg_ptn);
2251             }
2252           }
2253         }
2254       }
2255       break;
2256     }
2257     case Op_CallStaticJava: {
2258       // For a static call, we know exactly what method is being called.
      // Use the bytecode estimator to record the call's escape effects.
2260 #ifdef ASSERT
2261       const char* name = call->as_CallStaticJava()->_name;
2262       assert((name == nullptr || strcmp(name, "uncommon_trap") != 0), "normal calls only");
2263 #endif
2264       ciMethod* meth = call->as_CallJava()->method();
2265       if ((meth != nullptr) && meth->is_boxing_method()) {
2266         break; // Boxing methods do not modify any oops.
2267       }
      BCEscapeAnalyzer* call_analyzer = (meth != nullptr) ? meth->get_bcea() : nullptr;
2269       // fall-through if not a Java method or no analyzer information
2270       if (call_analyzer != nullptr) {
2271         PointsToNode* call_ptn = ptnode_adr(call->_idx);
2272         const TypeTuple* d = call->tf()->domain();
2273         for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2274           const Type* at = d->field_at(i);
2275           int k = i - TypeFunc::Parms;
2276           Node* arg = call->in(i);
2277           PointsToNode* arg_ptn = ptnode_adr(arg->_idx);
2278           if (at->isa_ptr() != nullptr &&
2279               call_analyzer->is_arg_returned(k)) {
2280             // The call returns arguments.
2281             if (call_ptn != nullptr) { // Is call's result used?
2282               assert(call_ptn->is_LocalVar(), "node should be registered");
2283               assert(arg_ptn != nullptr, "node should be registered");
2284               add_edge(call_ptn, arg_ptn);
2285             }
2286           }
2287           if (at->isa_oopptr() != nullptr &&
2288               arg_ptn->escape_state() < PointsToNode::GlobalEscape) {
2289             if (!call_analyzer->is_arg_stack(k)) {
2290               // The argument global escapes
2291               set_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2292             } else {
2293               set_escape_state(arg_ptn, PointsToNode::ArgEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2294               if (!call_analyzer->is_arg_local(k)) {
2295                 // The argument itself doesn't escape, but any fields might
2296                 set_fields_escape_state(arg_ptn, PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2297               }
2298             }
2299           }
2300         }
2301         if (call_ptn != nullptr && call_ptn->is_LocalVar()) {
2302           // The call returns arguments.
2303           assert(call_ptn->edge_count() > 0, "sanity");
2304           if (!call_analyzer->is_return_local()) {
            // The call also returns an unknown object.
2306             add_edge(call_ptn, phantom_obj);
2307           }
2308         }
2309         break;
2310       }
2311     }
2312     default: {
      // Fall through here if this is not a Java method or there is no
      // analyzer information, or for any other type of call: assume the
      // worst case, all arguments globally escape.
2316       const TypeTuple* d = call->tf()->domain();
2317       for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
2318         const Type* at = d->field_at(i);
2319         if (at->isa_oopptr() != nullptr) {
2320           Node* arg = call->in(i);
2321           if (arg->is_AddP()) {
2322             arg = get_addp_base(arg);
2323           }
2324           assert(ptnode_adr(arg->_idx) != nullptr, "should be defined already");
2325           set_escape_state(ptnode_adr(arg->_idx), PointsToNode::GlobalEscape NOT_PRODUCT(COMMA trace_arg_escape_message(call)));
2326         }
2327       }
2328     }
2329   }
2330 }
2331 
2332 
2333 // Finish Graph construction.
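// Iterate until a fixed point is reached: propagate escape states, add edges
// for every JavaObject node, and search for the values of fields which still
// have no edges, until no new edges appear or the iteration/time limits are
// hit.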
2334 bool ConnectionGraph::complete_connection_graph(
2335                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
2336                          GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
2337                          GrowableArray<JavaObjectNode*>& java_objects_worklist,
2338                          GrowableArray<FieldNode*>&      oop_fields_worklist) {
  // Normally only 1-3 passes are needed to build the Connection Graph,
  // depending on graph complexity. We observed 8 passes in jvm2008
  // compiler.compiler. Set the limit to 20 to catch a situation when
  // something went wrong and bail out of Escape Analysis.
  // Also limit the build time to 20 sec (60 in debug VM); see the
  // EscapeAnalysisTimeout flag.
2344 #define GRAPH_BUILD_ITER_LIMIT 20
2345 
  // Propagate GlobalEscape and ArgEscape escape states and check that
  // we still have non-escaping objects. The method pushes onto _worklist
  // the Field nodes which reference phantom_object.
2349   if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
2350     return false; // Nothing to do.
2351   }
2352   // Now propagate references to all JavaObject nodes.
2353   int java_objects_length = java_objects_worklist.length();
2354   elapsedTimer build_time;
2355   build_time.start();
2356   elapsedTimer time;
2357   bool timeout = false;
2358   int new_edges = 1;
2359   int iterations = 0;
2360   do {
2361     while ((new_edges > 0) &&
2362            (iterations++ < GRAPH_BUILD_ITER_LIMIT)) {
2363       double start_time = time.seconds();
2364       time.start();
2365       new_edges = 0;
2366       // Propagate references to phantom_object for nodes pushed on _worklist
2367       // by find_non_escaped_objects() and find_field_value().
2368       new_edges += add_java_object_edges(phantom_obj, false);
2369       for (int next = 0; next < java_objects_length; ++next) {
2370         JavaObjectNode* ptn = java_objects_worklist.at(next);
2371         new_edges += add_java_object_edges(ptn, true);
2372 
2373 #define SAMPLE_SIZE 4
2374         if ((next % SAMPLE_SIZE) == 0) {
          // Every SAMPLE_SIZE objects, estimate how much time it will take
          // to complete graph construction.
2377           time.stop();
          // Poll for requests from the shutdown mechanism to quiesce the
          // compiler because Connection Graph construction may take a long time.
2380           CompileBroker::maybe_block();
2381           double stop_time = time.seconds();
2382           double time_per_iter = (stop_time - start_time) / (double)SAMPLE_SIZE;
2383           double time_until_end = time_per_iter * (double)(java_objects_length - next);
2384           if ((start_time + time_until_end) >= EscapeAnalysisTimeout) {
2385             timeout = true;
2386             break; // Timeout
2387           }
2388           start_time = stop_time;
2389           time.start();
2390         }
2391 #undef SAMPLE_SIZE
2392 
2393       }
2394       if (timeout) break;
2395       if (new_edges > 0) {
2396         // Update escape states on each iteration if graph was updated.
2397         if (!find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist)) {
2398           return false; // Nothing to do.
2399         }
2400       }
2401       time.stop();
2402       if (time.seconds() >= EscapeAnalysisTimeout) {
2403         timeout = true;
2404         break;
2405       }
2406     }
2407     if ((iterations < GRAPH_BUILD_ITER_LIMIT) && !timeout) {
2408       time.start();
2409       // Find fields which have unknown value.
2410       int fields_length = oop_fields_worklist.length();
2411       for (int next = 0; next < fields_length; next++) {
2412         FieldNode* field = oop_fields_worklist.at(next);
2413         if (field->edge_count() == 0) {
2414           new_edges += find_field_value(field);
          // This code may have added new edges to phantom_object.
          // Another pass is needed to propagate references to phantom_object.
2417         }
2418       }
2419       time.stop();
2420       if (time.seconds() >= EscapeAnalysisTimeout) {
2421         timeout = true;
2422         break;
2423       }
2424     } else {
2425       new_edges = 0; // Bailout
2426     }
2427   } while (new_edges > 0);
2428 
2429   build_time.stop();
2430   _build_time = build_time.seconds();
2431   _build_iterations = iterations;
2432 
2433   // Bailout if passed limits.
2434   if ((iterations >= GRAPH_BUILD_ITER_LIMIT) || timeout) {
2435     Compile* C = _compile;
2436     if (C->log() != nullptr) {
2437       C->log()->begin_elem("connectionGraph_bailout reason='reached ");
2438       C->log()->text("%s", timeout ? "time" : "iterations");
2439       C->log()->end_elem(" limit'");
2440     }
2441     assert(ExitEscapeAnalysisOnTimeout, "infinite EA connection graph build during invocation %d (%f sec, %d iterations) with %d nodes and worklist size %d",
2442            _invocation, _build_time, _build_iterations, nodes_size(), ptnodes_worklist.length());
2443     // Possible infinite build_connection_graph loop,
2444     // bailout (no changes to ideal graph were made).
2445     return false;
2446   }
2447 
2448 #undef GRAPH_BUILD_ITER_LIMIT
2449 
2450   // Find fields initialized by null for non-escaping Allocations.
2451   int non_escaped_length = non_escaped_allocs_worklist.length();
2452   for (int next = 0; next < non_escaped_length; next++) {
2453     JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
2454     PointsToNode::EscapeState es = ptn->escape_state();
2455     assert(es <= PointsToNode::ArgEscape, "sanity");
2456     if (es == PointsToNode::NoEscape) {
2457       if (find_init_values_null(ptn, _igvn) > 0) {
        // Adding references to the null object does not change escape states
        // since it does not escape. Also, no fields are added to the null object.
2460         add_java_object_edges(null_obj, false);
2461       }
2462     }
2463     Node* n = ptn->ideal_node();
2464     if (n->is_Allocate()) {
      // The object allocated by this Allocate node will never be
      // seen by another thread. Mark it so that when it is
      // expanded no MemBarStoreStore is added.
      InitializeNode* ini = n->as_Allocate()->initialization();
      if (ini != nullptr) {
        ini->set_does_not_escape();
      }
2471     }
2472   }
2473   return true; // Finished graph construction.
2474 }
2475 
2476 // Propagate GlobalEscape and ArgEscape escape states to all nodes
2477 // and check that we still have non-escaping java objects.
2478 bool ConnectionGraph::find_non_escaped_objects(GrowableArray<PointsToNode*>& ptnodes_worklist,
2479                                                GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist) {
2480   GrowableArray<PointsToNode*> escape_worklist;
2481   // First, put all nodes with GlobalEscape and ArgEscape states on worklist.
2482   int ptnodes_length = ptnodes_worklist.length();
2483   for (int next = 0; next < ptnodes_length; ++next) {
2484     PointsToNode* ptn = ptnodes_worklist.at(next);
2485     if (ptn->escape_state() >= PointsToNode::ArgEscape ||
2486         ptn->fields_escape_state() >= PointsToNode::ArgEscape) {
2487       escape_worklist.push(ptn);
2488     }
2489   }
2490   // Set escape states to referenced nodes (edges list).
2491   while (escape_worklist.length() > 0) {
2492     PointsToNode* ptn = escape_worklist.pop();
2493     PointsToNode::EscapeState es  = ptn->escape_state();
2494     PointsToNode::EscapeState field_es = ptn->fields_escape_state();
2495     if (ptn->is_Field() && ptn->as_Field()->is_oop() &&
2496         es >= PointsToNode::ArgEscape) {
      // A GlobalEscape or ArgEscape state of a field means it has an unknown value.
2498       if (add_edge(ptn, phantom_obj)) {
2499         // New edge was added
2500         add_field_uses_to_worklist(ptn->as_Field());
2501       }
2502     }
2503     for (EdgeIterator i(ptn); i.has_next(); i.next()) {
2504       PointsToNode* e = i.get();
2505       if (e->is_Arraycopy()) {
2506         assert(ptn->arraycopy_dst(), "sanity");
2507         // Propagate only fields escape state through arraycopy edge.
2508         if (e->fields_escape_state() < field_es) {
2509           set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2510           escape_worklist.push(e);
2511         }
2512       } else if (es >= field_es) {
2513         // fields_escape_state is also set to 'es' if it is less than 'es'.
2514         if (e->escape_state() < es) {
2515           set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2516           escape_worklist.push(e);
2517         }
2518       } else {
2519         // Propagate field escape state.
2520         bool es_changed = false;
2521         if (e->fields_escape_state() < field_es) {
2522           set_fields_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2523           es_changed = true;
2524         }
2525         if ((e->escape_state() < field_es) &&
2526             e->is_Field() && ptn->is_JavaObject() &&
2527             e->as_Field()->is_oop()) {
2528           // Change escape state of referenced fields.
2529           set_escape_state(e, field_es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2530           es_changed = true;
2531         } else if (e->escape_state() < es) {
2532           set_escape_state(e, es NOT_PRODUCT(COMMA trace_propagate_message(ptn)));
2533           es_changed = true;
2534         }
2535         if (es_changed) {
2536           escape_worklist.push(e);
2537         }
2538       }
2539     }
2540   }
2541   // Remove escaped objects from non_escaped list.
2542   for (int next = non_escaped_allocs_worklist.length()-1; next >= 0 ; --next) {
2543     JavaObjectNode* ptn = non_escaped_allocs_worklist.at(next);
2544     if (ptn->escape_state() >= PointsToNode::GlobalEscape) {
2545       non_escaped_allocs_worklist.delete_at(next);
2546     }
2547     if (ptn->escape_state() == PointsToNode::NoEscape) {
2548       // Find fields in non-escaped allocations which have unknown value.
2549       find_init_values_phantom(ptn);
2550     }
2551   }
2552   return (non_escaped_allocs_worklist.length() > 0);
2553 }
2554 
2555 // Add all references to JavaObject node by walking over all uses.
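// Returns the number of new edges added. The shared _worklist (populated here
// and by other methods) is consumed and cleared before returning.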
2556 int ConnectionGraph::add_java_object_edges(JavaObjectNode* jobj, bool populate_worklist) {
2557   int new_edges = 0;
2558   if (populate_worklist) {
2559     // Populate _worklist by uses of jobj's uses.
2560     for (UseIterator i(jobj); i.has_next(); i.next()) {
2561       PointsToNode* use = i.get();
2562       if (use->is_Arraycopy()) {
2563         continue;
2564       }
2565       add_uses_to_worklist(use);
2566       if (use->is_Field() && use->as_Field()->is_oop()) {
2567         // Put on worklist all field's uses (loads) and
2568         // related field nodes (same base and offset).
2569         add_field_uses_to_worklist(use->as_Field());
2570       }
2571     }
2572   }
2573   for (int l = 0; l < _worklist.length(); l++) {
2574     PointsToNode* use = _worklist.at(l);
2575     if (PointsToNode::is_base_use(use)) {
2576       // Add reference from jobj to field and from field to jobj (field's base).
2577       use = PointsToNode::get_use_node(use)->as_Field();
2578       if (add_base(use->as_Field(), jobj)) {
2579         new_edges++;
2580       }
2581       continue;
2582     }
2583     assert(!use->is_JavaObject(), "sanity");
2584     if (use->is_Arraycopy()) {
2585       if (jobj == null_obj) { // null object does not have field edges
2586         continue;
2587       }
2588       // Add an edge from the Arraycopy node to the arraycopy's source java object
2589       if (add_edge(use, jobj)) {
2590         jobj->set_arraycopy_src();
2591         new_edges++;
2592       }
2593       // and stop here.
2594       continue;
2595     }
2596     if (!add_edge(use, jobj)) {
2597       continue; // No new edge was added; such an edge already existed.
2598     }
2599     new_edges++;
2600     if (use->is_LocalVar()) {
2601       add_uses_to_worklist(use);
2602       if (use->arraycopy_dst()) {
2603         for (EdgeIterator i(use); i.has_next(); i.next()) {
2604           PointsToNode* e = i.get();
2605           if (e->is_Arraycopy()) {
2606             if (jobj == null_obj) { // null object does not have field edges
2607               continue;
2608             }
2609             // Add edge from arraycopy's destination java object to Arraycopy node.
2610             if (add_edge(jobj, e)) {
2611               new_edges++;
2612               jobj->set_arraycopy_dst();
2613             }
2614           }
2615         }
2616       }
2617     } else {
2618       // A new edge was added to a Field node, i.e. jobj is stored into the field.
2619       // Put on worklist all field's uses (loads) and
2620       // related field nodes (same base and offset).
2621       add_field_uses_to_worklist(use->as_Field());
2622     }
2623   }
2624   _worklist.clear();
2625   _in_worklist.reset();
2626   return new_edges;
2627 }
2628 
2629 // Put on worklist all related field nodes.
2630 void ConnectionGraph::add_field_uses_to_worklist(FieldNode* field) {
2631   assert(field->is_oop(), "sanity");
2632   int offset = field->offset();
2633   add_uses_to_worklist(field);
2634   // Loop over all bases of this field and push on worklist Field nodes
2635   // with the same offset and base (since they may reference the same field).
2636   for (BaseIterator i(field); i.has_next(); i.next()) {
2637     PointsToNode* base = i.get();
2638     add_fields_to_worklist(field, base);
2639     // Check if the base was the source object of an arraycopy and go over the
2640     // arraycopy's destination objects since values stored to a field of the source
2641     // object are accessible by uses (loads) of fields of the destination objects.
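         //
         // Illustrative example (made-up Java): after
         //    System.arraycopy(src, 0, dst, 0, len);
         // a load of 'dst[i]' may observe a value previously stored through
         // 'src[j]', so Field nodes of the destination objects are revisited too.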
2642     if (base->arraycopy_src()) {
2643       for (UseIterator j(base); j.has_next(); j.next()) {
2644         PointsToNode* arycp = j.get();
2645         if (arycp->is_Arraycopy()) {
2646           for (UseIterator k(arycp); k.has_next(); k.next()) {
2647             PointsToNode* abase = k.get();
2648             if (abase->arraycopy_dst() && abase != base) {
2649               // Look for the same arraycopy reference.
2650               add_fields_to_worklist(field, abase);
2651             }
2652           }
2653         }
2654       }
2655     }
2656   }
2657 }
2658 
2659 // Put on worklist all related field nodes.
2660 void ConnectionGraph::add_fields_to_worklist(FieldNode* field, PointsToNode* base) {
2661   int offset = field->offset();
2662   if (base->is_LocalVar()) {
2663     for (UseIterator j(base); j.has_next(); j.next()) {
2664       PointsToNode* f = j.get();
2665       if (PointsToNode::is_base_use(f)) { // Field
2666         f = PointsToNode::get_use_node(f);
2667         if (f == field || !f->as_Field()->is_oop()) {
2668           continue;
2669         }
2670         int offs = f->as_Field()->offset();
2671         if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
2672           add_to_worklist(f);
2673         }
2674       }
2675     }
2676   } else {
2677     assert(base->is_JavaObject(), "sanity");
2678     if (// Skip phantom_object since it is only used to indicate that
2679         // this field's content globally escapes.
2680         (base != phantom_obj) &&
2681         // null object node does not have fields.
2682         (base != null_obj)) {
2683       for (EdgeIterator i(base); i.has_next(); i.next()) {
2684         PointsToNode* f = i.get();
2685         // Skip arraycopy edge since store to destination object field
2686         // does not update value in source object field.
2687         if (f->is_Arraycopy()) {
2688           assert(base->arraycopy_dst(), "sanity");
2689           continue;
2690         }
2691         if (f == field || !f->as_Field()->is_oop()) {
2692           continue;
2693         }
2694         int offs = f->as_Field()->offset();
2695         if (offs == offset || offset == Type::OffsetBot || offs == Type::OffsetBot) {
2696           add_to_worklist(f);
2697         }
2698       }
2699     }
2700   }
2701 }
2702 
2703 // Find fields which have an unknown value.
2704 int ConnectionGraph::find_field_value(FieldNode* field) {
2705   // Escaped fields should have init value already.
2706   assert(field->escape_state() == PointsToNode::NoEscape, "sanity");
2707   int new_edges = 0;
2708   for (BaseIterator i(field); i.has_next(); i.next()) {
2709     PointsToNode* base = i.get();
2710     if (base->is_JavaObject()) {
2711       // Skip Allocate's fields which will be processed later.
2712       if (base->ideal_node()->is_Allocate()) {
2713         return 0;
2714       }
2715       assert(base == null_obj, "only null ptr base expected here");
2716     }
2717   }
2718   if (add_edge(field, phantom_obj)) {
2719     // New edge was added
2720     new_edges++;
2721     add_field_uses_to_worklist(field);
2722   }
2723   return new_edges;
2724 }
2725 
2726 // Find fields initializing values for allocations.
2727 int ConnectionGraph::find_init_values_phantom(JavaObjectNode* pta) {
2728   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2729   Node* alloc = pta->ideal_node();
2730 
2731   // Do nothing for Allocate nodes since their field values are
2732   // "known" unless they are initialized by arraycopy/clone.
2733   if (alloc->is_Allocate() && !pta->arraycopy_dst()) {
2734     return 0;
2735   }
2736   assert(pta->arraycopy_dst() || alloc->as_CallStaticJava(), "sanity");
2737 #ifdef ASSERT
2738   if (!pta->arraycopy_dst() && alloc->as_CallStaticJava()->method() == nullptr) {
2739     const char* name = alloc->as_CallStaticJava()->_name;
2740     assert(strncmp(name, "C2 Runtime multianewarray", 25) == 0, "sanity");
2741   }
2742 #endif
2743   // A non-escaped allocation returned from a Java or runtime call has unknown values in its fields.
2744   int new_edges = 0;
2745   for (EdgeIterator i(pta); i.has_next(); i.next()) {
2746     PointsToNode* field = i.get();
2747     if (field->is_Field() && field->as_Field()->is_oop()) {
2748       if (add_edge(field, phantom_obj)) {
2749         // New edge was added
2750         new_edges++;
2751         add_field_uses_to_worklist(field->as_Field());
2752       }
2753     }
2754   }
2755   return new_edges;
2756 }
2757 
2758 // Find fields initializing values for allocations.
2759 int ConnectionGraph::find_init_values_null(JavaObjectNode* pta, PhaseValues* phase) {
2760   assert(pta->escape_state() == PointsToNode::NoEscape, "Not escaped Allocate nodes only");
2761   Node* alloc = pta->ideal_node();
2762   // Do nothing for Call nodes since their field values are unknown.
2763   if (!alloc->is_Allocate()) {
2764     return 0;
2765   }
2766   InitializeNode* ini = alloc->as_Allocate()->initialization();
2767   bool visited_bottom_offset = false;
2768   GrowableArray<int> offsets_worklist;
2769   int new_edges = 0;
2770 
2771   // Check if an oop field's initializing value is recorded and add
2772   // a corresponding null value if it is not recorded.
2773   // The Connection Graph does not record a default initialization by null
2774   // captured by the Initialize node.
2775   //
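       // Illustrative example (made-up Java): the default null is observable,
       // so a null edge must be added when no initializing store was captured:
       //
       //    Point p = new Point();   // constructor never writes p.next
       //    if (p.next == null) ...  // must see the default null
       //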
2776   for (EdgeIterator i(pta); i.has_next(); i.next()) {
2777     PointsToNode* field = i.get(); // Field (AddP)
2778     if (!field->is_Field() || !field->as_Field()->is_oop()) {
2779       continue; // Not oop field
2780     }
2781     int offset = field->as_Field()->offset();
2782     if (offset == Type::OffsetBot) {
2783       if (!visited_bottom_offset) {
2784         // OffsetBot is used to reference an array's element, so
2785         // always add a reference to null to all Field nodes since we don't
2786         // know which element is referenced.
2787         if (add_edge(field, null_obj)) {
2788           // New edge was added
2789           new_edges++;
2790           add_field_uses_to_worklist(field->as_Field());
2791           visited_bottom_offset = true;
2792         }
2793       }
2794     } else {
2795       // Check only oop fields.
2796       const Type* adr_type = field->ideal_node()->as_AddP()->bottom_type();
2797       if (adr_type->isa_rawptr()) {
2798 #ifdef ASSERT
2799         // Raw pointers are used for initializing stores so skip it
2800         // since it should be recorded already
2801         Node* base = get_addp_base(field->ideal_node());
2802         assert(adr_type->isa_rawptr() && is_captured_store_address(field->ideal_node()), "unexpected pointer type");
2803 #endif
2804         continue;
2805       }
2806       if (!offsets_worklist.contains(offset)) {
2807         offsets_worklist.append(offset);
2808         Node* value = nullptr;
2809         if (ini != nullptr) {
2810           // StoreP::memory_type() == T_ADDRESS
2811           BasicType ft = UseCompressedOops ? T_NARROWOOP : T_ADDRESS;
2812           Node* store = ini->find_captured_store(offset, type2aelembytes(ft, true), phase);
2813           // Make sure the initializing store has the same type as this AddP.
2814           // This AddP may reference a non-existing field because it is on a
2815           // dead branch of a bimorphic call which is not eliminated yet.
2816           if (store != nullptr && store->is_Store() &&
2817               store->as_Store()->memory_type() == ft) {
2818             value = store->in(MemNode::ValueIn);
2819 #ifdef ASSERT
2820             if (VerifyConnectionGraph) {
2821               // Verify that AddP already points to all objects the value points to.
2822               PointsToNode* val = ptnode_adr(value->_idx);
2823               assert((val != nullptr), "should be processed already");
2824               PointsToNode* missed_obj = nullptr;
2825               if (val->is_JavaObject()) {
2826                 if (!field->points_to(val->as_JavaObject())) {
2827                   missed_obj = val;
2828                 }
2829               } else {
2830                 if (!val->is_LocalVar() || (val->edge_count() == 0)) {
2831                   tty->print_cr("----------init store has invalid value -----");
2832                   store->dump();
2833                   val->dump();
2834                   assert(val->is_LocalVar() && (val->edge_count() > 0), "should be processed already");
2835                 }
2836                 for (EdgeIterator j(val); j.has_next(); j.next()) {
2837                   PointsToNode* obj = j.get();
2838                   if (obj->is_JavaObject()) {
2839                     if (!field->points_to(obj->as_JavaObject())) {
2840                       missed_obj = obj;
2841                       break;
2842                     }
2843                   }
2844                 }
2845               }
2846               if (missed_obj != nullptr) {
2847                 tty->print_cr("----------field---------------------------------");
2848                 field->dump();
2849                 tty->print_cr("----------missed reference to object------------");
2850                 missed_obj->dump();
2851                 tty->print_cr("----------object referenced by init store ------");
2852                 store->dump();
2853                 val->dump();
2854                 assert(!field->points_to(missed_obj->as_JavaObject()), "missed JavaObject reference");
2855               }
2856             }
2857 #endif
2858           } else {
2859             // There could be initializing stores which follow the allocation.
2860             // For example, a volatile field store is not collected
2861             // by the Initialize node.
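                 //
                 // An illustrative (made-up) Java case:
                 //    class A { volatile Object f; A() { f = new Object(); } }
                 // Here the volatile store to 'f' is not captured by the
                 // Initialize node.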
2862             //
2863             // Need to check for dependent loads to separate such stores from
2864             // stores which follow loads. For now, add initial value null so
2865             // that compare pointers optimization works correctly.
2866           }
2867         }
2868         if (value == nullptr) {
2869           // A field's initializing value was not recorded. Add null.
2870           if (add_edge(field, null_obj)) {
2871             // New edge was added
2872             new_edges++;
2873             add_field_uses_to_worklist(field->as_Field());
2874           }
2875         }
2876       }
2877     }
2878   }
2879   return new_edges;
2880 }
2881 
2882 // Adjust scalar_replaceable state after Connection Graph is built.
2883 void ConnectionGraph::adjust_scalar_replaceable_state(JavaObjectNode* jobj, Unique_Node_List &reducible_merges) {
2884   // A Phi 'x' is a _candidate_ to be reducible if 'can_reduce_phi(x)'
2885   // returns true. If one of the constraints in this method sets 'jobj' to NSR
2886   // then the candidate Phi is discarded. If the Phi has another SR 'jobj' as
2887   // input, 'adjust_scalar_replaceable_state' will eventually be called with
2888   // that other object and the Phi will become a reducible Phi.
2889   // There could be multiple merges involving the same jobj.
2890   Unique_Node_List candidates;
2891 
2892   // Search for non-escaping objects which are not scalar replaceable
2893   // and mark them to propagate the state to referenced objects.
2894 
2895   for (UseIterator i(jobj); i.has_next(); i.next()) {
2896     PointsToNode* use = i.get();
2897     if (use->is_Arraycopy()) {
2898       continue;
2899     }
2900     if (use->is_Field()) {
2901       FieldNode* field = use->as_Field();
2902       assert(field->is_oop() && field->scalar_replaceable(), "sanity");
2903       // 1. An object is not scalar replaceable if the field into which it is
2904       // stored has unknown offset (stored into unknown element of an array).
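           // For example (illustrative): 'a[i] = obj' with a variable index 'i'
           // produces a Field node with offset Type::OffsetBot.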
2905       if (field->offset() == Type::OffsetBot) {
2906         set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored at unknown offset"));
2907         return;
2908       }
2909       for (BaseIterator i(field); i.has_next(); i.next()) {
2910         PointsToNode* base = i.get();
2911         // 2. An object is not scalar replaceable if the field into which it is
2912         // stored has multiple bases, one of which is null.
2913         if ((base == null_obj) && (field->base_count() > 1)) {
2914           set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with potentially null base"));
2915           return;
2916         }
2917         // 2.5. An object is not scalar replaceable if the field into which it is
2918         // stored has NSR base.
2919         if (!base->scalar_replaceable()) {
2920           set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base"));
2921           return;
2922         }
2923       }
2924     }
2925     assert(use->is_Field() || use->is_LocalVar(), "sanity");
2926     // 3. An object is not scalar replaceable if it is merged with other objects
2927     // and we can't remove the merge
2928     for (EdgeIterator j(use); j.has_next(); j.next()) {
2929       PointsToNode* ptn = j.get();
2930       if (ptn->is_JavaObject() && ptn != jobj) {
2931         Node* use_n = use->ideal_node();
2932 
2933         // These other local vars may point to multiple objects through a Phi.
2934         // In this case we skip them and see if we can reduce the Phi.
2935         if (use_n->is_CastPP() || use_n->is_CheckCastPP()) {
2936           use_n = use_n->in(1);
2937         }
2938 
2939         // If it's already a candidate or confirmed reducible merge we can skip verification
2940         if (candidates.member(use_n) || reducible_merges.member(use_n)) {
2941           continue;
2942         }
2943 
2944         if (use_n->is_Phi() && can_reduce_phi(use_n->as_Phi())) {
2945           candidates.push(use_n);
2946         } else {
2947           // Mark all objects as NSR if we can't remove the merge
2948           set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA trace_merged_message(ptn)));
2949           set_not_scalar_replaceable(ptn NOT_PRODUCT(COMMA trace_merged_message(jobj)));
2950         }
2951       }
2952     }
2953     if (!jobj->scalar_replaceable()) {
2954       return;
2955     }
2956   }
2957 
2958   for (EdgeIterator j(jobj); j.has_next(); j.next()) {
2959     if (j.get()->is_Arraycopy()) {
2960       continue;
2961     }
2962 
2963     // Non-escaping object node should point only to field nodes.
2964     FieldNode* field = j.get()->as_Field();
2965     int offset = field->as_Field()->offset();
2966 
2967     // 4. An object is not scalar replaceable if it has a field with unknown
2968     // offset (an array's element is accessed in a loop).
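         // For example (illustrative): 'x = obj[i]' executed in a loop with a
         // varying index 'i', so no single constant offset can be assigned.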
2969     if (offset == Type::OffsetBot) {
2970       set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "has field with unknown offset"));
2971       return;
2972     }
2973     // 5. Currently an object is not scalar replaceable if a LoadStore node
2974     // accesses its field since the field value is unknown after it.
2975     //
2976     Node* n = field->ideal_node();
2977 
2978     // Test for an unsafe access that was parsed as maybe off heap
2979     // (with a CheckCastPP to raw memory).
2980     assert(n->is_AddP(), "expect an address computation");
2981     if (n->in(AddPNode::Base)->is_top() &&
2982         n->in(AddPNode::Address)->Opcode() == Op_CheckCastPP) {
2983       assert(n->in(AddPNode::Address)->bottom_type()->isa_rawptr(), "raw address so raw cast expected");
2984       assert(_igvn->type(n->in(AddPNode::Address)->in(1))->isa_oopptr(), "cast pattern at unsafe access expected");
2985       set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used as base of mixed unsafe access"));
2986       return;
2987     }
2988 
2989     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2990       Node* u = n->fast_out(i);
2991       if (u->is_LoadStore() || (u->is_Mem() && u->as_Mem()->is_mismatched_access())) {
2992         set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is used in LoadStore or mismatched access"));
2993         return;
2994       }
2995     }
2996 
2997     // 6. Or the address may point to more than one object. This may produce
2998     // a false-positive result (set not scalar replaceable)
2999     // since the flow-insensitive escape analysis can't separate
3000     // the case when stores overwrite the field's value from the case
3001     // when stores happen on different control branches.
3002     //
3003     // Note: it will disable scalar replacement in some cases:
3004     //
3005     //    Point p[] = new Point[1];
3006     //    p[0] = new Point(); // Will not be scalar replaced
3007     //
3008     // but it will save us from incorrect optimizations in cases like the following:
3009     //
3010     //    Point p[] = new Point[1];
3011     //    if ( x ) p[0] = new Point(); // Will not be scalar replaced
3012     //
3013     if (field->base_count() > 1 && candidates.size() == 0) {
3014       if (has_non_reducible_merge(field, reducible_merges)) {
3015         for (BaseIterator i(field); i.has_next(); i.next()) {
3016           PointsToNode* base = i.get();
3017           // Don't take into account LocalVar nodes which
3018           // may point to only one object which should also be
3019           // this field's base by now.
3020           if (base->is_JavaObject() && base != jobj) {
3021             // Mark all bases.
3022             set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "may point to more than one object"));
3023             set_not_scalar_replaceable(base NOT_PRODUCT(COMMA "may point to more than one object"));
3024           }
3025         }
3026 
3027         if (!jobj->scalar_replaceable()) {
3028           return;
3029         }
3030       }
3031     }
3032   }
3033 
3034   // The candidate is truly a reducible merge only if none of the other
3035   // constraints ruled it as NSR. There could be multiple merges involving the
3036   // same jobj.
3037   assert(jobj->scalar_replaceable(), "sanity");
3038   for (uint i = 0; i < candidates.size(); i++ ) {
3039     Node* candidate = candidates.at(i);
3040     reducible_merges.push(candidate);
3041   }
3042 }
3043 
3044 bool ConnectionGraph::has_non_reducible_merge(FieldNode* field, Unique_Node_List& reducible_merges) {
3045   for (BaseIterator i(field); i.has_next(); i.next()) {
3046     Node* base = i.get()->ideal_node();
3047     if (base->is_Phi() && !reducible_merges.member(base)) {
3048       return true;
3049     }
3050   }
3051   return false;
3052 }
3053 
3054 void ConnectionGraph::revisit_reducible_phi_status(JavaObjectNode* jobj, Unique_Node_List& reducible_merges) {
3055   assert(jobj != nullptr && !jobj->scalar_replaceable(), "jobj should be set as NSR before calling this function.");
3056 
3057   // Look for 'phis' that refer to 'jobj' as the last
3058   // remaining scalar replaceable input.
3059   uint reducible_merges_cnt = reducible_merges.size();
3060   for (uint i = 0; i < reducible_merges_cnt; i++) {
3061     Node* phi = reducible_merges.at(i);
3062 
3063     // This 'Phi' is still a 'good' merge if it points to
3064     // at least one scalar replaceable object. Note that 'jobj'
3065     // was/should have been marked as NSR before calling this function.
3066     bool good_phi = false;
3067 
3068     for (uint j = 1; j < phi->req(); j++) {
3069       JavaObjectNode* phi_in_obj = unique_java_object(phi->in(j));
3070       if (phi_in_obj != nullptr && phi_in_obj->scalar_replaceable()) {
3071         good_phi = true;
3072         break;
3073       }
3074     }
3075 
3076     if (!good_phi) {
3077       NOT_PRODUCT(if (TraceReduceAllocationMerges) tty->print_cr("Phi %d became non-reducible after node %d became NSR.", phi->_idx, jobj->ideal_node()->_idx);)
3078       reducible_merges.remove(i);
3079 
3080       // Decrement the index because the 'remove' call above actually
3081       // moves the last entry of the list to position 'i'.
3082       i--;
3083 
3084       reducible_merges_cnt--;
3085     }
3086   }
3087 }
3088 
3089 // Propagate NSR (Not scalar replaceable) state.
3090 void ConnectionGraph::find_scalar_replaceable_allocs(GrowableArray<JavaObjectNode*>& jobj_worklist, Unique_Node_List &reducible_merges) {
3091   int jobj_length = jobj_worklist.length();
3092   bool found_nsr_alloc = true;
3093   while (found_nsr_alloc) {
3094     found_nsr_alloc = false;
3095     for (int next = 0; next < jobj_length; ++next) {
3096       JavaObjectNode* jobj = jobj_worklist.at(next);
3097       for (UseIterator i(jobj); (jobj->scalar_replaceable() && i.has_next()); i.next()) {
3098         PointsToNode* use = i.get();
3099         if (use->is_Field()) {
3100           FieldNode* field = use->as_Field();
3101           assert(field->is_oop() && field->scalar_replaceable(), "sanity");
3102           assert(field->offset() != Type::OffsetBot, "sanity");
3103           for (BaseIterator i(field); i.has_next(); i.next()) {
3104             PointsToNode* base = i.get();
3105             // An object is not scalar replaceable if the field into which
3106             // it is stored has NSR base.
3107             if ((base != null_obj) && !base->scalar_replaceable()) {
3108               set_not_scalar_replaceable(jobj NOT_PRODUCT(COMMA "is stored into field with NSR base"));
3109               // Any merge that had only 'jobj' as scalar-replaceable will now be non-reducible,
3110               // because there is no point in reducing a Phi that won't improve the number of SR
3111               // objects.
3112               revisit_reducible_phi_status(jobj, reducible_merges);
3113               found_nsr_alloc = true;
3114               break;
3115             }
3116           }
3117         }
3118       }
3119     }
3120   }
3121 }
3122 
3123 #ifdef ASSERT
3124 void ConnectionGraph::verify_connection_graph(
3125                          GrowableArray<PointsToNode*>&   ptnodes_worklist,
3126                          GrowableArray<JavaObjectNode*>& non_escaped_allocs_worklist,
3127                          GrowableArray<JavaObjectNode*>& java_objects_worklist,
3128                          GrowableArray<Node*>& addp_worklist) {
3129   // Verify that graph is complete - no new edges could be added.
3130   int java_objects_length = java_objects_worklist.length();
3131   int non_escaped_length  = non_escaped_allocs_worklist.length();
3132   int new_edges = 0;
3133   for (int next = 0; next < java_objects_length; ++next) {
3134     JavaObjectNode* ptn = java_objects_worklist.at(next);
3135     new_edges += add_java_object_edges(ptn, true);
3136   }
3137   assert(new_edges == 0, "graph was not complete");
3138   // Verify that escape state is final.
3139   int length = non_escaped_allocs_worklist.length();
3140   find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist);
3141   assert((non_escaped_length == non_escaped_allocs_worklist.length()) &&
3142          (non_escaped_length == length) &&
3143          (_worklist.length() == 0), "escape state was not final");
3144 
3145   // Verify fields information.
3146   int addp_length = addp_worklist.length();
3147   for (int next = 0; next < addp_length; ++next ) {
3148     Node* n = addp_worklist.at(next);
3149     FieldNode* field = ptnode_adr(n->_idx)->as_Field();
3150     if (field->is_oop()) {
3151       // Verify that field has all bases
3152       Node* base = get_addp_base(n);
3153       PointsToNode* ptn = ptnode_adr(base->_idx);
3154       if (ptn->is_JavaObject()) {
3155         assert(field->has_base(ptn->as_JavaObject()), "sanity");
3156       } else {
3157         assert(ptn->is_LocalVar(), "sanity");
3158         for (EdgeIterator i(ptn); i.has_next(); i.next()) {
3159           PointsToNode* e = i.get();
3160           if (e->is_JavaObject()) {
3161             assert(field->has_base(e->as_JavaObject()), "sanity");
3162           }
3163         }
3164       }
3165       // Verify that all fields have initializing values.
3166       if (field->edge_count() == 0) {
3167         tty->print_cr("----------field does not have references----------");
3168         field->dump();
3169         for (BaseIterator i(field); i.has_next(); i.next()) {
3170           PointsToNode* base = i.get();
3171           tty->print_cr("----------field has next base---------------------");
3172           base->dump();
3173           if (base->is_JavaObject() && (base != phantom_obj) && (base != null_obj)) {
3174             tty->print_cr("----------base has fields-------------------------");
3175             for (EdgeIterator j(base); j.has_next(); j.next()) {
3176               j.get()->dump();
3177             }
3178             tty->print_cr("----------base has references---------------------");
3179             for (UseIterator j(base); j.has_next(); j.next()) {
3180               j.get()->dump();
3181             }
3182           }
3183         }
3184         for (UseIterator i(field); i.has_next(); i.next()) {
3185           i.get()->dump();
3186         }
3187         assert(field->edge_count() > 0, "sanity");
3188       }
3189     }
3190   }
3191 }
3192 #endif
3193 
3194 // Optimize ideal graph.
3195 void ConnectionGraph::optimize_ideal_graph(GrowableArray<Node*>& ptr_cmp_worklist,
3196                                            GrowableArray<MemBarStoreStoreNode*>& storestore_worklist) {
3197   Compile* C = _compile;
3198   PhaseIterGVN* igvn = _igvn;
3199   if (EliminateLocks) {
3200     // Mark locks before changing ideal graph.
3201     int cnt = C->macro_count();
3202     for (int i = 0; i < cnt; i++) {
3203       Node *n = C->macro_node(i);
3204       if (n->is_AbstractLock()) { // Lock and Unlock nodes
3205         AbstractLockNode* alock = n->as_AbstractLock();
3206         if (!alock->is_non_esc_obj()) {
3207           if (can_eliminate_lock(alock)) {
3208             assert(!alock->is_eliminated() || alock->is_coarsened(), "sanity");
3209             // The lock could have been marked eliminated by the lock coarsening
3210             // code during the first IGVN pass before EA. Replace the coarsened flag
3211             // to eliminate all associated locks/unlocks.
3212 #ifdef ASSERT
3213             alock->log_lock_optimization(C, "eliminate_lock_set_non_esc3");
3214 #endif
3215             alock->set_non_esc_obj();
3216           }
3217         }
3218       }
3219     }
3220   }
3221 
3222   if (OptimizePtrCompare) {
3223     for (int i = 0; i < ptr_cmp_worklist.length(); i++) {
3224       Node *n = ptr_cmp_worklist.at(i);
3225       assert(n->Opcode() == Op_CmpN || n->Opcode() == Op_CmpP, "must be");
3226       const TypeInt* tcmp = optimize_ptr_compare(n->in(1), n->in(2));
3227       if (tcmp->singleton()) {
3228         Node* cmp = igvn->makecon(tcmp);
3229 #ifndef PRODUCT
3230         if (PrintOptimizePtrCompare) {
3231           tty->print_cr("++++ Replaced: %d %s(%d,%d) --> %s", n->_idx, (n->Opcode() == Op_CmpP ? "CmpP" : "CmpN"), n->in(1)->_idx, n->in(2)->_idx, (tcmp == TypeInt::CC_EQ ? "EQ" : "NotEQ"));
3232           if (Verbose) {
3233             n->dump(1);
3234           }
3235         }
3236 #endif
3237         igvn->replace_node(n, cmp);
3238       }
3239     }
3240   }
3241 
3242   // For MemBarStoreStore nodes added in library_call.cpp, check
3243   // escape status of associated AllocateNode and optimize out
3244   // MemBarStoreStore node if the allocated object never escapes.
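       // (The barrier is only needed to order the object's initializing stores
       // before the object is published; if the object never globally escapes,
       // no other thread can observe it, so a cheaper MemBarCPUOrder suffices.)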
3245   for (int i = 0; i < storestore_worklist.length(); i++) {
3246     Node* storestore = storestore_worklist.at(i);
3247     Node* alloc = storestore->in(MemBarNode::Precedent)->in(0);
3248     if (alloc->is_Allocate() && not_global_escape(alloc)) {
3249       MemBarNode* mb = MemBarNode::make(C, Op_MemBarCPUOrder, Compile::AliasIdxBot);
3250       mb->init_req(TypeFunc::Memory,  storestore->in(TypeFunc::Memory));
3251       mb->init_req(TypeFunc::Control, storestore->in(TypeFunc::Control));
3252       igvn->register_new_node_with_optimizer(mb);
3253       igvn->replace_node(storestore, mb);
3254     }
3255   }
3256 }
3257 
3258 // Optimize object pointer comparisons.
3259 const TypeInt* ConnectionGraph::optimize_ptr_compare(Node* left, Node* right) {
3260   assert(OptimizePtrCompare, "sanity");
3261   const TypeInt* EQ = TypeInt::CC_EQ; // [0] == ZERO
3262   const TypeInt* NE = TypeInt::CC_GT; // [1] == ONE
3263   const TypeInt* UNKNOWN = TypeInt::CC;  // [-1, 0, 1]
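       // Illustrative example (made-up Java): a non-escaping allocation can be
       // proven different from any pointer that cannot point to it:
       //
       //    Object o = new Object();  // NoEscape, never stored anywhere
       //    if (o == p) { ... }       // if 'p' is never assigned from 'o',
       //                              // this folds to false (NE)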
3264 
3265   PointsToNode* ptn1 = ptnode_adr(left->_idx);
3266   PointsToNode* ptn2 = ptnode_adr(right->_idx);
3267   JavaObjectNode* jobj1 = unique_java_object(left);
3268   JavaObjectNode* jobj2 = unique_java_object(right);
3269 
3270   // The use of this method during allocation merge reduction may cause 'left'
3271   // or 'right' to be something (e.g., a Phi) that isn't in the connection graph
3272   // or that doesn't reference a unique java object.
3273   if (ptn1 == nullptr || ptn2 == nullptr ||
3274       jobj1 == nullptr || jobj2 == nullptr) {
3275     return UNKNOWN;
3276   }
3277 
3278   assert(ptn1->is_JavaObject() || ptn1->is_LocalVar(), "sanity");
3279   assert(ptn2->is_JavaObject() || ptn2->is_LocalVar(), "sanity");
3280 
3281   // Check simple cases first.
3282   if (jobj1 != nullptr) {
3283     if (jobj1->escape_state() == PointsToNode::NoEscape) {
3284       if (jobj1 == jobj2) {
3285         // Comparing the same non-escaping object.
3286         return EQ;
3287       }
3288       Node* obj = jobj1->ideal_node();
3289       // Comparing a non-escaping allocation.
3290       if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
3291           !ptn2->points_to(jobj1)) {
3292         return NE; // This includes nullness check.
3293       }
3294     }
3295   }
3296   if (jobj2 != nullptr) {
3297     if (jobj2->escape_state() == PointsToNode::NoEscape) {
3298       Node* obj = jobj2->ideal_node();
3299       // Comparing a non-escaping allocation.
3300       if ((obj->is_Allocate() || obj->is_CallStaticJava()) &&
3301           !ptn1->points_to(jobj2)) {
3302         return NE; // This includes nullness check.
3303       }
3304     }
3305   }
3306   if (jobj1 != nullptr && jobj1 != phantom_obj &&
3307       jobj2 != nullptr && jobj2 != phantom_obj &&
3308       jobj1->ideal_node()->is_Con() &&
3309       jobj2->ideal_node()->is_Con()) {
3310     // Klass or String constants compare. Need to be careful with
3311     // compressed pointers - compare types of ConN and ConP instead of nodes.
3312     const Type* t1 = jobj1->ideal_node()->get_ptr_type();
3313     const Type* t2 = jobj2->ideal_node()->get_ptr_type();
3314     if (t1->make_ptr() == t2->make_ptr()) {
3315       return EQ;
3316     } else {
3317       return NE;
3318     }
3319   }
3320   if (ptn1->meet(ptn2)) {
3321     return UNKNOWN; // Sets are not disjoint
3322   }
3323 
3324   // Sets are disjoint.
3325   bool set1_has_unknown_ptr = ptn1->points_to(phantom_obj);
3326   bool set2_has_unknown_ptr = ptn2->points_to(phantom_obj);
3327   bool set1_has_null_ptr    = ptn1->points_to(null_obj);
3328   bool set2_has_null_ptr    = ptn2->points_to(null_obj);
3329   if ((set1_has_unknown_ptr && set2_has_null_ptr) ||
3330       (set2_has_unknown_ptr && set1_has_null_ptr)) {
3331     // Check nullness of unknown object.
3332     return UNKNOWN;
3333   }
3334 
3335   // Disjointness by itself is not sufficient since
3336   // alias analysis is not complete for escaped objects.
3337   // Disjoint sets are definitely unrelated only when
3338   // at least one set has only not escaping allocations.
3339   if (!set1_has_unknown_ptr && !set1_has_null_ptr) {
3340     if (ptn1->non_escaping_allocation()) {
3341       return NE;
3342     }
3343   }
3344   if (!set2_has_unknown_ptr && !set2_has_null_ptr) {
3345     if (ptn2->non_escaping_allocation()) {
3346       return NE;
3347     }
3348   }
3349   return UNKNOWN;
3350 }
3351 
3352 // Connection Graph construction functions.
3353 
3354 void ConnectionGraph::add_local_var(Node *n, PointsToNode::EscapeState es) {
3355   PointsToNode* ptadr = _nodes.at(n->_idx);
3356   if (ptadr != nullptr) {
3357     assert(ptadr->is_LocalVar() && ptadr->ideal_node() == n, "sanity");
3358     return;
3359   }
3360   Compile* C = _compile;
3361   ptadr = new (C->comp_arena()) LocalVarNode(this, n, es);
3362   map_ideal_node(n, ptadr);
3363 }
3364 
3365 PointsToNode* ConnectionGraph::add_java_object(Node *n, PointsToNode::EscapeState es) {
3366   PointsToNode* ptadr = _nodes.at(n->_idx);
3367   if (ptadr != nullptr) {
3368     assert(ptadr->is_JavaObject() && ptadr->ideal_node() == n, "sanity");
3369     return ptadr;
3370   }
3371   Compile* C = _compile;
3372   ptadr = new (C->comp_arena()) JavaObjectNode(this, n, es);
3373   map_ideal_node(n, ptadr);
3374   return ptadr;
3375 }
3376 
3377 void ConnectionGraph::add_field(Node *n, PointsToNode::EscapeState es, int offset) {
3378   PointsToNode* ptadr = _nodes.at(n->_idx);
3379   if (ptadr != nullptr) {
3380     assert(ptadr->is_Field() && ptadr->ideal_node() == n, "sanity");
3381     return;
3382   }
3383   bool unsafe = false;
3384   bool is_oop = is_oop_field(n, offset, &unsafe);
3385   if (unsafe) {
3386     es = PointsToNode::GlobalEscape;
3387   }
3388   Compile* C = _compile;
3389   FieldNode* field = new (C->comp_arena()) FieldNode(this, n, es, offset, is_oop);
3390   map_ideal_node(n, field);
3391 }
3392 
3393 void ConnectionGraph::add_arraycopy(Node *n, PointsToNode::EscapeState es,
3394                                     PointsToNode* src, PointsToNode* dst) {
3395   assert(!src->is_Field() && !dst->is_Field(), "only for JavaObject and LocalVar");
3396   assert((src != null_obj) && (dst != null_obj), "not for ConP null");
3397   PointsToNode* ptadr = _nodes.at(n->_idx);
3398   if (ptadr != nullptr) {
3399     assert(ptadr->is_Arraycopy() && ptadr->ideal_node() == n, "sanity");
3400     return;
3401   }
3402   Compile* C = _compile;
3403   ptadr = new (C->comp_arena()) ArraycopyNode(this, n, es);
3404   map_ideal_node(n, ptadr);
3405   // Add edge from arraycopy node to source object.
3406   (void)add_edge(ptadr, src);
3407   src->set_arraycopy_src();
3408   // Add edge from destination object to arraycopy node.
3409   (void)add_edge(dst, ptadr);
3410   dst->set_arraycopy_dst();
3411 }
3412 
3413 bool ConnectionGraph::is_oop_field(Node* n, int offset, bool* unsafe) {
3414   const Type* adr_type = n->as_AddP()->bottom_type();
3415   BasicType bt = T_INT;
3416   if (offset == Type::OffsetBot) {
3417     // Check only oop fields.
3418     if (!adr_type->isa_aryptr() ||
3419         adr_type->isa_aryptr()->elem() == Type::BOTTOM ||
3420         adr_type->isa_aryptr()->elem()->make_oopptr() != nullptr) {
3421       // OffsetBot is used to reference array's element. Ignore first AddP.
3422       if (find_second_addp(n, n->in(AddPNode::Base)) == nullptr) {
3423         bt = T_OBJECT;
3424       }
3425     }
3426   } else if (offset != oopDesc::klass_offset_in_bytes()) {
3427     if (adr_type->isa_instptr()) {
3428       ciField* field = _compile->alias_type(adr_type->isa_instptr())->field();
3429       if (field != nullptr) {
3430         bt = field->layout_type();
3431       } else {
3432         // Check for unsafe oop field access
3433         if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3434             n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3435             n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3436             BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
3437           bt = T_OBJECT;
3438           (*unsafe) = true;
3439         }
3440       }
3441     } else if (adr_type->isa_aryptr()) {
3442       if (offset == arrayOopDesc::length_offset_in_bytes()) {
3443         // Ignore array length load.
3444       } else if (find_second_addp(n, n->in(AddPNode::Base)) != nullptr) {
3445         // Ignore first AddP.
3446       } else {
3447         const Type* elemtype = adr_type->isa_aryptr()->elem();
3448         bt = elemtype->array_element_basic_type();
3449       }
3450     } else if (adr_type->isa_rawptr() || adr_type->isa_klassptr()) {
3451       // Allocation initialization, ThreadLocal field access, unsafe access
3452       if (n->has_out_with(Op_StoreP, Op_LoadP, Op_StoreN, Op_LoadN) ||
3453           n->has_out_with(Op_GetAndSetP, Op_GetAndSetN, Op_CompareAndExchangeP, Op_CompareAndExchangeN) ||
3454           n->has_out_with(Op_CompareAndSwapP, Op_CompareAndSwapN, Op_WeakCompareAndSwapP, Op_WeakCompareAndSwapN) ||
3455           BarrierSet::barrier_set()->barrier_set_c2()->escape_has_out_with_unsafe_object(n)) {
3456         bt = T_OBJECT;
3457       }
3458     }
3459   }
3460   // Note: T_NARROWOOP is not classed as a real reference type
3461   return (is_reference_type(bt) || bt == T_NARROWOOP);
3462 }
3463 
3464 // Returns the unique java object this node points to, or null.
3465 JavaObjectNode* ConnectionGraph::unique_java_object(Node *n) const {
3466   // If the node was created after the escape computation we can't answer.
3467   uint idx = n->_idx;
3468   if (idx >= nodes_size()) {
3469     return nullptr;
3470   }
3471   PointsToNode* ptn = ptnode_adr(idx);
3472   if (ptn == nullptr) {
3473     return nullptr;
3474   }
3475   if (ptn->is_JavaObject()) {
3476     return ptn->as_JavaObject();
3477   }
3478   assert(ptn->is_LocalVar(), "sanity");
3479   // Check all java objects it points to.
3480   JavaObjectNode* jobj = nullptr;
3481   for (EdgeIterator i(ptn); i.has_next(); i.next()) {
3482     PointsToNode* e = i.get();
3483     if (e->is_JavaObject()) {
3484       if (jobj == nullptr) {
3485         jobj = e->as_JavaObject();
3486       } else if (jobj != e) {
3487         return nullptr;
3488       }
3489     }
3490   }
3491   return jobj;
3492 }
3493 
3494 // Return true if this node points only to non-escaping allocations.
3495 bool PointsToNode::non_escaping_allocation() {
3496   if (is_JavaObject()) {
3497     Node* n = ideal_node();
3498     if (n->is_Allocate() || n->is_CallStaticJava()) {
3499       return (escape_state() == PointsToNode::NoEscape);
3500     } else {
3501       return false;
3502     }
3503   }
3504   assert(is_LocalVar(), "sanity");
3505   // Check all java objects it points to.
3506   for (EdgeIterator i(this); i.has_next(); i.next()) {
3507     PointsToNode* e = i.get();
3508     if (e->is_JavaObject()) {
3509       Node* n = e->ideal_node();
3510       if ((e->escape_state() != PointsToNode::NoEscape) ||
3511           !(n->is_Allocate() || n->is_CallStaticJava())) {
3512         return false;
3513       }
3514     }
3515   }
3516   return true;
3517 }
3518 
3519 // Return true if we know the node does not escape globally.
3520 bool ConnectionGraph::not_global_escape(Node *n) {
3521   assert(!_collecting, "should not call during graph construction");
3522   // If the node was created after the escape computation we can't answer.
3523   uint idx = n->_idx;
3524   if (idx >= nodes_size()) {
3525     return false;
3526   }
3527   PointsToNode* ptn = ptnode_adr(idx);
3528   if (ptn == nullptr) {
3529     return false; // not in congraph (e.g. ConI)
3530   }
3531   PointsToNode::EscapeState es = ptn->escape_state();
3532   // If we have already computed a value, return it.
3533   if (es >= PointsToNode::GlobalEscape) {
3534     return false;
3535   }
3536   if (ptn->is_JavaObject()) {
3537     return true; // (es < PointsToNode::GlobalEscape);
3538   }
3539   assert(ptn->is_LocalVar(), "sanity");
3540   // Check all java objects it points to.
3541   for (EdgeIterator i(ptn); i.has_next(); i.next()) {
3542     if (i.get()->escape_state() >= PointsToNode::GlobalEscape) {
3543       return false;
3544     }
3545   }
3546   return true;
3547 }
3548 
3549 // Return true if locked object does not escape globally
3550 // and locked code region (identified by BoxLockNode) is balanced:
3551 // all compiled code paths have corresponding Lock/Unlock pairs.
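     //
     // Illustrative example (made-up Java): a synchronized block on a
     // non-escaping object is balanced and can be eliminated:
     //
     //    Object lock = new Object();   // never escapes this method
     //    synchronized (lock) { ... }   // Lock/Unlock pair can be removed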
3552 bool ConnectionGraph::can_eliminate_lock(AbstractLockNode* alock) {
3553   if (alock->is_balanced() && not_global_escape(alock->obj_node())) {
3554     if (EliminateNestedLocks) {
3555       // We can mark the whole locking region as Local only when a single
3556       // object is used for locking.
3557       alock->box_node()->as_BoxLock()->set_local();
3558     }
3559     return true;
3560   }
3561   return false;
3562 }
3563 
3564 // Helper functions
3565 
3566 // Return true if this node is the specified node or points to it.
3567 bool PointsToNode::points_to(JavaObjectNode* ptn) const {
3568   if (is_JavaObject()) {
3569     return (this == ptn);
3570   }
3571   assert(is_LocalVar() || is_Field(), "sanity");
3572   for (EdgeIterator i(this); i.has_next(); i.next()) {
3573     if (i.get() == ptn) {
3574       return true;
3575     }
3576   }
3577   return false;
3578 }
3579 
3580 // Return true if one node points to the other or their points-to sets intersect.
3581 bool PointsToNode::meet(PointsToNode* ptn) {
3582   if (this == ptn) {
3583     return true;
3584   } else if (ptn->is_JavaObject()) {
3585     return this->points_to(ptn->as_JavaObject());
3586   } else if (this->is_JavaObject()) {
3587     return ptn->points_to(this->as_JavaObject());
3588   }
3589   assert(this->is_LocalVar() && ptn->is_LocalVar(), "sanity");
3590   int ptn_count =  ptn->edge_count();
3591   for (EdgeIterator i(this); i.has_next(); i.next()) {
3592     PointsToNode* this_e = i.get();
3593     for (int j = 0; j < ptn_count; j++) {
3594       if (this_e == ptn->edge(j)) {
3595         return true;
3596       }
3597     }
3598   }
3599   return false;
3600 }
3601 
3602 #ifdef ASSERT
3603 // Return true if one of this field's bases is the given java object.
3604 bool FieldNode::has_base(JavaObjectNode* jobj) const {
3605   for (BaseIterator i(this); i.has_next(); i.next()) {
3606     if (i.get() == jobj) {
3607       return true;
3608     }
3609   }
3610   return false;
3611 }
3612 #endif
3613 
3614 bool ConnectionGraph::is_captured_store_address(Node* addp) {
3615   // Handle simple case first.
3616   assert(_igvn->type(addp)->isa_oopptr() == nullptr, "should be raw access");
3617   if (addp->in(AddPNode::Address)->is_Proj() && addp->in(AddPNode::Address)->in(0)->is_Allocate()) {
3618     return true;
3619   } else if (addp->in(AddPNode::Address)->is_Phi()) {
3620     for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
3621       Node* addp_use = addp->fast_out(i);
3622       if (addp_use->is_Store()) {
3623         for (DUIterator_Fast jmax, j = addp_use->fast_outs(jmax); j < jmax; j++) {
3624           if (addp_use->fast_out(j)->is_Initialize()) {
3625             return true;
3626           }
3627         }
3628       }
3629     }
3630   }
3631   return false;
3632 }
3633 
3634 int ConnectionGraph::address_offset(Node* adr, PhaseValues* phase) {
3635   const Type *adr_type = phase->type(adr);
3636   if (adr->is_AddP() && adr_type->isa_oopptr() == nullptr && is_captured_store_address(adr)) {
3637     // We are computing a raw address for a store captured by an Initialize;
3638     // compute an appropriate address type. AddP cases #3 and #5 (see below).
3639     int offs = (int)phase->find_intptr_t_con(adr->in(AddPNode::Offset), Type::OffsetBot);
3640     assert(offs != Type::OffsetBot ||
3641            adr->in(AddPNode::Address)->in(0)->is_AllocateArray(),
3642            "offset must be a constant or it is initialization of array");
3643     return offs;
3644   }
3645   const TypePtr *t_ptr = adr_type->isa_ptr();
3646   assert(t_ptr != nullptr, "must be a pointer type");
3647   return t_ptr->offset();
3648 }
3649 
3650 Node* ConnectionGraph::get_addp_base(Node *addp) {
3651   assert(addp->is_AddP(), "must be AddP");
3652   //
3653   // AddP cases for Base and Address inputs:
3654   // case #1. Direct object's field reference:
3655   //     Allocate
3656   //       |
3657   //     Proj #5 ( oop result )
3658   //       |
3659   //     CheckCastPP (cast to instance type)
3660   //      | |
3661   //     AddP  ( base == address )
3662   //
3663   // case #2. Indirect object's field reference:
3664   //      Phi
3665   //       |
3666   //     CastPP (cast to instance type)
3667   //      | |
3668   //     AddP  ( base == address )
3669   //
3670   // case #3. Raw object's field reference for Initialize node:
3671   //      Allocate
3672   //        |
3673   //      Proj #5 ( oop result )
3674   //  top   |
3675   //     \  |
3676   //     AddP  ( base == top )
3677   //
3678   // case #4. Array's element reference:
3679   //   {CheckCastPP | CastPP}
3680   //     |  | |
3681   //     |  AddP ( array's element offset )
3682   //     |  |
3683   //     AddP ( array's offset )
3684   //
3685   // case #5. Raw object's field reference for arraycopy stub call:
3686   //          The inline_native_clone() case when the arraycopy stub is called
3687   //          after the allocation before Initialize and CheckCastPP nodes.
3688   //      Allocate
3689   //        |
3690   //      Proj #5 ( oop result )
3691   //       | |
3692   //       AddP  ( base == address )
3693   //
3694   // case #6. Constant Pool, ThreadLocal, CastX2P or
3695   //          Raw object's field reference:
3696   //      {ConP, ThreadLocal, CastX2P, raw Load}
3697   //  top   |
3698   //     \  |
3699   //     AddP  ( base == top )
3700   //
3701   // case #7. Klass's field reference.
3702   //      LoadKlass
3703   //       | |
3704   //       AddP  ( base == address )
3705   //
3706   // case #8. narrow Klass's field reference.
3707   //      LoadNKlass
3708   //       |
3709   //      DecodeN
3710   //       | |
3711   //       AddP  ( base == address )
3712   //
3713   // case #9. Mixed unsafe access
3714   //    {instance}
3715   //        |
3716   //      CheckCastPP (raw)
3717   //  top   |
3718   //     \  |
3719   //     AddP  ( base == top )
3720   //
3721   Node *base = addp->in(AddPNode::Base);
3722   if (base->uncast()->is_top()) { // AddP cases #3, #6 and #9.
3723     base = addp->in(AddPNode::Address);
3724     while (base->is_AddP()) {
3725       // Case #6 (unsafe access) may have several chained AddP nodes.
3726       assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
3727       base = base->in(AddPNode::Address);
3728     }
3729     if (base->Opcode() == Op_CheckCastPP &&
3730         base->bottom_type()->isa_rawptr() &&
3731         _igvn->type(base->in(1))->isa_oopptr()) {
3732       base = base->in(1); // Case #9
3733     } else {
3734       Node* uncast_base = base->uncast();
3735       int opcode = uncast_base->Opcode();
3736       assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
3737              opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
3738              (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != nullptr)) ||
3739              is_captured_store_address(addp), "sanity");
3740     }
3741   }
3742   return base;
3743 }
3744 
3745 Node* ConnectionGraph::find_second_addp(Node* addp, Node* n) {
3746   assert(addp->is_AddP() && addp->outcnt() > 0, "Don't process dead nodes");
3747   Node* addp2 = addp->raw_out(0);
3748   if (addp->outcnt() == 1 && addp2->is_AddP() &&
3749       addp2->in(AddPNode::Base) == n &&
3750       addp2->in(AddPNode::Address) == addp) {
3751     assert(addp->in(AddPNode::Base) == n, "expecting the same base");
3752     //
3753     // Find the array's offset to push it on the worklist first, so that
3754     // as a result the array's element offset is processed first (pushed second)
3755     // to avoid a CastPP for the array's offset.
3756     // Otherwise the inserted CastPP (LocalVar) will point to what
3757     // the AddP (Field) points to, which would be wrong since
3758     // the algorithm expects the CastPP to point to the same objects
3759     // as AddP's base CheckCastPP (LocalVar).
3760     //
3761     //    ArrayAllocation
3762     //     |
3763     //    CheckCastPP
3764     //     |
3765     //    memProj (from ArrayAllocation CheckCastPP)
3766     //     |  ||
3767     //     |  ||   Int (element index)
3768     //     |  ||    |   ConI (log(element size))
3769     //     |  ||    |   /
3770     //     |  ||   LShift
3771     //     |  ||  /
3772     //     |  AddP (array's element offset)
3773     //     |  |
3774     //     |  | ConI (array's offset: #12(32-bits) or #24(64-bits))
3775     //     | / /
3776     //     AddP (array's offset)
3777     //      |
3778     //     Load/Store (memory operation on array's element)
3779     //
3780     return addp2;
3781   }
3782   return nullptr;
3783 }
3784 
3785 //
3786 // Adjust the type and inputs of an AddP which computes the
3787 // address of a field of an instance
3788 //
3789 bool ConnectionGraph::split_AddP(Node *addp, Node *base) {
3790   PhaseGVN* igvn = _igvn;
3791   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
3792   assert(base_t != nullptr && base_t->is_known_instance(), "expecting instance oopptr");
3793   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
3794   if (t == nullptr) {
3795     // We are computing a raw address for a store captured by an Initialize;
3796     // compute an appropriate address type (cases #3 and #5).
3797     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
3798     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
3799     intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
3800     assert(offs != Type::OffsetBot, "offset must be a constant");
3801     t = base_t->add_offset(offs)->is_oopptr();
3802   }
3803   int inst_id =  base_t->instance_id();
3804   assert(!t->is_known_instance() || t->instance_id() == inst_id,
3805                              "old type must be non-instance or match new type");
3806 
3807   // The type 't' could be a subclass of 'base_t'.
3808   // As a result t->offset() could be larger than base_t's size, which would
3809   // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
3810   // constructor verifies correctness of the offset.
3811   //
3812   // This can happen on a subclass's branch (from type profiling
3813   // inlining) which was not eliminated during parsing because the exactness
3814   // of the allocation type was not propagated to the subclass type check.
3815   //
3816   // Or the type 't' might not be related to 'base_t' at all.
3817   // This can happen when the CHA type differs from the MDO type on a dead path
3818   // (for example, from an instanceof check) which is not collapsed during parsing.
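       //
       // An illustrative (made-up) Java shape for such a dead path:
       //
       //    if (x instanceof SubClass) {       // never true for this object
       //      ... ((SubClass) x).subField ...  // AddP unrelated to 'base_t'
       //    }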
3819   //
3820   // Do nothing for such AddP node and don't process its users since
3821   // this code branch will go away.
3822   //
3823   if (!t->is_known_instance() &&
3824       !base_t->maybe_java_subtype_of(t)) {
3825      return false; // bail out
3826   }
3827   const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
3828   // Do NOT remove the next line: ensure a new alias index is allocated
3829   // for the instance type. Note: C++ will not remove it since the call
3830   // has side effect.
3831   int alias_idx = _compile->get_alias_index(tinst);
3832   igvn->set_type(addp, tinst);
3833   // record the allocation in the node map
3834   set_map(addp, get_map(base->_idx));
3835   // Set addp's Base and Address to 'base'.
3836   Node *abase = addp->in(AddPNode::Base);
3837   Node *adr   = addp->in(AddPNode::Address);
3838   if (adr->is_Proj() && adr->in(0)->is_Allocate() &&
3839       adr->in(0)->_idx == (uint)inst_id) {
3840     // Skip AddP cases #3 and #5.
3841   } else {
3842     assert(!abase->is_top(), "sanity"); // AddP case #3
3843     if (abase != base) {
3844       igvn->hash_delete(addp);
3845       addp->set_req(AddPNode::Base, base);
3846       if (abase == adr) {
3847         addp->set_req(AddPNode::Address, base);
3848       } else {
3849         // AddP case #4 (adr is array's element offset AddP node)
3850 #ifdef ASSERT
3851         const TypeOopPtr *atype = igvn->type(adr)->isa_oopptr();
3852         assert(adr->is_AddP() && atype != nullptr &&
3853                atype->instance_id() == inst_id, "array's element offset should be processed first");
3854 #endif
3855       }
3856       igvn->hash_insert(addp);
3857     }
3858   }
3859   // Put on IGVN worklist since at least addp's type was changed above.
3860   record_for_optimizer(addp);
3861   return true;
3862 }
3863 
3864 //
3865 // Create a new version of orig_phi if necessary. Returns either the newly
3866 // created phi or an existing phi.  Sets new_created to indicate whether a new
3867 // phi was created.  Cache the last newly created phi in the node map.
3868 //
3869 PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *>  &orig_phi_worklist, bool &new_created) {
3870   Compile *C = _compile;
3871   PhaseGVN* igvn = _igvn;
3872   new_created = false;
3873   int phi_alias_idx = C->get_alias_index(orig_phi->adr_type());
3874   // nothing to do if orig_phi is bottom memory or matches alias_idx
3875   if (phi_alias_idx == alias_idx) {
3876     return orig_phi;
3877   }
3878   // Have we recently created a Phi for this alias index?
3879   PhiNode *result = get_map_phi(orig_phi->_idx);
3880   if (result != nullptr && C->get_alias_index(result->adr_type()) == alias_idx) {
3881     return result;
3882   }
3883   // Previous check may fail when the same wide memory Phi was split into Phis
3884   // for different memory slices. Search all Phis for this region.
3885   if (result != nullptr) {
3886     Node* region = orig_phi->in(0);
3887     for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
3888       Node* phi = region->fast_out(i);
3889       if (phi->is_Phi() &&
3890           C->get_alias_index(phi->as_Phi()->adr_type()) == alias_idx) {
3891         assert(phi->_idx >= nodes_size(), "only new Phi per instance memory slice");
3892         return phi->as_Phi();
3893       }
3894     }
3895   }
3896   if (C->live_nodes() + 2*NodeLimitFudgeFactor > C->max_node_limit()) {
3897     if (C->do_escape_analysis() == true && !C->failing()) {
3898       // Retry compilation without escape analysis.
3899       // If this is the first failure, the sentinel string will "stick"
3900       // to the Compile object, and the C2Compiler will see it and retry.
3901       C->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
3902     }
3903     return nullptr;
3904   }
3905   orig_phi_worklist.append_if_missing(orig_phi);
3906   const TypePtr *atype = C->get_adr_type(alias_idx);
3907   result = PhiNode::make(orig_phi->in(0), nullptr, Type::MEMORY, atype);
3908   C->copy_node_notes_to(result, orig_phi);
3909   igvn->set_type(result, result->bottom_type());
3910   record_for_optimizer(result);
3911   set_map(orig_phi, result);
3912   new_created = true;
3913   return result;
3914 }
3915 
3916 //
3917 // Return a new version of Memory Phi "orig_phi" with the inputs having the
3918 // specified alias index.
3919 //
3920 PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, uint rec_depth) {
3921   assert(alias_idx != Compile::AliasIdxBot, "can't split out bottom memory");
3922   Compile *C = _compile;
3923   PhaseGVN* igvn = _igvn;
3924   bool new_phi_created;
3925   PhiNode *result = create_split_phi(orig_phi, alias_idx, orig_phi_worklist, new_phi_created);
3926   if (!new_phi_created) {
3927     return result;
3928   }
3929   GrowableArray<PhiNode *>  phi_list;
3930   GrowableArray<uint>  cur_input;
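       // phi_list and cur_input form an explicit stack of (phi, input index)
       // pairs, so nested memory Phis below are processed iteratively instead
       // of by native recursion.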
3931   PhiNode *phi = orig_phi;
3932   uint idx = 1;
3933   bool finished = false;
3934   while (!finished) {
3935     while (idx < phi->req()) {
3936       Node *mem = find_inst_mem(phi->in(idx), alias_idx, orig_phi_worklist, rec_depth + 1);
3937       if (mem != nullptr && mem->is_Phi()) {
3938         PhiNode *newphi = create_split_phi(mem->as_Phi(), alias_idx, orig_phi_worklist, new_phi_created);
3939         if (new_phi_created) {
3940           // Found a phi for which we created a new split; push the current
3941           // one on the worklist and begin processing the new one.
3942           phi_list.push(phi);
3943           cur_input.push(idx);
3944           phi = mem->as_Phi();
3945           result = newphi;
3946           idx = 1;
3947           continue;
3948         } else {
3949           mem = newphi;
3950         }
3951       }
3952       if (C->failing()) {
3953         return nullptr;
3954       }
3955       result->set_req(idx++, mem);
3956     }
3957 #ifdef ASSERT
3958     // verify that the new Phi has an input for each input of the original
3959     assert(phi->req() == result->req(), "must have same number of inputs.");
3960     assert(result->in(0) != nullptr && result->in(0) == phi->in(0), "regions must match");
3961 #endif
3962     // Check that the new phi's inputs correspond to the original phi's:
3963     // an input is null in the new phi iff it is null in the original.
3964     for (uint i = 1; i < phi->req(); i++) {
3965       Node* in = result->in(i);
3966       assert((phi->in(i) == nullptr) == (in == nullptr), "inputs must correspond.");
3967     }
3968     // we have finished processing a Phi, see if there are any more to do
3969     finished = (phi_list.length() == 0);
3970     if (!finished) {
3971       phi = phi_list.pop();
3972       idx = cur_input.pop();
3973       PhiNode *prev_result = get_map_phi(phi->_idx);
3974       prev_result->set_req(idx++, result);
3975       result = prev_result;
3976     }
3977   }
3978   return result;
3979 }
3980 
3981 //
3982 // The next methods are derived from methods in MemNode.
3983 //
3984 Node* ConnectionGraph::step_through_mergemem(MergeMemNode *mmem, int alias_idx, const TypeOopPtr *toop) {
3985   Node *mem = mmem;
3986   // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally
3987   // means an array I have not precisely typed yet.  Do not do any
3988   // alias stuff with it any time soon.
3989   if (toop->base() != Type::AnyPtr &&
3990       !(toop->isa_instptr() &&
3991         toop->is_instptr()->instance_klass()->is_java_lang_Object() &&
3992         toop->offset() == Type::OffsetBot)) {
3993     mem = mmem->memory_at(alias_idx);
3994     // Update input if it is progress over what we have now
3995   }
3996   return mem;
3997 }
3998 
3999 //
4000 // Move memory users to their memory slices.
4001 //
4002 void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *>  &orig_phis) {
4003   Compile* C = _compile;
4004   PhaseGVN* igvn = _igvn;
4005   const TypePtr* tp = igvn->type(n->in(MemNode::Address))->isa_ptr();
4006   assert(tp != nullptr, "ptr type");
4007   int alias_idx = C->get_alias_index(tp);
4008   int general_idx = C->get_general_index(alias_idx);
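       // alias_idx is this node's own (possibly instance-specific) memory
       // slice; general_idx is the wider slice it was split from. The two
       // differ only for known-instance alias types.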
4009 
4010   // Move users first
4011   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4012     Node* use = n->fast_out(i);
4013     if (use->is_MergeMem()) {
4014       MergeMemNode* mmem = use->as_MergeMem();
4015       assert(n == mmem->memory_at(alias_idx), "should be on instance memory slice");
4016       if (n != mmem->memory_at(general_idx) || alias_idx == general_idx) {
4017         continue; // Nothing to do
4018       }
4019       // Replace previous general reference to mem node.
4020       uint orig_uniq = C->unique();
4021       Node* m = find_inst_mem(n, general_idx, orig_phis);
4022       assert(orig_uniq == C->unique(), "no new nodes");
4023       mmem->set_memory_at(general_idx, m);
4024       --imax;
4025       --i;
4026     } else if (use->is_MemBar()) {
4027       assert(!use->is_Initialize(), "initializing stores should not be moved");
4028       if (use->req() > MemBarNode::Precedent &&
4029           use->in(MemBarNode::Precedent) == n) {
4030         // Don't move related membars.
4031         record_for_optimizer(use);
4032         continue;
4033       }
4034       tp = use->as_MemBar()->adr_type()->isa_ptr();
4035       if ((tp != nullptr && C->get_alias_index(tp) == alias_idx) ||
4036           alias_idx == general_idx) {
4037         continue; // Nothing to do
4038       }
4039       // Move to general memory slice.
4040       uint orig_uniq = C->unique();
4041       Node* m = find_inst_mem(n, general_idx, orig_phis);
4042       assert(orig_uniq == C->unique(), "no new nodes");
4043       igvn->hash_delete(use);
4044       imax -= use->replace_edge(n, m, igvn);
4045       igvn->hash_insert(use);
4046       record_for_optimizer(use);
4047       --i;
4048 #ifdef ASSERT
4049     } else if (use->is_Mem()) {
4050       // Memory nodes should have new memory input.
4051       tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
4052       assert(tp != nullptr, "ptr type");
4053       int idx = C->get_alias_index(tp);
4054       assert(get_map(use->_idx) != nullptr || idx == alias_idx,
4055              "Following memory nodes should have new memory input or be on the same memory slice");
4056     } else if (use->is_Phi()) {
4057       // Phi nodes should be split and moved already.
4058       tp = use->as_Phi()->adr_type()->isa_ptr();
4059       assert(tp != nullptr, "ptr type");
4060       int idx = C->get_alias_index(tp);
4061       assert(idx == alias_idx, "Following Phi nodes should be on the same memory slice");
4062     } else {
4063       use->dump();
4064       assert(false, "should not be here");
4065 #endif
4066     }
4067   }
4068 }
4069 
4070 //
4071 // Search the memory chain of "orig_mem" to find a MemNode whose address
4072 // type has the specified alias index.
4073 //
4074 #define FIND_INST_MEM_RECURSION_DEPTH_LIMIT 1000
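     // The search below recurses through memory Phis and MergeMems; the depth
     // limit guards against native stack overflow on pathological memory
     // graphs by bailing out to a retry without (iterative) escape analysis.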
4075 Node* ConnectionGraph::find_inst_mem(Node *orig_mem, int alias_idx, GrowableArray<PhiNode *>  &orig_phis, uint rec_depth) {
4076   if (rec_depth > FIND_INST_MEM_RECURSION_DEPTH_LIMIT) {
4077     _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4078     return nullptr;
4079   }
4080   if (orig_mem == nullptr) {
4081     return orig_mem;
4082   }
4083   Compile* C = _compile;
4084   PhaseGVN* igvn = _igvn;
4085   const TypeOopPtr *toop = C->get_adr_type(alias_idx)->isa_oopptr();
4086   bool is_instance = (toop != nullptr) && toop->is_known_instance();
4087   Node *start_mem = C->start()->proj_out_or_null(TypeFunc::Memory);
4088   Node *prev = nullptr;
4089   Node *result = orig_mem;
4090   while (prev != result) {
4091     prev = result;
4092     if (result == start_mem) {
4093       break;  // hit one of our sentinels
4094     }
4095     if (result->is_Mem()) {
4096       const Type *at = igvn->type(result->in(MemNode::Address));
4097       if (at == Type::TOP) {
4098         break; // Dead
4099       }
4100       assert (at->isa_ptr() != nullptr, "pointer type required.");
4101       int idx = C->get_alias_index(at->is_ptr());
4102       if (idx == alias_idx) {
4103         break; // Found
4104       }
4105       if (!is_instance && (at->isa_oopptr() == nullptr ||
4106                            !at->is_oopptr()->is_known_instance())) {
4107         break; // Do not skip store to general memory slice.
4108       }
4109       result = result->in(MemNode::Memory);
4110     }
4111     if (!is_instance) {
4112       continue;  // don't search further for non-instance types
4113     }
4114     // skip over a call which does not affect this memory slice
4115     if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) {
4116       Node *proj_in = result->in(0);
4117       if (proj_in->is_Allocate() && proj_in->_idx == (uint)toop->instance_id()) {
4118         break;  // hit one of our sentinels
4119       } else if (proj_in->is_Call()) {
4120         // ArrayCopy node processed here as well
4121         CallNode *call = proj_in->as_Call();
4122         if (!call->may_modify(toop, igvn)) {
4123           result = call->in(TypeFunc::Memory);
4124         }
4125       } else if (proj_in->is_Initialize()) {
4126         AllocateNode* alloc = proj_in->as_Initialize()->allocation();
4127         // Stop if this is the initialization for the object instance which
4128         // contains this memory slice, otherwise skip over it.
4129         if (alloc == nullptr || alloc->_idx != (uint)toop->instance_id()) {
4130           result = proj_in->in(TypeFunc::Memory);
4131         }
4132       } else if (proj_in->is_MemBar()) {
4133         // Check if there is an array copy for a clone
4134         // Step over GC barrier when ReduceInitialCardMarks is disabled
4135         BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
4136         Node* control_proj_ac = bs->step_over_gc_barrier(proj_in->in(0));
4137 
4138         if (control_proj_ac->is_Proj() && control_proj_ac->in(0)->is_ArrayCopy()) {
4139           // Stop if it is a clone
4140           ArrayCopyNode* ac = control_proj_ac->in(0)->as_ArrayCopy();
4141           if (ac->may_modify(toop, igvn)) {
4142             break;
4143           }
4144         }
4145         result = proj_in->in(TypeFunc::Memory);
4146       }
4147     } else if (result->is_MergeMem()) {
4148       MergeMemNode *mmem = result->as_MergeMem();
4149       result = step_through_mergemem(mmem, alias_idx, toop);
4150       if (result == mmem->base_memory()) {
4151         // Didn't find instance memory, search through general slice recursively.
4152         result = mmem->memory_at(C->get_general_index(alias_idx));
4153         result = find_inst_mem(result, alias_idx, orig_phis, rec_depth + 1);
4154         if (C->failing()) {
4155           return nullptr;
4156         }
4157         mmem->set_memory_at(alias_idx, result);
4158       }
4159     } else if (result->is_Phi() &&
4160                C->get_alias_index(result->as_Phi()->adr_type()) != alias_idx) {
4161       Node *un = result->as_Phi()->unique_input(igvn);
4162       if (un != nullptr) {
4163         orig_phis.append_if_missing(result->as_Phi());
4164         result = un;
4165       } else {
4166         break;
4167       }
4168     } else if (result->is_ClearArray()) {
4169       if (!ClearArrayNode::step_through(&result, (uint)toop->instance_id(), igvn)) {
4170         // Cannot bypass the initialization of the instance
4171         // we are looking for.
4172         break;
4173       }
4174       // Otherwise skip it (the call updated 'result' value).
4175     } else if (result->Opcode() == Op_SCMemProj) {
4176       Node* mem = result->in(0);
4177       Node* adr = nullptr;
4178       if (mem->is_LoadStore()) {
4179         adr = mem->in(MemNode::Address);
4180       } else {
4181         assert(mem->Opcode() == Op_EncodeISOArray ||
4182                mem->Opcode() == Op_StrCompressedCopy, "sanity");
4183         adr = mem->in(3); // Memory edge corresponds to destination array
4184       }
4185       const Type *at = igvn->type(adr);
4186       if (at != Type::TOP) {
4187         assert(at->isa_ptr() != nullptr, "pointer type required.");
4188         int idx = C->get_alias_index(at->is_ptr());
4189         if (idx == alias_idx) {
4190           // Assert in debug mode
4191           assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field");
4192           break; // In product mode return SCMemProj node
4193         }
4194       }
4195       result = mem->in(MemNode::Memory);
4196     } else if (result->Opcode() == Op_StrInflatedCopy) {
4197       Node* adr = result->in(3); // Memory edge corresponds to destination array
4198       const Type *at = igvn->type(adr);
4199       if (at != Type::TOP) {
4200         assert(at->isa_ptr() != nullptr, "pointer type required.");
4201         int idx = C->get_alias_index(at->is_ptr());
4202         if (idx == alias_idx) {
4203           // Assert in debug mode
4204           assert(false, "Object is not scalar replaceable if a StrInflatedCopy node accesses its field");
4205           break; // In product mode return SCMemProj node
4206         }
4207       }
4208       result = result->in(MemNode::Memory);
4209     }
4210   }
4211   if (result->is_Phi()) {
4212     PhiNode *mphi = result->as_Phi();
4213     assert(mphi->bottom_type() == Type::MEMORY, "memory phi required");
4214     const TypePtr *t = mphi->adr_type();
4215     if (!is_instance) {
4216       // Push all non-instance Phis on the orig_phis worklist to update inputs
4217       // during Phase 4 if needed.
4218       orig_phis.append_if_missing(mphi);
4219     } else if (C->get_alias_index(t) != alias_idx) {
4220       // Create a new Phi with the specified alias index type.
4221       result = split_memory_phi(mphi, alias_idx, orig_phis, rec_depth + 1);
4222     }
4223   }
4224   // The result is either a MemNode, a PhiNode or an InitializeNode.
4225   return result;
4226 }
4227 
4228 //
4229 //  Convert the types of non-escaped object to instance types where possible,
4230 //  propagate the new type information through the graph, and update memory
4231 //  edges and MergeMem inputs to reflect the new type.
4232 //
4233 //  We start with allocations (and calls which may be allocations) on alloc_worklist.
4234 //  The processing is done in 4 phases:
4235 //
4236 //  Phase 1:  Process possible allocations from alloc_worklist.  Create instance
4237 //            types for the CheckCastPP for allocations where possible.
4238 //            Propagate the new types through users as follows:
4239 //               casts and Phi:  push users on alloc_worklist
4240 //               AddP:  cast Base and Address inputs to the instance type
4241 //                      push any AddP users on alloc_worklist and push any memnode
4242 //                      users onto memnode_worklist.
4243 //  Phase 2:  Process MemNodes from memnode_worklist. Compute the new address
4244 //            type and search the Memory chain for a store with the appropriate
4245 //            address type.  If a Phi is found, create a new version with
4246 //            the appropriate memory slices from each of the Phi inputs.
4247 //            For stores, process the users as follows:
4248 //               MemNode:  push on memnode_worklist
4249 //               MergeMem: push on mergemem_worklist
4250 //  Phase 3:  Process MergeMem nodes from mergemem_worklist.  Walk each memory slice
4251 //            moving the first node encountered of each instance type to the
4252 //            input corresponding to its alias index (its appropriate memory slice).
4254 //  Phase 4:  Update the inputs of non-instance memory Phis and the Memory input of memnodes.
4255 //
4256 // In the following example, the CheckCastPP nodes are the cast of allocation
4257 // results and the allocation of node 29 is non-escaped and eligible to be an
4258 // instance type.
4259 //
4260 // We start with:
4261 //
4262 //     7 Parm #memory
4263 //    10  ConI  "12"
4264 //    19  CheckCastPP   "Foo"
4265 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
4266 //    29  CheckCastPP   "Foo"
4267 //    30  AddP  _ 29 29 10  Foo+12  alias_index=4
4268 //
4269 //    40  StoreP  25   7  20   ... alias_index=4
4270 //    50  StoreP  35  40  30   ... alias_index=4
4271 //    60  StoreP  45  50  20   ... alias_index=4
4272 //    70  LoadP    _  60  30   ... alias_index=4
4273 //    80  Phi     75  50  60   Memory alias_index=4
4274 //    90  LoadP    _  80  30   ... alias_index=4
4275 //   100  LoadP    _  80  20   ... alias_index=4
4276 //
4277 //
4278 // Phase 1 creates an instance type for node 29 assigning it an instance id of 24
4279 // and creating a new alias index for node 30.  This gives:
4280 //
4281 //     7 Parm #memory
4282 //    10  ConI  "12"
4283 //    19  CheckCastPP   "Foo"
4284 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
4285 //    29  CheckCastPP   "Foo"  iid=24
4286 //    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
4287 //
4288 //    40  StoreP  25   7  20   ... alias_index=4
4289 //    50  StoreP  35  40  30   ... alias_index=6
4290 //    60  StoreP  45  50  20   ... alias_index=4
4291 //    70  LoadP    _  60  30   ... alias_index=6
4292 //    80  Phi     75  50  60   Memory alias_index=4
4293 //    90  LoadP    _  80  30   ... alias_index=6
4294 //   100  LoadP    _  80  20   ... alias_index=4
4295 //
4296 // In phase 2, new memory inputs are computed for the loads and stores,
4297 // and a new version of the phi is created.  In phase 4, the inputs to
4298 // node 80 are updated and then the memory nodes are updated with the
4299 // values computed in phase 2.  This results in:
4300 //
4301 //     7 Parm #memory
4302 //    10  ConI  "12"
4303 //    19  CheckCastPP   "Foo"
4304 //    20  AddP  _ 19 19 10  Foo+12  alias_index=4
4305 //    29  CheckCastPP   "Foo"  iid=24
4306 //    30  AddP  _ 29 29 10  Foo+12  alias_index=6  iid=24
4307 //
4308 //    40  StoreP  25  7   20   ... alias_index=4
4309 //    50  StoreP  35  7   30   ... alias_index=6
4310 //    60  StoreP  45  40  20   ... alias_index=4
4311 //    70  LoadP    _  50  30   ... alias_index=6
4312 //    80  Phi     75  40  60   Memory alias_index=4
4313 //   120  Phi     75  50  50   Memory alias_index=6
4314 //    90  LoadP    _ 120  30   ... alias_index=6
4315 //   100  LoadP    _  80  20   ... alias_index=4
4316 //
4317 void ConnectionGraph::split_unique_types(GrowableArray<Node *>  &alloc_worklist,
4318                                          GrowableArray<ArrayCopyNode*> &arraycopy_worklist,
4319                                          GrowableArray<MergeMemNode*> &mergemem_worklist,
4320                                          Unique_Node_List &reducible_merges) {
4321   DEBUG_ONLY(Unique_Node_List reduced_merges;)
4322   GrowableArray<Node *>  memnode_worklist;
4323   GrowableArray<PhiNode *>  orig_phis;
4324   PhaseIterGVN  *igvn = _igvn;
4325   uint new_index_start = (uint) _compile->num_alias_types();
4326   VectorSet visited;
4327   ideal_nodes.clear(); // Reset for use with set_map/get_map.
4328   uint unique_old = _compile->unique();
4329 
4330   //  Phase 1:  Process possible allocations from alloc_worklist.
4331   //  Create instance types for the CheckCastPP for allocations where possible.
4332   //
4333   // (Note: don't forget to change the order of the second AddP node on
4334   //  the alloc_worklist if the order of the worklist processing is changed,
4335   //  see the comment in find_second_addp().)
4336   //
4337   while (alloc_worklist.length() != 0) {
4338     Node *n = alloc_worklist.pop();
4339     uint ni = n->_idx;
4340     if (n->is_Call()) {
4341       CallNode *alloc = n->as_Call();
4342       // copy escape information to call node
4343       PointsToNode* ptn = ptnode_adr(alloc->_idx);
4344       PointsToNode::EscapeState es = ptn->escape_state();
4345       // We have an allocation or call which returns a Java object,
4346       // see if it is non-escaped.
4347       if (es != PointsToNode::NoEscape || !ptn->scalar_replaceable()) {
4348         continue;
4349       }
4350       // Find CheckCastPP for the allocate or for the return value of a call
4351       n = alloc->result_cast();
4352       if (n == nullptr) {            // No uses except Initialize node
4353         if (alloc->is_Allocate()) {
4354           // Set the scalar_replaceable flag for allocation
4355           // so it could be eliminated if it has no uses.
4356           alloc->as_Allocate()->_is_scalar_replaceable = true;
4357         }
4358         continue;
4359       }
4360       if (!n->is_CheckCastPP()) { // not a unique CheckCastPP.
4361         // We can reach here in the Allocate case if one Initialize is associated with many Allocates.
4362         if (alloc->is_Allocate()) {
4363           alloc->as_Allocate()->_is_scalar_replaceable = false;
4364         }
4365         continue;
4366       }
4367 
4368       // The inline code for Object.clone() casts the allocation result to
4369       // java.lang.Object and then to the actual type of the allocated
4370       // object. Detect this case and use the second cast.
4371       // Also detect j.l.reflect.Array.newInstance(jobject, jint) case when
4372       // the allocation result is cast to java.lang.Object and then
4373       // to the actual Array type.
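           //
           // Hypothetical Java sketches of the two shapes (for illustration
           // only):
           //
           //   Foo f = (Foo) foo.clone();                        // cast to Object, then to Foo
           //   Object a = Array.newInstance(componentType, len); // cast to Object, then to the array type
           //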
4374       if (alloc->is_Allocate() && n->as_Type()->type() == TypeInstPtr::NOTNULL
4375           && (alloc->is_AllocateArray() ||
4376               igvn->type(alloc->in(AllocateNode::KlassNode)) != TypeInstKlassPtr::OBJECT)) {
4377         Node *cast2 = nullptr;
4378         for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4379           Node *use = n->fast_out(i);
4380           if (use->is_CheckCastPP()) {
4381             cast2 = use;
4382             break;
4383           }
4384         }
4385         if (cast2 != nullptr) {
4386           n = cast2;
4387         } else {
4388           // Non-scalar replaceable if the allocation type is unknown statically
4389           // (reflection allocation); the object can't be restored during
4390           // deoptimization without the precise type.
4391           continue;
4392         }
4393       }
4394 
4395       const TypeOopPtr *t = igvn->type(n)->isa_oopptr();
4396       if (t == nullptr) {
4397         continue;  // not a TypeOopPtr
4398       }
4399       if (!t->klass_is_exact()) {
4400         continue; // not a unique type
4401       }
4402       if (alloc->is_Allocate()) {
4403         // Set the scalar_replaceable flag for allocation
4404         // so it could be eliminated.
4405         alloc->as_Allocate()->_is_scalar_replaceable = true;
4406       }
4407       set_escape_state(ptnode_adr(n->_idx), es NOT_PRODUCT(COMMA trace_propagate_message(ptn))); // CheckCastPP escape state
4408       // in order for an object to be scalar-replaceable, it must be:
4409       //   - a direct allocation (not a call returning an object)
4410       //   - non-escaping
4411       //   - eligible to be a unique type
4412       //   - not determined to be ineligible by escape analysis
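           //
           // For example (a hypothetical source shape): in
           //   Point p = new Point(x, y); return p.x + p.y;
           // 'p' meets all of the above and the allocation can be eliminated.
           //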
4413       set_map(alloc, n);
4414       set_map(n, alloc);
4415       const TypeOopPtr* tinst = t->cast_to_instance_id(ni);
4416       igvn->hash_delete(n);
4417       igvn->set_type(n,  tinst);
4418       n->raise_bottom_type(tinst);
4419       igvn->hash_insert(n);
4420       record_for_optimizer(n);
4421       // Allocate an alias index for the header fields. Accesses to
4422       // the header emitted during macro expansion wouldn't have
4423       // correct memory state otherwise.
4424       _compile->get_alias_index(tinst->add_offset(oopDesc::mark_offset_in_bytes()));
4425       _compile->get_alias_index(tinst->add_offset(oopDesc::klass_offset_in_bytes()));
4426       if (alloc->is_Allocate() && (t->isa_instptr() || t->isa_aryptr())) {
4427 
4428         // First, put on the worklist all Field edges from the Connection Graph,
4429         // which is more accurate than putting immediate users from the Ideal Graph.
4430         for (EdgeIterator e(ptn); e.has_next(); e.next()) {
4431           PointsToNode* tgt = e.get();
4432           if (tgt->is_Arraycopy()) {
4433             continue;
4434           }
4435           Node* use = tgt->ideal_node();
4436           assert(tgt->is_Field() && use->is_AddP(),
4437                  "only AddP nodes are Field edges in CG");
4438           if (use->outcnt() > 0) { // Don't process dead nodes
4439             Node* addp2 = find_second_addp(use, use->in(AddPNode::Base));
4440             if (addp2 != nullptr) {
4441               assert(alloc->is_AllocateArray(),"array allocation was expected");
4442               alloc_worklist.append_if_missing(addp2);
4443             }
4444             alloc_worklist.append_if_missing(use);
4445           }
4446         }
4447 
4448         // An allocation may have an Initialize which has raw stores. Scan
4449         // the users of the raw allocation result and push AddP users
4450         // on alloc_worklist.
4451         Node *raw_result = alloc->proj_out_or_null(TypeFunc::Parms);
4452         assert (raw_result != nullptr, "must have an allocation result");
4453         for (DUIterator_Fast imax, i = raw_result->fast_outs(imax); i < imax; i++) {
4454           Node *use = raw_result->fast_out(i);
4455           if (use->is_AddP() && use->outcnt() > 0) { // Don't process dead nodes
4456             Node* addp2 = find_second_addp(use, raw_result);
4457             if (addp2 != nullptr) {
4458               assert(alloc->is_AllocateArray(),"array allocation was expected");
4459               alloc_worklist.append_if_missing(addp2);
4460             }
4461             alloc_worklist.append_if_missing(use);
4462           } else if (use->is_MemBar()) {
4463             memnode_worklist.append_if_missing(use);
4464           }
4465         }
4466       }
4467     } else if (n->is_AddP()) {
4468       if (has_reducible_merge_base(n->as_AddP(), reducible_merges)) {
4469         // This AddP will go away when we reduce the Phi.
4470         continue;
4471       }
4472       Node* addp_base = get_addp_base(n);
4473       JavaObjectNode* jobj = unique_java_object(addp_base);
4474       if (jobj == nullptr || jobj == phantom_obj) {
4475 #ifdef ASSERT
4476         ptnode_adr(get_addp_base(n)->_idx)->dump();
4477         ptnode_adr(n->_idx)->dump();
4478         assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
4479 #endif
4480         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4481         return;
4482       }
4483       Node *base = get_map(jobj->idx());  // CheckCastPP node
4484       if (!split_AddP(n, base)) continue; // wrong type from dead path
4485     } else if (n->is_Phi() ||
4486                n->is_CheckCastPP() ||
4487                n->is_EncodeP() ||
4488                n->is_DecodeN() ||
4489                (n->is_ConstraintCast() && n->Opcode() == Op_CastPP)) {
4490       if (visited.test_set(n->_idx)) {
4491         assert(n->is_Phi(), "loops only through Phi's");
4492         continue;  // already processed
4493       }
4494       // Reducible Phi's will be removed from the graph after split_unique_types
4495       // finishes. For now we just try to split out the SR inputs of the merge.
4496       Node* parent = n->in(1);
4497       if (reducible_merges.member(n)) {
4498         reduce_phi(n->as_Phi(), alloc_worklist, memnode_worklist);
4499 #ifdef ASSERT
4500         if (VerifyReduceAllocationMerges) {
4501           reduced_merges.push(n);
4502         }
4503 #endif
4504         continue;
4505       } else if (reducible_merges.member(parent)) {
4506         // 'n' is a user of a reducible merge (a Phi). It will be simplified as
4507         // part of reduce_merge.
4508         continue;
4509       }
4510       JavaObjectNode* jobj = unique_java_object(n);
4511       if (jobj == nullptr || jobj == phantom_obj) {
4512 #ifdef ASSERT
4513         ptnode_adr(n->_idx)->dump();
4514         assert(jobj != nullptr && jobj != phantom_obj, "escaped allocation");
4515 #endif
4516         _compile->record_failure(_invocation > 0 ? C2Compiler::retry_no_iterative_escape_analysis() : C2Compiler::retry_no_escape_analysis());
4517         return;
4518       } else {
4519         Node *val = get_map(jobj->idx());   // CheckCastPP node
4520         TypeNode *tn = n->as_Type();
4521         const TypeOopPtr* tinst = igvn->type(val)->isa_oopptr();
4522         assert(tinst != nullptr && tinst->is_known_instance() &&
4523                tinst->instance_id() == jobj->idx(), "instance type expected.");
4524 
4525         const Type *tn_type = igvn->type(tn);
4526         const TypeOopPtr *tn_t;
4527         if (tn_type->isa_narrowoop()) {
4528           tn_t = tn_type->make_ptr()->isa_oopptr();
4529         } else {
4530           tn_t = tn_type->isa_oopptr();
4531         }
4532         if (tn_t != nullptr && tinst->maybe_java_subtype_of(tn_t)) {
4533           if (tn_type->isa_narrowoop()) {
4534             tn_type = tinst->make_narrowoop();
4535           } else {
4536             tn_type = tinst;
4537           }
4538           igvn->hash_delete(tn);
4539           igvn->set_type(tn, tn_type);
4540           tn->set_type(tn_type);
4541           igvn->hash_insert(tn);
4542           record_for_optimizer(n);
4543         } else {
4544           assert(tn_type == TypePtr::NULL_PTR ||
4545                  (tn_t != nullptr && !tinst->maybe_java_subtype_of(tn_t)),
4546                  "unexpected type");
4547           continue; // Skip dead path with different type
4548         }
4549       }
4550     } else {
4551       debug_only(n->dump();)
4552       assert(false, "EA: unexpected node");
4553       continue;
4554     }
4555     // push allocation's users on appropriate worklist
4556     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4557       Node *use = n->fast_out(i);
4558       if (use->is_Mem() && use->in(MemNode::Address) == n) {
4559         // Load/store to instance's field
4560         memnode_worklist.append_if_missing(use);
4561       } else if (use->is_MemBar()) {
4562         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4563           memnode_worklist.append_if_missing(use);
4564         }
4565       } else if (use->is_AddP() && use->outcnt() > 0) { // No dead nodes
4566         Node* addp2 = find_second_addp(use, n);
4567         if (addp2 != nullptr) {
4568           alloc_worklist.append_if_missing(addp2);
4569         }
4570         alloc_worklist.append_if_missing(use);
4571       } else if (use->is_Phi() ||
4572                  use->is_CheckCastPP() ||
4573                  use->is_EncodeNarrowPtr() ||
4574                  use->is_DecodeNarrowPtr() ||
4575                  (use->is_ConstraintCast() && use->Opcode() == Op_CastPP)) {
4576         alloc_worklist.append_if_missing(use);
4577 #ifdef ASSERT
4578       } else if (use->is_Mem()) {
4579         assert(use->in(MemNode::Address) != n, "EA: missing allocation reference path");
4580       } else if (use->is_MergeMem()) {
4581         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4582       } else if (use->is_SafePoint()) {
4583         // Look for MergeMem nodes for calls which reference unique allocation
4584         // (through CheckCastPP nodes) even for debug info.
4585         Node* m = use->in(TypeFunc::Memory);
4586         if (m->is_MergeMem()) {
4587           assert(mergemem_worklist.contains(m->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4588         }
4589       } else if (use->Opcode() == Op_EncodeISOArray) {
4590         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4591           // EncodeISOArray overwrites destination array
4592           memnode_worklist.append_if_missing(use);
4593         }
4594       } else {
4595         uint op = use->Opcode();
4596         if ((op == Op_StrCompressedCopy || op == Op_StrInflatedCopy) &&
4597             (use->in(MemNode::Memory) == n)) {
4598           // They overwrite the memory edge corresponding to the destination array.
4599           memnode_worklist.append_if_missing(use);
4600         } else if (!(op == Op_CmpP || op == Op_Conv2B ||
4601               op == Op_CastP2X ||
4602               op == Op_FastLock || op == Op_AryEq ||
4603               op == Op_StrComp || op == Op_CountPositives ||
4604               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
4605               op == Op_StrEquals || op == Op_VectorizedHashCode ||
4606               op == Op_StrIndexOf || op == Op_StrIndexOfChar ||
4607               op == Op_SubTypeCheck ||
4608               BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use))) {
4609           n->dump();
4610           use->dump();
4611           assert(false, "EA: missing allocation reference path");
4612         }
4613 #endif
4614       }
4615     }
4616 
4617   }
4618 
4619 #ifdef ASSERT
4620   if (VerifyReduceAllocationMerges) {
4621     for (uint i = 0; i < reducible_merges.size(); i++) {
4622       Node* phi = reducible_merges.at(i);
4623 
4624       if (!reduced_merges.member(phi)) {
4625         phi->dump(2);
4626         phi->dump(-2);
4627         assert(false, "This reducible merge wasn't reduced.");
4628       }
4629 
4630       // At this point reducible Phis shouldn't have AddP users anymore; only SafePoints or Casts.
4631       for (DUIterator_Fast jmax, j = phi->fast_outs(jmax); j < jmax; j++) {
4632         Node* use = phi->fast_out(j);
4633         if (!use->is_SafePoint() && !use->is_CastPP()) {
4634           phi->dump(2);
4635           phi->dump(-2);
4636           assert(false, "Unexpected user of reducible Phi -> %d:%s:%d", use->_idx, use->Name(), use->outcnt());
4637         }
4638       }
4639     }
4640   }
4641 #endif
4642 
4643   // Go over all ArrayCopy nodes and if one of the inputs has a unique
4644   // type, record it in the ArrayCopy node so we know what memory this
4645   // node uses/modifies.
4646   for (int next = 0; next < arraycopy_worklist.length(); next++) {
4647     ArrayCopyNode* ac = arraycopy_worklist.at(next);
4648     Node* dest = ac->in(ArrayCopyNode::Dest);
4649     if (dest->is_AddP()) {
4650       dest = get_addp_base(dest);
4651     }
4652     JavaObjectNode* jobj = unique_java_object(dest);
4653     if (jobj != nullptr) {
4654       Node *base = get_map(jobj->idx());
4655       if (base != nullptr) {
4656         const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
4657         ac->_dest_type = base_t;
4658       }
4659     }
4660     Node* src = ac->in(ArrayCopyNode::Src);
4661     if (src->is_AddP()) {
4662       src = get_addp_base(src);
4663     }
4664     jobj = unique_java_object(src);
4665     if (jobj != nullptr) {
4666       Node* base = get_map(jobj->idx());
4667       if (base != nullptr) {
4668         const TypeOopPtr *base_t = _igvn->type(base)->isa_oopptr();
4669         ac->_src_type = base_t;
4670       }
4671     }
4672   }
4673 
4674   // New alias types were created in split_AddP().
4675   uint new_index_end = (uint) _compile->num_alias_types();
4676 
4677   //  Phase 2:  Process MemNodes from memnode_worklist. Compute the new address
4678   //            types and new values for the Memory inputs (the Memory inputs are
4679   //            not actually updated until Phase 4).
4680   if (memnode_worklist.length() == 0) {
4681     return;  // nothing to do
       }
4682   while (memnode_worklist.length() != 0) {
4683     Node *n = memnode_worklist.pop();
4684     if (visited.test_set(n->_idx)) {
4685       continue;
4686     }
4687     if (n->is_Phi() || n->is_ClearArray()) {
4688       // we don't need to do anything, but the users must be pushed
4689     } else if (n->is_MemBar()) { // Initialize, MemBar nodes
4690       // we don't need to do anything, but the users must be pushed
4691       n = n->as_MemBar()->proj_out_or_null(TypeFunc::Memory);
4692       if (n == nullptr) {
4693         continue;
4694       }
4695     } else if (n->is_CallLeaf()) {
4696       // Runtime calls with narrow memory input (no MergeMem node)
4697       // get the memory projection
4698       n = n->as_Call()->proj_out_or_null(TypeFunc::Memory);
4699       if (n == nullptr) {
4700         continue;
4701       }
4702     } else if (n->Opcode() == Op_StrCompressedCopy ||
4703                n->Opcode() == Op_EncodeISOArray) {
4704       // get the memory projection
4705       n = n->find_out_with(Op_SCMemProj);
4706       assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4707     } else {
4708       assert(n->is_Mem(), "memory node required.");
4709       Node *addr = n->in(MemNode::Address);
4710       const Type *addr_t = igvn->type(addr);
4711       if (addr_t == Type::TOP) {
4712         continue;
4713       }
4714       assert (addr_t->isa_ptr() != nullptr, "pointer type required.");
4715       int alias_idx = _compile->get_alias_index(addr_t->is_ptr());
4716       assert ((uint)alias_idx < new_index_end, "wrong alias index");
4717       Node *mem = find_inst_mem(n->in(MemNode::Memory), alias_idx, orig_phis);
4718       if (_compile->failing()) {
4719         return;
4720       }
4721       if (mem != n->in(MemNode::Memory)) {
4722         // We delay the memory edge update since we need the old one in
4723         // the MergeMem code below when instance memory slices are separated.
4724         set_map(n, mem);
4725       }
4726       if (n->is_Load()) {
4727         continue;  // don't push users
4728       } else if (n->is_LoadStore()) {
4729         // get the memory projection
4730         n = n->find_out_with(Op_SCMemProj);
4731         assert(n != nullptr && n->Opcode() == Op_SCMemProj, "memory projection required");
4732       }
4733     }
4734     // push user on appropriate worklist
4735     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4736       Node *use = n->fast_out(i);
4737       if (use->is_Phi() || use->is_ClearArray()) {
4738         memnode_worklist.append_if_missing(use);
4739       } else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
4740         memnode_worklist.append_if_missing(use);
4741       } else if (use->is_MemBar() || use->is_CallLeaf()) {
4742         if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge
4743           memnode_worklist.append_if_missing(use);
4744         }
4745 #ifdef ASSERT
4746       } else if (use->is_Mem()) {
4747         assert(use->in(MemNode::Memory) != n, "EA: missing memory path");
4748       } else if (use->is_MergeMem()) {
4749         assert(mergemem_worklist.contains(use->as_MergeMem()), "EA: missing MergeMem node in the worklist");
4750       } else if (use->Opcode() == Op_EncodeISOArray) {
4751         if (use->in(MemNode::Memory) == n || use->in(3) == n) {
4752           // EncodeISOArray overwrites destination array
4753           memnode_worklist.append_if_missing(use);
4754         }
4755       } else {
4756         uint op = use->Opcode();
4757         if ((use->in(MemNode::Memory) == n) &&
4758             (op == Op_StrCompressedCopy || op == Op_StrInflatedCopy)) {
4759             // They overwrite the memory edge corresponding to the destination array.
4760           memnode_worklist.append_if_missing(use);
4761         } else if (!(BarrierSet::barrier_set()->barrier_set_c2()->is_gc_barrier_node(use) ||
4762               op == Op_AryEq || op == Op_StrComp || op == Op_CountPositives ||
4763               op == Op_StrCompressedCopy || op == Op_StrInflatedCopy || op == Op_VectorizedHashCode ||
4764               op == Op_StrEquals || op == Op_StrIndexOf || op == Op_StrIndexOfChar)) {
4765           n->dump();
4766           use->dump();
4767           assert(false, "EA: missing memory path");
4768         }
4769 #endif
4770       }
4771     }
4772   }
4773 
4774   //  Phase 3:  Process MergeMem nodes from mergemem_worklist.
4775   //            Walk each memory slice moving the first node encountered of each
4776   //            instance type to the input corresponding to its alias index.
4777   uint length = mergemem_worklist.length();
4778   for (uint next = 0; next < length; ++next) {
4779     MergeMemNode* nmm = mergemem_worklist.at(next);
4780     assert(!visited.test_set(nmm->_idx), "should not be visited before");
4781     // Note: we don't want to use MergeMemStream here because we only want to
4782     // scan inputs which exist at the start, not ones we add during processing.
4783     // Note 2: MergeMem may already contain instance memory slices added
4784     // during find_inst_mem() call when memory nodes were processed above.
4785     igvn->hash_delete(nmm);
4786     uint nslices = MIN2(nmm->req(), new_index_start);
4787     for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
4788       Node* mem = nmm->in(i);
4789       Node* cur = nullptr;
4790       if (mem == nullptr || mem->is_top()) {
4791         continue;
4792       }
4793       // First, update mergemem by moving memory nodes to corresponding slices
4794       // if their type became more precise since this mergemem was created.
4795       while (mem->is_Mem()) {
4796         const Type *at = igvn->type(mem->in(MemNode::Address));
4797         if (at != Type::TOP) {
4798           assert (at->isa_ptr() != nullptr, "pointer type required.");
4799           uint idx = (uint)_compile->get_alias_index(at->is_ptr());
4800           if (idx == i) {
4801             if (cur == nullptr) {
4802               cur = mem;
4803             }
4804           } else {
4805             if (idx >= nmm->req() || nmm->is_empty_memory(nmm->in(idx))) {
4806               nmm->set_memory_at(idx, mem);
4807             }
4808           }
4809         }
4810         mem = mem->in(MemNode::Memory);
4811       }
4812       nmm->set_memory_at(i, (cur != nullptr) ? cur : mem);
4813       // Find any instance of the current type if we haven't already
4814       // encountered a memory slice of the instance along the memory chain.
4815       for (uint ni = new_index_start; ni < new_index_end; ni++) {
4816         if ((uint)_compile->get_general_index(ni) == i) {
4817           Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
4818           if (nmm->is_empty_memory(m)) {
4819             Node* result = find_inst_mem(mem, ni, orig_phis);
4820             if (_compile->failing()) {
4821               return;
4822             }
4823             nmm->set_memory_at(ni, result);
4824           }
4825         }
4826       }
4827     }
4828     // Find the rest of the instance values.
4829     for (uint ni = new_index_start; ni < new_index_end; ni++) {
4830       const TypeOopPtr *tinst = _compile->get_adr_type(ni)->isa_oopptr();
4831       Node* result = step_through_mergemem(nmm, ni, tinst);
4832       if (result == nmm->base_memory()) {
4833         // Didn't find instance memory, search through general slice recursively.
4834         result = nmm->memory_at(_compile->get_general_index(ni));
4835         result = find_inst_mem(result, ni, orig_phis);
4836         if (_compile->failing()) {
4837           return;
4838         }
4839         nmm->set_memory_at(ni, result);
4840       }
4841     }
4842 
4843     // If we have crossed the 3/4 point of the max node limit it's too risky
4844     // to continue with EA/SR because we might hit the max node limit.
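         // (For example, assuming the default MaxNodeLimit of 80000, this
         // bails out once roughly 60000 nodes are live.)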
4845     if (_compile->live_nodes() >= _compile->max_node_limit() * 0.75) {
4846       if (_compile->do_reduce_allocation_merges()) {
4847         _compile->record_failure(C2Compiler::retry_no_reduce_allocation_merges());
4848       } else if (_invocation > 0) {
4849         _compile->record_failure(C2Compiler::retry_no_iterative_escape_analysis());
4850       } else {
4851         _compile->record_failure(C2Compiler::retry_no_escape_analysis());
4852       }
4853       return;
4854     }
4855 
4856     igvn->hash_insert(nmm);
4857     record_for_optimizer(nmm);
4858   }
4859 
4860   //  Phase 4:  Update the inputs of non-instance memory Phis and
4861   //            the Memory input of memnodes
4862   // First update the inputs of any non-instance Phi's from
4863   // which we split out an instance Phi.  Note we don't have
4864   // to recursively process Phi's encountered on the input memory
4865 // chains as is done in split_memory_phi() since they will
4866   // also be processed here.
4867   for (int j = 0; j < orig_phis.length(); j++) {
4868     PhiNode *phi = orig_phis.at(j);
4869     int alias_idx = _compile->get_alias_index(phi->adr_type());
4870     igvn->hash_delete(phi);
4871     for (uint i = 1; i < phi->req(); i++) {
4872       Node *mem = phi->in(i);
4873       Node *new_mem = find_inst_mem(mem, alias_idx, orig_phis);
4874       if (_compile->failing()) {
4875         return;
4876       }
4877       if (mem != new_mem) {
4878         phi->set_req(i, new_mem);
4879       }
4880     }
4881     igvn->hash_insert(phi);
4882     record_for_optimizer(phi);
4883   }
4884 
4885   // Update the memory inputs of MemNodes with the value we computed
4886   // in Phase 2 and move stores' memory users to the corresponding memory slices.
4887   // Disable memory split verification code until the fix for 6984348.
4888   // Currently it produces false negative results since it does not cover all cases.
4889 #if 0 // ifdef ASSERT
4890   visited.Reset();
4891   Node_Stack old_mems(arena, _compile->unique() >> 2);
4892 #endif
4893   for (uint i = 0; i < ideal_nodes.size(); i++) {
4894     Node*    n = ideal_nodes.at(i);
4895     Node* nmem = get_map(n->_idx);
4896     assert(nmem != nullptr, "sanity");
4897     if (n->is_Mem()) {
4898 #if 0 // ifdef ASSERT
4899       Node* old_mem = n->in(MemNode::Memory);
4900       if (!visited.test_set(old_mem->_idx)) {
4901         old_mems.push(old_mem, old_mem->outcnt());
4902       }
4903 #endif
4904       assert(n->in(MemNode::Memory) != nmem, "sanity");
4905       if (!n->is_Load()) {
4906         // Move memory users of a store first.
4907         move_inst_mem(n, orig_phis);
4908       }
4909       // Now update memory input
4910       igvn->hash_delete(n);
4911       n->set_req(MemNode::Memory, nmem);
4912       igvn->hash_insert(n);
4913       record_for_optimizer(n);
4914     } else {
4915       assert(n->is_Allocate() || n->is_CheckCastPP() ||
4916              n->is_AddP() || n->is_Phi(), "unknown node used for set_map()");
4917     }
4918   }
4919 #if 0 // ifdef ASSERT
4920   // Verify that memory was split correctly
4921   while (old_mems.is_nonempty()) {
4922     Node* old_mem = old_mems.node();
4923     uint  old_cnt = old_mems.index();
4924     old_mems.pop();
4925     assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
4926   }
4927 #endif
4928 }
4929 
4930 #ifndef PRODUCT
4931 int ConnectionGraph::_no_escape_counter = 0;
4932 int ConnectionGraph::_arg_escape_counter = 0;
4933 int ConnectionGraph::_global_escape_counter = 0;
4934 
4935 static const char *node_type_names[] = {
4936   "UnknownType",
4937   "JavaObject",
4938   "LocalVar",
4939   "Field",
4940   "Arraycopy"
4941 };
4942 
4943 static const char *esc_names[] = {
4944   "UnknownEscape",
4945   "NoEscape",
4946   "ArgEscape",
4947   "GlobalEscape"
4948 };
4949 
4950 void PointsToNode::dump_header(bool print_state, outputStream* out) const {
4951   NodeType nt = node_type();
4952   out->print("%s(%d) ", node_type_names[(int) nt], _pidx);
4953   if (print_state) {
4954     EscapeState es = escape_state();
4955     EscapeState fields_es = fields_escape_state();
4956     out->print("%s(%s) ", esc_names[(int)es], esc_names[(int)fields_es]);
4957     if (nt == PointsToNode::JavaObject && !this->scalar_replaceable()) {
4958       out->print("NSR ");
4959     }
4960   }
4961 }
4962 
4963 void PointsToNode::dump(bool print_state, outputStream* out, bool newline) const {
4964   dump_header(print_state, out);
4965   if (is_Field()) {
4966     FieldNode* f = (FieldNode*)this;
4967     if (f->is_oop()) {
4968       out->print("oop ");
4969     }
4970     if (f->offset() > 0) {
4971       out->print("+%d ", f->offset());
4972     }
4973     out->print("(");
4974     for (BaseIterator i(f); i.has_next(); i.next()) {
4975       PointsToNode* b = i.get();
4976       out->print(" %d%s", b->idx(),(b->is_JavaObject() ? "P" : ""));
4977     }
4978     out->print(" )");
4979   }
4980   out->print("[");
4981   for (EdgeIterator i(this); i.has_next(); i.next()) {
4982     PointsToNode* e = i.get();
4983     out->print(" %d%s%s", e->idx(),(e->is_JavaObject() ? "P" : (e->is_Field() ? "F" : "")), e->is_Arraycopy() ? "cp" : "");
4984   }
4985   out->print(" [");
4986   for (UseIterator i(this); i.has_next(); i.next()) {
4987     PointsToNode* u = i.get();
4988     bool is_base = false;
4989     if (PointsToNode::is_base_use(u)) {
4990       is_base = true;
4991       u = PointsToNode::get_use_node(u)->as_Field();
4992     }
4993     out->print(" %d%s%s", u->idx(), is_base ? "b" : "", u->is_Arraycopy() ? "cp" : "");
4994   }
4995   out->print(" ]]  ");
4996   if (_node == nullptr) {
4997     out->print("<null>%s", newline ? "\n" : "");
4998   } else {
4999     _node->dump(newline ? "\n" : "", false, out);
5000   }
5001 }
5002 
5003 void ConnectionGraph::dump(GrowableArray<PointsToNode*>& ptnodes_worklist) {
5004   bool first = true;
5005   int ptnodes_length = ptnodes_worklist.length();
5006   for (int i = 0; i < ptnodes_length; i++) {
5007     PointsToNode *ptn = ptnodes_worklist.at(i);
5008     if (ptn == nullptr || !ptn->is_JavaObject()) {
5009       continue;
5010     }
5011     PointsToNode::EscapeState es = ptn->escape_state();
5012     if ((es != PointsToNode::NoEscape) && !Verbose) {
5013       continue;
5014     }
5015     Node* n = ptn->ideal_node();
5016     if (n->is_Allocate() || (n->is_CallStaticJava() &&
5017                              n->as_CallStaticJava()->is_boxing_method())) {
5018       if (first) {
5019         tty->cr();
5020         tty->print("======== Connection graph for ");
5021         _compile->method()->print_short_name();
5022         tty->cr();
5023         tty->print_cr("invocation #%d: %d iterations and %f sec to build connection graph with %d nodes and worklist size %d",
5024                       _invocation, _build_iterations, _build_time, nodes_size(), ptnodes_worklist.length());
5025         tty->cr();
5026         first = false;
5027       }
5028       ptn->dump();
5029       // Print all locals and fields which reference this allocation
5030       for (UseIterator j(ptn); j.has_next(); j.next()) {
5031         PointsToNode* use = j.get();
5032         if (use->is_LocalVar()) {
5033           use->dump(Verbose);
5034         } else if (Verbose) {
5035           use->dump();
5036         }
5037       }
5038       tty->cr();
5039     }
5040   }
5041 }
5042 
5043 void ConnectionGraph::print_statistics() {
5044   tty->print_cr("No escape = %d, Arg escape = %d, Global escape = %d", Atomic::load(&_no_escape_counter), Atomic::load(&_arg_escape_counter), Atomic::load(&_global_escape_counter));
5045 }
5046 
5047 void ConnectionGraph::escape_state_statistics(GrowableArray<JavaObjectNode*>& java_objects_worklist) {
5048   if (!PrintOptoStatistics || (_invocation > 0)) { // Collect data only for the first invocation
5049     return;
5050   }
5051   for (int next = 0; next < java_objects_worklist.length(); ++next) {
5052     JavaObjectNode* ptn = java_objects_worklist.at(next);
5053     if (ptn->ideal_node()->is_Allocate()) {
5054       if (ptn->escape_state() == PointsToNode::NoEscape) {
5055         Atomic::inc(&ConnectionGraph::_no_escape_counter);
5056       } else if (ptn->escape_state() == PointsToNode::ArgEscape) {
5057         Atomic::inc(&ConnectionGraph::_arg_escape_counter);
5058       } else if (ptn->escape_state() == PointsToNode::GlobalEscape) {
5059         Atomic::inc(&ConnectionGraph::_global_escape_counter);
5060       } else {
5061         assert(false, "Unexpected Escape State");
5062       }
5063     }
5064   }
5065 }
5066 
5067 void ConnectionGraph::trace_es_update_helper(PointsToNode* ptn, PointsToNode::EscapeState es, bool fields, const char* reason) const {
5068   if (_compile->directive()->TraceEscapeAnalysisOption) {
5069     assert(ptn != nullptr, "should not be null");
5070     assert(reason != nullptr, "should not be null");
5071     ptn->dump_header(true);
5072     PointsToNode::EscapeState new_es = fields ? ptn->escape_state() : es;
5073     PointsToNode::EscapeState new_fields_es = fields ? es : ptn->fields_escape_state();
5074     tty->print_cr("-> %s(%s) %s", esc_names[(int)new_es], esc_names[(int)new_fields_es], reason);
5075   }
5076 }
5077 
5078 const char* ConnectionGraph::trace_propagate_message(PointsToNode* from) const {
5079   if (_compile->directive()->TraceEscapeAnalysisOption) {
5080     stringStream ss;
5081     ss.print("propagated from: ");
5082     from->dump(true, &ss, false);
5083     return ss.as_string();
5084   } else {
5085     return nullptr;
5086   }
5087 }
5088 
5089 const char* ConnectionGraph::trace_arg_escape_message(CallNode* call) const {
5090   if (_compile->directive()->TraceEscapeAnalysisOption) {
5091     stringStream ss;
5092     ss.print("escapes as arg to:");
5093     call->dump("", false, &ss);
5094     return ss.as_string();
5095   } else {
5096     return nullptr;
5097   }
5098 }
5099 
5100 const char* ConnectionGraph::trace_merged_message(PointsToNode* other) const {
5101   if (_compile->directive()->TraceEscapeAnalysisOption) {
5102     stringStream ss;
5103     ss.print("is merged with other object: ");
5104     other->dump_header(true, &ss);
5105     return ss.as_string();
5106   } else {
5107     return nullptr;
5108   }
5109 }
5110 
5111 #endif
5112 
5113 void ConnectionGraph::record_for_optimizer(Node *n) {
5114   _igvn->_worklist.push(n);
5115   _igvn->add_users_to_worklist(n);
5116 }