1 /*
   2  * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "libadt/vectset.hpp"
  26 #include "memory/allocation.inline.hpp"
  27 #include "memory/resourceArea.hpp"
  28 #include "opto/block.hpp"
  29 #include "opto/c2compiler.hpp"
  30 #include "opto/callnode.hpp"
  31 #include "opto/cfgnode.hpp"
  32 #include "opto/machnode.hpp"
  33 #include "opto/opcodes.hpp"
  34 #include "opto/phaseX.hpp"
  35 #include "opto/rootnode.hpp"
  36 #include "opto/runtime.hpp"
  37 #include "opto/chaitin.hpp"
  38 #include "runtime/deoptimization.hpp"
  39 
  40 // Portions of code courtesy of Clifford Click
  41 
  42 // Optimization - Graph Style
  43 
  44 // To avoid float value underflow
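     // (FLT_MIN is about 1.18e-38; 1e-35 presumably leaves headroom so that
     // further scaling of block frequencies stays clear of denormals.)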
  45 #define MIN_BLOCK_FREQUENCY 1.e-35f
  46 
  47 //----------------------------schedule_node_into_block-------------------------
  48 // Insert node n into block b. Look for projections of n and make sure they
  49 // are in b also.
  50 void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
  51   // Set basic block of n, Add n to b,
  52   map_node_to_block(n, b);
  53   b->add_inst(n);
  54 
  55   // After Matching, nearly any old Node may have projections trailing it.
  56   // These are usually machine-dependent flags.  In any case, they might
  57   // float to another block below this one.  Move them up.
  58   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
  59     Node*  use  = n->fast_out(i);
  60     if (use->is_Proj()) {
  61       Block* buse = get_block_for_node(use);
  62       if (buse != b) {              // In wrong block?
  63         if (buse != nullptr) {
  64           buse->find_remove(use);   // Remove from wrong block
  65         }
  66         map_node_to_block(use, b);
  67         b->add_inst(use);
  68       }
  69     }
  70   }
  71 }
  72 
  73 //----------------------------replace_block_proj_ctrl-------------------------
  74 // Nodes that have is_block_proj() nodes as their control need to use
  75 // the appropriate Region for their actual block as their control since
  76 // the projection will be in a predecessor block.
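     //
     // Illustrative sketch: if n->in(0) is the IfTrue projection that ends
     // predecessor block B1, and n itself belongs to successor block B2, then
     // n's control input is rewired to B2's head (the Region starting B2).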
  77 void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
  78   const Node *in0 = n->in(0);
  79   assert(in0 != nullptr, "Only control-dependent");
  80   const Node *p = in0->is_block_proj();
  81   if (p != nullptr && p != n) {    // Control from a block projection?
  82     assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
  83     // Find trailing Region
  84     Block *pb = get_block_for_node(in0); // Block-projection already has basic block
  85     uint j = 0;
  86     if (pb->_num_succs != 1) {  // More than 1 successor?
  87       // Search for successor
  88       uint max = pb->number_of_nodes();
  89       assert( max > 1, "" );
  90       uint start = max - pb->_num_succs;
  91       // Find which output path belongs to projection
  92       for (j = start; j < max; j++) {
  93         if( pb->get_node(j) == in0 )
  94           break;
  95       }
  96       assert( j < max, "must find" );
  97       // Change control to match head of successor basic block
  98       j -= start;
  99     }
 100     n->set_req(0, pb->_succs[j]->head());
 101   }
 102 }
 103 
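     // Returns true if 'dom_node' dominates 'node'. When both nodes map to the
     // same basic block, the answer is derived from their roles in that block
     // (block start, block projection, or interior control node); otherwise the
     // blocks' dominator tree decides.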
 104 bool PhaseCFG::is_dominator(Node* dom_node, Node* node) {
 105   assert(is_CFG(node) && is_CFG(dom_node), "node and dom_node must be CFG nodes");
 106   if (dom_node == node) {
 107     return true;
 108   }
 109   Block* d = find_block_for_node(dom_node);
 110   Block* n = find_block_for_node(node);
 111   assert(n != nullptr && d != nullptr, "blocks must exist");
 112 
 113   if (d == n) {
 114     if (dom_node->is_block_start()) {
 115       return true;
 116     }
 117     if (node->is_block_start()) {
 118       return false;
 119     }
 120     if (dom_node->is_block_proj()) {
 121       return false;
 122     }
 123     if (node->is_block_proj()) {
 124       return true;
 125     }
 126 
 127     assert(is_control_proj_or_safepoint(node), "node must be control projection or safepoint");
 128     assert(is_control_proj_or_safepoint(dom_node), "dom_node must be control projection or safepoint");
 129 
 130     // Neither 'node' nor 'dom_node' is a block start or block projection.
 131     // Check if 'dom_node' is above 'node' in the control graph.
 132     if (is_dominating_control(dom_node, node)) {
 133       return true;
 134     }
 135 
 136 #ifdef ASSERT
 137     // If 'dom_node' does not dominate 'node' then 'node' has to dominate 'dom_node'
 138     if (!is_dominating_control(node, dom_node)) {
 139       node->dump();
 140       dom_node->dump();
 141       assert(false, "neither dom_node nor node dominates the other");
 142     }
 143 #endif
 144 
 145     return false;
 146   }
 147   return d->dom_lca(n) == d;
 148 }
 149 
 150 bool PhaseCFG::is_CFG(Node* n) {
 151   return n->is_block_proj() || n->is_block_start() || is_control_proj_or_safepoint(n);
 152 }
 153 
 154 bool PhaseCFG::is_control_proj_or_safepoint(Node* n) const {
 155   bool result = (n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_SafePoint) || (n->is_Proj() && n->as_Proj()->bottom_type() == Type::CONTROL);
 156   assert(!result || (n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_SafePoint)
 157           || (n->is_Proj() && n->as_Proj()->_con == 0), "If control projection, it must be projection 0");
 158   return result;
 159 }
 160 
 161 Block* PhaseCFG::find_block_for_node(Node* n) const {
 162   if (n->is_block_start() || n->is_block_proj()) {
 163     return get_block_for_node(n);
 164   } else {
 165     // Walk the control graph up if 'n' is not a block start nor a block projection. In this case 'n' must be
 166     // an unmatched control projection or a not yet matched safepoint precedence edge in the middle of a block.
 167     assert(is_control_proj_or_safepoint(n), "must be control projection or safepoint");
 168     Node* ctrl = n->in(0);
 169     while (!ctrl->is_block_start()) {
 170       ctrl = ctrl->in(0);
 171     }
 172     return get_block_for_node(ctrl);
 173   }
 174 }
 175 
 176 // Walk up the control graph from 'n' and check if 'dom_ctrl' is found.
 177 bool PhaseCFG::is_dominating_control(Node* dom_ctrl, Node* n) {
 178   Node* ctrl = n->in(0);
 179   while (!ctrl->is_block_start()) {
 180     if (ctrl == dom_ctrl) {
 181       return true;
 182     }
 183     ctrl = ctrl->in(0);
 184   }
 185   return false;
 186 }
 187 
 188 
 189 //------------------------------schedule_pinned_nodes--------------------------
 190 // Set the basic block for Nodes pinned into blocks
 191 void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
 192   // Allocate node stack of size C->live_nodes()+8 to avoid frequent realloc
 193   GrowableArray <Node*> spstack(C->live_nodes() + 8);
 194   spstack.push(_root);
 195   while (spstack.is_nonempty()) {
 196     Node* node = spstack.pop();
 197     if (!visited.test_set(node->_idx)) { // Test node and flag it as visited
 198       if (node->pinned() && !has_block(node)) {  // Pinned?  Nail it down!
 199         assert(node->in(0), "pinned Node must have Control");
 200         // Before setting block replace block_proj control edge
 201         replace_block_proj_ctrl(node);
 202         Node* input = node->in(0);
 203         while (!input->is_block_start()) {
 204           input = input->in(0);
 205         }
 206         Block* block = get_block_for_node(input); // Basic block of controlling input
 207         schedule_node_into_block(node, block);
 208       }
 209 
 210       // If the node has precedence edges (added when CastPP nodes are
 211       // removed in final_graph_reshaping), fix the control of the
 212       // node to cover the precedence edges and remove the
 213       // dependencies.
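           // Note (illustrative): of the precedence-edge inputs and the current
           // control, only the deepest node in the dominator tree is kept;
           // since its block is dominated by all the others, a single control
           // edge to it covers the shallower dependencies as well.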
 214       Node* n = nullptr;
 215       for (uint i = node->len()-1; i >= node->req(); i--) {
 216         Node* m = node->in(i);
 217         if (m == nullptr) continue;
 218         assert(is_CFG(m), "must be a CFG node");
 219         node->rm_prec(i);
 220         if (n == nullptr) {
 221           n = m;
 222         } else {
 223           assert(is_dominator(n, m) || is_dominator(m, n), "one must dominate the other");
 224           n = is_dominator(n, m) ? m : n;
 225         }
 226       }
 227       if (n != nullptr) {
 228         assert(node->in(0), "control should have been set");
 229         assert(is_dominator(n, node->in(0)) || is_dominator(node->in(0), n), "one must dominate the other");
 230         if (!is_dominator(n, node->in(0))) {
 231           node->set_req(0, n);
 232         }
 233       }
 234 
 235       // process all inputs that are non-null
 236       for (int i = node->len()-1; i >= 0; --i) {
 237         if (node->in(i) != nullptr) {
 238           spstack.push(node->in(i));
 239         }
 240       }
 241     }
 242   }
 243 }
 244 
 245 // Assert that new input b2 is dominated by all previous inputs.
 246 // Check this by seeing that it is dominated by b1, the deepest
 247 // input observed until b2.
 248 static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
 249   if (b1 == nullptr)  return;
 250   assert(b1->_dom_depth < b2->_dom_depth, "sanity");
 251   Block* tmp = b2;
 252   while (tmp != b1 && tmp != nullptr) {
 253     tmp = tmp->_idom;
 254   }
 255   if (tmp != b1) {
 256 #ifdef ASSERT
 257     // Detected an unschedulable graph.  Print some nice stuff and die.
 258     tty->print_cr("!!! Unschedulable graph !!!");
 259     for (uint j=0; j<n->len(); j++) { // For all inputs
 260       Node* inn = n->in(j); // Get input
 261       if (inn == nullptr)  continue;  // Ignore null, missing inputs
 262       Block* inb = cfg->get_block_for_node(inn);
 263       tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
 264                  inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
 265       inn->dump();
 266     }
 267     tty->print("Failing node: ");
 268     n->dump();
 269     assert(false, "unschedulable graph");
 270 #endif
 271     cfg->C->record_failure("unschedulable graph");
 272   }
 273 }
 274 
 275 static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
 276   // Find the last input dominated by all other inputs.
 277   Block* deepb           = nullptr;     // Deepest block so far
 278   int    deepb_dom_depth = 0;
 279   for (uint k = 0; k < n->len(); k++) { // For all inputs
 280     Node* inn = n->in(k);               // Get input
 281     if (inn == nullptr)  continue;      // Ignore null, missing inputs
 282     Block* inb = cfg->get_block_for_node(inn);
 283     assert(inb != nullptr, "must already have scheduled this input");
 284     if (deepb_dom_depth < (int) inb->_dom_depth) {
 285       // The new inb must be dominated by the previous deepb.
 286       // The various inputs must be linearly ordered in the dom
 287       // tree, or else there will not be a unique deepest block.
 288       assert_dom(deepb, inb, n, cfg);
 289       if (cfg->C->failing()) {
 290         return nullptr;
 291       }
 292       deepb = inb;                      // Save deepest block
 293       deepb_dom_depth = deepb->_dom_depth;
 294     }
 295   }
 296   assert(deepb != nullptr, "must be at least one input to n");
 297   return deepb;
 298 }
 299 
 300 
 301 //------------------------------schedule_early---------------------------------
 302 // Find the earliest Block any instruction can be placed in.  Some instructions
 303 // are pinned into Blocks.  Unpinned instructions can appear in the last
 304 // block in which all their inputs occur.
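     // For example, if a node's inputs are defined in blocks B2 and B5, where
     // B2 dominates B5, the node's earliest legal block is B5: the first point
     // at which all of its inputs are available.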
 305 bool PhaseCFG::schedule_early(VectorSet &visited, Node_Stack &roots) {
 306   // Allocate stack with enough space to avoid frequent realloc
 307   Node_Stack nstack(roots.size() + 8);
 308   // _root will be processed among C->top() inputs
 309   roots.push(C->top(), 0);
 310   visited.set(C->top()->_idx);
 311 
 312   while (roots.size() != 0) {
 313     // Cache the node and next-input index at the top of 'roots' in the
 314     // local variables parent_node and input_index.
 315     Node* parent_node = roots.node();
 316     uint  input_index = 0;
 317     roots.pop();
 318 
 319     while (true) {
 320       if (input_index == 0) {
 321         // Fixup some control.  Constants without control get attached
 322         // to root and nodes that use is_block_proj() nodes should be attached
 323         // to the region that starts their block.
 324         const Node* control_input = parent_node->in(0);
 325         if (control_input != nullptr) {
 326           replace_block_proj_ctrl(parent_node);
 327         } else {
 328           // Is a constant with NO inputs?
 329           if (parent_node->req() == 1) {
 330             parent_node->set_req(0, _root);
 331           }
 332         }
 333       }
 334 
 335       // First, visit all inputs and force them to get a block.  If an
 336       // input is already in a block we quit following inputs (to avoid
 337       // cycles). Instead we put that Node on a worklist to be handled
 338       // later (since ITS inputs may not have a block yet).
 339 
 340       // Assume all n's inputs will be processed
 341       bool done = true;
 342 
 343       while (input_index < parent_node->len()) {
 344         Node* in = parent_node->in(input_index++);
 345         if (in == nullptr) {
 346           continue;
 347         }
 348 
 349         int is_visited = visited.test_set(in->_idx);
 350         if (!has_block(in)) {
 351           if (is_visited) {
 352             assert(false, "graph should be schedulable");
 353             return false;
 354           }
 355           // Save parent node and next input's index.
 356           nstack.push(parent_node, input_index);
 357           // Process current input now.
 358           parent_node = in;
 359           input_index = 0;
 360           // Not all n's inputs processed.
 361           done = false;
 362           break;
 363         } else if (!is_visited) {
 364           // Visit this guy later, using worklist
 365           roots.push(in, 0);
 366         }
 367       }
 368 
 369       if (done) {
 370         // All of n's inputs have been processed, complete post-processing.
 371 
 372         // Some instructions are pinned into a block.  These include Region,
 373         // Phi, Start, Return, and other control-dependent instructions and
 374         // any projections which depend on them.
 375         if (!parent_node->pinned()) {
 376           // Set earliest legal block.
 377           Block* earliest_block = find_deepest_input(parent_node, this);
 378           if (C->failing()) {
 379             return false;
 380           }
 381           map_node_to_block(parent_node, earliest_block);
 382         } else {
 383           assert(get_block_for_node(parent_node) == get_block_for_node(parent_node->in(0)), "Pinned Node should be at the same block as its control edge");
 384         }
 385 
 386         if (nstack.is_empty()) {
 387           // Finished all nodes on stack.
 388           // Process next node on the worklist 'roots'.
 389           break;
 390         }
 391         // Get saved parent node and next input's index.
 392         parent_node = nstack.node();
 393         input_index = nstack.index();
 394         nstack.pop();
 395       }
 396     }
 397   }
 398   return true;
 399 }
 400 
 401 //------------------------------dom_lca----------------------------------------
 402 // Find least common ancestor in dominator tree
 403 // LCA is a current notion of LCA, to be raised above 'this'.
 404 // As a convenient boundary condition, return 'this' if LCA is null.
 405 // Find the LCA of those two nodes.
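     // Implementation sketch: level the deeper block up to the shallower one's
     // dominator-tree depth, then walk both up in lockstep until they meet.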
 406 Block* Block::dom_lca(Block* LCA) {
 407   if (LCA == nullptr || LCA == this)  return this;
 408 
 409   Block* anc = this;
 410   while (anc->_dom_depth > LCA->_dom_depth)
 411     anc = anc->_idom;           // Walk up till anc is as high as LCA
 412 
 413   while (LCA->_dom_depth > anc->_dom_depth)
 414     LCA = LCA->_idom;           // Walk up till LCA is as high as anc
 415 
 416   while (LCA != anc) {          // Walk both up till they are the same
 417     LCA = LCA->_idom;
 418     anc = anc->_idom;
 419   }
 420 
 421   return LCA;
 422 }
 423 
 424 //--------------------------raise_LCA_above_use--------------------------------
 425 // We are placing a definition, and have been given a def->use edge.
 426 // The definition must dominate the use, so move the LCA upward in the
 427 // dominator tree to dominate the use.  If the use is a phi, adjust
 428 // the LCA only with the phi input paths which actually use this def.
 429 static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
 430   Block* buse = cfg->get_block_for_node(use);
 431   if (buse == nullptr) return LCA;   // Unused killing Projs have no use block
 432   if (!use->is_Phi())  return buse->dom_lca(LCA);
 433   uint pmax = use->req();       // Number of Phi inputs
 434   // Why doesn't this loop just break after finding the matching input to
 435   // the Phi?  Well...it's like this.  I do not have true def-use/use-def
 436   // chains.  Means I cannot distinguish, from the def-use direction, which
 437   // of many use-defs lead from the same use to the same def.  That is, this
 438   // Phi might have several uses of the same def.  Each use appears in a
 439   // different predecessor block.  But when I enter here, I cannot distinguish
 440   // which use-def edge I should find the predecessor block for.  So I find
 441   // them all.  Means I do a little extra work if a Phi uses the same value
 442   // more than once.
 443   for (uint j=1; j<pmax; j++) { // For all inputs
 444     if (use->in(j) == def) {    // Found matching input?
 445       Block* pred = cfg->get_block_for_node(buse->pred(j));
 446       LCA = pred->dom_lca(LCA);
 447     }
 448   }
 449   return LCA;
 450 }
 451 
 452 //----------------------------raise_LCA_above_marks----------------------------
 453 // Return a new LCA that dominates LCA and any of its marked predecessors.
 454 // Search all my parents up to 'early' (exclusive), looking for predecessors
 455 // which are marked with the given index.  Return the LCA (in the dom tree)
 456 // of all marked blocks.  If there are none marked, return the original
 457 // LCA.
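     // The search walks predecessor blocks backwards from LCA, bounded by
     // 'early'; whenever a marked block is found, LCA is raised to cover it and
     // the walk resumes from the raised LCA.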
 458 static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) {
 459   Block_List worklist;
 460   worklist.push(LCA);
 461   while (worklist.size() > 0) {
 462     Block* mid = worklist.pop();
 463     if (mid == early)  continue;  // stop searching here
 464 
 465     // Test and set the visited bit.
 466     if (mid->raise_LCA_visited() == mark)  continue;  // already visited
 467 
 468     // Don't process the current LCA, otherwise the search may terminate early
 469     if (mid != LCA && mid->raise_LCA_mark() == mark) {
 470       // Raise the LCA.
 471       LCA = mid->dom_lca(LCA);
 472       if (LCA == early)  break;   // stop searching everywhere
 473       assert(early->dominates(LCA), "early is high enough");
 474       // Resume searching at that point, skipping intermediate levels.
 475       worklist.push(LCA);
 476       if (LCA == mid)
 477         continue; // Don't mark as visited to avoid early termination.
 478     } else {
 479       // Keep searching through this block's predecessors.
 480       for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
 481         Block* mid_parent = cfg->get_block_for_node(mid->pred(j));
 482         worklist.push(mid_parent);
 483       }
 484     }
 485     mid->set_raise_LCA_visited(mark);
 486   }
 487   return LCA;
 488 }
 489 
 490 //--------------------------memory_early_block--------------------------------
 491 // This is a variation of find_deepest_input, the heart of schedule_early.
 492 // Find the "early" block for a load, if we considered only memory and
 493 // address inputs, that is, if other data inputs were ignored.
 494 //
 495 // Because a subset of edges are considered, the resulting block will
 496 // be earlier (at a shallower dom_depth) than the true schedule_early
 497 // point of the node. We compute this earlier block as a more permissive
 498 // site for anti-dependency insertion, but only if subsume_loads is enabled.
 499 static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
 500   Node* base;
 501   Node* index;
 502   Node* store = load->in(MemNode::Memory);
 503   load->as_Mach()->memory_inputs(base, index);
 504 
 505   assert(base != NodeSentinel && index != NodeSentinel,
 506          "unexpected base/index inputs");
 507 
 508   Node* mem_inputs[4];
 509   int mem_inputs_length = 0;
 510   if (base != nullptr)  mem_inputs[mem_inputs_length++] = base;
 511   if (index != nullptr) mem_inputs[mem_inputs_length++] = index;
 512   if (store != nullptr) mem_inputs[mem_inputs_length++] = store;
 513 
 514   // In the comparison below, add one to account for the control input,
 515   // which may be null, but always takes up a spot in the in array.
 516   if (mem_inputs_length + 1 < (int) load->req()) {
 517     // This "load" has more inputs than just the memory, base and index inputs.
 518     // For purposes of checking anti-dependences, we need to start
 519     // from the early block of only the address portion of the instruction,
 520     // and ignore other blocks that may have factored into the wider
 521     // schedule_early calculation.
 522     if (load->in(0) != nullptr) mem_inputs[mem_inputs_length++] = load->in(0);
 523 
 524     Block* deepb           = nullptr;        // Deepest block so far
 525     int    deepb_dom_depth = 0;
 526     for (int i = 0; i < mem_inputs_length; i++) {
 527       Block* inb = cfg->get_block_for_node(mem_inputs[i]);
 528       if (deepb_dom_depth < (int) inb->_dom_depth) {
 529         // The new inb must be dominated by the previous deepb.
 530         // The various inputs must be linearly ordered in the dom
 531         // tree, or else there will not be a unique deepest block.
 532         assert_dom(deepb, inb, load, cfg);
 533         if (cfg->C->failing()) {
 534           return nullptr;
 535         }
 536         deepb = inb;                      // Save deepest block
 537         deepb_dom_depth = deepb->_dom_depth;
 538       }
 539     }
 540     early = deepb;
 541   }
 542 
 543   return early;
 544 }
 545 
 546 // This function is used by insert_anti_dependences to find unrelated loads for stores in implicit null checks.
 547 bool PhaseCFG::unrelated_load_in_store_null_block(Node* store, Node* load) {
 548   // We expect an anti-dependence edge from 'load' to 'store', except when
 549   // implicit_null_check() has hoisted 'store' above its early block to
 550   // perform an implicit null check, and 'load' is placed in the null
 551   // block. In this case it is safe to ignore the anti-dependence, as the
 552   // null block is only reached if 'store' tries to write to a null object
 553   // and 'load' reads from a non-null object (there is a preceding check for
 554   // that). These objects can't be the same.
 555   Block* store_block = get_block_for_node(store);
 556   Block* load_block = get_block_for_node(load);
 557   Node* end = store_block->end();
 558   if (end->is_MachNullCheck() && (end->in(1) == store) && store_block->dominates(load_block)) {
 559     Node* if_true = end->find_out_with(Op_IfTrue);
 560     assert(if_true != nullptr, "null check without null projection");
 561     Node* null_block_region = if_true->find_out_with(Op_Region);
 562     assert(null_block_region != nullptr, "null check without null region");
 563     return get_block_for_node(null_block_region) == load_block;
 564   }
 565   return false;
 566 }
 567 
 568 class DefUseMemStatesQueue : public StackObj {
 569 private:
 570   class DefUsePair : public StackObj {
 571   private:
 572     Node* _def; // memory state
 573     Node* _use; // use of the memory state that also modifies the memory state
 574 
 575   public:
 576     DefUsePair(Node* def, Node* use) :
 577       _def(def), _use(use) {
 578     }
 579 
 580     DefUsePair() :
 581       _def(nullptr), _use(nullptr) {
 582     }
 583 
 584     Node* def() const {
 585       return _def;
 586     }
 587 
 588     Node* use() const {
 589       return _use;
 590     }
 591   };
 592 
 593   GrowableArray<DefUsePair> _queue;
 594   GrowableArray<MergeMemNode*> _worklist_visited; // visited mergemem nodes
 595 
 596   bool already_enqueued(Node* def_mem, PhiNode* use_phi) const {
 597     // def_mem is one of the inputs of use_phi and at least one input of use_phi is
 598     // not def_mem. It's however possible that use_phi has def_mem as input multiple
 599     // times. If that happens, use_phi is recorded as a use of def_mem multiple
 600     // times as well. When PhaseCFG::insert_anti_dependences() goes over
 601     // uses of def_mem and enqueues them for processing, use_phi would then be
 602     // enqueued for processing multiple times when it only needs to be
 603     // processed once. The code below checks if use_phi as a use of def_mem was
 604     // already enqueued to avoid redundant processing of use_phi.
 605     int j = _queue.length()-1;
 606     // If any uses of def_mem are already enqueued, they were enqueued
 607     // last (all uses of def_mem are processed in one go).
 608     for (; j >= 0; j--) {
 609       const DefUsePair& def_use_pair = _queue.at(j);
 610       if (def_use_pair.def() != def_mem) {
 611         // We're done with the uses of def_mem
 612         break;
 613       }
 614       if (def_use_pair.use() == use_phi) {
 615         return true;
 616       }
 617     }
 618 #ifdef ASSERT
 619     for (; j >= 0; j--) {
 620       const DefUsePair& def_use_pair = _queue.at(j);
 621       assert(def_use_pair.def() != def_mem, "Should be done with the uses of def_mem");
 622     }
 623 #endif
 624     return false;
 625   }
 626 
 627 public:
 628   DefUseMemStatesQueue(ResourceArea* area) {
 629   }
 630 
 631   void push(Node* def_mem_state, Node* use_mem_state) {
 632     if (use_mem_state->is_MergeMem()) {
 633       // Be sure we don't get into combinatorial problems.
 634       if (!_worklist_visited.append_if_missing(use_mem_state->as_MergeMem())) {
 635         return; // already on work list; do not repeat
 636       }
 637     } else if (use_mem_state->is_Phi()) {
 638       // A Phi could have the same mem as input multiple times. If that's the case, we don't need to enqueue it
 639       // more than once. We otherwise allow phis to be repeated; they can merge two relevant states.
 640       if (already_enqueued(def_mem_state, use_mem_state->as_Phi())) {
 641         return;
 642       }
 643     }
 644 
 645     _queue.push(DefUsePair(def_mem_state, use_mem_state));
 646   }
 647 
 648   bool is_nonempty() const {
 649     return _queue.is_nonempty();
 650   }
 651 
 652   Node* top_def() const {
 653     return _queue.top().def();
 654   }
 655 
 656   Node* top_use() const {
 657     return _queue.top().use();
 658   }
 659 
 660   void pop() {
 661     _queue.pop();
 662   }
 663 };
 664 
 665 //--------------------------insert_anti_dependences---------------------------
 666 // A load may need to witness memory that nearby stores can overwrite.
 667 // For each nearby store, either insert an "anti-dependence" edge
 668 // from the load to the store, or else move LCA upward to force the
 669 // load to (eventually) be scheduled in a block above the store.
 670 //
 671 // Do not add edges to stores on distinct control-flow paths;
 672 // only add edges to stores which might interfere.
 673 //
 674 // Return the (updated) LCA.  There will not be any possibly interfering
 675 // store between the load's "early block" and the updated LCA.
 676 // Any stores in the updated LCA will have new precedence edges
 677 // back to the load.  The caller is expected to schedule the load
 678 // in the LCA, in which case the precedence edges will make LCM
 679 // preserve anti-dependences.  The caller may also hoist the load
 680 // above the LCA, if it is not the early block.
 681 Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
 682   ResourceMark rm;
 683   assert(load->needs_anti_dependence_check(), "must be a load of some sort");
 684   assert(LCA != nullptr, "");
 685   DEBUG_ONLY(Block* LCA_orig = LCA);
 686 
 687   // Compute the alias index.  Loads and stores with different alias indices
 688   // do not need anti-dependence edges.
 689   int load_alias_idx = C->get_alias_index(load->adr_type());
 690 #ifdef ASSERT
 691   assert(Compile::AliasIdxTop <= load_alias_idx && load_alias_idx < C->num_alias_types(), "Invalid alias index");
 692   if (load_alias_idx == Compile::AliasIdxBot && C->do_aliasing() &&
 693       (PrintOpto || VerifyAliases ||
 694        (PrintMiscellaneous && (WizardMode || Verbose)))) {
 695     // Load nodes should not consume all of memory.
 696     // Reporting a bottom type indicates a bug in adlc.
 697     // If some particular type of node validly consumes all of memory,
 698     // sharpen the preceding "if" to exclude it, so we can catch bugs here.
 699     tty->print_cr("*** Possible Anti-Dependence Bug:  Load consumes all of memory.");
 700     load->dump(2);
 701     if (VerifyAliases)  assert(load_alias_idx != Compile::AliasIdxBot, "");
 702   }
 703 #endif
 704 
 705   if (!C->alias_type(load_alias_idx)->is_rewritable()) {
 706     // It is impossible to spoil this load by putting stores before it,
 707     // because we know that the stores will never update the value
 708     // which 'load' must witness.
 709     return LCA;
 710   }
 711 
 712   node_idx_t load_index = load->_idx;
 713 
 714   // Note the earliest legal placement of 'load', as determined
 715   // by the unique point in the dom tree where all memory effects
 716   // and other inputs are first available.  (Computed by schedule_early.)
 717   // For normal loads, 'early' is the shallowest place (dom graph wise)
 718   // to look for anti-deps between this load and any store.
 719   Block* early = get_block_for_node(load);
 720 
 721   // If we are subsuming loads, compute an "early" block that only considers
 722   // memory or address inputs. This block may be different from the
 723   // schedule_early block in that it could be at an even shallower depth in the
 724   // dominator tree, and allow for a broader discovery of anti-dependences.
 725   if (C->subsume_loads()) {
 726     early = memory_early_block(load, early, this);
 727     if (C->failing()) {
 728       return nullptr;
 729     }
 730   }
 731 
 732   ResourceArea* area = Thread::current()->resource_area();
 733   DefUseMemStatesQueue worklist_def_use_mem_states(area); // prior memory state to store and possible-def to explore
 734   Node_List non_early_stores(area); // all relevant stores outside of early
 735   bool must_raise_LCA = false;
 736 
 737   // 'load' uses some memory state; look for users of the same state.
 738   // Recurse through MergeMem nodes to the stores that use them.
 739 
 740   // Each of these stores is a possible definition of memory
 741   // that 'load' needs to use.  We need to force 'load'
 742   // to occur before each such store.  When the store is in
 743   // the same block as 'load', we insert an anti-dependence
 744   // edge load->store.
 745 
 746   // The relevant stores "nearby" the load consist of a tree rooted
 747   // at initial_mem, with internal nodes of type MergeMem.
 748   // Therefore, the branches visited by the worklist are of this form:
 749   //    initial_mem -> (MergeMem ->)* Memory state modifying node
 750   // Memory state modifying nodes include Store and Phi nodes and any node for which needs_anti_dependence_check()
 751   // returns false.
 752   // The anti-dependence constraints apply only to the fringe of this tree.
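       //
       // Illustrative shape of such a search tree (rooted at initial_mem):
       //
       //   initial_mem
       //    |-- MergeMem
       //    |     |-- StoreI    <- leaf: candidate anti-dependence
       //    |     |-- Phi       <- leaf: handled per matching input
       //    |-- StoreB          <- leaf: candidate anti-dependence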
 753 
 754   Node* initial_mem = load->in(MemNode::Memory);
 755 
 756   // We don't optimize the memory graph for pinned loads, so we may need to raise the
 757   // root of our search tree through the corresponding slices of MergeMem nodes to
 758   // get to the node that really creates the memory state for this slice.
 759   if (load_alias_idx >= Compile::AliasIdxRaw) {
 760     while (initial_mem->is_MergeMem()) {
 761       MergeMemNode* mm = initial_mem->as_MergeMem();
 762       Node* p = mm->memory_at(load_alias_idx);
 763       if (p != mm->base_memory()) {
 764         initial_mem = p;
 765       } else {
 766         break;
 767       }
 768     }
 769   }
 770   worklist_def_use_mem_states.push(nullptr, initial_mem);
 771   while (worklist_def_use_mem_states.is_nonempty()) {
 772     // Examine a nearby store to see if it might interfere with our load.
 773     Node* def_mem_state = worklist_def_use_mem_states.top_def();
 774     Node* use_mem_state = worklist_def_use_mem_states.top_use();
 775     worklist_def_use_mem_states.pop();
 776 
 777     uint op = use_mem_state->Opcode();
 778 
 779 #ifdef ASSERT
 780     // CacheWB nodes are peculiar in the sense that they are both anti-dependent and produce memory.
 781     // Allow them to be treated as a store.
 782     bool is_cache_wb = false;
 783     if (use_mem_state->is_Mach()) {
 784       int ideal_op = use_mem_state->as_Mach()->ideal_Opcode();
 785       is_cache_wb = (ideal_op == Op_CacheWB);
 786     }
 787     assert(!use_mem_state->needs_anti_dependence_check() || is_cache_wb, "no loads");
 788 #endif
 789 
 790     // MergeMems do not directly have anti-deps.
 791     // Treat them as internal nodes in a forward tree of memory states,
 792     // the leaves of which are each a 'possible-def'.
 793     if (use_mem_state == initial_mem    // root (exclusive) of tree we are searching
 794         || op == Op_MergeMem    // internal node of tree we are searching
 795         ) {
 796       def_mem_state = use_mem_state;   // It's not a possibly interfering store.
 797       if (use_mem_state == initial_mem)
 798         initial_mem = nullptr;  // only process initial memory once
 799 
 800       for (DUIterator_Fast imax, i = def_mem_state->fast_outs(imax); i < imax; i++) {
 801         use_mem_state = def_mem_state->fast_out(i);
 802         if (use_mem_state->needs_anti_dependence_check()) {
 803           // use_mem_state is also a kind of load (i.e. needs_anti_dependence_check), and it is not a memory state
 804           // modifying node (store, Phi or MergeMem). Hence, load can't be anti dependent on this node.
 805           continue;
 806         }
 807         worklist_def_use_mem_states.push(def_mem_state, use_mem_state);
 808       }
 809       continue;
 810     }
 811 
 812     if (op == Op_MachProj || op == Op_Catch)   continue;
 813 
 814     // Compute the alias index.  Loads and stores with different alias
 815     // indices do not need anti-dependence edges.  Wide MemBar's are
 816     // anti-dependent on everything (except immutable memories).
 817     const TypePtr* adr_type = use_mem_state->adr_type();
 818     if (!C->can_alias(adr_type, load_alias_idx))  continue;
 819 
 820     // Most slow-path runtime calls do NOT modify Java memory, but
 821     // they can block and so write Raw memory.
 822     if (use_mem_state->is_Mach()) {
 823       MachNode* mstore = use_mem_state->as_Mach();
 824       if (load_alias_idx != Compile::AliasIdxRaw) {
 825         // Check for call into the runtime using the Java calling
 826         // convention (and from there into a wrapper); it has no
 827         // _method.  Can't do this optimization for Native calls because
 828         // they CAN write to Java memory.
 829         if (mstore->ideal_Opcode() == Op_CallStaticJava) {
 830           assert(mstore->is_MachSafePoint(), "");
 831           MachSafePointNode* ms = (MachSafePointNode*) mstore;
 832           assert(ms->is_MachCallJava(), "");
 833           MachCallJavaNode* mcj = (MachCallJavaNode*) ms;
 834           if (mcj->_method == nullptr) {
 835             // These runtime calls do not write to Java visible memory
 836             // (other than Raw) and so do not require anti-dependence edges.
 837             continue;
 838           }
 839         }
 840         // Same for SafePoints: they read/write Raw but only read otherwise.
 841         // This is basically a workaround for SafePoints only defining control
 842         // instead of control + memory.
 843         if (mstore->ideal_Opcode() == Op_SafePoint)
 844           continue;
 845       } else {
 846         // Some raw memory, such as the load of "top" at an allocation,
 847         // can be control dependent on the previous safepoint. See
 848         // comments in GraphKit::allocate_heap() about control input.
 849         // Inserting an anti-dep between such a safepoint and a use
 850         // creates a cycle, and will cause a subsequent failure in
 851         // local scheduling.  (BugId 4919904)
 852         // (%%% How can a control input be a safepoint and not a projection??)
 853         if (mstore->ideal_Opcode() == Op_SafePoint && load->in(0) == mstore)
 854           continue;
 855       }
 856     }
 857 
 858     // Identify a block that the current load must be above,
 859     // or else observe that 'store' is all the way up in the
 860     // earliest legal block for 'load'.  In the latter case,
 861     // immediately insert an anti-dependence edge.
 862     Block* store_block = get_block_for_node(use_mem_state);
 863     assert(store_block != nullptr, "unused killing projections skipped above");
 864 
 865     if (use_mem_state->is_Phi()) {
 866       // Loop-phis need to raise load before input. (Other phis are treated
 867       // as store below.)
 868       //
 869       // 'load' uses memory which is one (or more) of the Phi's inputs.
 870       // It must be scheduled not before the Phi, but rather before
 871       // each of the relevant Phi inputs.
 872       //
 873       // Instead of finding the LCA of all inputs to a Phi that match 'mem',
 874       // we mark each corresponding predecessor block and do a combined
 875       // hoisting operation later (raise_LCA_above_marks).
 876       //
 877       // Do not assert(store_block != early, "Phi merging memory after access")
 878       // PhiNode may be at start of block 'early' with backedge to 'early'
 879       DEBUG_ONLY(bool found_match = false);
 880       for (uint j = PhiNode::Input, jmax = use_mem_state->req(); j < jmax; j++) {
 881         if (use_mem_state->in(j) == def_mem_state) {   // Found matching input?
 882           DEBUG_ONLY(found_match = true);
 883           Block* pred_block = get_block_for_node(store_block->pred(j));
 884           if (pred_block != early) {
 885             // If any predecessor of the Phi matches the load's "early block",
 886             // we do not need a precedence edge between the Phi and 'load'
 887             // since the load will be forced into a block preceding the Phi.
 888             pred_block->set_raise_LCA_mark(load_index);
 889             assert(!LCA_orig->dominates(pred_block) ||
 890                    early->dominates(pred_block), "early is high enough");
 891             must_raise_LCA = true;
 892           } else {
 893             // anti-dependent upon PHI pinned below 'early', no edge needed
 894             LCA = early;             // but can not schedule below 'early'
 895           }
 896         }
 897       }
 898       assert(found_match, "no worklist bug");
 899     } else if (store_block != early) {
 900       // 'store' is between the current LCA and earliest possible block.
 901       // Label its block, and decide later on how to raise the LCA
 902       // to include the effect on LCA of this store.
 903       // If this store's block gets chosen as the raised LCA, we
 904       // will find him on the non_early_stores list and stick him
 905       // with a precedence edge.
 906       // (But, don't bother if LCA is already raised all the way.)
 907       if (LCA != early && !unrelated_load_in_store_null_block(use_mem_state, load)) {
 908         store_block->set_raise_LCA_mark(load_index);
 909         must_raise_LCA = true;
 910         non_early_stores.push(use_mem_state);
 911       }
 912     } else {
 913       // Found a possibly-interfering store in the load's 'early' block.
 914       // This means 'load' cannot sink at all in the dominator tree.
 915       // Add an anti-dep edge, and squeeze 'load' into the highest block.
 916       assert(use_mem_state != load->find_exact_control(load->in(0)), "dependence cycle found");
 917       if (verify) {
 918         assert(use_mem_state->find_edge(load) != -1 || unrelated_load_in_store_null_block(use_mem_state, load),
 919                "missing precedence edge");
 920       } else {
 921         use_mem_state->add_prec(load);
 922       }
 923       LCA = early;
 924       // This turns off the process of gathering non_early_stores.
 925     }
 926   }
 927   // (Worklist is now empty; all nearby stores have been visited.)
 928 
 929   // Finished if 'load' must be scheduled in its 'early' block.
 930   // If we found any stores there, they have already been given
 931   // precedence edges.
 932   if (LCA == early)  return LCA;
 933 
 934   // We get here only if there are no possibly-interfering stores
 935   // in the load's 'early' block.  Move LCA up above all predecessors
 936   // which contain stores we have noted.
 937   //
 938   // The raised LCA block can be a home to such interfering stores,
 939   // but its predecessors must not contain any such stores.
 940   //
 941   // The raised LCA will be a lower bound for placing the load,
 942   // preventing the load from sinking past any block containing
 943   // a store that may invalidate the memory state required by 'load'.
 944   if (must_raise_LCA)
 945     LCA = raise_LCA_above_marks(LCA, load->_idx, early, this);
 946   if (LCA == early)  return LCA;
 947 
 948   // Insert anti-dependence edges from 'load' to each store
 949   // in the non-early LCA block.
 950   // Mine the non_early_stores list for such stores.
 951   if (LCA->raise_LCA_mark() == load_index) {
 952     while (non_early_stores.size() > 0) {
 953       Node* store = non_early_stores.pop();
 954       Block* store_block = get_block_for_node(store);
 955       if (store_block == LCA) {
 956         // add anti_dependence from store to load in its own block
 957         assert(store != load->find_exact_control(load->in(0)), "dependence cycle found");
 958         if (verify) {
 959           assert(store->find_edge(load) != -1, "missing precedence edge");
 960         } else {
 961           store->add_prec(load);
 962         }
 963       } else {
 964         assert(store_block->raise_LCA_mark() == load_index, "block was marked");
 965         // Any other stores we found must be either inside the new LCA
 966         // or else outside the original LCA.  In the latter case, they
 967         // did not interfere with any use of 'load'.
 968         assert(LCA->dominates(store_block)
 969                || !LCA_orig->dominates(store_block), "no stray stores");
 970       }
 971     }
 972   }
 973 
 974   // Return the highest block containing stores; any stores
 975   // within that block have been given anti-dependence edges.
 976   return LCA;
 977 }
 978 
 979 // This class is used to iterate backwards over the nodes in the graph.
 980 
 981 class Node_Backward_Iterator {
 982 
 983 private:
 984   Node_Backward_Iterator();
 985 
 986 public:
 987   // Constructor for the iterator
 988   Node_Backward_Iterator(Node *root, VectorSet &visited, Node_Stack &stack, PhaseCFG &cfg);
 989 
 990   // Return the next node in the backward traversal, or null when done
 991   Node *next();
 992 
 993 private:
 994   VectorSet   &_visited;
 995   Node_Stack  &_stack;
 996   PhaseCFG &_cfg;
 997 };
 998 
 999 // Constructor for the Node_Backward_Iterator
1000 Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_Stack &stack, PhaseCFG &cfg)
1001   : _visited(visited), _stack(stack), _cfg(cfg) {
1002   // The stack should contain exactly the root
1003   stack.clear();
1004   stack.push(root, root->outcnt());
1005 
1006   // Clear the visited bits
1007   visited.clear();
1008 }
1009 
1010 // Advance the Node_Backward_Iterator and return the next node
1011 Node *Node_Backward_Iterator::next() {
1012 
1013   // If the _stack is empty, then just return null: finished.
1014   if ( !_stack.size() )
1015     return nullptr;
1016 
1017   // I visit unvisited not-anti-dependence users first, then anti-dependent
1018   // children next. I iterate backwards to support removal of nodes.
1019   // The stack holds states consisting of 3 values:
1020   // current Def node, flag which indicates 1st/2nd pass, index of current out edge
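       // The pass flag is packed into the low bit of the saved node pointer
       // (Node pointers are at least 2-byte aligned), hence the masking with ~1
       // below.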
1021   Node *self = (Node*)(((uintptr_t)_stack.node()) & ~1);
1022   bool iterate_anti_dep = (((uintptr_t)_stack.node()) & 1);
1023   uint idx = MIN2(_stack.index(), self->outcnt()); // Support removal of nodes.
1024   _stack.pop();
1025 
1026   // I cycle here when I am entering a deeper level of recursion.
1027   // The key variable 'self' was set prior to jumping here.
1028   while( 1 ) {
1029 
1030     _visited.set(self->_idx);
1031 
1032     // Now schedule all uses as late as possible.
1033     const Node* src = self->is_Proj() ? self->in(0) : self;
1034     uint src_rpo = _cfg.get_block_for_node(src)->_rpo;
1035 
1036     // Schedule all nodes in a post-order visit
1037     Node *unvisited = nullptr;  // Unvisited anti-dependent Node, if any
1038 
1039     // Scan for unvisited nodes
1040     while (idx > 0) {
1041       // For all uses, schedule late
1042       Node* n = self->raw_out(--idx); // Use
1043 
1044       // Skip already visited children
1045       if ( _visited.test(n->_idx) )
1046         continue;
1047 
1048       // do not traverse backward control edges
1049       Node *use = n->is_Proj() ? n->in(0) : n;
1050       uint use_rpo = _cfg.get_block_for_node(use)->_rpo;
1051 
1052       if ( use_rpo < src_rpo )
1053         continue;
1054 
1055       // Phi nodes always precede uses in a basic block
1056       if ( use_rpo == src_rpo && use->is_Phi() )
1057         continue;
1058 
1059       unvisited = n;      // Found unvisited; kept as fallback even if it fails the pass test below
1060 
1061       // Check for possible-anti-dependent
1062       // 1st pass: No such nodes, 2nd pass: Only such nodes.
1063       if (n->needs_anti_dependence_check() == iterate_anti_dep) {
1064         unvisited = n;      // Found unvisited
1065         break;
1066       }
1067     }
1068 
1069     // Did I find an unvisited not-anti-dependent Node?
1070     if (!unvisited) {
1071       if (!iterate_anti_dep) {
1072         // 2nd pass: Iterate over nodes for which needs_anti_dependence_check() is true.
1073         iterate_anti_dep = true;
1074         idx = self->outcnt();
1075         continue;
1076       }
1077       break;                  // All done with children; post-visit 'self'
1078     }
1079 
1080     // Visit the unvisited Node.  Contains the obvious push to
1081     // indicate I'm entering a deeper level of recursion.  I push the
1082     // old state onto the _stack and set a new state and loop (recurse).
1083     _stack.push((Node*)((uintptr_t)self | (uintptr_t)iterate_anti_dep), idx);
1084     self = unvisited;
1085     iterate_anti_dep = false;
1086     idx = self->outcnt();
1087   } // End recursion loop
1088 
1089   return self;
1090 }
1091 
1092 //------------------------------ComputeLatenciesBackwards----------------------
1093 // Compute the latency of all the instructions.
1094 void PhaseCFG::compute_latencies_backwards(VectorSet &visited, Node_Stack &stack) {
1095 #ifndef PRODUCT
1096   if (trace_opto_pipelining())
1097     tty->print("\n#---- ComputeLatenciesBackwards ----\n");
1098 #endif
1099 
1100   Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
1101   Node *n;
1102 
1103   // Walk over all the nodes from last to first
1104   while ((n = iter.next())) {
1105     // Set the latency for the definitions of this instruction
1106     partial_latency_of_defs(n);
1107   }
1108 } // end ComputeLatenciesBackwards
1109 
1110 //------------------------------partial_latency_of_defs------------------------
1111 // Compute the latency impact of this node on all defs.  This computes
1112 // a number that increases as we approach the beginning of the routine.
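     // In effect, for every input j of n (or of the node that n projects from):
     //   latency(def_j) = max(latency(def_j), latency(n) + n->latency(j))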
1113 void PhaseCFG::partial_latency_of_defs(Node *n) {
1114   // Set the latency for this instruction
1115 #ifndef PRODUCT
1116   if (trace_opto_pipelining()) {
1117     tty->print("# latency_to_inputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
1118     dump();
1119   }
1120 #endif
1121 
1122   if (n->is_Proj()) {
1123     n = n->in(0);
1124   }
1125 
1126   if (n->is_Root()) {
1127     return;
1128   }
1129 
1130   uint nlen = n->len();
1131   uint use_latency = get_latency_for_node(n);
1132   uint use_pre_order = get_block_for_node(n)->_pre_order;
1133 
1134   for (uint j = 0; j < nlen; j++) {
1135     Node *def = n->in(j);
1136 
1137     if (!def || def == n) {
1138       continue;
1139     }
1140 
1141     // Walk backwards thru projections
1142     if (def->is_Proj()) {
1143       def = def->in(0);
1144     }
1145 
1146 #ifndef PRODUCT
1147     if (trace_opto_pipelining()) {
1148       tty->print("#    in(%2d): ", j);
1149       def->dump();
1150     }
1151 #endif
1152 
1153     // If the defining block is not known, assume it is ok
1154     Block *def_block = get_block_for_node(def);
1155     uint def_pre_order = def_block ? def_block->_pre_order : 0;
1156 
1157     if ((use_pre_order <  def_pre_order) || (use_pre_order == def_pre_order && n->is_Phi())) {
1158       continue;
1159     }
1160 
1161     uint delta_latency = n->latency(j);
1162     uint current_latency = delta_latency + use_latency;
1163 
1164     if (get_latency_for_node(def) < current_latency) {
1165       set_latency_for_node(def, current_latency);
1166     }
1167 
1168 #ifndef PRODUCT
1169     if (trace_opto_pipelining()) {
1170       tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d", use_latency, j, delta_latency, current_latency, def->_idx, get_latency_for_node(def));
1171     }
1172 #endif
1173   }
1174 }
1175 
1176 //------------------------------latency_from_use-------------------------------
1177 // Compute the latency of a specific use
1178 int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
1179   // If self-reference, return no latency
1180   if (use == n || use->is_Root()) {
1181     return 0;
1182   }
1183 
1184   uint def_pre_order = get_block_for_node(def)->_pre_order;
1185   uint latency = 0;
1186 
1187   // If the use is not a projection, then it is simple...
1188   if (!use->is_Proj()) {
1189 #ifndef PRODUCT
1190     if (trace_opto_pipelining()) {
1191       tty->print("#    out(): ");
1192       use->dump();
1193     }
1194 #endif
1195 
1196     uint use_pre_order = get_block_for_node(use)->_pre_order;
1197 
1198     if (use_pre_order < def_pre_order)
1199       return 0;
1200 
1201     if (use_pre_order == def_pre_order && use->is_Phi())
1202       return 0;
1203 
1204     uint nlen = use->len();
1205     uint nl = get_latency_for_node(use);
1206 
1207     for ( uint j=0; j<nlen; j++ ) {
1208       if (use->in(j) == n) {
1209         // Change this if we want local latencies
1210         uint ul = use->latency(j);
1211         uint  l = ul + nl;
1212         if (latency < l) latency = l;
1213 #ifndef PRODUCT
1214         if (trace_opto_pipelining()) {
1215           tty->print_cr("#      %d + edge_latency(%d) == %d -> %d, latency = %d",
1216                         nl, j, ul, l, latency);
1217         }
1218 #endif
1219       }
1220     }
1221   } else {
1222     // This is a projection, just grab the latency of the use(s)
1223     for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
1224       uint l = latency_from_use(use, def, use->fast_out(j));
1225       if (latency < l) latency = l;
1226     }
1227   }
1228 
1229   return latency;
1230 }
1231 
1232 //------------------------------latency_from_uses------------------------------
1233 // Compute the latency of this instruction relative to all of its uses.
1234 // This computes a number that increases as we approach the beginning of the
1235 // routine.
1236 void PhaseCFG::latency_from_uses(Node *n) {
1237   // Set the latency for this instruction
1238 #ifndef PRODUCT
1239   if (trace_opto_pipelining()) {
1240     tty->print("# latency_from_outputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
1241     dump();
1242   }
1243 #endif
1244   uint latency=0;
1245   const Node *def = n->is_Proj() ? n->in(0): n;
1246 
1247   for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1248     uint l = latency_from_use(n, def, n->fast_out(i));
1249 
1250     if (latency < l) latency = l;
1251   }
1252 
1253   set_latency_for_node(n, latency);
1254 }
1255 
1256 //------------------------------is_cheaper_block-------------------------
1257 // Check if a block between early and LCA block of uses is cheaper by
1258 // frequency-based policy, latency-based policy and random-based policy
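     // Roughly: accept the block if it is strictly less frequent than the best
     // so far, or if it is no more frequent, still within the node's latency
     // range, and the node is not an iteratively computed value (see below).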
1259 bool PhaseCFG::is_cheaper_block(Block* LCA, Node* self, uint target_latency,
1260                                 uint end_latency, double least_freq,
1261                                 int cand_cnt, bool in_latency) {
1262   if (StressGCM) {
1263     // Should be randomly accepted in stress mode
1264     return C->randomized_select(cand_cnt);
1265   }
1266 
1267   const double delta = 1 + PROB_UNLIKELY_MAG(4);
1268 
1269   // Better Frequency. Add a small delta to the comparison to not needlessly
1270   // hoist because of, e.g., small numerical inaccuracies.
1271   if (LCA->_freq * delta < least_freq) {
1272     return true;
1273   }
1274 
1275   // Otherwise, choose with latency
1276   if (!in_latency                     &&  // No block containing latency
1277       LCA->_freq < least_freq * delta &&  // No worse frequency
1278       target_latency >= end_latency   &&  // within latency range
1279       !self->is_iteratively_computed()    // But don't hoist IV increments
1280             // because they may end up above other uses of their phi forcing
1281             // their result register to be different from their input.
1282   ) {
1283     return true;
1284   }
1285 
1286   return false;
1287 }
1288 
1289 //------------------------------hoist_to_cheaper_block-------------------------
1290 // Pick a block for node self, between early and LCA block of uses, that is a
1291 // cheaper alternative to LCA.
1292 Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
1293   Block* least       = LCA;
1294   double least_freq  = least->_freq;
1295   uint target        = get_latency_for_node(self);
1296   uint start_latency = get_latency_for_node(LCA->head());
1297   uint end_latency   = get_latency_for_node(LCA->get_node(LCA->end_idx()));
1298   bool in_latency    = (target <= start_latency);
1299   const Block* root_block = get_block_for_node(_root);
1300 
1301   // Turn off latency scheduling if scheduling is just plain off
1302   if (!C->do_scheduling())
1303     in_latency = true;
1304 
1305   // Do not hoist (to cover latency) instructions which target a
1306   // single register.  Hoisting stretches the live range of the
1307   // single register and may force spilling.
1308   MachNode* mach = self->is_Mach() ? self->as_Mach() : nullptr;
1309   if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty())
1310     in_latency = true;
1311 
1312 #ifndef PRODUCT
1313   if (trace_opto_pipelining()) {
1314     tty->print("# Find cheaper block for latency %d: ", get_latency_for_node(self));
1315     self->dump();
1316     tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
1317       LCA->_pre_order,
1318       LCA->head()->_idx,
1319       start_latency,
1320       LCA->get_node(LCA->end_idx())->_idx,
1321       end_latency,
1322       least_freq);
1323   }
1324 #endif
1325 
1326   int cand_cnt = 0;  // number of candidates tried
1327 
1328   // Walk up the dominator tree from LCA (lowest common ancestor) to the
1329   // earliest legal location, capturing the block with the least execution
1330   // frequency, choosing by latency, or picking at random under -XX:+StressGCM.
1331   while (LCA != early) {
1332     LCA = LCA->_idom;         // Follow up the dominator tree
1333 
1334     if (LCA == nullptr) {
1335       // Bailout without retry
1336       assert(false, "graph should be schedulable");
1337       C->record_method_not_compilable("late schedule failed: LCA is null");
1338       return least;
1339     }
1340 
1341     // Don't hoist machine instructions to the root basic block
1342     if (mach && LCA == root_block)
1343       break;
1344 
1345     if (self->is_memory_writer() &&
1346         (LCA->_loop->depth() > early->_loop->depth())) {
1347       // LCA is an invalid placement for a memory writer: choosing it would
1348       // cause memory interference, as illustrated in schedule_late().
1349       continue;
1350     }
1351     verify_memory_writer_placement(LCA, self);
1352 
1353     uint start_lat = get_latency_for_node(LCA->head());
1354     uint end_idx   = LCA->end_idx();
1355     uint end_lat   = get_latency_for_node(LCA->get_node(end_idx));
1356     double LCA_freq = LCA->_freq;
1357 #ifndef PRODUCT
1358     if (trace_opto_pipelining()) {
1359       tty->print_cr("#   B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
1360         LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq);
1361     }
1362 #endif
1363     cand_cnt++;
1364     if (is_cheaper_block(LCA, self, target, end_lat, least_freq, cand_cnt, in_latency)) {
1365       least = LCA;            // Found cheaper block
1366       least_freq = LCA_freq;
1367       start_latency = start_lat;
1368       end_latency = end_lat;
1369       if (target <= start_lat)
1370         in_latency = true;
1371     }
1372   }
1373 
1374 #ifndef PRODUCT
1375   if (trace_opto_pipelining()) {
1376     tty->print_cr("#  Choose block B%d with start latency=%d and freq=%g",
1377       least->_pre_order, start_latency, least_freq);
1378   }
1379 #endif
1380 
1381   // See if the latency needs to be updated
1382   if (target < end_latency) {
1383 #ifndef PRODUCT
1384     if (trace_opto_pipelining()) {
1385       tty->print_cr("#  Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
1386     }
1387 #endif
1388     set_latency_for_node(self, end_latency);
1389     partial_latency_of_defs(self);
1390   }
1391 
1392   return least;
1393 }
1394 
1395 
1396 //------------------------------schedule_late-----------------------------------
1397 // Now schedule all nodes as LATE as possible.  This is the LCA in the
1398 // dominator tree of all USES of a value.  Pick the block with the least
1399 // loop nesting depth that is lowest in the dominator tree.
1400 extern const char must_clone[];
1401 void PhaseCFG::schedule_late(VectorSet &visited, Node_Stack &stack) {
1402 #ifndef PRODUCT
1403   if (trace_opto_pipelining())
1404     tty->print("\n#---- schedule_late ----\n");
1405 #endif
1406 
1407   Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
1408   Node *self;
1409 
1410   // Walk over all the nodes from last to first
1411   while ((self = iter.next())) {
1412     Block* early = get_block_for_node(self); // Earliest legal placement
1413 
1414     if (self->is_top()) {
1415       // Top node goes in bb #2 with other constants.
1416       // It must be special-cased, because it has no out edges.
1417       early->add_inst(self);
1418       continue;
1419     }
1420 
1421     // No uses, just terminate
1422     if (self->outcnt() == 0) {
1423       assert(self->is_MachProj(), "sanity");
1424       continue;                   // Must be a dead machine projection
1425     }
1426 
1427     // If node is pinned in the block, then no scheduling can be done.
1428     if( self->pinned() )          // Pinned in block?
1429       continue;
1430 
1431 #ifdef ASSERT
1432     // Assert that memory writers (e.g. stores) have a "home" block (the block
1433     // given by their control input), and that this block corresponds to their
1434     // earliest possible placement. This guarantees that
1435     // hoist_to_cheaper_block() will always have at least one valid choice.
1436     if (self->is_memory_writer()) {
1437       assert(find_block_for_node(self->in(0)) == early,
1438              "The home of a memory writer must also be its earliest placement");
1439     }
1440 #endif
1441 
1442     MachNode* mach = self->is_Mach() ? self->as_Mach() : nullptr;
1443     if (mach) {
1444       switch (mach->ideal_Opcode()) {
1445       case Op_CreateEx:
1446         // Don't move exception creation
1447         early->add_inst(self);
1448         continue;
1450       case Op_CastI2N:
1451         early->add_inst(self);
1452         continue;
1453       case Op_CheckCastPP: {
1454         // Don't move CheckCastPP nodes away from their input, if the input
1455         // is a rawptr (5071820).
1456         Node *def = self->in(1);
1457         if (def != nullptr && def->bottom_type()->base() == Type::RawPtr) {
1458           early->add_inst(self);
1459 #ifdef ASSERT
1460           _raw_oops.push(def);
1461 #endif
1462           continue;
1463         }
1464         break;
1465       }
1466       default:
1467         break;
1468       }
1469       if (C->has_irreducible_loop() && self->is_memory_writer()) {
1470         // If the CFG is irreducible, place memory writers in their home block.
1471         // This prevents hoist_to_cheaper_block() from accidentally placing such
1472         // nodes into deeper loops, as in the following example:
1473         //
1474         // Home placement of store in B1 (loop L1):
1475         //
1476         // B1 (L1):
1477         //   m1 <- ..
1478         //   m2 <- store m1, ..
1479         // B2 (L2):
1480         //   jump B2
1481         // B3 (L1):
1482         //   .. <- .. m2, ..
1483         //
1484         // Wrong "hoisting" of store to B2 (in loop L2, child of L1):
1485         //
1486         // B1 (L1):
1487         //   m1 <- ..
1488         // B2 (L2):
1489         //   m2 <- store m1, ..
1490         //   # Wrong: m1 and m2 interfere at this point.
1491         //   jump B2
1492         // B3 (L1):
1493         //   .. <- .. m2, ..
1494         //
1495         // This "hoist inversion" can happen due to different factors such as
1496         // inaccurate estimation of frequencies for irreducible CFGs, and loops
1497         // with always-taken exits in reducible CFGs. In the reducible case,
1498         // hoist inversion is prevented by discarding invalid blocks (those in
1499         // deeper loops than the home block). In the irreducible case, the
1500         // invalid blocks cannot be identified due to incomplete loop nesting
1501         // information, hence a conservative solution is taken.
1502 #ifndef PRODUCT
1503         if (trace_opto_pipelining()) {
1504           tty->print_cr("# Irreducible loops: schedule in home block B%d:",
1505                         early->_pre_order);
1506           self->dump();
1507         }
1508 #endif
1509         schedule_node_into_block(self, early);
1510         continue;
1511       }
1512     }
1513 
1514     // Gather LCA of all uses
1515     Block *LCA = nullptr;
1516     {
1517       for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
1518         // For all uses, find LCA
1519         Node* use = self->fast_out(i);
1520         LCA = raise_LCA_above_use(LCA, use, self, this);
1521       }
1522       guarantee(LCA != nullptr, "There must be an LCA");
1523     }  // (Hide defs of imax, i from rest of block.)
1524 
1525     // Place temps in the block of their use.  This isn't a
1526     // requirement for correctness but it reduces useless
1527     // interference between temps and other nodes.
1528     if (mach != nullptr && mach->is_MachTemp()) {
1529       map_node_to_block(self, LCA);
1530       LCA->add_inst(self);
1531       continue;
1532     }
1533 
1534     // Check if 'self' could be anti-dependent on memory
1535     if (self->needs_anti_dependence_check()) {
1536       // Hoist LCA above possible-defs and insert anti-dependences to
1537       // defs in new LCA block.
1538       LCA = insert_anti_dependences(LCA, self);
1539       if (C->failing()) {
1540         return;
1541       }
1542     }
1543 
1544     if (early->_dom_depth > LCA->_dom_depth) {
1545       // Somehow the LCA has moved above the earliest legal point.
1546       // (One way this can happen is via memory_early_block.)
1547       if (C->subsume_loads() && !C->failing()) {
1548         // Retry with subsume_loads == false
1549         // If this is the first failure, the sentinel string will "stick"
1550         // to the Compile object, and the C2Compiler will see it and retry.
1551         C->record_failure(C2Compiler::retry_no_subsuming_loads());
1552       } else {
1553         // Bailout without retry when (early->_dom_depth > LCA->_dom_depth)
1554         assert(C->failure_is_artificial(), "graph should be schedulable");
1555         C->record_method_not_compilable("late schedule failed: incorrect graph" DEBUG_ONLY(COMMA true));
1556       }
1557       return;
1558     }
1559 
1560     if (self->is_memory_writer()) {
1561       // If the LCA of a memory writer is a descendant of its home loop, hoist
1562       // it into a valid placement.
1563       while (LCA->_loop->depth() > early->_loop->depth()) {
1564         LCA = LCA->_idom;
1565       }
1566       assert(LCA != nullptr, "a valid LCA must exist");
1567       verify_memory_writer_placement(LCA, self);
1568     }
1569 
1570     // If there is no opportunity to hoist, then we're done.
1571     // In stress mode, try to hoist even when LCA == early.
1572     bool try_to_hoist = StressGCM || (LCA != early);
1573 
1574     // Must clone guys stay next to use; no hoisting allowed.
1575     // Also cannot hoist guys that alter memory or are otherwise not
1576     // allocatable (hoisting can make a value live longer, leading to
1577     // anti and output dependency problems which are normally resolved
1578     // by the register allocator giving everyone a different register).
1579     if (mach != nullptr && must_clone[mach->ideal_Opcode()])
1580       try_to_hoist = false;
1581 
1582     Block* late = nullptr;
1583     if (try_to_hoist) {
1584       // Now find the block with the least execution frequency.
1585       // Start at the latest schedule and work up to the earliest schedule
1586       // in the dominator tree.  Thus the Node will dominate all its uses.
1587       late = hoist_to_cheaper_block(LCA, early, self);
1588     } else {
1589       // Just use the LCA of the uses.
1590       late = LCA;
1591     }
1592 
1593     // Put the node into target block
1594     schedule_node_into_block(self, late);
1595 
1596 #ifdef ASSERT
1597     if (self->needs_anti_dependence_check()) {
1598       // since precedence edges are only inserted when we're sure they
1599       // are needed make sure that after placement in a block we don't
1600       // need any new precedence edges.
1601       verify_anti_dependences(late, self);
1602     }
1603 #endif
1604   } // Loop until all nodes have been visited
1605 
1606 } // end ScheduleLate
1607 
1608 //------------------------------GlobalCodeMotion-------------------------------
1609 void PhaseCFG::global_code_motion() {
1610   ResourceMark rm;
1611 
1612 #ifndef PRODUCT
1613   if (trace_opto_pipelining()) {
1614     tty->print("\n---- Start GlobalCodeMotion ----\n");
1615   }
1616 #endif
1617 
1618   // Initialize the node to block mapping for things on the proj_list
1619   for (uint i = 0; i < _matcher.number_of_projections(); i++) {
1620     unmap_node_from_block(_matcher.get_projection(i));
1621   }
1622 
1623   // Set the basic block for Nodes pinned into blocks
1624   VectorSet visited;
1625   schedule_pinned_nodes(visited);
1626 
1627   // Find the earliest Block any instruction can be placed in.  Some
1628   // instructions are pinned into Blocks.  Unpinned instructions can
1629   // appear in the last block in which all their inputs occur.
1630   visited.clear();
1631   Node_Stack stack((C->live_nodes() >> 2) + 16); // pre-grow
1632   if (!schedule_early(visited, stack)) {
1633     // Bailout without retry
1634     assert(C->failure_is_artificial(), "early schedule failed");
1635     C->record_method_not_compilable("early schedule failed" DEBUG_ONLY(COMMA true));
1636     return;
1637   }
1638 
1639   // Build Def-Use edges.
1640   // Compute the latency information (via backwards walk) for all the
1641   // instructions in the graph
1642   _node_latency = new GrowableArray<uint>(); // resource_area allocation
1643 
1644   if (C->do_scheduling()) {
1645     compute_latencies_backwards(visited, stack);
1646   }
1647 
1648   // Now schedule all nodes as LATE as possible.  This is the LCA in the
1649   // dominator tree of all USES of a value.  Pick the block with the least
1650   // loop nesting depth that is lowest in the dominator tree.
1651   // ( visited.clear() called in schedule_late()->Node_Backward_Iterator() )
1652   schedule_late(visited, stack);
1653   if (C->failing()) {
1654     return;
1655   }
1656 
1657 #ifndef PRODUCT
1658   if (trace_opto_pipelining()) {
1659     tty->print("\n---- Detect implicit null checks ----\n");
1660   }
1661 #endif
1662 
1663   // Detect implicit-null-check opportunities.  Basically, find null checks
1664   // with suitable memory ops nearby.  Use the memory op to do the null check.
1665   // I can generate a memory op if there is not one nearby.
1666   if (C->is_method_compilation()) {
1667     // By reversing the loop direction we get a very minor gain on mpegaudio.
1668     // Feel free to revert to a forward loop for clarity.
1669     // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
1670     for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) {
1671       Node* proj = _matcher._null_check_tests[i];
1672       Node* val  = _matcher._null_check_tests[i + 1];
1673       Block* block = get_block_for_node(proj);
1674       implicit_null_check(block, proj, val, C->allowed_deopt_reasons());
1675       // The implicit_null_check will only perform the transformation
1676       // if the null branch is truly uncommon, *and* it leads to an
1677       // uncommon trap.  Combined with the too_many_traps guards
1678       // above, this prevents SEGV storms reported in 6366351,
1679       // by recompiling offending methods without this optimization.
1680       if (C->failing()) {
1681         return;
1682       }
1683     }
1684   }
1685 
1686   bool block_size_threshold_ok = false;
1687   intptr_t *recalc_pressure_nodes = nullptr;
1688   if (OptoRegScheduling) {
1689     for (uint i = 0; i < number_of_blocks(); i++) {
1690       Block* block = get_block(i);
1691       if (block->number_of_nodes() > 10) {
1692         block_size_threshold_ok = true;
1693         break;
1694       }
1695     }
1696   }
1697 
1698   // Register-pressure-aware local scheduling runs only when OptoRegScheduling
1699   // is enabled and some block exceeds the size threshold checked above.
1700   PhaseChaitin regalloc(C->unique(), *this, _matcher, true);
1701   ResourceArea live_arena(mtCompiler, Arena::Tag::tag_reglive);      // Arena for liveness
1702   ResourceMark rm_live(&live_arena);
1703   PhaseLive live(*this, regalloc._lrg_map.names(), &live_arena, true);
1704   PhaseIFG ifg(&live_arena);
1705   if (OptoRegScheduling && block_size_threshold_ok) {
1706     regalloc.mark_ssa();
1707     Compile::TracePhase tp(_t_computeLive);
1708     rm_live.reset_to_mark();           // Reclaim working storage
1709     IndexSet::reset_memory(C, &live_arena);
1710     uint node_size = regalloc._lrg_map.max_lrg_id();
1711     ifg.init(node_size); // Empty IFG
1712     regalloc.set_ifg(ifg);
1713     regalloc.set_live(live);
1714     regalloc.gather_lrg_masks(false);    // Collect LRG masks
1715     live.compute(node_size); // Compute liveness
1716 
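         // Scratch state used by the register-pressure-aware local scheduler
         // to track nodes whose pressure contribution must be recomputed.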
1717     recalc_pressure_nodes = NEW_RESOURCE_ARRAY(intptr_t, node_size);
1718     for (uint i = 0; i < node_size; i++) {
1719       recalc_pressure_nodes[i] = 0;
1720     }
1721   }
1722   _regalloc = &regalloc;
1723 
1724 #ifndef PRODUCT
1725   if (trace_opto_pipelining()) {
1726     tty->print("\n---- Start Local Scheduling ----\n");
1727   }
1728 #endif
1729 
1730   // Schedule locally.  Right now a simple topological sort.
1731   // Later, do a real latency aware scheduler.
1732   GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1);
1733   visited.reset();
1734   for (uint i = 0; i < number_of_blocks(); i++) {
1735     Block* block = get_block(i);
1736     if (!schedule_local(block, ready_cnt, visited, recalc_pressure_nodes)) {
1737       if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
1738         assert(C->failure_is_artificial(), "local schedule failed");
1739         C->record_method_not_compilable("local schedule failed" DEBUG_ONLY(COMMA true));
1740       }
1741       _regalloc = nullptr;
1742       return;
1743     }
1744   }
1745   _regalloc = nullptr;
1746 
1747   // If we inserted any instructions between a Call and its CatchNode,
1748   // clone the instructions on all paths below the Catch.
1749   for (uint i = 0; i < number_of_blocks(); i++) {
1750     Block* block = get_block(i);
1751     call_catch_cleanup(block);
1752     if (C->failing()) {
1753       return;
1754     }
1755   }
1756 
1757 #ifndef PRODUCT
1758   if (trace_opto_pipelining()) {
1759     tty->print("\n---- After GlobalCodeMotion ----\n");
1760     for (uint i = 0; i < number_of_blocks(); i++) {
1761       Block* block = get_block(i);
1762       block->dump();
1763     }
1764   }
1765 #endif
1766   // _node_latency is dead now; poison the pointer to catch stale accesses.
1767   _node_latency = (GrowableArray<uint> *)((intptr_t)0xdeadbeef);
1768 }
1769 
1770 bool PhaseCFG::do_global_code_motion() {
1771 
1772   build_dominator_tree();
1773   if (C->failing()) {
1774     return false;
1775   }
1776 
1777   NOT_PRODUCT( C->verify_graph_edges(); )
1778 
1779   estimate_block_frequency();
1780 
1781   global_code_motion();
1782 
1783   if (C->failing()) {
1784     return false;
1785   }
1786 
1787   return true;
1788 }
1789 
1790 //------------------------------Estimate_Block_Frequency-----------------------
1791 // Estimate block frequencies based on IfNode probabilities.
1792 void PhaseCFG::estimate_block_frequency() {
1793 
1794   // Force conditional branches leading to uncommon traps to be unlikely,
1795   // not because we get to the uncommon_trap with less relative frequency,
1796   // but because an uncommon_trap typically causes a deopt, so we only get
1797   // there once.
1798   if (C->do_freq_based_layout()) {
1799     Block_List worklist;
1800     Block* root_blk = get_block(0);
1801     for (uint i = 1; i < root_blk->num_preds(); i++) {
1802       Block *pb = get_block_for_node(root_blk->pred(i));
1803       if (pb->has_uncommon_code()) {
1804         worklist.push(pb);
1805       }
1806     }
1807     while (worklist.size() > 0) {
1808       Block* uct = worklist.pop();
1809       if (uct == get_root_block()) {
1810         continue;
1811       }
1812       for (uint i = 1; i < uct->num_preds(); i++) {
1813         Block *pb = get_block_for_node(uct->pred(i));
1814         if (pb->_num_succs == 1) {
1815           worklist.push(pb);
1816         } else if (pb->num_fall_throughs() == 2) {
1817           pb->update_uncommon_branch(uct);
1818         }
1819       }
1820     }
1821   }
1822 
1823   // Create the loop tree and calculate loop depth.
1824   _root_loop = create_loop_tree();
1825   _root_loop->compute_loop_depth(0);
1826 
1827   // Compute block frequency of each block, relative to a single loop entry.
1828   _root_loop->compute_freq();
1829 
1830   // Adjust all frequencies to be relative to a single method entry
1831   _root_loop->_freq = 1.0;
1832   _root_loop->scale_freq();
1833 
1834   // Save outermost loop frequency for LRG frequency threshold
1835   _outer_loop_frequency = _root_loop->outer_loop_freq();
1836 
1837   // force paths ending at uncommon traps to be infrequent
1838   if (!C->do_freq_based_layout()) {
1839     Block_List worklist;
1840     Block* root_blk = get_block(0);
1841     for (uint i = 1; i < root_blk->num_preds(); i++) {
1842       Block *pb = get_block_for_node(root_blk->pred(i));
1843       if (pb->has_uncommon_code()) {
1844         worklist.push(pb);
1845       }
1846     }
1847     while (worklist.size() > 0) {
1848       Block* uct = worklist.pop();
1849       uct->_freq = PROB_MIN;
1850       for (uint i = 1; i < uct->num_preds(); i++) {
1851         Block *pb = get_block_for_node(uct->pred(i));
1852         if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
1853           worklist.push(pb);
1854         }
1855       }
1856     }
1857   }
1858 
1859 #ifdef ASSERT
1860   for (uint i = 0; i < number_of_blocks(); i++) {
1861     Block* b = get_block(i);
1862     assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency");
1863   }
1864 #endif
1865 
1866 #ifndef PRODUCT
1867   if (PrintCFGBlockFreq) {
1868     tty->print_cr("CFG Block Frequencies");
1869     _root_loop->dump_tree();
1870     if (Verbose) {
1871       tty->print_cr("PhaseCFG dump");
1872       dump();
1873       tty->print_cr("Node dump");
1874       _root->dump(99999);
1875     }
1876   }
1877 #endif
1878 }
1879 
1880 //----------------------------create_loop_tree--------------------------------
1881 // Create a loop tree from the CFG
1882 CFGLoop* PhaseCFG::create_loop_tree() {
1883 
1884 #ifdef ASSERT
1885   assert(get_block(0) == get_root_block(), "first block should be root block");
1886   for (uint i = 0; i < number_of_blocks(); i++) {
1887     Block* block = get_block(i);
1888     // Check that the _loop fields are clear... we could clear them if not.
1889     assert(block->_loop == nullptr, "clear _loop expected");
1890     // Sanity check that the RPO numbering is reflected in the _blocks array.
1891     // It doesn't have to be for the loop tree to be built, but if it is not,
1892     // then the blocks have been reordered since dom graph building...which
1893     // may invalidate the RPO numbering.
1894     assert(block->_rpo == i, "unexpected reverse post order number");
1895   }
1896 #endif
1897 
1898   int idct = 0;
1899   CFGLoop* root_loop = new CFGLoop(idct++);
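       // root_loop is the method's pseudo loop (depth 0): every block that
       // belongs to no real loop is added to it below.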
1900 
1901   Block_List worklist;
1902 
1903   // Assign blocks to loops
1904   for(uint i = number_of_blocks() - 1; i > 0; i-- ) { // skip Root block
1905     Block* block = get_block(i);
1906 
1907     if (block->head()->is_Loop()) {
1908       Block* loop_head = block;
1909       assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
1910       Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
1911       Block* tail = get_block_for_node(tail_n);
1912 
1913       // Defensively filter out Loop nodes for non-single-entry loops.
1914       // For all reasonable loops, the head occurs before the tail in RPO.
1915       if (i <= tail->_rpo) {
1916 
1917         // The tail and (recursive) predecessors of the tail
1918         // are made members of a new loop.
1919 
1920         assert(worklist.size() == 0, "nonempty worklist");
1921         CFGLoop* nloop = new CFGLoop(idct++);
1922         assert(loop_head->_loop == nullptr, "just checking");
1923         loop_head->_loop = nloop;
1924         // Add to nloop so push_pred() will skip over inner loops
1925         nloop->add_member(loop_head);
1926         nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this);
1927 
1928         while (worklist.size() > 0) {
1929           Block* member = worklist.pop();
1930           if (member != loop_head) {
1931             for (uint j = 1; j < member->num_preds(); j++) {
1932               nloop->push_pred(member, j, worklist, this);
1933             }
1934           }
1935         }
1936       }
1937     }
1938   }
1939 
1940   // Create a member list for each loop consisting
1941   // of both blocks and (immediate child) loops.
1942   for (uint i = 0; i < number_of_blocks(); i++) {
1943     Block* block = get_block(i);
1944     CFGLoop* lp = block->_loop;
1945     if (lp == nullptr) {
1946       // Not assigned to a loop. Add it to the method's pseudo loop.
1947       block->_loop = root_loop;
1948       lp = root_loop;
1949     }
1950     if (lp == root_loop || block != lp->head()) { // loop heads are already members
1951       lp->add_member(block);
1952     }
1953     if (lp != root_loop) {
1954       if (lp->parent() == nullptr) {
1955         // Not a nested loop. Make it a child of the method's pseudo loop.
1956         root_loop->add_nested_loop(lp);
1957       }
1958       if (block == lp->head()) {
1959         // Add nested loop to member list of parent loop.
1960         lp->parent()->add_member(lp);
1961       }
1962     }
1963   }
1964 
1965   return root_loop;
1966 }
1967 
1968 //------------------------------push_pred--------------------------------------
1969 void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) {
1970   Node* pred_n = blk->pred(i);
1971   Block* pred = cfg->get_block_for_node(pred_n);
1972   CFGLoop *pred_loop = pred->_loop;
1973   if (pred_loop == nullptr) {
1974     // Filter out blocks for non-single-entry loops.
1975     // For all reasonable loops, the head occurs before the tail in RPO.
1976     if (pred->_rpo > head()->_rpo) {
1977       pred->_loop = this;
1978       worklist.push(pred);
1979     }
1980   } else if (pred_loop != this) {
1981     // Nested loop.
1982     while (pred_loop->_parent != nullptr && pred_loop->_parent != this) {
1983       pred_loop = pred_loop->_parent;
1984     }
1985     // Make pred's loop be a child
1986     if (pred_loop->_parent == nullptr) {
1987       add_nested_loop(pred_loop);
1988       // Continue with loop entry predecessor.
1989       Block* pred_head = pred_loop->head();
1990       assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
1991       assert(pred_head != head(), "loop head in only one loop");
1992       push_pred(pred_head, LoopNode::EntryControl, worklist, cfg);
1993     } else {
1994       assert(pred_loop->_parent == this && _parent == nullptr, "just checking");
1995     }
1996   }
1997 }
1998 
1999 //------------------------------add_nested_loop--------------------------------
2000 // Make cl a child of the current loop in the loop tree.
2001 void CFGLoop::add_nested_loop(CFGLoop* cl) {
2002   assert(_parent == nullptr, "no parent yet");
2003   assert(cl != this, "not my own parent");
2004   cl->_parent = this;
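       // Children form an intrusive singly-linked list threaded through
       // _sibling; append the new child at the tail.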
2005   CFGLoop* ch = _child;
2006   if (ch == nullptr) {
2007     _child = cl;
2008   } else {
2009     while (ch->_sibling != nullptr) { ch = ch->_sibling; }
2010     ch->_sibling = cl;
2011   }
2012 }
2013 
2014 //------------------------------compute_loop_depth-----------------------------
2015 // Store the loop depth in each CFGLoop object.
2016 // Recursively walk the children to do the same for them.
2017 void CFGLoop::compute_loop_depth(int depth) {
2018   _depth = depth;
2019   CFGLoop* ch = _child;
2020   while (ch != nullptr) {
2021     ch->compute_loop_depth(depth + 1);
2022     ch = ch->_sibling;
2023   }
2024 }
2025 
2026 //------------------------------compute_freq-----------------------------------
2027 // Compute the frequency of each block and loop, relative to a single entry
2028 // into the dominating loop head.
2029 void CFGLoop::compute_freq() {
2030   // Bottom up traversal of loop tree (visit inner loops first.)
2031   // Set loop head frequency to 1.0, then transitively
2032   // compute frequency for all successors in the loop,
2033   // as well as for each exit edge.  Inner loops are
2034   // treated as single blocks with loop exit targets
2035   // as the successor blocks.
2036 
2037   // Nested loops first
2038   CFGLoop* ch = _child;
2039   while (ch != nullptr) {
2040     ch->compute_freq();
2041     ch = ch->_sibling;
2042   }
2043   assert (_members.length() > 0, "no empty loops");
2044   Block* hd = head();
2045   hd->_freq = 1.0;
2046   for (int i = 0; i < _members.length(); i++) {
2047     CFGElement* s = _members.at(i);
2048     double freq = s->_freq;
2049     if (s->is_block()) {
2050       Block* b = s->as_Block();
2051       for (uint j = 0; j < b->_num_succs; j++) {
2052         Block* sb = b->_succs[j];
2053         update_succ_freq(sb, freq * b->succ_prob(j));
2054       }
2055     } else {
2056       CFGLoop* lp = s->as_CFGLoop();
2057       assert(lp->_parent == this, "immediate child");
2058       for (int k = 0; k < lp->_exits.length(); k++) {
2059         Block* eb = lp->_exits.at(k).get_target();
2060         double prob = lp->_exits.at(k).get_prob();
2061         update_succ_freq(eb, freq * prob);
2062       }
2063     }
2064   }
2065 
2066   // For all loops other than the outer, "method" loop,
2067   // sum and normalize the exit probability. The "method" loop
2068   // should keep the initial exit probability of 1, so that
2069   // inner blocks do not get erroneously scaled.
2070   if (_depth != 0) {
2071     // Total the exit probabilities for this loop.
2072     double exits_sum = 0.0;
2073     for (int i = 0; i < _exits.length(); i++) {
2074       exits_sum += _exits.at(i).get_prob();
2075     }
2076 
2077     // Normalize the exit probabilities. Until now, the
2078     // probabilities estimate the possibility of exit per
2079     // a single loop iteration; afterward, they estimate
2080     // the probability of exit per loop entry.
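         // For example, per-iteration exit probabilities {0.1, 0.3} sum to 0.4
         // and normalize to per-entry probabilities {0.25, 0.75}.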
2081     for (int i = 0; i < _exits.length(); i++) {
2082       Block* et = _exits.at(i).get_target();
2083       float new_prob = 0.0f;
2084       if (_exits.at(i).get_prob() > 0.0f) {
2085         new_prob = _exits.at(i).get_prob() / exits_sum;
2086       }
2087       BlockProbPair bpp(et, new_prob);
2088       _exits.at_put(i, bpp);
2089     }
2090 
2091     // Save the total, but guard against unreasonable probability,
2092     // as the value is used to estimate the loop trip count.
2093     // An infinite trip count would blur relative block
2094     // frequencies.
2095     if (exits_sum > 1.0) exits_sum = 1.0;
2096     if (exits_sum < PROB_MIN) exits_sum = PROB_MIN;
2097     _exit_prob = exits_sum;
2098   }
2099 }
2100 
2101 //------------------------------succ_prob-------------------------------------
2102 // Determine the probability of reaching successor 'i' from the receiver block.
2103 float Block::succ_prob(uint i) {
2104   int eidx = end_idx();
2105   Node *n = get_node(eidx);  // Get ending Node
2106 
2107   int op = n->Opcode();
2108   if (n->is_Mach()) {
2109     if (n->is_MachNullCheck()) {
2110       // Can only reach here if called after LCM (local code motion). The
2111       // so we attempt to infer the probability from one or both of the
2112       // successor blocks.
2113       assert(_num_succs == 2, "expecting 2 successors of a null check");
2114       // If either successor has only one predecessor, then the
2115       // probability estimate can be derived using the
2116       // relative frequency of the successor and this block.
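           // (num_preds() counts the extra slot at index 0, so num_preds() == 2
           // means the block has exactly one real predecessor.)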
2117       if (_succs[i]->num_preds() == 2) {
2118         return _succs[i]->_freq / _freq;
2119       } else if (_succs[1-i]->num_preds() == 2) {
2120         return 1 - (_succs[1-i]->_freq / _freq);
2121       } else {
2122         // Estimate using both successor frequencies
2123         float freq = _succs[i]->_freq;
2124         return freq / (freq + _succs[1-i]->_freq);
2125       }
2126     }
2127     op = n->as_Mach()->ideal_Opcode();
2128   }
2129 
2130 
2131   // Switch on branch type
2132   switch( op ) {
2133   case Op_CountedLoopEnd:
2134   case Op_If: {
2135     assert (i < 2, "just checking");
2136     // Conditionals pass on only part of their frequency
2137     float prob  = n->as_MachIf()->_prob;
2138     assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
2139     // If succ[i] is the FALSE branch, invert path info
2140     if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) {
2141       return 1.0f - prob; // not taken
2142     } else {
2143       return prob; // taken
2144     }
2145   }
2146 
2147   case Op_Jump:
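         // MachJump keeps one probability per switch target; the projection's
         // constant (_con) selects this successor's entry in the table.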
2148     return n->as_MachJump()->_probs[get_node(i + eidx + 1)->as_JumpProj()->_con];
2149 
2150   case Op_Catch: {
2151     const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
2152     if (ci->_con == CatchProjNode::fall_through_index) {
2153       // Fall-thru path gets the lion's share.
2154       return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
2155     } else {
2156       // Presume exceptional paths are equally unlikely
2157       return PROB_UNLIKELY_MAG(5);
2158     }
2159   }
2160 
2161   case Op_Root:
2162   case Op_Goto:
2163     // Pass frequency straight thru to target
2164     return 1.0f;
2165 
2166   case Op_NeverBranch:
2167     return 0.0f;
2168 
2169   case Op_TailCall:
2170   case Op_TailJump:
2171   case Op_ForwardException:
2172   case Op_Return:
2173   case Op_Halt:
2174   case Op_Rethrow:
2175     // Do not push out freq to root block
2176     return 0.0f;
2177 
2178   default:
2179     ShouldNotReachHere();
2180   }
2181 
2182   return 0.0f;
2183 }
2184 
2185 //------------------------------num_fall_throughs-----------------------------
2186 // Return the number of fall-through candidates for a block
2187 int Block::num_fall_throughs() {
2188   int eidx = end_idx();
2189   Node *n = get_node(eidx);  // Get ending Node
2190 
2191   int op = n->Opcode();
2192   if (n->is_Mach()) {
2193     if (n->is_MachNullCheck()) {
2194       // In theory either side can fall through; for simplicity's sake,
2195       // let's say only the false branch can for now.
2196       return 1;
2197     }
2198     op = n->as_Mach()->ideal_Opcode();
2199   }
2200 
2201   // Switch on branch type
2202   switch( op ) {
2203   case Op_CountedLoopEnd:
2204   case Op_If:
2205     return 2;
2206 
2207   case Op_Root:
2208   case Op_Goto:
2209     return 1;
2210 
2211   case Op_Catch: {
2212     for (uint i = 0; i < _num_succs; i++) {
2213       const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
2214       if (ci->_con == CatchProjNode::fall_through_index) {
2215         return 1;
2216       }
2217     }
2218     return 0;
2219   }
2220 
2221   case Op_Jump:
2222   case Op_NeverBranch:
2223   case Op_TailCall:
2224   case Op_TailJump:
2225   case Op_ForwardException:
2226   case Op_Return:
2227   case Op_Halt:
2228   case Op_Rethrow:
2229     return 0;
2230 
2231   default:
2232     ShouldNotReachHere();
2233   }
2234 
2235   return 0;
2236 }
2237 
2238 //------------------------------succ_fall_through-----------------------------
2239 // Return true if a specific successor could be fall-through target.
2240 bool Block::succ_fall_through(uint i) {
2241   int eidx = end_idx();
2242   Node *n = get_node(eidx);  // Get ending Node
2243 
2244   int op = n->Opcode();
2245   if (n->is_Mach()) {
2246     if (n->is_MachNullCheck()) {
2247       // In theory either side can fall through; for simplicity's sake,
2248       // let's say only the false branch can for now.
2249       return get_node(i + eidx + 1)->Opcode() == Op_IfFalse;
2250     }
2251     op = n->as_Mach()->ideal_Opcode();
2252   }
2253 
2254   // Switch on branch type
2255   switch( op ) {
2256   case Op_CountedLoopEnd:
2257   case Op_If:
2258   case Op_Root:
2259   case Op_Goto:
2260     return true;
2261 
2262   case Op_Catch: {
2263     const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj();
2264     return ci->_con == CatchProjNode::fall_through_index;
2265   }
2266 
2267   case Op_Jump:
2268   case Op_NeverBranch:
2269   case Op_TailCall:
2270   case Op_TailJump:
2271   case Op_ForwardException:
2272   case Op_Return:
2273   case Op_Halt:
2274   case Op_Rethrow:
2275     return false;
2276 
2277   default:
2278     ShouldNotReachHere();
2279   }
2280 
2281   return false;
2282 }
2283 
2284 //------------------------------update_uncommon_branch------------------------
2285 // Update the probability of a two-branch to be uncommon
2286 void Block::update_uncommon_branch(Block* ub) {
2287   int eidx = end_idx();
2288   Node *n = get_node(eidx);  // Get ending Node
2289 
2290   int op = n->as_Mach()->ideal_Opcode();
2291 
2292   assert(op == Op_CountedLoopEnd || op == Op_If, "must be an If");
2293   assert(num_fall_throughs() == 2, "must be a two way branch block");
2294 
2295   // Which successor is ub?
2296   uint s;
2297   for (s = 0; s <_num_succs; s++) {
2298     if (_succs[s] == ub) break;
2299   }
2300   assert(s < 2, "uncommon successor must be found");
2301 
2302   // If ub is the true path, make the probability small; if ub is the
2303   // false path, make the probability large (so the path to ub stays small).
2304   bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse);
2305 
2306   // Get existing probability
2307   float p = n->as_MachIf()->_prob;
2308 
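       // 'p' is the probability of the true projection: temporarily convert it
       // to the probability of reaching ub, clamp to PROB_MIN, then convert back.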
2309   if (invert) p = 1.0 - p;
2310   if (p > PROB_MIN) {
2311     p = PROB_MIN;
2312   }
2313   if (invert) p = 1.0 - p;
2314 
2315   n->as_MachIf()->_prob = p;
2316 }
2317 
2318 //------------------------------update_succ_freq-------------------------------
2319 // Update the appropriate frequency associated with block 'b', a successor of
2320 // a block in this loop.
2321 void CFGLoop::update_succ_freq(Block* b, double freq) {
2322   if (b->_loop == this) {
2323     if (b == head()) {
2324       // back branch within the loop
2325       // Do nothing now; the loop-carried frequency will be
2326       // adjusted later in scale_freq().
2327     } else {
2328       // simple branch within the loop
2329       b->_freq += freq;
2330     }
2331   } else if (!in_loop_nest(b)) {
2332     // branch is exit from this loop
2333     BlockProbPair bpp(b, freq);
2334     _exits.append(bpp);
2335   } else {
2336     // branch into nested loop
2337     CFGLoop* ch = b->_loop;
2338     ch->_freq += freq;
2339   }
2340 }
2341 
2342 //------------------------------in_loop_nest-----------------------------------
2343 // Determine if block b is in the receiver's loop nest.
2344 bool CFGLoop::in_loop_nest(Block* b) {
2345   int depth = _depth;
2346   CFGLoop* b_loop = b->_loop;
2347   int b_depth = b_loop->_depth;
2348   if (depth == b_depth) {
2349     return true;
2350   }
2351   while (b_depth > depth) {
2352     b_loop = b_loop->_parent;
2353     b_depth = b_loop->_depth;
2354   }
2355   return b_loop == this;
2356 }
2357 
2358 //------------------------------scale_freq-------------------------------------
2359 // Scale frequency of loops and blocks by trip counts from outer loops
2360 // Do a top down traversal of loop tree (visit outer loops first.)
2361 void CFGLoop::scale_freq() {
2362   double loop_freq = _freq * trip_count();
2363   _freq = loop_freq;
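       // For example, a loop entered with frequency 0.5 and an estimated trip
       // count of 10 gets loop_freq 5; a member whose per-iteration frequency
       // is 1.0 then ends up with frequency 5.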
2364   for (int i = 0; i < _members.length(); i++) {
2365     CFGElement* s = _members.at(i);
2366     double block_freq = s->_freq * loop_freq;
2367     if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY)
2368       block_freq = MIN_BLOCK_FREQUENCY;
2369     s->_freq = block_freq;
2370   }
2371   CFGLoop* ch = _child;
2372   while (ch != nullptr) {
2373     ch->scale_freq();
2374     ch = ch->_sibling;
2375   }
2376 }
2377 
2378 // Frequency of outer loop
2379 double CFGLoop::outer_loop_freq() const {
2380   if (_child != nullptr) {
2381     return _child->_freq;
2382   }
2383   return _freq;
2384 }
2385 
2386 #ifndef PRODUCT
2387 //------------------------------dump_tree--------------------------------------
2388 void CFGLoop::dump_tree() const {
2389   dump();
2390   if (_child != nullptr)   _child->dump_tree();
2391   if (_sibling != nullptr) _sibling->dump_tree();
2392 }
2393 
2394 //------------------------------dump-------------------------------------------
2395 void CFGLoop::dump() const {
2396   for (int i = 0; i < _depth; i++) tty->print("   ");
2397   tty->print("%s: %d  trip_count: %6.0f freq: %6.0f\n",
2398              _depth == 0 ? "Method" : "Loop", _id, trip_count(), _freq);
2399   for (int i = 0; i < _depth; i++) tty->print("   ");
2400   tty->print("         members:");
2401   int k = 0;
2402   for (int i = 0; i < _members.length(); i++) {
2403     if (k++ >= 6) {
2404       tty->print("\n              ");
2405       for (int j = 0; j < _depth+1; j++) tty->print("   ");
2406       k = 0;
2407     }
2408     CFGElement *s = _members.at(i);
2409     if (s->is_block()) {
2410       Block *b = s->as_Block();
2411       tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq);
2412     } else {
2413       CFGLoop* lp = s->as_CFGLoop();
2414       tty->print(" L%d(%6.3f)", lp->_id, lp->_freq);
2415     }
2416   }
2417   tty->print("\n");
2418   for (int i = 0; i < _depth; i++) tty->print("   ");
2419   tty->print("         exits:  ");
2420   k = 0;
2421   for (int i = 0; i < _exits.length(); i++) {
2422     if (k++ >= 7) {
2423       tty->print("\n              ");
2424       for (int j = 0; j < _depth+1; j++) tty->print("   ");
2425       k = 0;
2426     }
2427     Block *blk = _exits.at(i).get_target();
2428     double prob = _exits.at(i).get_prob();
2429     tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100));
2430   }
2431   tty->print("\n");
2432 }
2433 #endif