6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "ci/ciMethodData.hpp"
26 #include "classfile/vmSymbols.hpp"
27 #include "compiler/compileLog.hpp"
28 #include "interpreter/linkResolver.hpp"
29 #include "jvm_io.h"
30 #include "memory/resourceArea.hpp"
31 #include "memory/universe.hpp"
32 #include "oops/oop.inline.hpp"
33 #include "opto/addnode.hpp"
34 #include "opto/castnode.hpp"
35 #include "opto/convertnode.hpp"
36 #include "opto/divnode.hpp"
37 #include "opto/idealGraphPrinter.hpp"
38 #include "opto/matcher.hpp"
39 #include "opto/memnode.hpp"
40 #include "opto/mulnode.hpp"
41 #include "opto/opaquenode.hpp"
42 #include "opto/parse.hpp"
43 #include "opto/runtime.hpp"
44 #include "runtime/deoptimization.hpp"
45 #include "runtime/sharedRuntime.hpp"
46
47 #ifndef PRODUCT
48 extern uint explicit_null_checks_inserted,
49 explicit_null_checks_elided;
50 #endif
51
52 //---------------------------------array_load----------------------------------
53 void Parse::array_load(BasicType bt) {
54 const Type* elemtype = Type::TOP;
55 bool big_val = bt == T_DOUBLE || bt == T_LONG;
56 Node* adr = array_addressing(bt, 0, elemtype);
57 if (stopped()) return; // guaranteed null or range check
58
59 pop(); // index (already used)
60 Node* array = pop(); // the array itself
61
62 if (elemtype == TypeInt::BOOL) {
63 bt = T_BOOLEAN;
64 }
65 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
66
67 Node* ld = access_load_at(array, adr, adr_type, elemtype, bt,
68 IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
69 if (big_val) {
70 push_pair(ld);
71 } else {
72 push(ld);
73 }
74 }
75
76
77 //--------------------------------array_store----------------------------------
78 void Parse::array_store(BasicType bt) {
79 const Type* elemtype = Type::TOP;
80 bool big_val = bt == T_DOUBLE || bt == T_LONG;
81 Node* adr = array_addressing(bt, big_val ? 2 : 1, elemtype);
82 if (stopped()) return; // guaranteed null or range check
83 if (bt == T_OBJECT) {
84 array_store_check();
85 if (stopped()) {
86 return;
87 }
88 }
89 Node* val; // Value to store
90 if (big_val) {
91 val = pop_pair();
92 } else {
93 val = pop();
94 }
95 pop(); // index (already used)
96 Node* array = pop(); // the array itself
97
98 if (elemtype == TypeInt::BOOL) {
99 bt = T_BOOLEAN;
100 }
101 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
102
103 access_store_at(array, adr, adr_type, val, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
104 }
105
106
107 //------------------------------array_addressing-------------------------------
108 // Pull array and index from the stack. Compute pointer-to-element.
109 Node* Parse::array_addressing(BasicType type, int vals, const Type*& elemtype) {
110 Node *idx = peek(0+vals); // Get from stack without popping
111 Node *ary = peek(1+vals); // in case of exception
112
113 // Null check the array base, with correct stack contents
114 ary = null_check(ary, T_ARRAY);
115 // Null exception detected at compile time?
116 if (stopped()) return top();
117
118 const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr();
119 const TypeInt* sizetype = arytype->size();
120 elemtype = arytype->elem();
121
122 if (UseUniqueSubclasses) {
123 const Type* el = elemtype->make_ptr();
124 if (el && el->isa_instptr()) {
125 const TypeInstPtr* toop = el->is_instptr();
126 if (toop->instance_klass()->unique_concrete_subklass()) {
127 // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
128 const Type* subklass = Type::get_const_type(toop->instance_klass());
129 elemtype = subklass->join_speculative(el);
130 }
131 }
132 }
133
134 // Check for big class initializers with all constant offsets
135 // feeding into a known-size array.
136 const TypeInt* idxtype = _gvn.type(idx)->is_int();
137 // See if the highest idx value is less than the lowest array bound,
138 // and if the idx value cannot be negative:
139 bool need_range_check = true;
140 if (idxtype->_hi < sizetype->_lo && idxtype->_lo >= 0) {
141 need_range_check = false;
142 if (C->log() != nullptr) C->log()->elem("observe that='!need_range_check'");
143 }
144
145 if (!arytype->is_loaded()) {
146 // Only fails for some -Xcomp runs
147 // The class is unloaded. We have to run this bytecode in the interpreter.
148 ciKlass* klass = arytype->unloaded_klass();
149
150 uncommon_trap(Deoptimization::Reason_unloaded,
151 Deoptimization::Action_reinterpret,
152 klass, "!loaded array");
153 return top();
154 }
155
156 // Do the range check
157 if (need_range_check) {
158 Node* tst;
159 if (sizetype->_hi <= 0) {
160 // The greatest array bound is negative, so we can conclude that we're
161 // compiling unreachable code, but the unsigned compare trick used below
162 // only works with non-negative lengths. Instead, hack "tst" to be zero so
163 // the uncommon_trap path will always be taken.
164 tst = _gvn.intcon(0);
165 } else {
166 // Range is constant in array-oop, so we can use the original state of mem
167 Node* len = load_array_length(ary);
168
169 // Test length vs index (standard trick using unsigned compare)
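// The single unsigned compare subsumes both bounds checks: len is known
// non-negative, and a negative idx reinterprets as a huge unsigned value
// (e.g. -1 becomes 0xFFFFFFFF), failing "idx <u len" just as idx >= len does.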
170 Node* chk = _gvn.transform( new CmpUNode(idx, len) );
171 BoolTest::mask btest = BoolTest::lt;
172 tst = _gvn.transform( new BoolNode(chk, btest) );
173 }
174 RangeCheckNode* rc = new RangeCheckNode(control(), tst, PROB_MAX, COUNT_UNKNOWN);
175 _gvn.set_type(rc, rc->Value(&_gvn));
176 if (!tst->is_Con()) {
177 record_for_igvn(rc);
178 }
179 set_control(_gvn.transform(new IfTrueNode(rc)));
180 // Branch to failure if out of bounds
181 {
182 PreserveJVMState pjvms(this);
183 set_control(_gvn.transform(new IfFalseNode(rc)));
184 if (C->allow_range_check_smearing()) {
185 // Do not use builtin_throw, since range checks are sometimes
186 // made more stringent by an optimistic transformation.
187 // This creates "tentative" range checks at this point,
188 // which are not guaranteed to throw exceptions.
189 // See IfNode::Ideal, is_range_check, adjust_check.
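// For example, smearing may widen the check for a[i] so that it also covers
// a nearby a[i+1]; the widened check can then fail for an index that would
// not actually throw, so we deoptimize and let the interpreter sort it out.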
190 uncommon_trap(Deoptimization::Reason_range_check,
191 Deoptimization::Action_make_not_entrant,
192 nullptr, "range_check");
193 } else {
194 // If we have already recompiled with the range-check-widening
195 // heroic optimization turned off, then we must really be throwing
196 // range check exceptions.
197 builtin_throw(Deoptimization::Reason_range_check);
198 }
199 }
200 }
201 // Check for always knowing you are throwing a range-check exception
202 if (stopped()) return top();
203
204 // Make array address computation control dependent to prevent it
205 // from floating above the range check during loop optimizations.
206 Node* ptr = array_element_address(ary, idx, type, sizetype, control());
207 assert(ptr != top(), "top should go hand-in-hand with stopped");
208
209 return ptr;
210 }
211
212
213 // returns IfNode
214 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
215 Node *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
216 Node *tst = _gvn.transform(new BoolNode(cmp, mask));
217 IfNode *iff = create_and_map_if(control(), tst, prob, cnt);
218 return iff;
219 }
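// Callers wire up the projections themselves: the tableswitch/lookupswitch
// lowering below sends the IfTrue side to its destination bci via
// jump_if_true_fork() and continues parsing along the IfFalse side.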
220
221
222 // sentinel value for the target bci to mark never taken branches
223 // (according to profiling)
224 static const int never_reached = INT_MAX;
225
226 //------------------------------helper for tableswitch-------------------------
227 void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, bool unc) {
228 // True branch, use existing map info
229 { PreserveJVMState pjvms(this);
230 Node *iftrue = _gvn.transform( new IfTrueNode (iff) );
231 set_control( iftrue );
1428 // False branch
1429 Node* iffalse = _gvn.transform( new IfFalseNode(iff) );
1430 set_control(iffalse);
1431
1432 if (stopped()) { // Path is dead?
1433 NOT_PRODUCT(explicit_null_checks_elided++);
1434 if (C->eliminate_boxing()) {
1435 // Mark the successor block as parsed
1436 next_block->next_path_num();
1437 }
1438 } else { // Path is live.
1439 adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block);
1440 }
1441
1442 if (do_stress_trap) {
1443 stress_trap(iff, counter, incr_store);
1444 }
1445 }
1446
1447 //------------------------------------do_if------------------------------------
1448 void Parse::do_if(BoolTest::mask btest, Node* c) {
1449 int target_bci = iter().get_dest();
1450
1451 Block* branch_block = successor_for_bci(target_bci);
1452 Block* next_block = successor_for_bci(iter().next_bci());
1453
1454 float cnt;
1455 float prob = branch_prediction(cnt, btest, target_bci, c);
1456 float untaken_prob = 1.0 - prob;
1457
1458 if (prob == PROB_UNKNOWN) {
1459 if (PrintOpto && Verbose) {
1460 tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
1461 }
1462 repush_if_args(); // to gather stats on loop
1463 uncommon_trap(Deoptimization::Reason_unreached,
1464 Deoptimization::Action_reinterpret,
1465 nullptr, "cold");
1466 if (C->eliminate_boxing()) {
1467 // Mark the successor blocks as parsed
1468 branch_block->next_path_num();
1519 }
1520
1521 // Generate real control flow
1522 float true_prob = (taken_if_true ? prob : untaken_prob);
1523 IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
1524 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1525 Node* taken_branch = new IfTrueNode(iff);
1526 Node* untaken_branch = new IfFalseNode(iff);
1527 if (!taken_if_true) { // Finish conversion to canonical form
1528 Node* tmp = taken_branch;
1529 taken_branch = untaken_branch;
1530 untaken_branch = tmp;
1531 }
1532
1533 // Branch is taken:
1534 { PreserveJVMState pjvms(this);
1535 taken_branch = _gvn.transform(taken_branch);
1536 set_control(taken_branch);
1537
1538 if (stopped()) {
1539 if (C->eliminate_boxing()) {
1540 // Mark the successor block as parsed
1541 branch_block->next_path_num();
1542 }
1543 } else {
1544 adjust_map_after_if(taken_btest, c, prob, branch_block);
1545 if (!stopped()) {
1546 merge(target_bci);
1547 }
1548 }
1549 }
1550
1551 untaken_branch = _gvn.transform(untaken_branch);
1552 set_control(untaken_branch);
1553
1554 // Branch not taken.
1555 if (stopped()) {
1556 if (C->eliminate_boxing()) {
1557 // Mark the successor block as parsed
1558 next_block->next_path_num();
1559 }
1560 } else {
1561 adjust_map_after_if(untaken_btest, c, untaken_prob, next_block);
1562 }
1563
1564 if (do_stress_trap) {
1565 stress_trap(iff, counter, incr_store);
1566 }
1567 }
1568
1569 // Force unstable if traps to be taken randomly to trigger intermittent bugs such as incorrect debug information.
1570 // Add another if before the unstable if that checks a "random" condition at runtime (a simple shared counter) and
1571 // then either takes the trap or executes the original, unstable if.
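// Roughly, for an unstable if guarding a trap, this turns
//   if (cond) { ... } else { uncommon_trap; }
// into
//   if ((counter++ & mask) == 0) { uncommon_trap; } else
//   if (cond) { ... } else { uncommon_trap; }
// where mask is derived from the random logarithmic frequency chosen below
// (a sketch of the shape, not the exact node-level construction).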
1572 void Parse::stress_trap(IfNode* orig_iff, Node* counter, Node* incr_store) {
1573 // Search for an unstable if trap
1574 CallStaticJavaNode* trap = nullptr;
1575 assert(orig_iff->Opcode() == Op_If && orig_iff->outcnt() == 2, "malformed if");
1576 ProjNode* trap_proj = orig_iff->uncommon_trap_proj(trap, Deoptimization::Reason_unstable_if);
1577 if (trap == nullptr || !trap->jvms()->should_reexecute()) {
1578 // No suitable trap found. Remove unused counter load and increment.
1579 C->gvn_replace_by(incr_store, incr_store->in(MemNode::Memory));
1580 return;
1581 }
1582
1583 // Remove trap from optimization list since we add another path to the trap.
1584 bool success = C->remove_unstable_if_trap(trap, true);
1585 assert(success, "Trap already modified");
1586
1587 // Add a check before the original if that will trap with a certain frequency and execute the original if otherwise
1588 int freq_log = (C->random() % 31) + 1; // Random logarithmic frequency in [1, 31]
1621 }
1622
1623 void Parse::maybe_add_predicate_after_if(Block* path) {
1624 if (path->is_SEL_head() && path->preds_parsed() == 0) {
1625 // Add predicates at bci of if dominating the loop so traps can be
1626 // recorded on the if's profile data
1627 int bc_depth = repush_if_args();
1628 add_parse_predicates();
1629 dec_sp(bc_depth);
1630 path->set_has_predicates();
1631 }
1632 }
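// (SEL head = single-entry loop head, see is_SEL_head(). Re-pushing the if's
// arguments restores the JVM state of the if's bci, so traps taken at these
// predicates are recorded against the if's profile data, as noted above.)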
1633
1634
1635 //----------------------------adjust_map_after_if------------------------------
1636 // Adjust the JVM state to reflect the result of taking this path.
1637 // Basically, it means inspecting the CmpNode controlling this
1638 // branch, seeing how it constrains a tested value, and then
1639 // deciding if it's worth our while to encode this constraint
1640 // as graph nodes in the current abstract interpretation map.
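// For example, on the taken branch of "if (x == 7)" the map can use the
// constant 7 in place of x, and a successful CmpP against an exact klass
// lets the object be narrowed with a CheckCastPP (the "payoff" below).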
1641 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path) {
1642 if (!c->is_Cmp()) {
1643 maybe_add_predicate_after_if(path);
1644 return;
1645 }
1646
1647 if (stopped() || btest == BoolTest::illegal) {
1648 return; // nothing to do
1649 }
1650
1651 bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
1652
1653 if (path_is_suitable_for_uncommon_trap(prob)) {
1654 repush_if_args();
1655 Node* call = uncommon_trap(Deoptimization::Reason_unstable_if,
1656 Deoptimization::Action_reinterpret,
1657 nullptr,
1658 (is_fallthrough ? "taken always" : "taken never"));
1659
1660 if (call != nullptr) {
1661 C->record_unstable_if_trap(new UnstableIfTrap(call->as_CallStaticJava(), path));
1662 }
1663 return;
1664 }
1665
1666 Node* val = c->in(1);
1667 Node* con = c->in(2);
1668 const Type* tcon = _gvn.type(con);
1669 const Type* tval = _gvn.type(val);
1670 bool have_con = tcon->singleton();
1671 if (tval->singleton()) {
1672 if (!have_con) {
1673 // Swap, so constant is in con.
1730 if (obj != nullptr && (con_type->isa_instptr() || con_type->isa_aryptr())) {
1731 // Found:
1732 // Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
1733 // or the narrowOop equivalent.
1734 const Type* obj_type = _gvn.type(obj);
1735 const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr();
1736 if (tboth != nullptr && tboth->klass_is_exact() && tboth != obj_type &&
1737 tboth->higher_equal(obj_type)) {
1738 // obj has to be of the exact type Foo if the CmpP succeeds.
1739 int obj_in_map = map()->find_edge(obj);
1740 JVMState* jvms = this->jvms();
1741 if (obj_in_map >= 0 &&
1742 (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
1743 TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth);
1744 const Type* tcc = ccast->as_Type()->type();
1745 assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
1746 // Delay transform() call to allow recovery of pre-cast value
1747 // at the control merge.
1748 _gvn.set_type_bottom(ccast);
1749 record_for_igvn(ccast);
1750 // Here's the payoff.
1751 replace_in_map(obj, ccast);
1752 }
1753 }
1754 }
1755 }
1756
1757 int val_in_map = map()->find_edge(val);
1758 if (val_in_map < 0) return; // replace_in_map would be useless
1759 {
1760 JVMState* jvms = this->jvms();
1761 if (!(jvms->is_loc(val_in_map) ||
1762 jvms->is_stk(val_in_map)))
1763 return; // again, it would be useless
1764 }
1765
1766 // Check for a comparison to a constant, and "know" that the compared
1767 // value is constrained on this path.
1768 assert(tcon->singleton(), "");
1769 ConstraintCastNode* ccast = nullptr;
1834 if (c->Opcode() == Op_CmpP &&
1835 (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
1836 c->in(2)->is_Con()) {
1837 Node* load_klass = nullptr;
1838 Node* decode = nullptr;
1839 if (c->in(1)->Opcode() == Op_DecodeNKlass) {
1840 decode = c->in(1);
1841 load_klass = c->in(1)->in(1);
1842 } else {
1843 load_klass = c->in(1);
1844 }
1845 if (load_klass->in(2)->is_AddP()) {
1846 Node* addp = load_klass->in(2);
1847 Node* obj = addp->in(AddPNode::Address);
1848 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
1849 if (obj_type->speculative_type_not_null() != nullptr) {
1850 ciKlass* k = obj_type->speculative_type();
1851 inc_sp(2);
1852 obj = maybe_cast_profiled_obj(obj, k);
1853 dec_sp(2);
1854 // Make the CmpP use the casted obj
1855 addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
1856 load_klass = load_klass->clone();
1857 load_klass->set_req(2, addp);
1858 load_klass = _gvn.transform(load_klass);
1859 if (decode != nullptr) {
1860 decode = decode->clone();
1861 decode->set_req(1, load_klass);
1862 load_klass = _gvn.transform(decode);
1863 }
1864 c = c->clone();
1865 c->set_req(1, load_klass);
1866 c = _gvn.transform(c);
1867 }
1868 }
1869 }
1870 return c;
1871 }
1872
1873 //------------------------------do_one_bytecode--------------------------------
2631 // See if we can get some profile data and hand it off to the next block
2632 Block *target_block = block()->successor_for_bci(target_bci);
2633 if (target_block->pred_count() != 1) break;
2634 ciMethodData* methodData = method()->method_data();
2635 if (!methodData->is_mature()) break;
2636 ciProfileData* data = methodData->bci_to_data(bci());
2637 assert(data != nullptr && data->is_JumpData(), "need JumpData for taken branch");
2638 int taken = ((ciJumpData*)data)->taken();
2639 taken = method()->scale_count(taken);
2640 target_block->set_count(taken);
2641 break;
2642 }
2643
2644 case Bytecodes::_ifnull: btest = BoolTest::eq; goto handle_if_null;
2645 case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
2646 handle_if_null:
2647 // If this is a backwards branch in the bytecodes, add Safepoint
2648 maybe_add_safepoint(iter().get_dest());
2649 a = null();
2650 b = pop();
2651 if (!_gvn.type(b)->speculative_maybe_null() &&
2652 !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
2653 inc_sp(1);
2654 Node* null_ctl = top();
2655 b = null_check_oop(b, &null_ctl, true, true, true);
2656 assert(null_ctl->is_top(), "no null control here");
2657 dec_sp(1);
2658 } else if (_gvn.type(b)->speculative_always_null() &&
2659 !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
2660 inc_sp(1);
2661 b = null_assert(b);
2662 dec_sp(1);
2663 }
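      // If speculation applied above, b is either known non-null (the
      // speculative null check traps instead of branching) or known null
      // (null_assert), so the CmpP below typically folds to a constant.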
2664 c = _gvn.transform( new CmpPNode(b, a) );
2665 do_ifnull(btest, c);
2666 break;
2667
2668 case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
2669 case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
2670 handle_if_acmp:
2671 // If this is a backwards branch in the bytecodes, add Safepoint
2672 maybe_add_safepoint(iter().get_dest());
2673 a = pop();
2674 b = pop();
2675 c = _gvn.transform( new CmpPNode(b, a) );
2676 c = optimize_cmp_with_klass(c);
2677 do_if(btest, c);
2678 break;
2679
2680 case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
2681 case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
2682 case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
2683 case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
2684 case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
2685 case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
2686 handle_ifxx:
2687 // If this is a backwards branch in the bytecodes, add Safepoint
2688 maybe_add_safepoint(iter().get_dest());
2689 a = _gvn.intcon(0);
2690 b = pop();
2691 c = _gvn.transform( new CmpINode(b, a) );
2692 do_if(btest, c);
2693 break;
2694
2695 case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
2696 case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
2697 case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
2712 break;
2713
2714 case Bytecodes::_lookupswitch:
2715 do_lookupswitch();
2716 break;
2717
2718 case Bytecodes::_invokestatic:
2719 case Bytecodes::_invokedynamic:
2720 case Bytecodes::_invokespecial:
2721 case Bytecodes::_invokevirtual:
2722 case Bytecodes::_invokeinterface:
2723 do_call();
2724 break;
2725 case Bytecodes::_checkcast:
2726 do_checkcast();
2727 break;
2728 case Bytecodes::_instanceof:
2729 do_instanceof();
2730 break;
2731 case Bytecodes::_anewarray:
2732 do_anewarray();
2733 break;
2734 case Bytecodes::_newarray:
2735 do_newarray((BasicType)iter().get_index());
2736 break;
2737 case Bytecodes::_multianewarray:
2738 do_multianewarray();
2739 break;
2740 case Bytecodes::_new:
2741 do_new();
2742 break;
2743
2744 case Bytecodes::_jsr:
2745 case Bytecodes::_jsr_w:
2746 do_jsr();
2747 break;
2748
2749 case Bytecodes::_ret:
2750 do_ret();
2751 break;
2752
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "ci/ciMethodData.hpp"
26 #include "ci/ciSymbols.hpp"
27 #include "classfile/vmSymbols.hpp"
28 #include "compiler/compileLog.hpp"
29 #include "interpreter/linkResolver.hpp"
30 #include "jvm_io.h"
31 #include "memory/resourceArea.hpp"
32 #include "memory/universe.hpp"
33 #include "oops/oop.inline.hpp"
34 #include "opto/addnode.hpp"
35 #include "opto/castnode.hpp"
36 #include "opto/convertnode.hpp"
37 #include "opto/divnode.hpp"
38 #include "opto/idealGraphPrinter.hpp"
39 #include "opto/idealKit.hpp"
40 #include "opto/inlinetypenode.hpp"
41 #include "opto/matcher.hpp"
42 #include "opto/memnode.hpp"
43 #include "opto/mulnode.hpp"
44 #include "opto/opaquenode.hpp"
45 #include "opto/parse.hpp"
46 #include "opto/runtime.hpp"
47 #include "runtime/deoptimization.hpp"
48 #include "runtime/sharedRuntime.hpp"
49
50 #ifndef PRODUCT
51 extern uint explicit_null_checks_inserted,
52 explicit_null_checks_elided;
53 #endif
54
55 Node* Parse::record_profile_for_speculation_at_array_load(Node* ld) {
56 // Feed unused profile data to type speculation
57 if (UseTypeSpeculation && UseArrayLoadStoreProfile) {
58 ciKlass* array_type = nullptr;
59 ciKlass* element_type = nullptr;
60 ProfilePtrKind element_ptr = ProfileMaybeNull;
61 bool flat_array = true;
62 bool null_free_array = true;
63 method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
64 if (element_type != nullptr || element_ptr != ProfileMaybeNull) {
65 ld = record_profile_for_speculation(ld, element_type, element_ptr);
66 }
67 }
68 return ld;
69 }
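// For example, when the profile at the load's bci records that all elements
// seen were non-null instances of one concrete type, the value returned
// above carries that speculative type, which a later checkcast can exploit.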
70
71
72 //---------------------------------array_load----------------------------------
73 void Parse::array_load(BasicType bt) {
74 const Type* elemtype = Type::TOP;
75 Node* adr = array_addressing(bt, 0, elemtype);
76 if (stopped()) return; // guaranteed null or range check
77
78 Node* array_index = pop();
79 Node* array = pop();
80
81 // Handle inline type arrays
82 const TypeOopPtr* element_ptr = elemtype->make_oopptr();
83 const TypeAryPtr* array_type = _gvn.type(array)->is_aryptr();
84
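  // Schematically, a potentially-flat array load becomes a diamond:
  //   if (!is_flat(array)) res = regular load (re-wrapped as an InlineTypeNode
  //                              if the element type is an inline type)
  //   else                 res = load from the flat layout, or a runtime call
  //                              when the exact flat layout is unknown
  // with the result merged by the IdealKit machinery below.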
85 if (!array_type->is_not_flat()) {
86 // Cannot statically determine if array is a flat array, emit runtime check
87 assert(UseArrayFlattening && is_reference_type(bt) && element_ptr->can_be_inline_type() &&
88 (!element_ptr->is_inlinetypeptr() || element_ptr->inline_klass()->flat_in_array()), "array can't be flat");
89 IdealKit ideal(this);
90 IdealVariable res(ideal);
91 ideal.declarations_done();
92 ideal.if_then(flat_array_test(array, /* flat = */ false)); {
93 // Non-flat array
94 sync_kit(ideal);
95 if (!array_type->is_flat()) {
96 assert(array_type->is_flat() || control()->in(0)->as_If()->is_flat_array_check(&_gvn), "Should be found");
97 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
98 DecoratorSet decorator_set = IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD;
99 if (needs_range_check(array_type->size(), array_index)) {
100 // We've emitted a RangeCheck but now insert an additional check between the range check and the actual load.
101 // We cannot pin the load to two separate nodes. Instead, we pin it conservatively here such that it cannot
102 // possibly float above the range check at any point.
103 decorator_set |= C2_UNKNOWN_CONTROL_LOAD;
104 }
105 Node* ld = access_load_at(array, adr, adr_type, element_ptr, bt, decorator_set);
106 if (element_ptr->is_inlinetypeptr()) {
107 ld = InlineTypeNode::make_from_oop(this, ld, element_ptr->inline_klass());
108 }
109 ideal.set(res, ld);
110 }
111 ideal.sync_kit(this);
112 } ideal.else_(); {
113 // Flat array
114 sync_kit(ideal);
115 if (!array_type->is_not_flat()) {
116 if (element_ptr->is_inlinetypeptr()) {
117 // Element type is known, cast and load from flat array layout.
118 ciInlineKlass* vk = element_ptr->inline_klass();
119 bool is_null_free = array_type->is_null_free() || !vk->has_nullable_atomic_layout();
120 bool is_not_null_free = array_type->is_not_null_free() || (!vk->has_atomic_layout() && !vk->has_non_atomic_layout());
121 if (is_null_free) {
122 // TODO 8350865 Impossible type
123 is_not_null_free = false;
124 }
125 bool is_naturally_atomic = (is_null_free && vk->nof_declared_nonstatic_fields() <= 1);
126 bool may_need_atomicity = !is_naturally_atomic && ((!is_not_null_free && vk->has_atomic_layout()) || (!is_null_free && vk->has_nullable_atomic_layout()));
127
128 // Re-execute flat array load if buffering triggers deoptimization
129 PreserveReexecuteState preexecs(this);
130 jvms()->set_should_reexecute(true);
131 inc_sp(2); // aaload consumed two stack slots (arrayref, index)
132
133 adr = flat_array_element_address(array, array_index, vk, is_null_free, is_not_null_free, may_need_atomicity);
134 int nm_offset = is_null_free ? -1 : vk->null_marker_offset_in_payload();
135 Node* vt = InlineTypeNode::make_from_flat(this, vk, array, adr, array_index, nullptr, 0, may_need_atomicity, nm_offset);
136 ideal.set(res, vt);
137 } else {
138 // Element type is unknown, and thus we cannot statically determine the exact flat array layout. Emit a
139 // runtime call to correctly load the inline type element from the flat array.
140 Node* inline_type = load_from_unknown_flat_array(array, array_index, element_ptr);
141 bool is_null_free = array_type->is_null_free() || !UseNullableValueFlattening;
142 if (is_null_free) {
143 inline_type = cast_not_null(inline_type);
144 }
145 ideal.set(res, inline_type);
146 }
147 }
148 ideal.sync_kit(this);
149 } ideal.end_if();
150 sync_kit(ideal);
151 Node* ld = _gvn.transform(ideal.value(res));
152 ld = record_profile_for_speculation_at_array_load(ld);
153 push_node(bt, ld);
154 return;
155 }
156
157 if (elemtype == TypeInt::BOOL) {
158 bt = T_BOOLEAN;
159 }
160 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
161 Node* ld = access_load_at(array, adr, adr_type, elemtype, bt,
162 IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
163 ld = record_profile_for_speculation_at_array_load(ld);
164 // Loading an inline type from a non-flat array
165 if (element_ptr != nullptr && element_ptr->is_inlinetypeptr()) {
166 assert(!array_type->is_null_free() || !element_ptr->maybe_null(), "inline type array elements should never be null");
167 ld = InlineTypeNode::make_from_oop(this, ld, element_ptr->inline_klass());
168 }
169 push_node(bt, ld);
170 }
171
172 Node* Parse::load_from_unknown_flat_array(Node* array, Node* array_index, const TypeOopPtr* element_ptr) {
173 // The membars below keep this access to an unknown flat array correctly
174 // ordered with other unknown and known flat array accesses.
175 insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
176
177 Node* call = nullptr;
178 {
179 // Re-execute flat array load if runtime call triggers deoptimization
180 PreserveReexecuteState preexecs(this);
181 jvms()->set_bci(_bci);
182 jvms()->set_should_reexecute(true);
183 inc_sp(2);
184 kill_dead_locals();
185 call = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
186 OptoRuntime::load_unknown_inline_Type(),
187 OptoRuntime::load_unknown_inline_Java(),
188 nullptr, TypeRawPtr::BOTTOM,
189 array, array_index);
190 }
191 make_slow_call_ex(call, env()->Throwable_klass(), false);
192 Node* buffer = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
193
194 insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
195
196 // Keep track of the information that the inline type is in flat arrays
197 const Type* unknown_value = element_ptr->is_instptr()->cast_to_flat_in_array();
198 return _gvn.transform(new CheckCastPPNode(control(), buffer, unknown_value));
199 }
200
201 //--------------------------------array_store----------------------------------
202 void Parse::array_store(BasicType bt) {
203 const Type* elemtype = Type::TOP;
204 Node* adr = array_addressing(bt, type2size[bt], elemtype);
205 if (stopped()) return; // guaranteed null or range check
206 Node* stored_value_casted = nullptr;
207 if (bt == T_OBJECT) {
208 stored_value_casted = array_store_check(adr, elemtype);
209 if (stopped()) {
210 return;
211 }
212 }
213 Node* const stored_value = pop_node(bt); // Value to store
214 Node* const array_index = pop(); // Index in the array
215 Node* array = pop(); // The array itself
216
217 const TypeAryPtr* array_type = _gvn.type(array)->is_aryptr();
218 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
219
220 if (elemtype == TypeInt::BOOL) {
221 bt = T_BOOLEAN;
222 } else if (bt == T_OBJECT) {
223 elemtype = elemtype->make_oopptr();
224 const Type* stored_value_casted_type = _gvn.type(stored_value_casted);
225 // Based on the value to be stored, try to determine if the array is not null-free and/or not flat.
226 // This is only legal for non-null stores because the array_store_check always passes for null, even
227 // if the array is null-free. Null stores are handled in GraphKit::inline_array_null_guard().
228 bool not_inline = !stored_value_casted_type->maybe_null() && !stored_value_casted_type->is_oopptr()->can_be_inline_type();
229 bool not_null_free = not_inline;
230 bool not_flat = not_inline || ( stored_value_casted_type->is_inlinetypeptr() &&
231 !stored_value_casted_type->inline_klass()->flat_in_array());
232 if (!array_type->is_not_null_free() && not_null_free) {
233 // Storing a non-inline type, mark array as not null-free.
234 array_type = array_type->cast_to_not_null_free();
235 Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, array_type));
236 replace_in_map(array, cast);
237 array = cast;
238 }
239 if (!array_type->is_not_flat() && not_flat) {
240 // Storing to a non-flat array, mark array as not flat.
241 array_type = array_type->cast_to_not_flat();
242 Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, array_type));
243 replace_in_map(array, cast);
244 array = cast;
245 }
246
247 if (!array_type->is_flat() && array_type->is_null_free()) {
248 // Store to non-flat null-free inline type array (elements can never be null)
249 assert(!stored_value_casted_type->maybe_null(), "should be guaranteed by array store check");
250 if (elemtype->is_inlinetypeptr() && elemtype->inline_klass()->is_empty()) {
251 // Ignore empty inline stores, array is already initialized.
252 return;
253 }
254 } else if (!array_type->is_not_flat()) {
255 // Array might be a flat array, emit runtime checks (for nullptr, a simple inline_array_null_guard is sufficient).
256 assert(UseArrayFlattening && !not_flat && elemtype->is_oopptr()->can_be_inline_type() &&
257 (!array_type->klass_is_exact() || array_type->is_flat()), "array can't be a flat array");
258 // TODO 8350865 Depending on the available layouts, we can avoid this check in below flat/not-flat branches. Also the safe_for_replace arg is now always true.
259 array = inline_array_null_guard(array, stored_value_casted, 3, true);
260 IdealKit ideal(this);
261 ideal.if_then(flat_array_test(array, /* flat = */ false)); {
262 // Non-flat array
263 if (!array_type->is_flat()) {
264 sync_kit(ideal);
265 assert(array_type->is_flat() || ideal.ctrl()->in(0)->as_If()->is_flat_array_check(&_gvn), "Should be found");
266 inc_sp(3);
267 access_store_at(array, adr, adr_type, stored_value_casted, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY, false);
268 dec_sp(3);
269 ideal.sync_kit(this);
270 }
271 } ideal.else_(); {
272 // Flat array
273 sync_kit(ideal);
274 if (!array_type->is_not_flat()) {
275 // Try to determine the inline klass type of the stored value
276 ciInlineKlass* vk = nullptr;
277 if (stored_value_casted_type->is_inlinetypeptr()) {
278 vk = stored_value_casted_type->inline_klass();
279 } else if (elemtype->is_inlinetypeptr()) {
280 vk = elemtype->inline_klass();
281 }
282
283 if (vk != nullptr) {
284 // Element type is known, cast and store to flat array layout.
285 bool is_null_free = array_type->is_null_free() || !vk->has_nullable_atomic_layout();
286 bool is_not_null_free = array_type->is_not_null_free() || (!vk->has_atomic_layout() && !vk->has_non_atomic_layout());
287 if (is_null_free) {
288 // TODO 8350865 Impossible type
289 is_not_null_free = false;
290 }
291 bool is_naturally_atomic = (is_null_free && vk->nof_declared_nonstatic_fields() <= 1);
292 bool may_need_atomicity = !is_naturally_atomic && ((!is_not_null_free && vk->has_atomic_layout()) || (!is_null_free && vk->has_nullable_atomic_layout()));
293
294 // Re-execute flat array store if buffering triggers deoptimization
295 PreserveReexecuteState preexecs(this);
296 jvms()->set_should_reexecute(true);
297 inc_sp(3);
298
299 if (!stored_value_casted->is_InlineType()) {
300 assert(_gvn.type(stored_value_casted) == TypePtr::NULL_PTR, "Unexpected value");
301 stored_value_casted = InlineTypeNode::make_null(_gvn, vk);
302 }
303 adr = flat_array_element_address(array, array_index, vk, is_null_free, is_not_null_free, may_need_atomicity);
304 int nm_offset = is_null_free ? -1 : vk->null_marker_offset_in_payload();
305 stored_value_casted->as_InlineType()->store_flat(this, array, adr, array_index, vk, 0, may_need_atomicity, nm_offset, MO_UNORDERED | IN_HEAP | IS_ARRAY);
306 } else {
307 // Element type is unknown, emit a runtime call since the flat array layout is not statically known.
308 store_to_unknown_flat_array(array, array_index, stored_value_casted);
309 }
310 }
311 ideal.sync_kit(this);
312 }
313 ideal.end_if();
314 sync_kit(ideal);
315 return;
316 } else if (!array_type->is_not_null_free()) {
317 // Array is not flat but may be null free
318 assert(elemtype->is_oopptr()->can_be_inline_type(), "array can't be null-free");
319 array = inline_array_null_guard(array, stored_value_casted, 3, true);
320 }
321 }
322 inc_sp(3);
323 access_store_at(array, adr, adr_type, stored_value, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
324 dec_sp(3);
325 }
326
327 // Emit a runtime call to store to a flat array whose element type is either unknown (i.e. we do not know the flat
328 // array layout) or not exact (could have different flat array layouts at runtime).
329 void Parse::store_to_unknown_flat_array(Node* array, Node* const idx, Node* non_null_stored_value) {
330 // The membars below keep this access to an unknown flat array correctly
331 // ordered with other unknown and known flat array accesses.
332 insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
333
334 Node* call = nullptr;
335 {
336 // Re-execute flat array store if runtime call triggers deoptimization
337 PreserveReexecuteState preexecs(this);
338 jvms()->set_bci(_bci);
339 jvms()->set_should_reexecute(true);
340 inc_sp(3);
341 kill_dead_locals();
342 call = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
343 OptoRuntime::store_unknown_inline_Type(),
344 OptoRuntime::store_unknown_inline_Java(),
345 nullptr, TypeRawPtr::BOTTOM,
346 non_null_stored_value, array, idx);
347 }
348 make_slow_call_ex(call, env()->Throwable_klass(), false);
349
350 insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
351 }
352
353 //------------------------------array_addressing-------------------------------
354 // Pull array and index from the stack. Compute pointer-to-element.
355 Node* Parse::array_addressing(BasicType type, int vals, const Type*& elemtype) {
356 Node *idx = peek(0+vals); // Get from stack without popping
357 Node *ary = peek(1+vals); // in case of exception
358
359 // Null check the array base, with correct stack contents
360 ary = null_check(ary, T_ARRAY);
361 // Null exception detected at compile time?
362 if (stopped()) return top();
363
364 const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr();
365 const TypeInt* sizetype = arytype->size();
366 elemtype = arytype->elem();
367
368 if (UseUniqueSubclasses) {
369 const Type* el = elemtype->make_ptr();
370 if (el && el->isa_instptr()) {
371 const TypeInstPtr* toop = el->is_instptr();
372 if (toop->instance_klass()->unique_concrete_subklass()) {
373 // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
374 const Type* subklass = Type::get_const_type(toop->instance_klass());
375 elemtype = subklass->join_speculative(el);
376 }
377 }
378 }
379
380 if (!arytype->is_loaded()) {
381 // Only fails for some -Xcomp runs
382 // The class is unloaded. We have to run this bytecode in the interpreter.
383 ciKlass* klass = arytype->unloaded_klass();
384
385 uncommon_trap(Deoptimization::Reason_unloaded,
386 Deoptimization::Action_reinterpret,
387 klass, "!loaded array");
388 return top();
389 }
390
391 ary = create_speculative_inline_type_array_checks(ary, arytype, elemtype);
392
393 if (needs_range_check(sizetype, idx)) {
394 create_range_check(idx, ary, sizetype);
395 } else if (C->log() != nullptr) {
396 C->log()->elem("observe that='!need_range_check'");
397 }
398
399 // Check for always knowing you are throwing a range-check exception
400 if (stopped()) return top();
401
402 // Make array address computation control dependent to prevent it
403 // from floating above the range check during loop optimizations.
404 Node* ptr = array_element_address(ary, idx, type, sizetype, control());
405 assert(ptr != top(), "top should go hand-in-hand with stopped");
406
407 return ptr;
408 }
409
410 // Check if we need a range check for an array access. This is the case if the index is either negative or if it could
411 // be greater or equal the smallest possible array size (i.e. out-of-bounds).
412 bool Parse::needs_range_check(const TypeInt* size_type, const Node* index) const {
413 const TypeInt* index_type = _gvn.type(index)->is_int();
414 return index_type->_hi >= size_type->_lo || index_type->_lo < 0;
415 }
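// For example, an access to "new int[10]" with constant index 5 gives
// index_type = [5, 5] and size_type = [10, 10]; 5 >= 10 and 5 < 0 are both
// false, so the access is provably in bounds and no range check is needed.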
416
417 void Parse::create_range_check(Node* idx, Node* ary, const TypeInt* sizetype) {
418 Node* tst;
419 if (sizetype->_hi <= 0) {
420 // The greatest array bound is negative, so we can conclude that we're
421 // compiling unreachable code, but the unsigned compare trick used below
422 // only works with non-negative lengths. Instead, hack "tst" to be zero so
423 // the uncommon_trap path will always be taken.
424 tst = _gvn.intcon(0);
425 } else {
426 // Range is constant in array-oop, so we can use the original state of mem
427 Node* len = load_array_length(ary);
428
429 // Test length vs index (standard trick using unsigned compare)
430 Node* chk = _gvn.transform(new CmpUNode(idx, len) );
431 BoolTest::mask btest = BoolTest::lt;
432 tst = _gvn.transform(new BoolNode(chk, btest) );
433 }
434 RangeCheckNode* rc = new RangeCheckNode(control(), tst, PROB_MAX, COUNT_UNKNOWN);
435 _gvn.set_type(rc, rc->Value(&_gvn));
436 if (!tst->is_Con()) {
437 record_for_igvn(rc);
438 }
439 set_control(_gvn.transform(new IfTrueNode(rc)));
440 // Branch to failure if out of bounds
441 {
442 PreserveJVMState pjvms(this);
443 set_control(_gvn.transform(new IfFalseNode(rc)));
444 if (C->allow_range_check_smearing()) {
445 // Do not use builtin_throw, since range checks are sometimes
446 // made more stringent by an optimistic transformation.
447 // This creates "tentative" range checks at this point,
448 // which are not guaranteed to throw exceptions.
449 // See IfNode::Ideal, is_range_check, adjust_check.
450 uncommon_trap(Deoptimization::Reason_range_check,
451 Deoptimization::Action_make_not_entrant,
452 nullptr, "range_check");
453 } else {
454 // If we have already recompiled with the range-check-widening
455 // heroic optimization turned off, then we must really be throwing
456 // range check exceptions.
457 builtin_throw(Deoptimization::Reason_range_check);
458 }
459 }
460 }
461
462 // For inline type arrays, we can use the profiling information for array accesses to speculate on the type, flatness,
463 // and null-freeness. We can either prepare the speculative type for later uses or emit explicit speculative checks with
464 // traps now. In the latter case, the speculative type guarantees can avoid additional runtime checks later (e.g.
465 // non-null-free implies non-flat which allows us to remove flatness checks). This makes the graph simpler.
466 Node* Parse::create_speculative_inline_type_array_checks(Node* array, const TypeAryPtr* array_type,
467 const Type*& element_type) {
468 if (!array_type->is_flat() && !array_type->is_not_flat()) {
469 // For arrays that might be flat, speculate that the array has the exact type reported in the profile data such that
470 // we can rely on a fixed memory layout (i.e. either a flat layout or not).
471 array = cast_to_speculative_array_type(array, array_type, element_type);
472 } else if (UseTypeSpeculation && UseArrayLoadStoreProfile) {
473 // Array is known to be either flat or not flat. If possible, update the speculative type by using the profile data
474 // at this bci.
475 array = cast_to_profiled_array_type(array);
476 }
477
478 // Even though the type does not tell us whether we have an inline type array or not, we can still consult the profile
479 // data to determine whether the array is non-null-free or non-flat. Speculating on a non-null-free array doesn't help
480 // aaload but could be profitable for a subsequent aastore.
481 if (!array_type->is_null_free() && !array_type->is_not_null_free()) {
482 array = speculate_non_null_free_array(array, array_type);
483 }
484 if (!array_type->is_flat() && !array_type->is_not_flat()) {
485 array = speculate_non_flat_array(array, array_type);
486 }
487 return array;
488 }
489
490 // Speculate that the array has the exact type reported in the profile data. We emit a trap when this turns out to be
491 // wrong. On the fast path, we add a CheckCastPP to use the exact type.
492 Node* Parse::cast_to_speculative_array_type(Node* const array, const TypeAryPtr*& array_type, const Type*& element_type) {
493 Deoptimization::DeoptReason reason = Deoptimization::Reason_speculate_class_check;
494 ciKlass* speculative_array_type = array_type->speculative_type();
495 if (too_many_traps_or_recompiles(reason) || speculative_array_type == nullptr) {
496 // No speculative type, check profile data at this bci
497 speculative_array_type = nullptr;
498 reason = Deoptimization::Reason_class_check;
499 if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(reason)) {
500 ciKlass* profiled_element_type = nullptr;
501 ProfilePtrKind element_ptr = ProfileMaybeNull;
502 bool flat_array = true;
503 bool null_free_array = true;
504 method()->array_access_profiled_type(bci(), speculative_array_type, profiled_element_type, element_ptr, flat_array,
505 null_free_array);
506 }
507 }
508 if (speculative_array_type != nullptr) {
509 // Speculate that this array has the exact type reported by profile data
510 Node* casted_array = nullptr;
511 DEBUG_ONLY(Node* old_control = control();)
512 Node* slow_ctl = type_check_receiver(array, speculative_array_type, 1.0, &casted_array);
513 if (stopped()) {
514 // The check always fails and therefore profile information is incorrect. Don't use it.
515 assert(old_control == slow_ctl, "type check should have been removed");
516 set_control(slow_ctl);
517 } else if (!slow_ctl->is_top()) {
518 { PreserveJVMState pjvms(this);
519 set_control(slow_ctl);
520 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
521 }
522 replace_in_map(array, casted_array);
523 array_type = _gvn.type(casted_array)->is_aryptr();
524 element_type = array_type->elem();
525 return casted_array;
526 }
527 }
528 return array;
529 }
530
531 // Create a CheckCastPP when the speculative type can improve the current type.
532 Node* Parse::cast_to_profiled_array_type(Node* const array) {
533 ciKlass* array_type = nullptr;
534 ciKlass* element_type = nullptr;
535 ProfilePtrKind element_ptr = ProfileMaybeNull;
536 bool flat_array = true;
537 bool null_free_array = true;
538 method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
539 if (array_type != nullptr) {
540 return record_profile_for_speculation(array, array_type, ProfileMaybeNull);
541 }
542 return array;
543 }
544
545 // Speculate that the array is non-null-free. We emit a trap when this turns out to be
546 // wrong. On the fast path, we add a CheckCastPP to use the non-null-free type.
547 Node* Parse::speculate_non_null_free_array(Node* const array, const TypeAryPtr*& array_type) {
548 bool null_free_array = true;
549 Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
550 if (array_type->speculative() != nullptr &&
551 array_type->speculative()->is_aryptr()->is_not_null_free() &&
552 !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
553 null_free_array = false;
554 reason = Deoptimization::Reason_speculate_class_check;
555 } else if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
556 ciKlass* profiled_array_type = nullptr;
557 ciKlass* profiled_element_type = nullptr;
558 ProfilePtrKind element_ptr = ProfileMaybeNull;
559 bool flat_array = true;
560 method()->array_access_profiled_type(bci(), profiled_array_type, profiled_element_type, element_ptr, flat_array,
561 null_free_array);
562 reason = Deoptimization::Reason_class_check;
563 }
564 if (!null_free_array) {
565 { // Deoptimize if null-free array
566 BuildCutout unless(this, null_free_array_test(array, /* null_free = */ false), PROB_MAX);
567 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
568 }
569 assert(!stopped(), "null-free array should have been caught earlier");
570 Node* casted_array = _gvn.transform(new CheckCastPPNode(control(), array, array_type->cast_to_not_null_free()));
571 replace_in_map(array, casted_array);
572 array_type = _gvn.type(casted_array)->is_aryptr();
573 return casted_array;
574 }
575 return array;
576 }
577
578 // Speculate that the array is non-flat. We emit a trap when this turns out to be wrong.
579 // On the fast path, we add a CheckCastPP to use the non-flat type.
580 Node* Parse::speculate_non_flat_array(Node* const array, const TypeAryPtr* const array_type) {
581 bool flat_array = true;
582 Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
583 if (array_type->speculative() != nullptr &&
584 array_type->speculative()->is_aryptr()->is_not_flat() &&
585 !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
586 flat_array = false;
587 reason = Deoptimization::Reason_speculate_class_check;
588 } else if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
589 ciKlass* profiled_array_type = nullptr;
590 ciKlass* profiled_element_type = nullptr;
591 ProfilePtrKind element_ptr = ProfileMaybeNull;
592 bool null_free_array = true;
593 method()->array_access_profiled_type(bci(), profiled_array_type, profiled_element_type, element_ptr, flat_array,
594 null_free_array);
595 reason = Deoptimization::Reason_class_check;
596 }
597 if (!flat_array) {
598 { // Deoptimize if flat array
599 BuildCutout unless(this, flat_array_test(array, /* flat = */ false), PROB_MAX);
600 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
601 }
602 assert(!stopped(), "flat array should have been caught earlier");
603 Node* casted_array = _gvn.transform(new CheckCastPPNode(control(), array, array_type->cast_to_not_flat()));
604 replace_in_map(array, casted_array);
605 return casted_array;
606 }
607 return array;
608 }
609
610 // returns IfNode
611 IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
612 Node *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
613 Node *tst = _gvn.transform(new BoolNode(cmp, mask));
614 IfNode *iff = create_and_map_if(control(), tst, prob, cnt);
615 return iff;
616 }
617
618
619 // sentinel value for the target bci to mark never taken branches
620 // (according to profiling)
621 static const int never_reached = INT_MAX;
622
623 //------------------------------helper for tableswitch-------------------------
624 void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, bool unc) {
625 // True branch, use existing map info
626 { PreserveJVMState pjvms(this);
627 Node *iftrue = _gvn.transform( new IfTrueNode (iff) );
628 set_control( iftrue );
1825 // False branch
1826 Node* iffalse = _gvn.transform( new IfFalseNode(iff) );
1827 set_control(iffalse);
1828
1829 if (stopped()) { // Path is dead?
1830 NOT_PRODUCT(explicit_null_checks_elided++);
1831 if (C->eliminate_boxing()) {
1832 // Mark the successor block as parsed
1833 next_block->next_path_num();
1834 }
1835 } else { // Path is live.
1836 adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block);
1837 }
1838
1839 if (do_stress_trap) {
1840 stress_trap(iff, counter, incr_store);
1841 }
1842 }
1843
1844 //------------------------------------do_if------------------------------------
1845 void Parse::do_if(BoolTest::mask btest, Node* c, bool can_trap, bool new_path, Node** ctrl_taken) {
1846 int target_bci = iter().get_dest();
1847
1848 Block* branch_block = successor_for_bci(target_bci);
1849 Block* next_block = successor_for_bci(iter().next_bci());
1850
1851 float cnt;
1852 float prob = branch_prediction(cnt, btest, target_bci, c);
1853 float untaken_prob = 1.0 - prob;
1854
1855 if (prob == PROB_UNKNOWN) {
1856 if (PrintOpto && Verbose) {
1857 tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
1858 }
1859 repush_if_args(); // to gather stats on loop
1860 uncommon_trap(Deoptimization::Reason_unreached,
1861 Deoptimization::Action_reinterpret,
1862 nullptr, "cold");
1863 if (C->eliminate_boxing()) {
1864 // Mark the successor blocks as parsed
1865 branch_block->next_path_num();
1916 }
1917
1918 // Generate real control flow
1919 float true_prob = (taken_if_true ? prob : untaken_prob);
1920 IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
1921 assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1922 Node* taken_branch = new IfTrueNode(iff);
1923 Node* untaken_branch = new IfFalseNode(iff);
1924 if (!taken_if_true) { // Finish conversion to canonical form
1925 Node* tmp = taken_branch;
1926 taken_branch = untaken_branch;
1927 untaken_branch = tmp;
1928 }
1929
1930 // Branch is taken:
1931 { PreserveJVMState pjvms(this);
1932 taken_branch = _gvn.transform(taken_branch);
1933 set_control(taken_branch);
1934
1935 if (stopped()) {
1936 if (C->eliminate_boxing() && !new_path) {
1937 // Mark the successor block as parsed (if we haven't created a new path)
1938 branch_block->next_path_num();
1939 }
1940 } else {
1941 adjust_map_after_if(taken_btest, c, prob, branch_block, can_trap);
1942 if (!stopped()) {
1943 if (new_path) {
1944 // Merge by using a new path
1945 merge_new_path(target_bci);
1946 } else if (ctrl_taken != nullptr) {
1947 // Don't merge but save taken branch to be wired by caller
1948 *ctrl_taken = control();
1949 } else {
1950 merge(target_bci);
1951 }
1952 }
1953 }
1954 }
1955
1956 untaken_branch = _gvn.transform(untaken_branch);
1957 set_control(untaken_branch);
1958
1959 // Branch not taken.
1960 if (stopped() && ctrl_taken == nullptr) {
1961 if (C->eliminate_boxing()) {
1962 // Mark the successor block as parsed (if caller does not re-wire control flow)
1963 next_block->next_path_num();
1964 }
1965 } else {
1966 adjust_map_after_if(untaken_btest, c, untaken_prob, next_block, can_trap);
1967 }
1968
1969 if (do_stress_trap) {
1970 stress_trap(iff, counter, incr_store);
1971 }
1972 }
1973
1974
1975 static ProfilePtrKind speculative_ptr_kind(const TypeOopPtr* t) {
1976 if (t->speculative() == nullptr) {
1977 return ProfileUnknownNull;
1978 }
1979 if (t->speculative_always_null()) {
1980 return ProfileAlwaysNull;
1981 }
1982 if (t->speculative_maybe_null()) {
1983 return ProfileMaybeNull;
1984 }
1985 return ProfileNeverNull;
1986 }
1987
1988 void Parse::acmp_always_null_input(Node* input, const TypeOopPtr* tinput, BoolTest::mask btest, Node* eq_region) {
1989 inc_sp(2);
1990 Node* cast = null_check_common(input, T_OBJECT, true, nullptr,
1991 !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check) &&
1992 speculative_ptr_kind(tinput) == ProfileAlwaysNull);
1993 dec_sp(2);
1994 if (btest == BoolTest::ne) {
1995 {
1996 PreserveJVMState pjvms(this);
1997 replace_in_map(input, cast);
1998 int target_bci = iter().get_dest();
1999 merge(target_bci);
2000 }
2001 record_for_igvn(eq_region);
2002 set_control(_gvn.transform(eq_region));
2003 } else {
2004 replace_in_map(input, cast);
2005 }
2006 }
2007
2008 Node* Parse::acmp_null_check(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, Node*& null_ctl) {
2009 inc_sp(2);
2010 null_ctl = top();
2011 Node* cast = null_check_oop(input, &null_ctl,
2012 input_ptr == ProfileNeverNull || (input_ptr == ProfileUnknownNull && !too_many_traps_or_recompiles(Deoptimization::Reason_null_check)),
2013 false,
2014 speculative_ptr_kind(tinput) == ProfileNeverNull &&
2015 !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check));
2016 dec_sp(2);
2017 assert(!stopped(), "null input should have been caught earlier");
2018 return cast;
2019 }
2020
2021 void Parse::acmp_known_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, ciKlass* input_type, BoolTest::mask btest, Node* eq_region) {
2022 Node* ne_region = new RegionNode(1);
2023 Node* null_ctl;
2024 Node* cast = acmp_null_check(input, tinput, input_ptr, null_ctl);
2025 ne_region->add_req(null_ctl);
2026
2027 Node* slow_ctl = type_check_receiver(cast, input_type, 1.0, &cast);
2028 {
2029 PreserveJVMState pjvms(this);
2030 inc_sp(2);
2031 set_control(slow_ctl);
2032 Deoptimization::DeoptReason reason;
2033 if (tinput->speculative_type() != nullptr && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
2034 reason = Deoptimization::Reason_speculate_class_check;
2035 } else {
2036 reason = Deoptimization::Reason_class_check;
2037 }
2038 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
2039 }
2040 ne_region->add_req(control());
2041
2042 record_for_igvn(ne_region);
2043 set_control(_gvn.transform(ne_region));
2044 if (btest == BoolTest::ne) {
2045 {
2046 PreserveJVMState pjvms(this);
2047 if (null_ctl == top()) {
2048 replace_in_map(input, cast);
2049 }
2050 int target_bci = iter().get_dest();
2051 merge(target_bci);
2052 }
2053 record_for_igvn(eq_region);
2054 set_control(_gvn.transform(eq_region));
2055 } else {
2056 if (null_ctl == top()) {
2057 replace_in_map(input, cast);
2058 }
2059 set_control(_gvn.transform(ne_region));
2060 }
2061 }
2062
2063 void Parse::acmp_unknown_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, BoolTest::mask btest, Node* eq_region) {
2064 Node* ne_region = new RegionNode(1);
2065 Node* null_ctl;
2066 Node* cast = acmp_null_check(input, tinput, input_ptr, null_ctl);
2067 ne_region->add_req(null_ctl);
2068
2069 {
2070 BuildCutout unless(this, inline_type_test(cast, /* is_inline = */ false), PROB_MAX);
2071 inc_sp(2);
2072 uncommon_trap_exact(Deoptimization::Reason_class_check, Deoptimization::Action_maybe_recompile);
2073 }
2074
2075 ne_region->add_req(control());
2076
2077 record_for_igvn(ne_region);
2078 set_control(_gvn.transform(ne_region));
2079 if (btest == BoolTest::ne) {
2080 {
2081 PreserveJVMState pjvms(this);
2082 if (null_ctl == top()) {
2083 replace_in_map(input, cast);
2084 }
2085 int target_bci = iter().get_dest();
2086 merge(target_bci);
2087 }
2088 record_for_igvn(eq_region);
2089 set_control(_gvn.transform(eq_region));
2090 } else {
2091 if (null_ctl == top()) {
2092 replace_in_map(input, cast);
2093 }
2094 set_control(_gvn.transform(ne_region));
2095 }
2096 }
2097
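// Parse an if_acmpeq/if_acmpne. With Valhalla, acmp on two value objects compares
// by state ("substitutability") rather than by identity. For example (illustrative
// only):
//   value class Point { int x; int y; }
//   new Point(1, 2) == new Point(1, 2)   // true, via substitutability
// Strategy: do the cheap pointer comparison first, use profiling to rule out
// inline type operands, and only then call ValueObjectMethods::isSubstitutable().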
2098 void Parse::do_acmp(BoolTest::mask btest, Node* left, Node* right) {
2099 ciKlass* left_type = nullptr;
2100 ciKlass* right_type = nullptr;
2101 ProfilePtrKind left_ptr = ProfileUnknownNull;
2102 ProfilePtrKind right_ptr = ProfileUnknownNull;
2103 bool left_inline_type = true;
2104 bool right_inline_type = true;
2105
2106 // Leverage profiling at acmp
2107 if (UseACmpProfile) {
2108 method()->acmp_profiled_type(bci(), left_type, right_type, left_ptr, right_ptr, left_inline_type, right_inline_type);
2109 if (too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
2110 left_type = nullptr;
2111 right_type = nullptr;
2112 left_inline_type = true;
2113 right_inline_type = true;
2114 }
2115 if (too_many_traps_or_recompiles(Deoptimization::Reason_null_check)) {
2116 left_ptr = ProfileUnknownNull;
2117 right_ptr = ProfileUnknownNull;
2118 }
2119 }
2120
2121 if (UseTypeSpeculation) {
2122 record_profile_for_speculation(left, left_type, left_ptr);
2123 record_profile_for_speculation(right, right_type, right_ptr);
2124 }
2125
2126 if (!EnableValhalla) {
2127 Node* cmp = CmpP(left, right);
2128 cmp = optimize_cmp_with_klass(cmp);
2129 do_if(btest, cmp);
2130 return;
2131 }
2132
2133 // Check for equality before potentially allocating
2134 if (left == right) {
2135 do_if(btest, makecon(TypeInt::CC_EQ));
2136 return;
2137 }
2138
2139 // Allocate inline type operands and re-execute on deoptimization
2140 if (left->is_InlineType()) {
2141 if (_gvn.type(right)->is_zero_type() ||
2142 (right->is_InlineType() && _gvn.type(right->as_InlineType()->get_is_init())->is_zero_type())) {
2143 // Null checking a scalarized but nullable inline type. Check the IsInit
2144 // input instead of the oop input to avoid keeping buffer allocations alive.
2145 Node* cmp = CmpI(left->as_InlineType()->get_is_init(), intcon(0));
2146 do_if(btest, cmp);
2147 return;
2148 } else {
2149 PreserveReexecuteState preexecs(this);
2150 inc_sp(2);
2151 jvms()->set_should_reexecute(true);
2152 left = left->as_InlineType()->buffer(this)->get_oop();
2153 }
2154 }
2155 if (right->is_InlineType()) {
2156 PreserveReexecuteState preexecs(this);
2157 inc_sp(2);
2158 jvms()->set_should_reexecute(true);
2159 right = right->as_InlineType()->buffer(this)->get_oop();
2160 }
2161
2162 // First, do a normal pointer comparison
2163 const TypeOopPtr* tleft = _gvn.type(left)->isa_oopptr();
2164 const TypeOopPtr* tright = _gvn.type(right)->isa_oopptr();
2165 Node* cmp = CmpP(left, right);
2166 cmp = optimize_cmp_with_klass(cmp);
2167 if (tleft == nullptr || !tleft->can_be_inline_type() ||
2168 tright == nullptr || !tright->can_be_inline_type()) {
2169 // This is sufficient if one of the operands can't be an inline type
2170 do_if(btest, cmp);
2171 return;
2172 }
2173
2174 // Don't add traps to unstable if branches: additional checks are required to
2175 // decide whether the operands are equal/substitutable, so we must not prune
2176 // the branches of one if based on the profiling of the acmp branches.
2177 // Also, OptimizeUnstableIf would set an incorrect re-execution state because it
2178 // assumes a 1-1 mapping between the if and the acmp branches, i.e. that hitting
2179 // a trap means the corresponding acmp branch is taken on re-execution.
2180 const bool can_trap = true;
2181
2182 Node* eq_region = nullptr;
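// For ne, eq_region merges all control paths on which the operands turn out to
// be equal, i.e. on which the branch is not taken.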
2183 if (btest == BoolTest::eq) {
2184 do_if(btest, cmp, !can_trap, true);
2185 if (stopped()) {
2186 // Pointers are equal, operands must be equal
2187 return;
2188 }
2189 } else {
2190 assert(btest == BoolTest::ne, "only eq or ne");
2191 Node* is_not_equal = nullptr;
2192 eq_region = new RegionNode(3);
2193 {
2194 PreserveJVMState pjvms(this);
2195 // Pointers are not equal, but more checks are needed to determine if the operands are (not) substitutable
2196 do_if(btest, cmp, !can_trap, false, &is_not_equal);
2197 if (!stopped()) {
2198 eq_region->init_req(1, control());
2199 }
2200 }
2201 if (is_not_equal == nullptr || is_not_equal->is_top()) {
2202 record_for_igvn(eq_region);
2203 set_control(_gvn.transform(eq_region));
2204 return;
2205 }
2206 set_control(is_not_equal);
2207 }
2208
2209 // Prefer speculative types if available
2210 if (!too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
2211 if (tleft->speculative_type() != nullptr) {
2212 left_type = tleft->speculative_type();
2213 }
2214 if (tright->speculative_type() != nullptr) {
2215 right_type = tright->speculative_type();
2216 }
2217 }
2218
2219 if (speculative_ptr_kind(tleft) != ProfileMaybeNull && speculative_ptr_kind(tleft) != ProfileUnknownNull) {
2220 ProfilePtrKind speculative_left_ptr = speculative_ptr_kind(tleft);
2221 if (speculative_left_ptr == ProfileAlwaysNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_assert)) {
2222 left_ptr = speculative_left_ptr;
2223 } else if (speculative_left_ptr == ProfileNeverNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)) {
2224 left_ptr = speculative_left_ptr;
2225 }
2226 }
2227 if (speculative_ptr_kind(tright) != ProfileMaybeNull && speculative_ptr_kind(tright) != ProfileUnknownNull) {
2228 ProfilePtrKind speculative_right_ptr = speculative_ptr_kind(tright);
2229 if (speculative_right_ptr == ProfileAlwaysNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_assert)) {
2230 right_ptr = speculative_right_ptr;
2231 } else if (speculative_right_ptr == ProfileNeverNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)) {
2232 right_ptr = speculative_right_ptr;
2233 }
2234 }
2235
2236 if (left_ptr == ProfileAlwaysNull) {
2237 // Comparison with null. Assert the input is indeed null and we're done.
2238 acmp_always_null_input(left, tleft, btest, eq_region);
2239 return;
2240 }
2241 if (right_ptr == ProfileAlwaysNull) {
2242 // Comparison with null. Assert the input is indeed null and we're done.
2243 acmp_always_null_input(right, tright, btest, eq_region);
2244 return;
2245 }
2246 if (left_type != nullptr && !left_type->is_inlinetype()) {
2247 // Comparison with an object of known type
2248 acmp_known_non_inline_type_input(left, tleft, left_ptr, left_type, btest, eq_region);
2249 return;
2250 }
2251 if (right_type != nullptr && !right_type->is_inlinetype()) {
2252 // Comparison with an object of known type
2253 acmp_known_non_inline_type_input(right, tright, right_ptr, right_type, btest, eq_region);
2254 return;
2255 }
2256 if (!left_inline_type) {
2257 // Comparison with an object known not to be an inline type
2258 acmp_unknown_non_inline_type_input(left, tleft, left_ptr, btest, eq_region);
2259 return;
2260 }
2261 if (!right_inline_type) {
2262 // Comparison with an object known not to be an inline type
2263 acmp_unknown_non_inline_type_input(right, tright, right_ptr, btest, eq_region);
2264 return;
2265 }
2266
2267 // Pointers are not equal, check if the right operand is non-null
2268 Node* ne_region = new RegionNode(6);
2269 Node* null_ctl;
2270 Node* not_null_right = acmp_null_check(right, tright, right_ptr, null_ctl);
2271 ne_region->init_req(1, null_ctl);
2272
2273 // The right operand is non-null, check if it is an inline type
2274 Node* is_value = inline_type_test(not_null_right);
2275 IfNode* is_value_iff = create_and_map_if(control(), is_value, PROB_FAIR, COUNT_UNKNOWN);
2276 Node* not_value = _gvn.transform(new IfFalseNode(is_value_iff));
2277 ne_region->init_req(2, not_value);
2278 set_control(_gvn.transform(new IfTrueNode(is_value_iff)));
2279
2280 // The right operand is an inline type, check if the left operand is non-null
2281 Node* not_null_left = acmp_null_check(left, tleft, left_ptr, null_ctl);
2282 ne_region->init_req(3, null_ctl);
2283
2284 // Check if both operands are of the same class.
2285 Node* kls_left = load_object_klass(not_null_left);
2286 Node* kls_right = load_object_klass(not_null_right);
2287 Node* kls_cmp = CmpP(kls_left, kls_right);
2288 Node* kls_bol = _gvn.transform(new BoolNode(kls_cmp, BoolTest::ne));
2289 IfNode* kls_iff = create_and_map_if(control(), kls_bol, PROB_FAIR, COUNT_UNKNOWN);
2290 Node* kls_ne = _gvn.transform(new IfTrueNode(kls_iff));
2291 set_control(_gvn.transform(new IfFalseNode(kls_iff)));
2292 ne_region->init_req(4, kls_ne);
2293
2294 if (stopped()) {
2295 record_for_igvn(ne_region);
2296 set_control(_gvn.transform(ne_region));
2297 if (btest == BoolTest::ne) {
2298 {
2299 PreserveJVMState pjvms(this);
2300 int target_bci = iter().get_dest();
2301 merge(target_bci);
2302 }
2303 record_for_igvn(eq_region);
2304 set_control(_gvn.transform(eq_region));
2305 }
2306 return;
2307 }
2308
2309 // Both operands are value objects of the same class, so we need to perform a
2310 // substitutability test. Delegate to ValueObjectMethods::isSubstitutable().
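// The runtime call below produces new I/O and memory state: merge the state of
// all paths flowing into ne_region (and eq_region in the ne case) through phis.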
2311 Node* ne_io_phi = PhiNode::make(ne_region, i_o());
2312 Node* mem = reset_memory();
2313 Node* ne_mem_phi = PhiNode::make(ne_region, mem);
2314
2315 Node* eq_io_phi = nullptr;
2316 Node* eq_mem_phi = nullptr;
2317 if (eq_region != nullptr) {
2318 eq_io_phi = PhiNode::make(eq_region, i_o());
2319 eq_mem_phi = PhiNode::make(eq_region, mem);
2320 }
2321
2322 set_all_memory(mem);
2323
2324 kill_dead_locals();
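// Build a static call to ValueObjectMethods.isSubstitutable(Object, Object),
// bypassing symbolic resolution at this bci (override_symbolic_info).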
2325 ciMethod* subst_method = ciEnv::current()->ValueObjectMethods_klass()->find_method(ciSymbols::isSubstitutable_name(), ciSymbols::object_object_boolean_signature());
2326 CallStaticJavaNode *call = new CallStaticJavaNode(C, TypeFunc::make(subst_method), SharedRuntime::get_resolve_static_call_stub(), subst_method);
2327 call->set_override_symbolic_info(true);
2328 call->init_req(TypeFunc::Parms, not_null_left);
2329 call->init_req(TypeFunc::Parms+1, not_null_right);
2330 inc_sp(2);
2331 set_edges_for_java_call(call, false, false);
2332 Node* ret = set_results_for_java_call(call, false, true);
2333 dec_sp(2);
2334
2335 // Test the return value of ValueObjectMethods::isSubstitutable()
2336 // This is the last check, do_if can emit traps now.
2337 Node* subst_cmp = _gvn.transform(new CmpINode(ret, intcon(1)));
2338 Node* ctl = C->top();
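// ctl collects the control path on which the operands are found not substitutable.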
2339 if (btest == BoolTest::eq) {
2340 PreserveJVMState pjvms(this);
2341 do_if(btest, subst_cmp, can_trap);
2342 if (!stopped()) {
2343 ctl = control();
2344 }
2345 } else {
2346 assert(btest == BoolTest::ne, "only eq or ne");
2347 PreserveJVMState pjvms(this);
2348 do_if(btest, subst_cmp, can_trap, false, &ctl);
2349 if (!stopped()) {
2350 eq_region->init_req(2, control());
2351 eq_io_phi->init_req(2, i_o());
2352 eq_mem_phi->init_req(2, reset_memory());
2353 }
2354 }
2355 ne_region->init_req(5, ctl);
2356 ne_io_phi->init_req(5, i_o());
2357 ne_mem_phi->init_req(5, reset_memory());
2358
2359 record_for_igvn(ne_region);
2360 set_control(_gvn.transform(ne_region));
2361 set_i_o(_gvn.transform(ne_io_phi));
2362 set_all_memory(_gvn.transform(ne_mem_phi));
2363
2364 if (btest == BoolTest::ne) {
2365 {
2366 PreserveJVMState pjvms(this);
2367 int target_bci = iter().get_dest();
2368 merge(target_bci);
2369 }
2370
2371 record_for_igvn(eq_region);
2372 set_control(_gvn.transform(eq_region));
2373 set_i_o(_gvn.transform(eq_io_phi));
2374 set_all_memory(_gvn.transform(eq_mem_phi));
2375 }
2376 }
2377
2378 // Force unstable if traps to be taken randomly to trigger intermittent bugs such as incorrect debug information.
2379 // Add another if before the unstable if that checks a "random" condition at runtime (a simple shared counter) and
2380 // then either takes the trap or executes the original, unstable if.
2381 void Parse::stress_trap(IfNode* orig_iff, Node* counter, Node* incr_store) {
2382 // Search for an unstable if trap
2383 CallStaticJavaNode* trap = nullptr;
2384 assert(orig_iff->Opcode() == Op_If && orig_iff->outcnt() == 2, "malformed if");
2385 ProjNode* trap_proj = orig_iff->uncommon_trap_proj(trap, Deoptimization::Reason_unstable_if);
2386 if (trap == nullptr || !trap->jvms()->should_reexecute()) {
2387 // No suitable trap found. Remove unused counter load and increment.
2388 C->gvn_replace_by(incr_store, incr_store->in(MemNode::Memory));
2389 return;
2390 }
2391
2392 // Remove trap from optimization list since we add another path to the trap.
2393 bool success = C->remove_unstable_if_trap(trap, true);
2394 assert(success, "Trap already modified");
2395
2396 // Add a check before the original if that will trap with a certain frequency and execute the original if otherwise
2397 int freq_log = (C->random() % 31) + 1; // Random logarithmic frequency in [1, 31]
2430 }
2431
2432 void Parse::maybe_add_predicate_after_if(Block* path) {
2433 if (path->is_SEL_head() && path->preds_parsed() == 0) {
2434 // Add predicates at the bci of the if dominating the loop so traps can be
2435 // recorded on the if's profile data
2436 int bc_depth = repush_if_args();
2437 add_parse_predicates();
2438 dec_sp(bc_depth);
2439 path->set_has_predicates();
2440 }
2441 }
2442
2443
2444 //----------------------------adjust_map_after_if------------------------------
2445 // Adjust the JVM state to reflect the result of taking this path.
2446 // Basically, it means inspecting the CmpNode controlling this
2447 // branch, seeing how it constrains a tested value, and then
2448 // deciding if it's worth our while to encode this constraint
2449 // as graph nodes in the current abstract interpretation map.
2450 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path, bool can_trap) {
2451 if (!c->is_Cmp()) {
2452 maybe_add_predicate_after_if(path);
2453 return;
2454 }
2455
2456 if (stopped() || btest == BoolTest::illegal) {
2457 return; // nothing to do
2458 }
2459
2460 bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
2461
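// If profiling claims this path is essentially never taken, replace it with an
// uncommon trap so the untaken branch is pruned from the graph; hitting the trap
// deoptimizes and reinterprets.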
2462 if (can_trap && path_is_suitable_for_uncommon_trap(prob)) {
2463 repush_if_args();
2464 Node* call = uncommon_trap(Deoptimization::Reason_unstable_if,
2465 Deoptimization::Action_reinterpret,
2466 nullptr,
2467 (is_fallthrough ? "taken always" : "taken never"));
2468
2469 if (call != nullptr) {
2470 C->record_unstable_if_trap(new UnstableIfTrap(call->as_CallStaticJava(), path));
2471 }
2472 return;
2473 }
2474
2475 Node* val = c->in(1);
2476 Node* con = c->in(2);
2477 const Type* tcon = _gvn.type(con);
2478 const Type* tval = _gvn.type(val);
2479 bool have_con = tcon->singleton();
2480 if (tval->singleton()) {
2481 if (!have_con) {
2482 // Swap, so constant is in con.
2539 if (obj != nullptr && (con_type->isa_instptr() || con_type->isa_aryptr())) {
2540 // Found:
2541 // Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
2542 // or the narrowOop equivalent.
2543 const Type* obj_type = _gvn.type(obj);
2544 const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr();
2545 if (tboth != nullptr && tboth->klass_is_exact() && tboth != obj_type &&
2546 tboth->higher_equal(obj_type)) {
2547 // obj has to be of the exact type Foo if the CmpP succeeds.
2548 int obj_in_map = map()->find_edge(obj);
2549 JVMState* jvms = this->jvms();
2550 if (obj_in_map >= 0 &&
2551 (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
2552 TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth);
2553 const Type* tcc = ccast->as_Type()->type();
2554 assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
2555 // Delay transform() call to allow recovery of pre-cast value
2556 // at the control merge.
2557 _gvn.set_type_bottom(ccast);
2558 record_for_igvn(ccast);
2559 if (tboth->is_inlinetypeptr()) {
2560 ccast = InlineTypeNode::make_from_oop(this, ccast, tboth->exact_klass(true)->as_inline_klass());
2561 }
2562 // Here's the payoff.
2563 replace_in_map(obj, ccast);
2564 }
2565 }
2566 }
2567 }
2568
2569 int val_in_map = map()->find_edge(val);
2570 if (val_in_map < 0) return; // replace_in_map would be useless
2571 {
2572 JVMState* jvms = this->jvms();
2573 if (!(jvms->is_loc(val_in_map) ||
2574 jvms->is_stk(val_in_map)))
2575 return; // again, it would be useless
2576 }
2577
2578 // Check for a comparison to a constant, and "know" that the compared
2579 // value is constrained on this path.
2580 assert(tcon->singleton(), "");
2581 ConstraintCastNode* ccast = nullptr;
2646 if (c->Opcode() == Op_CmpP &&
2647 (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
2648 c->in(2)->is_Con()) {
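// Pattern: CmpP(LoadKlass(obj._klass), ConP). If profiling recorded a speculative
// type for obj, cast obj to that type so the klass load (and thus the compare)
// can constant fold.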
2649 Node* load_klass = nullptr;
2650 Node* decode = nullptr;
2651 if (c->in(1)->Opcode() == Op_DecodeNKlass) {
2652 decode = c->in(1);
2653 load_klass = c->in(1)->in(1);
2654 } else {
2655 load_klass = c->in(1);
2656 }
2657 if (load_klass->in(2)->is_AddP()) {
2658 Node* addp = load_klass->in(2);
2659 Node* obj = addp->in(AddPNode::Address);
2660 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
2661 if (obj_type->speculative_type_not_null() != nullptr) {
2662 ciKlass* k = obj_type->speculative_type();
2663 inc_sp(2);
2664 obj = maybe_cast_profiled_obj(obj, k);
2665 dec_sp(2);
2666 if (obj->is_InlineType()) {
2667 assert(obj->as_InlineType()->is_allocated(&_gvn), "must be allocated");
2668 obj = obj->as_InlineType()->get_oop();
2669 }
2670 // Make the CmpP use the casted obj
2671 addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
2672 load_klass = load_klass->clone();
2673 load_klass->set_req(2, addp);
2674 load_klass = _gvn.transform(load_klass);
2675 if (decode != nullptr) {
2676 decode = decode->clone();
2677 decode->set_req(1, load_klass);
2678 load_klass = _gvn.transform(decode);
2679 }
2680 c = c->clone();
2681 c->set_req(1, load_klass);
2682 c = _gvn.transform(c);
2683 }
2684 }
2685 }
2686 return c;
2687 }
2688
2689 //------------------------------do_one_bytecode--------------------------------
3447 // See if we can get some profile data and hand it off to the next block
3448 Block *target_block = block()->successor_for_bci(target_bci);
3449 if (target_block->pred_count() != 1) break;
3450 ciMethodData* methodData = method()->method_data();
3451 if (!methodData->is_mature()) break;
3452 ciProfileData* data = methodData->bci_to_data(bci());
3453 assert(data != nullptr && data->is_JumpData(), "need JumpData for taken branch");
3454 int taken = ((ciJumpData*)data)->taken();
3455 taken = method()->scale_count(taken);
3456 target_block->set_count(taken);
3457 break;
3458 }
3459
3460 case Bytecodes::_ifnull: btest = BoolTest::eq; goto handle_if_null;
3461 case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
3462 handle_if_null:
3463 // If this is a backwards branch in the bytecodes, add Safepoint
3464 maybe_add_safepoint(iter().get_dest());
3465 a = null();
3466 b = pop();
3467 if (b->is_InlineType()) {
3468 // Null checking a scalarized but nullable inline type. Check the IsInit
3469 // input instead of the oop input to avoid keeping buffer allocations alive.
3470 c = _gvn.transform(new CmpINode(b->as_InlineType()->get_is_init(), zerocon(T_INT)));
3471 } else {
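// Use null-related speculation: if b is speculatively never null, emit a null
// check that traps on null; if it is speculatively always null, emit a null
// assert instead. Either way the CmpP below can then fold.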
3472 if (!_gvn.type(b)->speculative_maybe_null() &&
3473 !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
3474 inc_sp(1);
3475 Node* null_ctl = top();
3476 b = null_check_oop(b, &null_ctl, true, true, true);
3477 assert(null_ctl->is_top(), "no null control here");
3478 dec_sp(1);
3479 } else if (_gvn.type(b)->speculative_always_null() &&
3480 !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
3481 inc_sp(1);
3482 b = null_assert(b);
3483 dec_sp(1);
3484 }
3485 c = _gvn.transform( new CmpPNode(b, a) );
3486 }
3487 do_ifnull(btest, c);
3488 break;
3489
3490 case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
3491 case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
3492 handle_if_acmp:
3493 // If this is a backwards branch in the bytecodes, add Safepoint
3494 maybe_add_safepoint(iter().get_dest());
3495 a = pop();
3496 b = pop();
3497 do_acmp(btest, b, a);
3498 break;
3499
3500 case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
3501 case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
3502 case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
3503 case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
3504 case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
3505 case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
3506 handle_ifxx:
3507 // If this is a backwards branch in the bytecodes, add Safepoint
3508 maybe_add_safepoint(iter().get_dest());
3509 a = _gvn.intcon(0);
3510 b = pop();
3511 c = _gvn.transform( new CmpINode(b, a) );
3512 do_if(btest, c);
3513 break;
3514
3515 case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
3516 case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
3517 case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
3532 break;
3533
3534 case Bytecodes::_lookupswitch:
3535 do_lookupswitch();
3536 break;
3537
3538 case Bytecodes::_invokestatic:
3539 case Bytecodes::_invokedynamic:
3540 case Bytecodes::_invokespecial:
3541 case Bytecodes::_invokevirtual:
3542 case Bytecodes::_invokeinterface:
3543 do_call();
3544 break;
3545 case Bytecodes::_checkcast:
3546 do_checkcast();
3547 break;
3548 case Bytecodes::_instanceof:
3549 do_instanceof();
3550 break;
3551 case Bytecodes::_anewarray:
3552 do_newarray();
3553 break;
3554 case Bytecodes::_newarray:
3555 do_newarray((BasicType)iter().get_index());
3556 break;
3557 case Bytecodes::_multianewarray:
3558 do_multianewarray();
3559 break;
3560 case Bytecodes::_new:
3561 do_new();
3562 break;
3563
3564 case Bytecodes::_jsr:
3565 case Bytecodes::_jsr_w:
3566 do_jsr();
3567 break;
3568
3569 case Bytecodes::_ret:
3570 do_ret();
3571 break;
3572