 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/register.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciUtilities.hpp"
#include "classfile/javaClasses.hpp"
#include "compiler/compileLog.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/intrinsicnode.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/subtypenode.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/powerOfTwo.hpp"

//----------------------------GraphKit-----------------------------------------
// Main utility constructor.
GraphKit::GraphKit(JVMState* jvms)
  : Phase(Phase::Parser),
    _env(C->env()),
    _gvn(*C->initial_gvn()),
    _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
{
  _exceptions = jvms->map()->next_exception();
  if (_exceptions != nullptr)  jvms->map()->set_next_exception(nullptr);
  set_jvms(jvms);
}

// Private constructor for parser.
GraphKit::GraphKit()
  : Phase(Phase::Parser),
    _env(C->env()),
    _gvn(*C->initial_gvn()),
    _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
{
  _exceptions = nullptr;
  set_map(nullptr);
  debug_only(_sp = -99);
  debug_only(set_bci(-99));
}



//---------------------------clean_stack---------------------------------------
// Clear away rubbish from the stack area of the JVM state.
// This destroys any arguments that may be waiting on the stack.
        if (PrintMiscellaneous && (Verbose || WizardMode)) {
          tty->print_cr("Zombie local %d: ", local);
          jvms->dump();
        }
        return false;
      }
    }
  }
  return true;
}

#endif //ASSERT

// Helper function for enforcing certain bytecodes to reexecute if deoptimization happens.
static bool should_reexecute_implied_by_bytecode(JVMState *jvms, bool is_anewarray) {
  ciMethod* cur_method = jvms->method();
  int       cur_bci    = jvms->bci();
  if (cur_method != nullptr && cur_bci != InvocationEntryBci) {
    Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
    return Interpreter::bytecode_should_reexecute(code) ||
           (is_anewarray && code == Bytecodes::_multianewarray);
    // Reexecute _multianewarray bytecode which was replaced with
    // sequence of [a]newarray. See Parse::do_multianewarray().
    //
    // Note: interpreter should not have it set since this optimization
    // is limited by dimensions and guarded by flag so in some cases
    // multianewarray() runtime calls will be generated and
    // the bytecode should not be reexecuted (stack will not be reset).
  } else {
    return false;
  }
}
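
// Illustrative sketch (not compiled; dimensions and types are made up):
// for a small constant-dimension 'new T[2][3]', Parse::do_multianewarray()
// may emit the equivalent of
//   a = new T[2][];        // anewarray of the outer dimension
//   a[0] = new T[3];       // newarray per row
//   a[1] = new T[3];
// If we deoptimize in the middle of that expansion, the whole original
// _multianewarray bytecode must be reexecuted, hence the special case above.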

// Helper function for adding JVMState and debug information to node
void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
  // Add the safepoint edges to the call (or other safepoint).

  // Make sure dead locals are set to top.  This
  // should help register allocation time and cut down on the size
  // of the deoptimization information.
  assert(dead_locals_are_killed(), "garbage in debug info before safepoint");
  }

  // Presize the call:
  DEBUG_ONLY(uint non_debug_edges = call->req());
  call->add_req_batch(top(), youngest_jvms->debug_depth());
  assert(call->req() == non_debug_edges + youngest_jvms->debug_depth(), "");

  // Set up edges so that the call looks like this:
  //  Call [state:] ctl io mem fptr retadr
  //       [parms:] parm0 ... parmN
  //       [root:]  loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
  //    [...mid:]   loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN [...]
  //       [young:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
  // Note that caller debug info precedes callee debug info.

  // Fill pointer walks backwards from "young:" to "root:" in the diagram above:
  uint debug_ptr = call->req();

  // Loop over the map input edges associated with jvms, add them
  // to the call node, & reset all offsets to match call node array.
  for (JVMState* in_jvms = youngest_jvms; in_jvms != nullptr; ) {
    uint debug_end   = debug_ptr;
    uint debug_start = debug_ptr - in_jvms->debug_size();
    debug_ptr = debug_start;  // back up the ptr

    uint p = debug_start;  // walks forward in [debug_start, debug_end)
    uint j, k, l;
    SafePointNode* in_map = in_jvms->map();
    out_jvms->set_map(call);

    if (can_prune_locals) {
      assert(in_jvms->method() == out_jvms->method(), "sanity");
      // If the current throw can reach an exception handler in this JVMS,
      // then we must keep everything live that can reach that handler.
      // As a quick and dirty approximation, we look for any handlers at all.
      if (in_jvms->method()->has_exception_handlers()) {
        can_prune_locals = false;
      }
    }

    // Add the Locals
    k = in_jvms->locoff();
    l = in_jvms->loc_size();
    out_jvms->set_locoff(p);
    if (!can_prune_locals) {
      for (j = 0; j < l; j++)
        call->set_req(p++, in_map->in(k+j));
    } else {
      p += l;  // already set to top above by add_req_batch
    }

    // Add the Expression Stack
    k = in_jvms->stkoff();
    l = in_jvms->sp();
    out_jvms->set_stkoff(p);
    if (!can_prune_locals) {
      for (j = 0; j < l; j++)
        call->set_req(p++, in_map->in(k+j));
    } else if (can_prune_locals && stack_slots_not_pruned != 0) {
      // Divide stack into {S0,...,S1}, where S0 is set to top.
      uint s1 = stack_slots_not_pruned;
      stack_slots_not_pruned = 0;  // for next iteration
      if (s1 > l)  s1 = l;
      uint s0 = l - s1;
      p += s0;  // skip the tops preinstalled by add_req_batch
      for (j = s0; j < l; j++)
        call->set_req(p++, in_map->in(k+j));
    } else {
      p += l;  // already set to top above by add_req_batch
    }

    // Add the Monitors
    k = in_jvms->monoff();
    l = in_jvms->mon_size();
    out_jvms->set_monoff(p);
    for (j = 0; j < l; j++)
      call->set_req(p++, in_map->in(k+j));

    // Copy any scalar object fields.
    k = in_jvms->scloff();
    l = in_jvms->scl_size();
    out_jvms->set_scloff(p);
    for (j = 0; j < l; j++)
      call->set_req(p++, in_map->in(k+j));

    // Finish the new jvms.
    out_jvms->set_endoff(p);

    assert(out_jvms->endoff()     == debug_end,             "fill ptr must match");
    assert(out_jvms->depth()      == in_jvms->depth(),      "depth must match");
    assert(out_jvms->loc_size()   == in_jvms->loc_size(),   "size must match");
    assert(out_jvms->mon_size()   == in_jvms->mon_size(),   "size must match");
    assert(out_jvms->scl_size()   == in_jvms->scl_size(),   "size must match");
    assert(out_jvms->debug_size() == in_jvms->debug_size(), "size must match");

    // Update the two tail pointers in parallel.
    out_jvms = out_jvms->caller();
    in_jvms  = in_jvms->caller();
  }

  assert(debug_ptr == non_debug_edges, "debug info must fit exactly");

  // Test the correctness of JVMState::debug_xxx accessors:
  assert(call->jvms()->debug_start() == non_debug_edges, "");
  assert(call->jvms()->debug_end()   == call->req(), "");
  assert(call->jvms()->debug_depth() == call->req() - non_debug_edges, "");
}
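
// Illustrative sketch (not compiled): for a call sitting in B() inlined into
// A(), the debug edges laid out above appear as one flat array
//   [root:]  A's locals, A's stack, A's monitors
//   [young:] B's locals, B's stack, B's monitors
// with caller (A) info first, so a deoptimization can rebuild both
// interpreter frames by walking the array front to back.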

bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
  Bytecodes::Code code = java_bc();
  if (code == Bytecodes::_wide) {
    code = method()->java_code_at_bci(bci() + 1);
  }

  if (code != Bytecodes::_illegal) {
    depth = Bytecodes::depth(code); // checkcast=0, athrow=-1
  Node* conv = _gvn.transform( new ConvI2LNode(offset));
  Node* mask = _gvn.transform(ConLNode::make((julong) max_juint));
  return _gvn.transform( new AndLNode(conv, mask) );
}

Node* GraphKit::ConvL2I(Node* offset) {
  // short-circuit a common case
  jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
  if (offset_con != (jlong)Type::OffsetBot) {
    return intcon((int) offset_con);
  }
  return _gvn.transform( new ConvL2INode(offset));
}
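
// Illustrative sketch (not compiled): if 'offset' is the constant node for
// 0x100000005L, find_long_con() recovers the constant, and ConvL2I simply
// returns intcon(5) (truncation to the low 32 bits) without ever building a
// ConvL2INode.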

//-------------------------load_object_klass-----------------------------------
Node* GraphKit::load_object_klass(Node* obj) {
  // Special-case a fresh allocation to avoid building nodes:
  Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
  if (akls != nullptr)  return akls;
  Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
  return _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), k_adr, TypeInstPtr::KLASS));
}

//-------------------------load_array_length-----------------------------------
Node* GraphKit::load_array_length(Node* array) {
  // Special-case a fresh allocation to avoid building nodes:
  AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array);
  Node* alen;
  if (alloc == nullptr) {
    Node* r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
    alen = _gvn.transform( new LoadRangeNode(nullptr, immutable_memory(), r_adr, TypeInt::POS));
  } else {
    alen = array_ideal_length(alloc, _gvn.type(array)->is_oopptr(), false);
  }
  return alen;
}

Node* GraphKit::array_ideal_length(AllocateArrayNode* alloc,
                                   const TypeOopPtr* oop_type,
                                   bool replace_length_in_map) {
  Node* length = alloc->Ideal_length();
      replace_in_map(length, ccast);
    }
    return ccast;
  }
  return length;
}

//------------------------------do_null_check----------------------------------
// Helper function to do a null pointer check.  Returned value is
// the incoming address with null casted away.  You are allowed to use the
// not-null value only if you are control dependent on the test.
#ifndef PRODUCT
extern uint explicit_null_checks_inserted,
            explicit_null_checks_elided;
#endif
Node* GraphKit::null_check_common(Node* value, BasicType type,
                                  // optional arguments for variations:
                                  bool assert_null,
                                  Node* *null_control,
                                  bool speculative) {
  assert(!assert_null || null_control == nullptr, "not both at once");
  if (stopped())  return top();
  NOT_PRODUCT(explicit_null_checks_inserted++);

  // Construct null check
  Node* chk = nullptr;
  switch(type) {
    case T_LONG   : chk = new CmpLNode(value, _gvn.zerocon(T_LONG)); break;
    case T_INT    : chk = new CmpINode(value, _gvn.intcon(0)); break;
    case T_ARRAY  : // fall through
      type = T_OBJECT;  // simplify further tests
    case T_OBJECT : {
      const Type* t = _gvn.type( value );

      const TypeOopPtr* tp = t->isa_oopptr();
      if (tp != nullptr && !tp->is_loaded()
          // Only for do_null_check, not any of its siblings:
          && !assert_null && null_control == nullptr) {
        // Usually, any field access or invocation on an unloaded oop type
        // will simply fail to link, since the statically linked class is
        // likely also to be unloaded.  However, in -Xcomp mode, sometimes
        // the static class is loaded but the sharper oop type is not.
        // Rather than checking for this obscure case in lots of places,
        // we simply observe that a null check on an unloaded class
        }
        Node* oldcontrol = control();
        set_control(cfg);
        Node* res = cast_not_null(value);
        set_control(oldcontrol);
        NOT_PRODUCT(explicit_null_checks_elided++);
        return res;
      }
      cfg = IfNode::up_one_dom(cfg, /*linear_only=*/ true);
      if (cfg == nullptr)  break;  // Quit at region nodes
      depth++;
    }
  }

  //-----------
  // Branch to failure if null
  float ok_prob = PROB_MAX;  // a priori estimate: nulls never happen
  Deoptimization::DeoptReason reason;
  if (assert_null) {
    reason = Deoptimization::reason_null_assert(speculative);
  } else if (type == T_OBJECT) {
    reason = Deoptimization::reason_null_check(speculative);
  } else {
    reason = Deoptimization::Reason_div0_check;
  }
  // %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
  // ciMethodData::has_trap_at will return a conservative -1 if any
  // must-be-null assertion has failed.  This could cause performance
  // problems for a method after its first do_null_assert failure.
  // Consider using 'Reason_class_check' instead?

  // To cause an implicit null check, we set the not-null probability
  // to the maximum (PROB_MAX).  For an explicit check the probability
  // is set to a smaller value.
  if (null_control != nullptr || too_many_traps(reason)) {
    // probability is less likely
    ok_prob = PROB_LIKELY_MAG(3);
  } else if (!assert_null &&
             (ImplicitNullCheckThreshold > 0) &&
             method() != nullptr &&
             (method()->method_data()->trap_count(reason)
  }

  if (assert_null) {
    // Cast obj to null on this path.
    replace_in_map(value, zerocon(type));
    return zerocon(type);
  }

  // Cast obj to not-null on this path, if there is no null_control.
  // (If there is a null_control, a non-null value may come back to haunt us.)
  if (type == T_OBJECT) {
    Node* cast = cast_not_null(value, false);
    if (null_control == nullptr || (*null_control) == top())
      replace_in_map(value, cast);
    value = cast;
  }

  return value;
}


//------------------------------cast_not_null----------------------------------
// Cast obj to not-null on this path
Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
  const Type* t = _gvn.type(obj);
  const Type* t_not_null = t->join_speculative(TypePtr::NOTNULL);
  // Object is already not-null?
  if (t == t_not_null)  return obj;

  Node* cast = new CastPPNode(control(), obj, t_not_null);
  cast = _gvn.transform(cast);

  // Scan for instances of 'obj' in the current JVM mapping.
  // These instances are known to be not-null after the test.
  if (do_replace_in_map)
    replace_in_map(obj, cast);

  return cast;  // Return casted value
}
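
// Illustrative sketch (not compiled): after an explicit null test has been
// materialized, callers typically strengthen the type on the not-null arm:
//   set_control(not_null_path);
//   obj = cast_not_null(obj);  // type becomes join(T, NOTNULL); map updated
// The cast is only valid while control-dependent on the test, which is why
// CastPPNode takes control() as its control input above.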

// Sometimes in intrinsics, we implicitly know an object is not null
// (there's no actual null check) so we can cast it to not null. In
// the course of optimizations, the input to the cast can become null.
// In that case that data path will die and we need the control path
// These are layered on top of the factory methods in LoadNode and StoreNode,
// and integrate with the parser's memory state and _gvn engine.
//

// factory methods in "int adr_idx"
Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                          MemNode::MemOrd mo,
                          LoadNode::ControlDependency control_dependency,
                          bool require_atomic_access,
                          bool unaligned,
                          bool mismatched,
                          bool unsafe,
                          uint8_t barrier_data) {
  int adr_idx = C->get_alias_index(_gvn.type(adr)->isa_ptr());
  assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory");
  const TypePtr* adr_type = nullptr; // debug-mode-only argument
  debug_only(adr_type = C->get_adr_type(adr_idx));
  Node* mem = memory(adr_idx);
  Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, require_atomic_access, unaligned, mismatched, unsafe, barrier_data);
  ld = _gvn.transform(ld);
  if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
    // Improve graph before escape analysis and boxing elimination.
    record_for_igvn(ld);
    if (ld->is_DecodeN()) {
      // Also record the actual load (LoadN) in case ld is DecodeN. In some
      // rare corner cases, ld->in(1) can be something other than LoadN (e.g.,
      // a Phi). Recording such cases is still perfectly sound, but may be
      // unnecessary and result in some minor IGVN overhead.
      record_for_igvn(ld->in(1));
    }
  }
  return ld;
}
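
// Illustrative sketch (not compiled): a typical raw load of a jlong counter
// through this factory looks like
//   Node* cnt = make_load(control(), counter_adr, TypeLong::LONG, T_LONG,
//                         MemNode::unordered);
// which is essentially what increment_counter() later in this file does.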

Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                                MemNode::MemOrd mo,
                                bool require_atomic_access,
                                bool unaligned,
                                bool mismatched,
                                bool unsafe,
  if (unsafe) {
    st->as_Store()->set_unsafe_access();
  }
  st->as_Store()->set_barrier_data(barrier_data);
  st = _gvn.transform(st);
  set_memory(st, adr_idx);
  // Back-to-back stores can only remove intermediate store with DU info
  // so push on worklist for optimizer.
  if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
    record_for_igvn(st);

  return st;
}

Node* GraphKit::access_store_at(Node* obj,
                                Node* adr,
                                const TypePtr* adr_type,
                                Node* val,
                                const Type* val_type,
                                BasicType bt,
                                DecoratorSet decorators) {
  // Transformation of a value which could be null pointer (CastPP #null)
  // could be delayed during Parse (for example, in adjust_map_after_if()).
  // Execute transformation here to avoid barrier generation in such case.
  if (_gvn.type(val) == TypePtr::NULL_PTR) {
    val = _gvn.makecon(TypePtr::NULL_PTR);
  }

  if (stopped()) {
    return top(); // Dead path ?
  }

  assert(val != nullptr, "not dead path");

  C2AccessValuePtr addr(adr, adr_type);
  C2AccessValue value(val, val_type);
  C2ParseAccess access(this, decorators | C2_WRITE_ACCESS, bt, obj, addr);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC2::store_at(access, value);
  } else {
    return _barrier_set->store_at(access, value);
  }
}
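
// Illustrative sketch (not compiled): load_array_element() below shows the
// typical decorator usage for an in-heap array read:
//   access_load_at(ary, adr, arytype, elemtype, elembt,
//                  IN_HEAP | IS_ARRAY | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0));
// Raw (off-heap) accesses skip GC-specific barrier logic via the static
// BarrierSetC2::store_at / load_at dispatch seen above.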

Node* GraphKit::access_load_at(Node* obj,   // containing obj
                               Node* adr,   // actual address to load val from
                               const TypePtr* adr_type,
                               const Type* val_type,
                               BasicType bt,
                               DecoratorSet decorators) {
  if (stopped()) {
    return top(); // Dead path ?
  }

  C2AccessValuePtr addr(adr, adr_type);
  C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, obj, addr);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC2::load_at(access, val_type);
  } else {
    return _barrier_set->load_at(access, val_type);
  }
}

Node* GraphKit::access_load(Node* adr,   // actual address to load val from
                            const Type* val_type,
                            BasicType bt,
                            DecoratorSet decorators) {
  if (stopped()) {
    return top(); // Dead path ?
  }

  C2AccessValuePtr addr(adr, adr->bottom_type()->is_ptr());
  C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, nullptr, addr);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC2::load_at(access, val_type);
  } else {
                              Node* new_val,
                              const Type* value_type,
                              BasicType bt,
                              DecoratorSet decorators) {
  C2AccessValuePtr addr(adr, adr_type);
  C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS, bt, obj, addr, alias_idx);
  if (access.is_raw()) {
    return _barrier_set->BarrierSetC2::atomic_add_at(access, new_val, value_type);
  } else {
    return _barrier_set->atomic_add_at(access, new_val, value_type);
  }
}

void GraphKit::access_clone(Node* src, Node* dst, Node* size, bool is_array) {
  return _barrier_set->clone(this, src, dst, size, is_array);
}

//-------------------------array_element_address-------------------------
Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
                                      const TypeInt* sizetype, Node* ctrl) {
  uint shift  = exact_log2(type2aelembytes(elembt));
  uint header = arrayOopDesc::base_offset_in_bytes(elembt);

  // short-circuit a common case (saves lots of confusing waste motion)
  jint idx_con = find_int_con(idx, -1);
  if (idx_con >= 0) {
    intptr_t offset = header + ((intptr_t)idx_con << shift);
    return basic_plus_adr(ary, offset);
  }

  // must be correct type for alignment purposes
  Node* base  = basic_plus_adr(ary, header);
  idx = Compile::conv_I2X_index(&_gvn, idx, sizetype, ctrl);
  Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
  return basic_plus_adr(ary, base, scale);
}
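
// Worked example (header size is an assumption; it depends on platform and
// compressed-oops settings): for a T_INT array, shift = log2(4) = 2, so
// element i lives at
//   adr = ary + base_offset_in_bytes(T_INT) + (i << 2)
// With a constant index i = 3 and a 16-byte array header, the whole address
// folds to a single basic_plus_adr(ary, 16 + 12) = basic_plus_adr(ary, 28).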

//-------------------------load_array_element-------------------------
Node* GraphKit::load_array_element(Node* ary, Node* idx, const TypeAryPtr* arytype, bool set_ctrl) {
  const Type* elemtype = arytype->elem();
  BasicType elembt = elemtype->array_element_basic_type();
  Node* adr = array_element_address(ary, idx, elembt, arytype->size());
  if (elembt == T_NARROWOOP) {
    elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
  }
  Node* ld = access_load_at(ary, adr, arytype, elemtype, elembt,
                            IN_HEAP | IS_ARRAY | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0));
  return ld;
}

//-------------------------set_arguments_for_java_call-------------------------
// Arguments (pre-popped from the stack) are taken from the JVMS.
void GraphKit::set_arguments_for_java_call(CallJavaNode* call) {
  // Add the call arguments:
  uint nargs = call->method()->arg_size();
  for (uint i = 0; i < nargs; i++) {
    Node* arg = argument(i);
    call->init_req(i + TypeFunc::Parms, arg);
  }
}

//---------------------------set_edges_for_java_call---------------------------
// Connect a newly created call into the current JVMS.
// A return value node (if any) is returned from set_edges_for_java_call.
void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {

  // Add the predefined inputs:
  call->init_req( TypeFunc::Control, control() );
  call->init_req( TypeFunc::I_O    , i_o() );
  call->init_req( TypeFunc::Memory , reset_memory() );
  call->init_req( TypeFunc::FramePtr, frameptr() );
  call->init_req( TypeFunc::ReturnAdr, top() );

  add_safepoint_edges(call, must_throw);

  Node* xcall = _gvn.transform(call);

  if (xcall == top()) {
    set_control(top());
    return;
  }
  assert(xcall == call, "call identity is stable");

  // Re-use the current map to produce the result.

  set_control(_gvn.transform(new ProjNode(call, TypeFunc::Control)));
  set_i_o(    _gvn.transform(new ProjNode(call, TypeFunc::I_O, separate_io_proj)));
  set_all_memory_call(xcall, separate_io_proj);

  //return xcall;   // no need, caller already has it
}

Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj, bool deoptimize) {
  if (stopped())  return top();  // maybe the call folded up?

  // Capture the return value, if any.
  Node* ret;
  if (call->method() == nullptr ||
      call->method()->return_type()->basic_type() == T_VOID) {
    ret = top();
  } else {
    ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
  }

  // Note:  Since any out-of-line call can produce an exception,
  // we always insert an I_O projection from the call into the result.

  make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj, deoptimize);

  if (separate_io_proj) {
    // The caller requested separate projections be used by the fall
    // through and exceptional paths, so replace the projections for
    // the fall through path.
    set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
    set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
  }
  return ret;
}

//--------------------set_predefined_input_for_runtime_call--------------------
// Reading and setting the memory state is way conservative here.
// The real problem is that I am not doing real Type analysis on memory,
// so I cannot distinguish card mark stores from other stores.  Across a GC
// point the Store Barrier and the card mark memory has to agree.  I cannot
// have a card mark store and its barrier split across the GC point from
// either above or below.  Here I get that to happen by reading ALL of memory.
// A better answer would be to separate out card marks from other memory.
// For now, return the input memory state, so that it can be reused
// after the call, if this call has restricted memory effects.
Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem) {
  // Set fixed predefined input arguments
  Node* memory = reset_memory();
  Node* m = narrow_mem == nullptr ? memory : narrow_mem;
  call->init_req( TypeFunc::Control, control() );
  call->init_req( TypeFunc::I_O,     top() );     // does no i/o
  call->init_req( TypeFunc::Memory,  m );         // may gc ptrs
    if (use->is_MergeMem()) {
      wl.push(use);
    }
  }
}

// Replace the call with the current state of the kit.
void GraphKit::replace_call(CallNode* call, Node* result, bool do_replaced_nodes, bool do_asserts) {
  JVMState* ejvms = nullptr;
  if (has_exceptions()) {
    ejvms = transfer_exceptions_into_jvms();
  }

  ReplacedNodes replaced_nodes = map()->replaced_nodes();
  ReplacedNodes replaced_nodes_exception;
  Node* ex_ctl = top();

  SafePointNode* final_state = stop();

  // Find all the needed outputs of this call
  CallProjections callprojs;
  call->extract_projections(&callprojs, true, do_asserts);

  Unique_Node_List wl;
  Node* init_mem  = call->in(TypeFunc::Memory);
  Node* final_mem = final_state->in(TypeFunc::Memory);
  Node* final_ctl = final_state->in(TypeFunc::Control);
  Node* final_io  = final_state->in(TypeFunc::I_O);

  // Replace all the old call edges with the edges from the inlining result
  if (callprojs.fallthrough_catchproj != nullptr) {
    C->gvn_replace_by(callprojs.fallthrough_catchproj, final_ctl);
  }
  if (callprojs.fallthrough_memproj != nullptr) {
    if (final_mem->is_MergeMem()) {
      // Parser's exits MergeMem was not transformed but may be optimized
      final_mem = _gvn.transform(final_mem);
    }
    C->gvn_replace_by(callprojs.fallthrough_memproj, final_mem);
    add_mergemem_users_to_worklist(wl, final_mem);
  }
  if (callprojs.fallthrough_ioproj != nullptr) {
    C->gvn_replace_by(callprojs.fallthrough_ioproj, final_io);
  }

  // Replace the result with the new result if it exists and is used
  if (callprojs.resproj != nullptr && result != nullptr) {
    C->gvn_replace_by(callprojs.resproj, result);
  }

  if (ejvms == nullptr) {
    // No exception edges, so simply kill off those paths
    if (callprojs.catchall_catchproj != nullptr) {
      C->gvn_replace_by(callprojs.catchall_catchproj, C->top());
    }
    if (callprojs.catchall_memproj != nullptr) {
      C->gvn_replace_by(callprojs.catchall_memproj, C->top());
    }
    if (callprojs.catchall_ioproj != nullptr) {
      C->gvn_replace_by(callprojs.catchall_ioproj, C->top());
    }
    // Replace the old exception object with top
    if (callprojs.exobj != nullptr) {
      C->gvn_replace_by(callprojs.exobj, C->top());
    }
  } else {
    GraphKit ekit(ejvms);

    // Load my combined exception state into the kit, with all phis transformed:
    SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
    replaced_nodes_exception = ex_map->replaced_nodes();

    Node* ex_oop = ekit.use_exception_state(ex_map);

    if (callprojs.catchall_catchproj != nullptr) {
      C->gvn_replace_by(callprojs.catchall_catchproj, ekit.control());
      ex_ctl = ekit.control();
    }
    if (callprojs.catchall_memproj != nullptr) {
      Node* ex_mem = ekit.reset_memory();
      C->gvn_replace_by(callprojs.catchall_memproj, ex_mem);
      add_mergemem_users_to_worklist(wl, ex_mem);
    }
    if (callprojs.catchall_ioproj != nullptr) {
      C->gvn_replace_by(callprojs.catchall_ioproj, ekit.i_o());
    }

    // Replace the old exception object with the newly created one
    if (callprojs.exobj != nullptr) {
      C->gvn_replace_by(callprojs.exobj, ex_oop);
    }
  }

  // Disconnect the call from the graph
  call->disconnect_inputs(C);
  C->gvn_replace_by(call, C->top());

  // Clean up any MergeMems that feed other MergeMems since the
  // optimizer doesn't like that.
  while (wl.size() > 0) {
    _gvn.transform(wl.pop());
  }

  if (callprojs.fallthrough_catchproj != nullptr && !final_ctl->is_top() && do_replaced_nodes) {
    replaced_nodes.apply(C, final_ctl);
  }
  if (!ex_ctl->is_top() && do_replaced_nodes) {
    replaced_nodes_exception.apply(C, ex_ctl);
  }
}


//------------------------------increment_counter------------------------------
// for statistics: increment a VM counter by 1

void GraphKit::increment_counter(address counter_addr) {
  Node* adr1 = makecon(TypeRawPtr::make(counter_addr));
  increment_counter(adr1);
}

void GraphKit::increment_counter(Node* counter_addr) {
  Node* ctrl = control();
  Node* cnt  = make_load(ctrl, counter_addr, TypeLong::LONG, T_LONG, MemNode::unordered);
  Node* incr = _gvn.transform(new AddLNode(cnt, _gvn.longcon(1)));
 *
 * @param n  node that the type applies to
 * @param exact_kls  type from profiling
 * @param ptr_kind  did profiling see null?
 *
 * @return  node with improved type
 */
Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls, ProfilePtrKind ptr_kind) {
  const Type* current_type = _gvn.type(n);
  assert(UseTypeSpeculation, "type speculation must be on");

  const TypePtr* speculative = current_type->speculative();

  // Should the klass from the profile be recorded in the speculative type?
  if (current_type->would_improve_type(exact_kls, jvms()->depth())) {
    const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls, Type::trust_interfaces);
    const TypeOopPtr* xtype = tklass->as_instance_type();
    assert(xtype->klass_is_exact(), "Should be exact");
    // Any reason to believe n is not null (from this profiling or a previous one)?
    assert(ptr_kind != ProfileAlwaysNull, "impossible here");
    const TypePtr* ptr = (ptr_kind == ProfileMaybeNull && current_type->speculative_maybe_null()) ? TypePtr::BOTTOM : TypePtr::NOTNULL;
    // record the new speculative type's depth
    speculative = xtype->cast_to_ptr_type(ptr->ptr())->is_ptr();
    speculative = speculative->with_inline_depth(jvms()->depth());
  } else if (current_type->would_improve_ptr(ptr_kind)) {
    // Profiling reports that null was never seen, so we can change the
    // speculative type to a non-null ptr.
    if (ptr_kind == ProfileAlwaysNull) {
      speculative = TypePtr::NULL_PTR;
    } else {
      assert(ptr_kind == ProfileNeverNull, "nothing else is an improvement");
      const TypePtr* ptr = TypePtr::NOTNULL;
      if (speculative != nullptr) {
        speculative = speculative->cast_to_ptr_type(ptr->ptr())->is_ptr();
      } else {
        speculative = ptr;
      }
    }
  }

  if (speculative != current_type->speculative()) {
    // Build a type with a speculative type (what we think we know
    // about the type but will need a guard when we use it)
    const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::OffsetBot, TypeOopPtr::InstanceBot, speculative);
    // We're changing the type, we need a new CheckCast node to carry
    // the new type. The new type depends on the control: what
    // profiling tells us is only valid from here as far as we can
    // tell.
    Node* cast = new CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type));
    cast = _gvn.transform(cast);
    replace_in_map(n, cast);
    n = cast;
  }

  return n;
}

/**
 * Record profiling data from receiver profiling at an invoke with the
 * type system so that it can propagate it (speculation)
 *
 * @param n  receiver node
 *
 * @return   node with improved type
 */
Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
  if (!UseTypeSpeculation) {
    return n;
  }
  ciKlass* exact_kls = profile_has_unique_klass();
  ProfilePtrKind ptr_kind = ProfileMaybeNull;
  if ((java_bc() == Bytecodes::_checkcast ||
       java_bc() == Bytecodes::_instanceof ||
       java_bc() == Bytecodes::_aastore) &&
      method()->method_data()->is_mature()) {
    ciProfileData* data = method()->method_data()->bci_to_data(bci());
    if (data != nullptr) {
      if (!data->as_BitData()->null_seen()) {
        ptr_kind = ProfileNeverNull;
      } else {
        assert(data->is_ReceiverTypeData(), "bad profile data type");
        ciReceiverTypeData* call = (ciReceiverTypeData*)data->as_ReceiverTypeData();
        uint i = 0;
        for (; i < call->row_limit(); i++) {
          ciKlass* receiver = call->receiver(i);
          if (receiver != nullptr) {
            break;
          }
        }
        ptr_kind = (i == call->row_limit()) ? ProfileAlwaysNull : ProfileMaybeNull;
      }
    }
  }
  return record_profile_for_speculation(n, exact_kls, ptr_kind);
}

/**
 * Record profiling data from argument profiling at an invoke with the
 * type system so that it can propagate it (speculation)
 *
 * @param dest_method  target method for the call
 * @param bc           what invoke bytecode is this?
 */
void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) {
  if (!UseTypeSpeculation) {
    return;
  }
  const TypeFunc* tf    = TypeFunc::make(dest_method);
  int             nargs = tf->domain()->cnt() - TypeFunc::Parms;
  int skip = Bytecodes::has_receiver(bc) ? 1 : 0;
  for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
    const Type* targ = tf->domain()->field_at(j + TypeFunc::Parms);
    if (is_reference_type(targ->basic_type())) {
      ProfilePtrKind ptr_kind = ProfileMaybeNull;
      ciKlass* better_type = nullptr;
      if (method()->argument_profiled_type(bci(), i, better_type, ptr_kind)) {
        record_profile_for_speculation(argument(j), better_type, ptr_kind);
      }
      i++;
    }
  }
}

/**
 * Record profiling data from parameter profiling at an invoke with
 * the type system so that it can propagate it (speculation)
 */
void GraphKit::record_profiled_parameters_for_speculation() {
  if (!UseTypeSpeculation) {
    return;
  }
  for (int i = 0, j = 0; i < method()->arg_size(); i++) {
                                 // The first null ends the list.
                                 Node* parm0, Node* parm1,
                                 Node* parm2, Node* parm3,
                                 Node* parm4, Node* parm5,
                                 Node* parm6, Node* parm7) {
  assert(call_addr != nullptr, "must not call null targets");

  // Slow-path call
  bool is_leaf = !(flags & RC_NO_LEAF);
  bool has_io  = (!is_leaf && !(flags & RC_NO_IO));
  if (call_name == nullptr) {
    assert(!is_leaf, "must supply name for leaf");
    call_name = OptoRuntime::stub_name(call_addr);
  }
  CallNode* call;
  if (!is_leaf) {
    call = new CallStaticJavaNode(call_type, call_addr, call_name, adr_type);
  } else if (flags & RC_NO_FP) {
    call = new CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
  } else if (flags & RC_VECTOR) {
    uint num_bits = call_type->range()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte;
    call = new CallLeafVectorNode(call_type, call_addr, call_name, adr_type, num_bits);
  } else {
    call = new CallLeafNode(call_type, call_addr, call_name, adr_type);
  }

  // The following is similar to set_edges_for_java_call,
  // except that the memory effects of the call are restricted to AliasIdxRaw.

  // Slow path call has no side-effects, uses few values
  bool wide_in  = !(flags & RC_NARROW_MEM);
  bool wide_out = (C->get_alias_index(adr_type) == Compile::AliasIdxBot);

  Node* prev_mem = nullptr;
  if (wide_in) {
    prev_mem = set_predefined_input_for_runtime_call(call);
  } else {
    assert(!wide_out, "narrow in => narrow out");
    Node* narrow_mem = memory(adr_type);
    prev_mem = set_predefined_input_for_runtime_call(call, narrow_mem);
  }

  if (has_io) {
    set_i_o(_gvn.transform(new ProjNode(call, TypeFunc::I_O)));
  }
  return call;
}

// i2b
Node* GraphKit::sign_extend_byte(Node* in) {
  Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(24)));
  return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(24)));
}

// i2s
Node* GraphKit::sign_extend_short(Node* in) {
  Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(16)));
  return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(16)));
}
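
// Worked example: sign_extend_byte applied to 0x000000FF computes
//   (0xFF << 24) >> 24  =  0xFF000000 >> 24 (arithmetic)  =  0xFFFFFFFF = -1,
// i.e. the low byte is reinterpreted as a signed jbyte, exactly like the
// i2b bytecode. sign_extend_short() does the same with a 16-bit shift pair.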

//------------------------------merge_memory-----------------------------------
// Merge memory from one path into the current memory state.
void GraphKit::merge_memory(Node* new_mem, Node* region, int new_path) {
  for (MergeMemStream mms(merged_memory(), new_mem->as_MergeMem()); mms.next_non_empty2(); ) {
    Node* old_slice = mms.force_memory();
    Node* new_slice = mms.memory2();
    if (old_slice != new_slice) {
      PhiNode* phi;
      if (old_slice->is_Phi() && old_slice->as_Phi()->region() == region) {
        if (mms.is_empty()) {
          // clone base memory Phi's inputs for this memory slice
          assert(old_slice == mms.base_memory(), "sanity");
          phi = PhiNode::make(region, nullptr, Type::MEMORY, mms.adr_type(C));
          _gvn.set_type(phi, Type::MEMORY);
          for (uint i = 1; i < phi->req(); i++) {
            phi->init_req(i, old_slice->in(i));
          }
        } else {
          phi = old_slice->as_Phi(); // Phi was generated already
        }

  // Now do a linear scan of the secondary super-klass array.  Again, no real
  // performance impact (too rare) but it's gotta be done.
  // Since the code is rarely used, there is no penalty for moving it
  // out of line, and it can only improve I-cache density.
  // The decision to inline or out-of-line this final check is platform
  // dependent, and is found in the AD file definition of PartialSubtypeCheck.
  Node* psc = gvn.transform(
    new PartialSubtypeCheckNode(*ctrl, subklass, superklass));

  IfNode* iff4 = gen_subtype_check_compare(*ctrl, psc, gvn.zerocon(T_OBJECT), BoolTest::ne, PROB_FAIR, gvn, T_ADDRESS);
  r_not_subtype->init_req(2, gvn.transform(new IfTrueNode (iff4)));
  r_ok_subtype ->init_req(3, gvn.transform(new IfFalseNode(iff4)));

  // Return false path; set default control to true path.
  *ctrl = gvn.transform(r_ok_subtype);
  return gvn.transform(r_not_subtype);
}

Node* GraphKit::gen_subtype_check(Node* obj_or_subklass, Node* superklass) {
  bool expand_subtype_check = C->post_loop_opts_phase(); // macro node expansion is over
  if (expand_subtype_check) {
    MergeMemNode* mem = merged_memory();
    Node* ctrl = control();
    Node* subklass = obj_or_subklass;
    if (!_gvn.type(obj_or_subklass)->isa_klassptr()) {
      subklass = load_object_klass(obj_or_subklass);
    }

    Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, _gvn, method(), bci());
    set_control(ctrl);
    return n;
  }

  Node* check = _gvn.transform(new SubTypeCheckNode(C, obj_or_subklass, superklass, method(), bci()));
  Node* bol = _gvn.transform(new BoolNode(check, BoolTest::eq));
  IfNode* iff = create_and_xform_if(control(), bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
  set_control(_gvn.transform(new IfTrueNode(iff)));
  return _gvn.transform(new IfFalseNode(iff));
}
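
// Illustrative sketch (not compiled): callers treat the returned control as
// the "not a subtype" path and the updated control() as the success path:
//   Node* not_subtype_ctrl = gen_subtype_check(obj, superklass);
//   // control() now dominates uses that require obj to be a subtype;
//   // not_subtype_ctrl feeds an uncommon trap or a failure region.
// gen_checkcast() below uses exactly this pattern.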

// Profile-driven exact type check:
Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass,
                                    float prob,
                                    Node* *casted_receiver) {
  assert(!klass->is_interface(), "no exact type check on interfaces");

  const TypeKlassPtr* tklass = TypeKlassPtr::make(klass, Type::trust_interfaces);
  Node* recv_klass = load_object_klass(receiver);
  Node* want_klass = makecon(tklass);
  Node* cmp = _gvn.transform(new CmpPNode(recv_klass, want_klass));
  Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
  IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN);
  set_control( _gvn.transform(new IfTrueNode (iff)));
  Node* fail = _gvn.transform(new IfFalseNode(iff));

  if (!stopped()) {
    const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
    const TypeOopPtr* recvx_type = tklass->as_instance_type();
    assert(recvx_type->klass_is_exact(), "");

    if (!receiver_type->higher_equal(recvx_type)) { // ignore redundant casts
      // Subsume downstream occurrences of receiver with a cast to
      // recvx_type, since now we know what the type will be.
      Node* cast = new CheckCastPPNode(control(), receiver, recvx_type);
      (*casted_receiver) = _gvn.transform(cast);
      assert(!(*casted_receiver)->is_top(), "that path should be unreachable");
      // (User must make the replace_in_map call.)
    }
  }

  return fail;
}

//------------------------------subtype_check_receiver-------------------------
Node* GraphKit::subtype_check_receiver(Node* receiver, ciKlass* klass,
                                       Node** casted_receiver) {
  const TypeKlassPtr* tklass = TypeKlassPtr::make(klass, Type::trust_interfaces)->try_improve();
  Node* want_klass = makecon(tklass);

  Node* slow_ctl = gen_subtype_check(receiver, want_klass);

  // Ignore interface type information until interface types are properly tracked.
  if (!stopped() && !klass->is_interface()) {
    const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
    const TypeOopPtr* recv_type = tklass->cast_to_exactness(false)->is_klassptr()->as_instance_type();
    if (!receiver_type->higher_equal(recv_type)) { // ignore redundant casts
      Node* cast = new CheckCastPPNode(control(), receiver, recv_type);
      (*casted_receiver) = _gvn.transform(cast);
    }
  }

  return slow_ctl;
}

//------------------------------seems_never_null-------------------------------
// Use null_seen information if it is available from the profile.
// If we see an unexpected null at a type check we record it and force a
// recompile; the offending check will be recompiled to handle nulls.
// If we see several offending BCIs, then all checks in the
// method will be recompiled.
bool GraphKit::seems_never_null(Node* obj, ciProfileData* data, bool& speculating) {
  speculating = !_gvn.type(obj)->speculative_maybe_null();
  Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculating);
  if (UncommonNullCast               // Cutout for this technique
      && obj != null()               // And not the -Xcomp stupid case?
      && !too_many_traps(reason)
      ) {
    if (speculating) {

//------------------------maybe_cast_profiled_receiver-------------------------
// If the profile has seen exactly one type, narrow to exactly that type.
// Subsequent type checks will always fold up.
Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
                                             const TypeKlassPtr* require_klass,
                                             ciKlass* spec_klass,
                                             bool safe_for_replace) {
  if (!UseTypeProfile || !TypeProfileCasts)  return nullptr;

  Deoptimization::DeoptReason reason = Deoptimization::reason_class_check(spec_klass != nullptr);

  // Make sure we haven't already deoptimized from this tactic.
  if (too_many_traps_or_recompiles(reason))
    return nullptr;

  // (No, this isn't a call, but it's enough like a virtual call
  // to use the same ciMethod accessor to get the profile info...)
  // If we have a speculative type use it instead of profiling (which
  // may not help us)
  ciKlass* exact_kls = spec_klass == nullptr ? profile_has_unique_klass() : spec_klass;
  if (exact_kls != nullptr) { // no cast failures here
    if (require_klass == nullptr ||
        C->static_subtype_check(require_klass, TypeKlassPtr::make(exact_kls, Type::trust_interfaces)) == Compile::SSC_always_true) {
      // If we narrow the type to match what the type profile sees or
      // the speculative type, we can then remove the rest of the
      // cast.
      // This is a win, even if the exact_kls is very specific,
      // because downstream operations, such as method calls,
      // will often benefit from the sharper type.
      Node* exact_obj = not_null_obj; // will get updated in place...
      Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0,
                                           &exact_obj);
      { PreserveJVMState pjvms(this);
        set_control(slow_ctl);
        uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
      }
      if (safe_for_replace) {
        replace_in_map(not_null_obj, exact_obj);
      }
      return exact_obj;
  // If not_null_obj is dead, only null-path is taken
  if (stopped()) {              // Doing instance-of on a null?
    set_control(null_ctl);
    return intcon(0);
  }
  region->init_req(_null_path, null_ctl);
  phi   ->init_req(_null_path, intcon(0)); // Set null path value
  if (null_ctl == top()) {
    // Do this eagerly, so that pattern matches like is_diamond_phi
    // will work even during parsing.
    assert(_null_path == PATH_LIMIT-1, "delete last");
    region->del_req(_null_path);
    phi   ->del_req(_null_path);
  }

  // Do we know the type check always succeeds?
  bool known_statically = false;
  if (_gvn.type(superklass)->singleton()) {
    const TypeKlassPtr* superk = _gvn.type(superklass)->is_klassptr();
    const TypeKlassPtr* subk   = _gvn.type(obj)->is_oopptr()->as_klass_type();
    if (subk->is_loaded()) {
      int static_res = C->static_subtype_check(superk, subk);
      known_statically = (static_res == Compile::SSC_always_true || static_res == Compile::SSC_always_false);
    }
  }

  if (!known_statically) {
    const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
    // We may not have profiling here or it may not help us. If we
    // have a speculative type use it to perform an exact cast.
    ciKlass* spec_obj_type = obj_type->speculative_type();
    if (spec_obj_type != nullptr || (ProfileDynamicTypes && data != nullptr)) {
      Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, nullptr, spec_obj_type, safe_for_replace);
      if (stopped()) {            // Profile disagrees with this path.
        set_control(null_ctl);    // Null is the only remaining possibility.
        return intcon(0);
      }
      if (cast_obj != nullptr) {
        not_null_obj = cast_obj;
      }
    }
  record_for_igvn(region);

  // If we know the type check always succeeds then we don't use the
  // profiling data at this bytecode. Don't lose it, feed it to the
  // type system as a speculative type.
  if (safe_for_replace) {
    Node* casted_obj = record_profiled_receiver_for_speculation(obj);
    replace_in_map(obj, casted_obj);
  }

  return _gvn.transform(phi);
}

//-------------------------------gen_checkcast---------------------------------
// Generate a checkcast idiom.  Used by both the checkcast bytecode and the
// array store bytecode.  Stack must be as-if BEFORE doing the bytecode so the
// uncommon-trap paths work.  Adjust stack after this call.
// If failure_control is supplied and not null, it is filled in with
// the control edge for the cast failure.  Otherwise, an appropriate
// uncommon trap or exception is thrown.
Node* GraphKit::gen_checkcast(Node* obj, Node* superklass,
                              Node* *failure_control) {
  kill_dead_locals();           // Benefit all the uncommon traps
  const TypeKlassPtr* klass_ptr_type = _gvn.type(superklass)->is_klassptr();
  const TypeKlassPtr* improved_klass_ptr_type = klass_ptr_type->try_improve();
  const TypeOopPtr* toop = improved_klass_ptr_type->cast_to_exactness(false)->as_instance_type();

  // Fast cutout:  Check the case that the cast is vacuously true.
  // This detects the common cases where the test will short-circuit
  // away completely.  We do this before we perform the null check,
  // because if the test is going to turn into zero code, we don't
  // want a residual null check left around.  (Causes a slowdown,
  // for example, in some objArray manipulations, such as a[i]=a[j].)
  if (improved_klass_ptr_type->singleton()) {
    const TypeOopPtr* objtp = _gvn.type(obj)->isa_oopptr();
    if (objtp != nullptr) {
      switch (C->static_subtype_check(improved_klass_ptr_type, objtp->as_klass_type())) {
      case Compile::SSC_always_true:
        // If we know the type check always succeeds then we don't use
        // the profiling data at this bytecode. Don't lose it, feed it
        // to the type system as a speculative type.
        return record_profiled_receiver_for_speculation(obj);
      case Compile::SSC_always_false:
        // It needs a null check because a null will *pass* the cast check.
        // A non-null value will always produce an exception.
        if (!objtp->maybe_null()) {
          bool is_aastore = (java_bc() == Bytecodes::_aastore);
          Deoptimization::DeoptReason reason = is_aastore ?
            Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
          builtin_throw(reason);
          return top();
        } else if (!too_many_traps_or_recompiles(Deoptimization::Reason_null_assert)) {
          return null_assert(obj);
        }
        break; // Fall through to full check
      default:
        break;
      }
    }
  }

  ciProfileData* data = nullptr;
  bool safe_for_replace = false;
  if (failure_control == nullptr) {        // use MDO in regular case only
    assert(java_bc() == Bytecodes::_aastore ||
           java_bc() == Bytecodes::_checkcast,
           "interpreter profiles type checks only for these BCs");
    data = method()->method_data()->bci_to_data(bci());
    safe_for_replace = true;
  }

  // Make the merge point
  enum { _obj_path = 1, _null_path, PATH_LIMIT };
  RegionNode* region = new RegionNode(PATH_LIMIT);
  Node*       phi    = new PhiNode(region, toop);
  C->set_has_split_ifs(true); // Has chance for split-if optimization

  // Use null-cast information if it is available
  bool speculative_not_null = false;
  bool never_see_null = ((failure_control == nullptr)  // regular case only
                         && seems_never_null(obj, data, speculative_not_null));

  // Null check; get casted pointer; set region slot 3
  Node* null_ctl = top();
  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);

  // If not_null_obj is dead, only null-path is taken
  if (stopped()) {              // Doing instance-of on a null?
    set_control(null_ctl);
    return null();
  }
  region->init_req(_null_path, null_ctl);
  phi   ->init_req(_null_path, null());  // Set null path value
  if (null_ctl == top()) {
    // Do this eagerly, so that pattern matches like is_diamond_phi
    // will work even during parsing.
    assert(_null_path == PATH_LIMIT-1, "delete last");
    region->del_req(_null_path);
    phi   ->del_req(_null_path);
  }

  Node* cast_obj = nullptr;
  if (improved_klass_ptr_type->klass_is_exact()) {
    // The following optimization tries to statically cast the speculative type of the object
    // (for example obtained during profiling) to the type of the superklass and then do a
    // dynamic check that the type of the object is what we expect. To work correctly
    // for checkcast and aastore the type of superklass should be exact.
    const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
    // We may not have profiling here or it may not help us. If we have
    // a speculative type use it to perform an exact cast.
    ciKlass* spec_obj_type = obj_type->speculative_type();
    if (spec_obj_type != nullptr || data != nullptr) {
      cast_obj = maybe_cast_profiled_receiver(not_null_obj, improved_klass_ptr_type, spec_obj_type, safe_for_replace);
      if (cast_obj != nullptr) {
        if (failure_control != nullptr) // failure is now impossible
          (*failure_control) = top();
        // adjust the type of the phi to the exact klass:
        phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
      }
    }
  }

  if (cast_obj == nullptr) {
    // Generate the subtype check
    Node* improved_superklass = superklass;
    if (improved_klass_ptr_type != klass_ptr_type && improved_klass_ptr_type->singleton()) {
      improved_superklass = makecon(improved_klass_ptr_type);
    }
    Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, improved_superklass);

    // Plug in success path into the merge
    cast_obj = _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop));
    // Failure path ends in uncommon trap (or may be dead - failure impossible)
    if (failure_control == nullptr) {
      if (not_subtype_ctrl != top()) { // If failure is possible
        PreserveJVMState pjvms(this);
        set_control(not_subtype_ctrl);
        bool is_aastore = (java_bc() == Bytecodes::_aastore);
        Deoptimization::DeoptReason reason = is_aastore ?
          Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
        builtin_throw(reason);
      }
    } else {
      (*failure_control) = not_subtype_ctrl;
    }
  }

  region->init_req(_obj_path, control());
  phi   ->init_req(_obj_path, cast_obj);

  // A merge of null or Casted-NotNull obj
  Node* res = _gvn.transform(phi);

  // Note I do NOT always 'replace_in_map(obj,result)' here.
  //  if( tk->klass()->can_be_primary_super()  )
  //    This means that if I successfully store an Object into an array-of-String
  //    I 'forget' that the Object is really now known to be a String.  I have to
  //    do this because we don't have true union types for interfaces - if I store
  //    a Baz into an array-of-Interface and then tell the optimizer it's an
  //    Interface, I forget that it's also a Baz and cannot do Baz-like field
  //    references to it.  FIX THIS WHEN UNION TYPES APPEAR!
  //   replace_in_map( obj, res );

  // Return final merged results
  set_control( _gvn.transform(region) );
  record_for_igvn(region);

  return record_profiled_receiver_for_speculation(res);
}

//------------------------------next_monitor-----------------------------------
// What number should be given to the next monitor?
int GraphKit::next_monitor() {
  int current = jvms()->monitor_depth() * C->sync_stack_slots();
  int next = current + C->sync_stack_slots();
  // Keep the toplevel high water mark current:
  if (C->fixed_slots() < next)  C->set_fixed_slots(next);
  return current;
}
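
// Worked example (assuming sync_stack_slots() == 2, e.g. one word-sized
// BasicLock spanning two 4-byte stack slots on a 64-bit platform; this is an
// assumption for illustration): with two monitors already held,
// monitor_depth() is 2, so next_monitor() returns current = 4 and raises the
// frame's fixed_slots high-water mark to 6 if it was lower. The returned
// number is the stack-slot offset used by the BoxLockNode in shared_lock().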
3381
3382 //------------------------------insert_mem_bar---------------------------------
3383 // Memory barrier to avoid floating things around
3384 // The membar serves as a pinch point between both control and all memory slices.
3385 Node* GraphKit::insert_mem_bar(int opcode, Node* precedent) {
3386 MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent);
3387 mb->init_req(TypeFunc::Control, control());
3388 mb->init_req(TypeFunc::Memory, reset_memory());
3389 Node* membar = _gvn.transform(mb);
3417 }
3418 Node* membar = _gvn.transform(mb);
3419 set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control)));
3420 if (alias_idx == Compile::AliasIdxBot) {
3421 merged_memory()->set_base_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)));
3422 } else {
3423 set_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)),alias_idx);
3424 }
3425 return membar;
3426 }
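// Usage sketch: insert_mem_bar pinches all memory slices at once, e.g.
//   insert_mem_bar(Op_MemBarReleaseLock);   // see shared_unlock below
// while insert_mem_bar_volatile pinches a single alias slice (or the base
// memory when alias_idx == Compile::AliasIdxBot), e.g. the raw slice carrying
// the Initialize barrier in set_output_for_allocation below.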
3427
3428 //------------------------------shared_lock------------------------------------
3429 // Emit locking code.
3430 FastLockNode* GraphKit::shared_lock(Node* obj) {
3431 // bci is either a monitorenter bc or InvocationEntryBci
3432 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3433 assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3434
3435 if( !GenerateSynchronizationCode )
3436 return nullptr; // Not locking things?
3437 if (stopped()) // Dead monitor?
3438 return nullptr;
3439
3440 assert(dead_locals_are_killed(), "should kill locals before sync. point");
3441
3442 // Box the stack location
3443 Node* box = new BoxLockNode(next_monitor());
3444 // Check for bailout after new BoxLockNode
3445 if (failing()) { return nullptr; }
3446 box = _gvn.transform(box);
3447 Node* mem = reset_memory();
3448
3449 FastLockNode * flock = _gvn.transform(new FastLockNode(nullptr, obj, box) )->as_FastLock();
3450
3451 // Add monitor to debug info for the slow path. If we block inside the
3452 // slow path and de-opt, we need the monitor hanging around
3453 map()->push_monitor( flock );
3454
3455 const TypeFunc *tf = LockNode::lock_type();
3456 LockNode *lock = new LockNode(C, tf);
3485 }
3486 #endif
3487
3488 return flock;
3489 }
3490
3491
3492 //------------------------------shared_unlock----------------------------------
3493 // Emit unlocking code.
3494 void GraphKit::shared_unlock(Node* box, Node* obj) {
3495 // bci is either a monitorenter bc or InvocationEntryBci
3496 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3497 assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3498
3499 if( !GenerateSynchronizationCode )
3500 return;
3501 if (stopped()) { // Dead monitor?
3502 map()->pop_monitor(); // Kill monitor from debug info
3503 return;
3504 }
3505
3506 // Memory barrier to avoid floating things down past the locked region
3507 insert_mem_bar(Op_MemBarReleaseLock);
3508
3509 const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
3510 UnlockNode *unlock = new UnlockNode(C, tf);
3511 #ifdef ASSERT
3512 unlock->set_dbg_jvms(sync_jvms());
3513 #endif
3514 uint raw_idx = Compile::AliasIdxRaw;
3515 unlock->init_req( TypeFunc::Control, control() );
3516 unlock->init_req( TypeFunc::Memory , memory(raw_idx) );
3517 unlock->init_req( TypeFunc::I_O , top() ) ; // does no i/o
3518 unlock->init_req( TypeFunc::FramePtr, frameptr() );
3519 unlock->init_req( TypeFunc::ReturnAdr, top() );
3520
3521 unlock->init_req(TypeFunc::Parms + 0, obj);
3522 unlock->init_req(TypeFunc::Parms + 1, box);
3523 unlock = _gvn.transform(unlock)->as_Unlock();
3524
3525 Node* mem = reset_memory();
3526
3527 // unlock has no side-effects, sets few values
3528 set_predefined_output_for_runtime_call(unlock, mem, TypeRawPtr::BOTTOM);
3529
3530 // Kill monitor from debug info
3531 map()->pop_monitor( );
3532 }
3533
3534 //-------------------------------get_layout_helper-----------------------------
3535 // If the given klass is a constant or known to be an array,
3536 // fetch the constant layout helper value into constant_value
3537 // and return null. Otherwise, load the non-constant
3538 // layout helper value, and return the node which represents it.
3539 // This two-faced routine is useful because allocation sites
3540 // almost always feature constant types.
3541 Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
3542 const TypeKlassPtr* klass_t = _gvn.type(klass_node)->isa_klassptr();
3543 if (!StressReflectiveCode && klass_t != nullptr) {
3544 bool xklass = klass_t->klass_is_exact();
3545 if (xklass || (klass_t->isa_aryklassptr() && klass_t->is_aryklassptr()->elem() != Type::BOTTOM)) {
3546 jint lhelper;
3547 if (klass_t->isa_aryklassptr()) {
3548 BasicType elem = klass_t->as_instance_type()->isa_aryptr()->elem()->array_element_basic_type();
3549 if (is_reference_type(elem, true)) {
3550 elem = T_OBJECT;
3551 }
3552 lhelper = Klass::array_layout_helper(elem);
3553 } else {
3554 lhelper = klass_t->is_instklassptr()->exact_klass()->layout_helper();
3555 }
3556 if (lhelper != Klass::_lh_neutral_value) {
3557 constant_value = lhelper;
3558 return (Node*) nullptr;
3559 }
3560 }
3561 }
3562 constant_value = Klass::_lh_neutral_value; // put in a known value
3563 Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
3564 return make_load(nullptr, lhp, TypeInt::INT, T_INT, MemNode::unordered);
3565 }
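// Typical caller pattern (cf. new_instance and new_array below):
//   jint  layout_con = Klass::_lh_neutral_value;
//   Node* layout_val = get_layout_helper(klass_node, layout_con);
//   bool  layout_is_con = (layout_val == nullptr);  // constant is in layout_con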
3566
3567 // We just put in an allocate/initialize with a big raw-memory effect.
3568 // Hook selected additional alias categories on the initialization.
3569 static void hook_memory_on_init(GraphKit& kit, int alias_idx,
3570 MergeMemNode* init_in_merge,
3571 Node* init_out_raw) {
3572 DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory());
3573 assert(init_in_merge->memory_at(alias_idx) == init_in_raw, "");
3574
3575 Node* prevmem = kit.memory(alias_idx);
3576 init_in_merge->set_memory_at(alias_idx, prevmem);
3577 kit.set_memory(init_out_raw, alias_idx);
3578 }
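// E.g. for a 'new int[n]' site, the element slice is rerouted here: the
// InitializeNode's MergeMem input picks up the memory state that preceded the
// allocation, and subsequent parser accesses of that slice are sourced from
// the initialization's raw output instead.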
3579
3580 //---------------------------set_output_for_allocation-------------------------
3581 Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
3582 const TypeOopPtr* oop_type,
3583 bool deoptimize_on_exception) {
3584 int rawidx = Compile::AliasIdxRaw;
3585 alloc->set_req( TypeFunc::FramePtr, frameptr() );
3586 add_safepoint_edges(alloc);
3587 Node* allocx = _gvn.transform(alloc);
3588 set_control( _gvn.transform(new ProjNode(allocx, TypeFunc::Control) ) );
3589 // create memory projection for i_o
3590 set_memory ( _gvn.transform( new ProjNode(allocx, TypeFunc::Memory, true) ), rawidx );
3591 make_slow_call_ex(allocx, env()->Throwable_klass(), true, deoptimize_on_exception);
3592
3593 // create a memory projection as for the normal control path
3594 Node* malloc = _gvn.transform(new ProjNode(allocx, TypeFunc::Memory));
3595 set_memory(malloc, rawidx);
3596
3597 // A normal slow-call doesn't change i_o, but an allocation does;
3598 // we create a separate i_o projection for the normal control path.
3599 set_i_o(_gvn.transform( new ProjNode(allocx, TypeFunc::I_O, false) ) );
3600 Node* rawoop = _gvn.transform( new ProjNode(allocx, TypeFunc::Parms) );
3601
3602 // put in an initialization barrier
3603 InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx,
3604 rawoop)->as_Initialize();
3605 assert(alloc->initialization() == init, "2-way macro link must work");
3606 assert(init ->allocation() == alloc, "2-way macro link must work");
3607 {
3608 // Extract memory strands which may participate in the new object's
3609 // initialization, and source them from the new InitializeNode.
3610 // This will allow us to observe initializations when they occur,
3611 // and link them properly (as a group) to the InitializeNode.
3612 assert(init->in(InitializeNode::Memory) == malloc, "");
3613 MergeMemNode* minit_in = MergeMemNode::make(malloc);
3614 init->set_req(InitializeNode::Memory, minit_in);
3615 record_for_igvn(minit_in); // fold it up later, if possible
3616 Node* minit_out = memory(rawidx);
3617 assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
3618 // Add an edge in the MergeMem for the header fields so an access
3619 // to one of those has correct memory state
3620 set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes())));
3621 set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes())));
3622 if (oop_type->isa_aryptr()) {
3623 const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
3624 int elemidx = C->get_alias_index(telemref);
3625 hook_memory_on_init(*this, elemidx, minit_in, minit_out);
3626 } else if (oop_type->isa_instptr()) {
3627 ciInstanceKlass* ik = oop_type->is_instptr()->instance_klass();
3628 for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
3629 ciField* field = ik->nonstatic_field_at(i);
3630 if (field->offset_in_bytes() >= TrackedInitializationLimit * HeapWordSize)
3631 continue; // do not bother to track really large numbers of fields
3632 // Find (or create) the alias category for this field:
3633 int fieldidx = C->alias_type(field)->index();
3634 hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
3635 }
3636 }
3637 }
3638
3639 // Cast raw oop to the real thing...
3640 Node* javaoop = new CheckCastPPNode(control(), rawoop, oop_type);
3641 javaoop = _gvn.transform(javaoop);
3642 C->set_recent_alloc(control(), javaoop);
3643 assert(just_allocated_object(control()) == javaoop, "just allocated");
3644
3645 #ifdef ASSERT
3646 { // Verify that the AllocateNode::Ideal_allocation recognizers work:
3657 assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please");
3658 }
3659 }
3660 #endif //ASSERT
3661
3662 return javaoop;
3663 }
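// Resulting projection shape (sketch):
//   AllocateNode
//    +- Proj Control --> fast-path control
//    +- Proj Memory  --> raw slice, immediately pinched by the Initialize barrier
//    +- Proj I_O     --> separate i_o for the normal path
//    +- Proj Parms   --> raw oop, cast to oop_type via CheckCastPP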
3664
3665 //---------------------------new_instance--------------------------------------
3666 // This routine takes a klass_node which may be constant (for a static type)
3667 // or may be non-constant (for reflective code). It will work equally well
3668 // for either, and the graph will fold nicely if the optimizer later reduces
3669 // the type to a constant.
3670 // The optional arguments are for specialized use by intrinsics:
3671 // - If 'extra_slow_test' is not null, it is an extra condition for the slow-path.
3672 // - If 'return_size_val', report the total object size to the caller.
3673 // - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
3674 Node* GraphKit::new_instance(Node* klass_node,
3675 Node* extra_slow_test,
3676 Node* *return_size_val,
3677 bool deoptimize_on_exception) {
3678 // Compute size in doublewords
3679 // The size is always an integral number of doublewords, represented
3680 // as a positive bytewise size stored in the klass's layout_helper.
3681 // The layout_helper also encodes (in a low bit) the need for a slow path.
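// (Roughly, per the description of Klass::layout_helper in klass.hpp: for
// instances the helper is the positive instance size in bytes, with the low
// bit set when the fast path must not be used; for arrays it is a negative
// number packing the tag, header size, element type, and log2 element size.)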
3682 jint layout_con = Klass::_lh_neutral_value;
3683 Node* layout_val = get_layout_helper(klass_node, layout_con);
3684 int layout_is_con = (layout_val == nullptr);
3685
3686 if (extra_slow_test == nullptr) extra_slow_test = intcon(0);
3687 // Generate the initial go-slow test. It's either ALWAYS (return a
3688 // Node for 1) or NEVER (return a null) or perhaps (in the reflective
3689 // case) a computed value derived from the layout_helper.
3690 Node* initial_slow_test = nullptr;
3691 if (layout_is_con) {
3692 assert(!StressReflectiveCode, "stress mode does not use these paths");
3693 bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
3694 initial_slow_test = must_go_slow ? intcon(1) : extra_slow_test;
3695 } else { // reflective case
3696 // This reflective path is used by Unsafe.allocateInstance.
3697 // (It may be stress-tested by specifying StressReflectiveCode.)
3698 // Basically, we want to get into the VM if there's an illegal argument.
3699 Node* bit = intcon(Klass::_lh_instance_slow_path_bit);
3700 initial_slow_test = _gvn.transform( new AndINode(layout_val, bit) );
3701 if (extra_slow_test != intcon(0)) {
3702 initial_slow_test = _gvn.transform( new OrINode(initial_slow_test, extra_slow_test) );
3703 }
3704 // (Macro-expander will further convert this to a Bool, if necessary.)
3715
3716 // Clear the low bits to extract layout_helper_size_in_bytes:
3717 assert((int)Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
3718 Node* mask = MakeConX(~ (intptr_t)right_n_bits(LogBytesPerLong));
3719 size = _gvn.transform( new AndXNode(size, mask) );
3720 }
3721 if (return_size_val != nullptr) {
3722 (*return_size_val) = size;
3723 }
3724
3725 // This is a precise notnull oop of the klass.
3726 // (Actually, it need not be precise if this is a reflective allocation.)
3727 // It's what we cast the result to.
3728 const TypeKlassPtr* tklass = _gvn.type(klass_node)->isa_klassptr();
3729 if (!tklass) tklass = TypeInstKlassPtr::OBJECT;
3730 const TypeOopPtr* oop_type = tklass->as_instance_type();
3731
3732 // Now generate allocation code
3733
3734 // The entire memory state is needed for slow path of the allocation
3735 // since GC and deoptimization can happen.
3736 Node *mem = reset_memory();
3737 set_all_memory(mem); // Create new memory state
3738
3739 AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
3740 control(), mem, i_o(),
3741 size, klass_node,
3742 initial_slow_test);
3743
3744 return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
3745 }
3746
3747 //-------------------------------new_array-------------------------------------
3748 // helper for both newarray and anewarray
3749 // The 'length' parameter is (obviously) the length of the array.
3750 // The optional arguments are for specialized use by intrinsics:
3751 // - If 'return_size_val', report the non-padded array size (sum of header size
3752 // and array body) to the caller.
3753 // - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
3754 Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
3755 Node* length, // number of array elements
3756 int nargs, // number of arguments to push back for uncommon trap
3757 Node* *return_size_val,
3758 bool deoptimize_on_exception) {
3759 jint layout_con = Klass::_lh_neutral_value;
3760 Node* layout_val = get_layout_helper(klass_node, layout_con);
3761 int layout_is_con = (layout_val == nullptr);
3762
3763 if (!layout_is_con && !StressReflectiveCode &&
3764 !too_many_traps(Deoptimization::Reason_class_check)) {
3765 // This is a reflective array creation site.
3766 // Optimistically assume that it is a subtype of Object[],
3767 // so that we can fold up all the address arithmetic.
3768 layout_con = Klass::array_layout_helper(T_OBJECT);
3769 Node* cmp_lh = _gvn.transform( new CmpINode(layout_val, intcon(layout_con)) );
3770 Node* bol_lh = _gvn.transform( new BoolNode(cmp_lh, BoolTest::eq) );
3771 { BuildCutout unless(this, bol_lh, PROB_MAX);
3772 inc_sp(nargs);
3773 uncommon_trap(Deoptimization::Reason_class_check,
3774 Deoptimization::Action_maybe_recompile);
3775 }
3776 layout_val = nullptr;
3777 layout_is_con = true;
3778 }
3779
3780 // Generate the initial go-slow test. Make sure we do not overflow
3781 // if length is huge (near 2Gig) or negative! We do not need
3782 // exact double-words here, just a close approximation of needed
3783 // double-words. We can't add any offset or rounding bits, lest we
3784 // take a size -1 of bytes and make it positive. Use an unsigned
3785 // compare, so negative sizes look hugely positive.
3786 int fast_size_limit = FastAllocateSizeLimit;
3787 if (layout_is_con) {
3788 assert(!StressReflectiveCode, "stress mode does not use these paths");
3789 // Increase the size limit if we have exact knowledge of array type.
3790 int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
3791 fast_size_limit <<= (LogBytesPerLong - log2_esize);
3792 }
3793
3794 Node* initial_slow_cmp = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) );
3795 Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) );
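// Example of the unsigned trick: a negative length such as -1 compares as
// 0xFFFFFFFF under CmpU, so it tests "greater than" any fast_size_limit and
// takes the slow path, where the runtime re-checks the length and throws
// NegativeArraySizeException as needed.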
3796
3797 // --- Size Computation ---
3798 // array_size = round_to_heap(array_header + (length << elem_shift));
3799 // where round_to_heap(x) == align_to(x, MinObjAlignmentInBytes)
3800 // and align_to(x, y) == ((x + y-1) & ~(y-1))
3801 // The rounding mask is strength-reduced, if possible.
3802 int round_mask = MinObjAlignmentInBytes - 1;
3803 Node* header_size = nullptr;
3804 // (T_BYTE has the weakest alignment and size restrictions...)
3805 if (layout_is_con) {
3806 int hsize = Klass::layout_helper_header_size(layout_con);
3807 int eshift = Klass::layout_helper_log2_element_size(layout_con);
3808 if ((round_mask & ~right_n_bits(eshift)) == 0)
3809 round_mask = 0; // strength-reduce it if it goes away completely
3810 assert((hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
3811 int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
3812 assert(header_size_min <= hsize, "generic minimum is smallest");
3813 header_size = intcon(hsize);
3814 } else {
3815 Node* hss = intcon(Klass::_lh_header_size_shift);
3816 Node* hsm = intcon(Klass::_lh_header_size_mask);
3817 header_size = _gvn.transform(new URShiftINode(layout_val, hss));
3818 header_size = _gvn.transform(new AndINode(header_size, hsm));
3819 }
3820
3821 Node* elem_shift = nullptr;
3822 if (layout_is_con) {
3823 int eshift = Klass::layout_helper_log2_element_size(layout_con);
3824 if (eshift != 0)
3825 elem_shift = intcon(eshift);
3826 } else {
3827 // There is no need to mask or shift this value.
3828 // The semantics of LShiftINode include an implicit mask to 0x1F.
3829 assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
3830 elem_shift = layout_val;
3877 }
3878 Node* non_rounded_size = _gvn.transform(new AddXNode(headerx, abody));
3879
3880 if (return_size_val != nullptr) {
3881 // This is the size
3882 (*return_size_val) = non_rounded_size;
3883 }
3884
3885 Node* size = non_rounded_size;
3886 if (round_mask != 0) {
3887 Node* mask1 = MakeConX(round_mask);
3888 size = _gvn.transform(new AddXNode(size, mask1));
3889 Node* mask2 = MakeConX(~round_mask);
3890 size = _gvn.transform(new AndXNode(size, mask2));
3891 }
3892 // else if round_mask == 0, the size computation is self-rounding
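// Worked example, assuming MinObjAlignmentInBytes == 8 (round_mask == 7):
// a byte[] of length 13 with a 16-byte header needs 16 + 13 = 29 bytes,
// rounded to (29 + 7) & ~7 = 32. For a long[] (eshift == 3) every
// header+body size is already 8-aligned, so round_mask was strength-reduced
// to 0 above and the AddX/AndX pair is never emitted.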
3893
3894 // Now generate allocation code
3895
3896 // The entire memory state is needed for slow path of the allocation
3897 // since GC and deoptimization can happen.
3898 Node *mem = reset_memory();
3899 set_all_memory(mem); // Create new memory state
3900
3901 if (initial_slow_test->is_Bool()) {
3902 // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
3903 initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
3904 }
3905
3906 const TypeOopPtr* ary_type = _gvn.type(klass_node)->is_klassptr()->as_instance_type();
3907 Node* valid_length_test = _gvn.intcon(1);
3908 if (ary_type->isa_aryptr()) {
3909 BasicType bt = ary_type->isa_aryptr()->elem()->array_element_basic_type();
3910 jint max = TypeAryPtr::max_array_length(bt);
3911 Node* valid_length_cmp = _gvn.transform(new CmpUNode(length, intcon(max)));
3912 valid_length_test = _gvn.transform(new BoolNode(valid_length_cmp, BoolTest::le));
3913 }
3914
3915 // Create the AllocateArrayNode and its result projections
3916 AllocateArrayNode* alloc
3917 = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
3918 control(), mem, i_o(),
3919 size, klass_node,
3920 initial_slow_test,
3921 length, valid_length_test);
3922
3923 // Cast to correct type. Note that the klass_node may be constant or not,
3924 // and in the latter case the actual array type will be inexact also.
3925 // (This happens via a non-constant argument to inline_native_newArray.)
3926 // In any case, the value of klass_node provides the desired array type.
3927 const TypeInt* length_type = _gvn.find_int_type(length);
3928 if (ary_type->isa_aryptr() && length_type != nullptr) {
3929 // Try to get a better type than POS for the size
3930 ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
3931 }
3932
3933 Node* javaoop = set_output_for_allocation(alloc, ary_type, deoptimize_on_exception);
3934
3935 array_ideal_length(alloc, ary_type, true);
3936 return javaoop;
3937 }
3938
3939 // The following "Ideal_foo" functions are placed here because they recognize
3940 // the graph shapes created by the functions immediately above.
3941
3942 //---------------------------Ideal_allocation----------------------------------
4050 set_all_memory(ideal.merged_memory());
4051 set_i_o(ideal.i_o());
4052 set_control(ideal.ctrl());
4053 }
4054
4055 void GraphKit::final_sync(IdealKit& ideal) {
4056 // Final sync IdealKit and graphKit.
4057 sync_kit(ideal);
4058 }
4059
4060 Node* GraphKit::load_String_length(Node* str, bool set_ctrl) {
4061 Node* len = load_array_length(load_String_value(str, set_ctrl));
4062 Node* coder = load_String_coder(str, set_ctrl);
4063 // Divide length by 2 if coder is UTF16 (coder is 0 for LATIN1, 1 for UTF16, so a right shift by coder does the division)
4064 return _gvn.transform(new RShiftINode(len, coder));
4065 }
4066
4067 Node* GraphKit::load_String_value(Node* str, bool set_ctrl) {
4068 int value_offset = java_lang_String::value_offset();
4069 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4070 false, nullptr, 0);
4071 const TypePtr* value_field_type = string_type->add_offset(value_offset);
4072 const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
4073 TypeAry::make(TypeInt::BYTE, TypeInt::POS),
4074 ciTypeArrayKlass::make(T_BYTE), true, 0);
4075 Node* p = basic_plus_adr(str, str, value_offset);
4076 Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT,
4077 IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
4078 return load;
4079 }
4080
4081 Node* GraphKit::load_String_coder(Node* str, bool set_ctrl) {
4082 if (!CompactStrings) {
4083 return intcon(java_lang_String::CODER_UTF16);
4084 }
4085 int coder_offset = java_lang_String::coder_offset();
4086 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4087 false, nullptr, 0);
4088 const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
4089
4090 Node* p = basic_plus_adr(str, str, coder_offset);
4091 Node* load = access_load_at(str, p, coder_field_type, TypeInt::BYTE, T_BYTE,
4092 IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
4093 return load;
4094 }
4095
4096 void GraphKit::store_String_value(Node* str, Node* value) {
4097 int value_offset = java_lang_String::value_offset();
4098 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4099 false, nullptr, 0);
4100 const TypePtr* value_field_type = string_type->add_offset(value_offset);
4101
4102 access_store_at(str, basic_plus_adr(str, value_offset), value_field_type,
4103 value, TypeAryPtr::BYTES, T_OBJECT, IN_HEAP | MO_UNORDERED);
4104 }
4105
4106 void GraphKit::store_String_coder(Node* str, Node* value) {
4107 int coder_offset = java_lang_String::coder_offset();
4108 const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
4109 false, nullptr, 0);
4110 const TypePtr* coder_field_type = string_type->add_offset(coder_offset);
4111
4112 access_store_at(str, basic_plus_adr(str, coder_offset), coder_field_type,
4113 value, TypeInt::BYTE, T_BYTE, IN_HEAP | MO_UNORDERED);
4114 }
4115
4116 // Capture src and dst memory state with a MergeMemNode
4117 Node* GraphKit::capture_memory(const TypePtr* src_type, const TypePtr* dst_type) {
4118 if (src_type == dst_type) {
4119 // Types are equal, we don't need a MergeMemNode
4120 return memory(src_type);
4121 }
4122 MergeMemNode* merge = MergeMemNode::make(map()->memory());
4123 record_for_igvn(merge); // fold it up later, if possible
4124 int src_idx = C->get_alias_index(src_type);
4125 int dst_idx = C->get_alias_index(dst_type);
4126 merge->set_memory_at(src_idx, memory(src_idx));
4127 merge->set_memory_at(dst_idx, memory(dst_idx));
4128 return merge;
4129 }
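// Hedged usage sketch (types illustrative): a caller moving data between two
// distinct slices can do
//   Node* mem = capture_memory(TypeAryPtr::BYTES, TypeAryPtr::CHARS);
// and feed 'mem' to a node that must observe both slices; when the types are
// equal, the early return above avoids building the MergeMem entirely.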
4202 i_char->init_req(2, AddI(i_char, intcon(2)));
4203
4204 set_control(IfFalse(iff));
4205 set_memory(st, TypeAryPtr::BYTES);
4206 }
4207
4208 Node* GraphKit::make_constant_from_field(ciField* field, Node* obj) {
4209 if (!field->is_constant()) {
4210 return nullptr; // Field not marked as constant.
4211 }
4212 ciInstance* holder = nullptr;
4213 if (!field->is_static()) {
4214 ciObject* const_oop = obj->bottom_type()->is_oopptr()->const_oop();
4215 if (const_oop != nullptr && const_oop->is_instance()) {
4216 holder = const_oop->as_instance();
4217 }
4218 }
4219 const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(),
4220 /*is_unsigned_load=*/false);
4221 if (con_type != nullptr) {
4222 return makecon(con_type);
4223 }
4224 return nullptr;
4225 }
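// Sketch of the two cases above: a static final field folds with holder left
// null (the static is read from the holder klass), while an instance final
// field folds only when obj's bottom type carries a const_oop() instance,
// which then serves as the holder for the field read.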
4226
4227 Node* GraphKit::maybe_narrow_object_type(Node* obj, ciKlass* type) {
4228 const TypeOopPtr* obj_type = obj->bottom_type()->isa_oopptr();
4229 const TypeOopPtr* sig_type = TypeOopPtr::make_from_klass(type);
4230 if (obj_type != nullptr && sig_type->is_loaded() && !obj_type->higher_equal(sig_type)) {
4231 const Type* narrow_obj_type = obj_type->filter_speculative(sig_type); // keep speculative part
4232 Node* casted_obj = gvn().transform(new CheckCastPPNode(control(), obj, narrow_obj_type));
4233 return casted_obj;
4234 }
4235 return obj;
4236 }
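// Example (names illustrative): if obj is typed only as Object but the
// signature says MyValue, the filtered type narrows to MyValue (keeping any
// speculative part) and a CheckCastPP pins that knowledge on this control
// path; if obj already satisfies the signature, it is returned unchanged.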
24
25 #include "ci/ciFlatArrayKlass.hpp"
26 #include "ci/ciInlineKlass.hpp"
27 #include "ci/ciUtilities.hpp"
28 #include "classfile/javaClasses.hpp"
29 #include "ci/ciObjArray.hpp"
30 #include "asm/register.hpp"
31 #include "compiler/compileLog.hpp"
32 #include "gc/shared/barrierSet.hpp"
33 #include "gc/shared/c2/barrierSetC2.hpp"
34 #include "interpreter/interpreter.hpp"
35 #include "memory/resourceArea.hpp"
36 #include "oops/flatArrayKlass.hpp"
37 #include "opto/addnode.hpp"
38 #include "opto/castnode.hpp"
39 #include "opto/convertnode.hpp"
40 #include "opto/graphKit.hpp"
41 #include "opto/idealKit.hpp"
42 #include "opto/inlinetypenode.hpp"
43 #include "opto/intrinsicnode.hpp"
44 #include "opto/locknode.hpp"
45 #include "opto/machnode.hpp"
46 #include "opto/narrowptrnode.hpp"
47 #include "opto/opaquenode.hpp"
48 #include "opto/parse.hpp"
49 #include "opto/rootnode.hpp"
50 #include "opto/runtime.hpp"
51 #include "opto/subtypenode.hpp"
52 #include "runtime/deoptimization.hpp"
53 #include "runtime/sharedRuntime.hpp"
54 #include "utilities/bitMap.inline.hpp"
55 #include "utilities/powerOfTwo.hpp"
56 #include "utilities/growableArray.hpp"
57
58 //----------------------------GraphKit-----------------------------------------
59 // Main utility constructor.
60 GraphKit::GraphKit(JVMState* jvms, PhaseGVN* gvn)
61 : Phase(Phase::Parser),
62 _env(C->env()),
63 _gvn((gvn != nullptr) ? *gvn : *C->initial_gvn()),
64 _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
65 {
66 assert(gvn == nullptr || !gvn->is_IterGVN() || gvn->is_IterGVN()->delay_transform(), "delay transform should be enabled");
67 _exceptions = jvms->map()->next_exception();
68 if (_exceptions != nullptr) jvms->map()->set_next_exception(nullptr);
69 set_jvms(jvms);
70 #ifdef ASSERT
71 if (_gvn.is_IterGVN() != nullptr) {
72 assert(_gvn.is_IterGVN()->delay_transform(), "Transformation must be delayed if IterGVN is used");
73 // Save the initial size of _for_igvn worklist for verification (see ~GraphKit)
74 _worklist_size = _gvn.C->igvn_worklist()->size();
75 }
76 #endif
77 }
78
79 // Private constructor for parser.
80 GraphKit::GraphKit()
81 : Phase(Phase::Parser),
82 _env(C->env()),
83 _gvn(*C->initial_gvn()),
84 _barrier_set(BarrierSet::barrier_set()->barrier_set_c2())
85 {
86 _exceptions = nullptr;
87 set_map(nullptr);
88 debug_only(_sp = -99);
89 debug_only(set_bci(-99));
90 }
91
92
93
94 //---------------------------clean_stack---------------------------------------
95 // Clear away rubbish from the stack area of the JVM state.
96 // This destroys any arguments that may be waiting on the stack.
854 if (PrintMiscellaneous && (Verbose || WizardMode)) {
855 tty->print_cr("Zombie local %d: ", local);
856 jvms->dump();
857 }
858 return false;
859 }
860 }
861 }
862 return true;
863 }
864
865 #endif //ASSERT
866
867 // Helper function for enforcing certain bytecodes to reexecute if deoptimization happens.
868 static bool should_reexecute_implied_by_bytecode(JVMState *jvms, bool is_anewarray) {
869 ciMethod* cur_method = jvms->method();
870 int cur_bci = jvms->bci();
871 if (cur_method != nullptr && cur_bci != InvocationEntryBci) {
872 Bytecodes::Code code = cur_method->java_code_at_bci(cur_bci);
873 return Interpreter::bytecode_should_reexecute(code) ||
874 (is_anewarray && (code == Bytecodes::_multianewarray));
875 // Reexecute the _multianewarray bytecode which was replaced with a
876 // sequence of [a]newarray. See Parse::do_multianewarray().
877 //
878 // Note: the interpreter should not have it set since this optimization
879 // is limited by dimensions and guarded by a flag, so in some cases
880 // multianewarray() runtime calls will be generated and
881 // the bytecode should not be reexecuted (stack will not be reset).
882 } else {
883 return false;
884 }
885 }
886
887 // Helper function for adding JVMState and debug information to node
888 void GraphKit::add_safepoint_edges(SafePointNode* call, bool must_throw) {
889 // Add the safepoint edges to the call (or other safepoint).
890
891 // Make sure dead locals are set to top. This
892 // should help register allocation time and cut down on the size
893 // of the deoptimization information.
894 assert(dead_locals_are_killed(), "garbage in debug info before safepoint");
945 }
946
947 // Presize the call:
948 DEBUG_ONLY(uint non_debug_edges = call->req());
949 call->add_req_batch(top(), youngest_jvms->debug_depth());
950 assert(call->req() == non_debug_edges + youngest_jvms->debug_depth(), "");
951
952 // Set up edges so that the call looks like this:
953 // Call [state:] ctl io mem fptr retadr
954 // [parms:] parm0 ... parmN
955 // [root:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
956 // [...mid:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN [...]
957 // [young:] loc0 ... locN stk0 ... stkSP mon0 obj0 ... monN objN
958 // Note that caller debug info precedes callee debug info.
959
960 // Fill pointer walks backwards from "young:" to "root:" in the diagram above:
961 uint debug_ptr = call->req();
962
963 // Loop over the map input edges associated with jvms, add them
964 // to the call node, & reset all offsets to match call node array.
965
966 JVMState* callee_jvms = nullptr;
967 for (JVMState* in_jvms = youngest_jvms; in_jvms != nullptr; ) {
968 uint debug_end = debug_ptr;
969 uint debug_start = debug_ptr - in_jvms->debug_size();
970 debug_ptr = debug_start; // back up the ptr
971
972 uint p = debug_start; // walks forward in [debug_start, debug_end)
973 uint j, k, l;
974 SafePointNode* in_map = in_jvms->map();
975 out_jvms->set_map(call);
976
977 if (can_prune_locals) {
978 assert(in_jvms->method() == out_jvms->method(), "sanity");
979 // If the current throw can reach an exception handler in this JVMS,
980 // then we must keep everything live that can reach that handler.
981 // As a quick and dirty approximation, we look for any handlers at all.
982 if (in_jvms->method()->has_exception_handlers()) {
983 can_prune_locals = false;
984 }
985 }
986
987 // Add the Locals
988 k = in_jvms->locoff();
989 l = in_jvms->loc_size();
990 out_jvms->set_locoff(p);
991 if (!can_prune_locals) {
992 for (j = 0; j < l; j++) {
993 Node* val = in_map->in(k + j);
994 // Check if there's a larval that has been written in the callee state (constructor) and update it in the caller state
995 if (callee_jvms != nullptr && val->is_InlineType() && val->as_InlineType()->is_larval() &&
996 callee_jvms->method()->is_object_constructor() && val == in_map->argument(in_jvms, 0) &&
997 val->bottom_type()->is_inlinetypeptr()) {
998 val = callee_jvms->map()->local(callee_jvms, 0); // Receiver
999 }
1000 call->set_req(p++, val);
1001 }
1002 } else {
1003 p += l; // already set to top above by add_req_batch
1004 }
1005
1006 // Add the Expression Stack
1007 k = in_jvms->stkoff();
1008 l = in_jvms->sp();
1009 out_jvms->set_stkoff(p);
1010 if (!can_prune_locals) {
1011 for (j = 0; j < l; j++) {
1012 Node* val = in_map->in(k + j);
1013 // Check if there's a larval that has been written in the callee state (constructor) and update it in the caller state
1014 if (callee_jvms != nullptr && val->is_InlineType() && val->as_InlineType()->is_larval() &&
1015 callee_jvms->method()->is_object_constructor() && val == in_map->argument(in_jvms, 0) &&
1016 val->bottom_type()->is_inlinetypeptr()) {
1017 val = callee_jvms->map()->local(callee_jvms, 0); // Receiver
1018 }
1019 call->set_req(p++, val);
1020 }
1021 } else if (can_prune_locals && stack_slots_not_pruned != 0) {
1022 // Divide stack into {S0,...,S1}, where S0 is set to top.
1023 uint s1 = stack_slots_not_pruned;
1024 stack_slots_not_pruned = 0; // for next iteration
1025 if (s1 > l) s1 = l;
1026 uint s0 = l - s1;
1027 p += s0; // skip the tops preinstalled by add_req_batch
1028 for (j = s0; j < l; j++)
1029 call->set_req(p++, in_map->in(k+j));
1030 } else {
1031 p += l; // already set to top above by add_req_batch
1032 }
1033
1034 // Add the Monitors
1035 k = in_jvms->monoff();
1036 l = in_jvms->mon_size();
1037 out_jvms->set_monoff(p);
1038 for (j = 0; j < l; j++)
1039 call->set_req(p++, in_map->in(k+j));
1040
1041 // Copy any scalar object fields.
1042 k = in_jvms->scloff();
1043 l = in_jvms->scl_size();
1044 out_jvms->set_scloff(p);
1045 for (j = 0; j < l; j++)
1046 call->set_req(p++, in_map->in(k+j));
1047
1048 // Finish the new jvms.
1049 out_jvms->set_endoff(p);
1050
1051 assert(out_jvms->endoff() == debug_end, "fill ptr must match");
1052 assert(out_jvms->depth() == in_jvms->depth(), "depth must match");
1053 assert(out_jvms->loc_size() == in_jvms->loc_size(), "size must match");
1054 assert(out_jvms->mon_size() == in_jvms->mon_size(), "size must match");
1055 assert(out_jvms->scl_size() == in_jvms->scl_size(), "size must match");
1056 assert(out_jvms->debug_size() == in_jvms->debug_size(), "size must match");
1057
1058 // Update the two tail pointers in parallel.
1059 callee_jvms = out_jvms;
1060 out_jvms = out_jvms->caller();
1061 in_jvms = in_jvms->caller();
1062 }
1063
1064 assert(debug_ptr == non_debug_edges, "debug info must fit exactly");
1065
1066 // Test the correctness of JVMState::debug_xxx accessors:
1067 assert(call->jvms()->debug_start() == non_debug_edges, "");
1068 assert(call->jvms()->debug_end() == call->req(), "");
1069 assert(call->jvms()->debug_depth() == call->req() - non_debug_edges, "");
1070 }
1071
1072 bool GraphKit::compute_stack_effects(int& inputs, int& depth) {
1073 Bytecodes::Code code = java_bc();
1074 if (code == Bytecodes::_wide) {
1075 code = method()->java_code_at_bci(bci() + 1);
1076 }
1077
1078 if (code != Bytecodes::_illegal) {
1079 depth = Bytecodes::depth(code); // checkcast=0, athrow=-1
1215 Node* conv = _gvn.transform( new ConvI2LNode(offset));
1216 Node* mask = _gvn.transform(ConLNode::make((julong) max_juint));
1217 return _gvn.transform( new AndLNode(conv, mask) );
1218 }
1219
1220 Node* GraphKit::ConvL2I(Node* offset) {
1221 // short-circuit a common case
1222 jlong offset_con = find_long_con(offset, (jlong)Type::OffsetBot);
1223 if (offset_con != (jlong)Type::OffsetBot) {
1224 return intcon((int) offset_con);
1225 }
1226 return _gvn.transform( new ConvL2INode(offset));
1227 }
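// Example: ConvL2I of a constant long 5 short-circuits to intcon(5) via
// find_long_con; otherwise a ConvL2INode truncates the long to its low 32 bits.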
1228
1229 //-------------------------load_object_klass-----------------------------------
1230 Node* GraphKit::load_object_klass(Node* obj) {
1231 // Special-case a fresh allocation to avoid building nodes:
1232 Node* akls = AllocateNode::Ideal_klass(obj, &_gvn);
1233 if (akls != nullptr) return akls;
1234 Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
1235 return _gvn.transform(LoadKlassNode::make(_gvn, immutable_memory(), k_adr, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
1236 }
1237
1238 //-------------------------load_array_length-----------------------------------
1239 Node* GraphKit::load_array_length(Node* array) {
1240 // Special-case a fresh allocation to avoid building nodes:
1241 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(array);
1242 Node *alen;
1243 if (alloc == nullptr) {
1244 Node *r_adr = basic_plus_adr(array, arrayOopDesc::length_offset_in_bytes());
1245 alen = _gvn.transform( new LoadRangeNode(nullptr, immutable_memory(), r_adr, TypeInt::POS));
1246 } else {
1247 alen = array_ideal_length(alloc, _gvn.type(array)->is_oopptr(), false);
1248 }
1249 return alen;
1250 }
1251
1252 Node* GraphKit::array_ideal_length(AllocateArrayNode* alloc,
1253 const TypeOopPtr* oop_type,
1254 bool replace_length_in_map) {
1255 Node* length = alloc->Ideal_length();
1264 replace_in_map(length, ccast);
1265 }
1266 return ccast;
1267 }
1268 }
1269 return length;
1270 }
1271
1272 //------------------------------do_null_check----------------------------------
1273 // Helper function to do a null pointer check. Returned value is
1274 // the incoming address with null casted away. You are allowed to use the
1275 // not-null value only if you are control dependent on the test.
1276 #ifndef PRODUCT
1277 extern uint explicit_null_checks_inserted,
1278 explicit_null_checks_elided;
1279 #endif
1280 Node* GraphKit::null_check_common(Node* value, BasicType type,
1281 // optional arguments for variations:
1282 bool assert_null,
1283 Node* *null_control,
1284 bool speculative,
1285 bool is_init_check) {
1286 assert(!assert_null || null_control == nullptr, "not both at once");
1287 if (stopped()) return top();
1288 NOT_PRODUCT(explicit_null_checks_inserted++);
1289
1290 if (value->is_InlineType()) {
1291 // Null checking a scalarized but nullable inline type. Check the IsInit
1292 // input instead of the oop input to avoid keeping buffer allocations alive.
1293 InlineTypeNode* vtptr = value->as_InlineType();
1294 while (vtptr->get_oop()->is_InlineType()) {
1295 vtptr = vtptr->get_oop()->as_InlineType();
1296 }
1297 null_check_common(vtptr->get_is_init(), T_INT, assert_null, null_control, speculative, true);
1298 if (stopped()) {
1299 return top();
1300 }
1301 if (assert_null) {
1302 // TODO 8284443 Scalarize here (this currently leads to compilation bailouts)
1303 // vtptr = InlineTypeNode::make_null(_gvn, vtptr->type()->inline_klass());
1304 // replace_in_map(value, vtptr);
1305 // return vtptr;
1306 replace_in_map(value, null());
1307 return null();
1308 }
1309 bool do_replace_in_map = (null_control == nullptr || (*null_control) == top());
1310 return cast_not_null(value, do_replace_in_map);
1311 }
1312
1313 // Construct null check
1314 Node *chk = nullptr;
1315 switch(type) {
1316 case T_LONG : chk = new CmpLNode(value, _gvn.zerocon(T_LONG)); break;
1317 case T_INT : chk = new CmpINode(value, _gvn.intcon(0)); break;
1318 case T_ARRAY : // fall through
1319 type = T_OBJECT; // simplify further tests
1320 case T_OBJECT : {
1321 const Type *t = _gvn.type( value );
1322
1323 const TypeOopPtr* tp = t->isa_oopptr();
1324 if (tp != nullptr && !tp->is_loaded()
1325 // Only for do_null_check, not any of its siblings:
1326 && !assert_null && null_control == nullptr) {
1327 // Usually, any field access or invocation on an unloaded oop type
1328 // will simply fail to link, since the statically linked class is
1329 // likely also to be unloaded. However, in -Xcomp mode, sometimes
1330 // the static class is loaded but the sharper oop type is not.
1331 // Rather than checking for this obscure case in lots of places,
1332 // we simply observe that a null check on an unloaded class
1396 }
1397 Node *oldcontrol = control();
1398 set_control(cfg);
1399 Node *res = cast_not_null(value);
1400 set_control(oldcontrol);
1401 NOT_PRODUCT(explicit_null_checks_elided++);
1402 return res;
1403 }
1404 cfg = IfNode::up_one_dom(cfg, /*linear_only=*/ true);
1405 if (cfg == nullptr) break; // Quit at region nodes
1406 depth++;
1407 }
1408 }
1409
1410 //-----------
1411 // Branch to failure if null
1412 float ok_prob = PROB_MAX; // a priori estimate: nulls never happen
1413 Deoptimization::DeoptReason reason;
1414 if (assert_null) {
1415 reason = Deoptimization::reason_null_assert(speculative);
1416 } else if (type == T_OBJECT || is_init_check) {
1417 reason = Deoptimization::reason_null_check(speculative);
1418 } else {
1419 reason = Deoptimization::Reason_div0_check;
1420 }
1421 // %%% Since Reason_unhandled is not recorded on a per-bytecode basis,
1422 // ciMethodData::has_trap_at will return a conservative -1 if any
1423 // must-be-null assertion has failed. This could cause performance
1424 // problems for a method after its first do_null_assert failure.
1425 // Consider using 'Reason_class_check' instead?
1426
1427 // To cause an implicit null check, we set the not-null probability
1428 // to the maximum (PROB_MAX). For an explicit check the probability
1429 // is set to a smaller value.
1430 if (null_control != nullptr || too_many_traps(reason)) {
1431 // probability is less likely
1432 ok_prob = PROB_LIKELY_MAG(3);
1433 } else if (!assert_null &&
1434 (ImplicitNullCheckThreshold > 0) &&
1435 method() != nullptr &&
1436 (method()->method_data()->trap_count(reason)
1470 }
1471
1472 if (assert_null) {
1473 // Cast obj to null on this path.
1474 replace_in_map(value, zerocon(type));
1475 return zerocon(type);
1476 }
1477
1478 // Cast obj to not-null on this path, if there is no null_control.
1479 // (If there is a null_control, a non-null value may come back to haunt us.)
1480 if (type == T_OBJECT) {
1481 Node* cast = cast_not_null(value, false);
1482 if (null_control == nullptr || (*null_control) == top())
1483 replace_in_map(value, cast);
1484 value = cast;
1485 }
1486
1487 return value;
1488 }
1489
1490 //------------------------------cast_not_null----------------------------------
1491 // Cast obj to not-null on this path
1492 Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
1493 if (obj->is_InlineType()) {
1494 Node* vt = obj->isa_InlineType()->clone_if_required(&gvn(), map(), do_replace_in_map);
1495 vt->as_InlineType()->set_is_init(_gvn);
1496 vt = _gvn.transform(vt);
1497 if (do_replace_in_map) {
1498 replace_in_map(obj, vt);
1499 }
1500 return vt;
1501 }
1502 const Type *t = _gvn.type(obj);
1503 const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
1504 // Object is already not-null?
1505 if( t == t_not_null ) return obj;
1506
1507 Node* cast = new CastPPNode(control(), obj,t_not_null);
1508 cast = _gvn.transform( cast );
1509
1510 // Scan for instances of 'obj' in the current JVM mapping.
1511 // These instances are known to be not-null after the test.
1512 if (do_replace_in_map)
1513 replace_in_map(obj, cast);
1514
1515 return cast; // Return casted value
1516 }
1517
1518 // Sometimes in intrinsics, we implicitly know an object is not null
1519 // (there's no actual null check) so we can cast it to not null. In
1520 // the course of optimizations, the input to the cast can become null.
1521 // In that case that data path will die and we need the control path
1610 // These are layered on top of the factory methods in LoadNode and StoreNode,
1611 // and integrate with the parser's memory state and _gvn engine.
1612 //
1613
1614 // factory methods in "int adr_idx"
1615 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
1616 MemNode::MemOrd mo,
1617 LoadNode::ControlDependency control_dependency,
1618 bool require_atomic_access,
1619 bool unaligned,
1620 bool mismatched,
1621 bool unsafe,
1622 uint8_t barrier_data) {
1623 int adr_idx = C->get_alias_index(_gvn.type(adr)->isa_ptr());
1624 assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
1625 const TypePtr* adr_type = nullptr; // debug-mode-only argument
1626 debug_only(adr_type = C->get_adr_type(adr_idx));
1627 Node* mem = memory(adr_idx);
1628 Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, require_atomic_access, unaligned, mismatched, unsafe, barrier_data);
1629 ld = _gvn.transform(ld);
1630
1631 if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) {
1632 // Improve graph before escape analysis and boxing elimination.
1633 record_for_igvn(ld);
1634 if (ld->is_DecodeN()) {
1635 // Also record the actual load (LoadN) in case ld is DecodeN. In some
1636 // rare corner cases, ld->in(1) can be something other than LoadN (e.g.,
1637 // a Phi). Recording such cases is still perfectly sound, but may be
1638 // unnecessary and result in some minor IGVN overhead.
1639 record_for_igvn(ld->in(1));
1640 }
1641 }
1642 return ld;
1643 }
1644
1645 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
1646 MemNode::MemOrd mo,
1647 bool require_atomic_access,
1648 bool unaligned,
1649 bool mismatched,
1650 bool unsafe,
1664 if (unsafe) {
1665 st->as_Store()->set_unsafe_access();
1666 }
1667 st->as_Store()->set_barrier_data(barrier_data);
1668 st = _gvn.transform(st);
1669 set_memory(st, adr_idx);
1670 // Back-to-back stores can only remove an intermediate store with DU info,
1671 // so push it on the worklist for the optimizer.
1672 if (mem->req() > MemNode::Address && adr == mem->in(MemNode::Address))
1673 record_for_igvn(st);
1674
1675 return st;
1676 }
1677
1678 Node* GraphKit::access_store_at(Node* obj,
1679 Node* adr,
1680 const TypePtr* adr_type,
1681 Node* val,
1682 const Type* val_type,
1683 BasicType bt,
1684 DecoratorSet decorators,
1685 bool safe_for_replace,
1686 const InlineTypeNode* vt) {
1687 // Transformation of a value which could be null pointer (CastPP #null)
1688 // could be delayed during Parse (for example, in adjust_map_after_if()).
1689 // Execute transformation here to avoid barrier generation in such case.
1690 if (_gvn.type(val) == TypePtr::NULL_PTR) {
1691 val = _gvn.makecon(TypePtr::NULL_PTR);
1692 }
1693
1694 if (stopped()) {
1695 return top(); // Dead path ?
1696 }
1697
1698 assert(val != nullptr, "not dead path");
1699 if (val->is_InlineType()) {
1700 // Store to non-flat field. Buffer the inline type and make sure
1701 // the store is re-executed if the allocation triggers deoptimization.
1702 PreserveReexecuteState preexecs(this);
1703 jvms()->set_should_reexecute(true);
1704 val = val->as_InlineType()->buffer(this, safe_for_replace);
1705 }
1706
1707 C2AccessValuePtr addr(adr, adr_type);
1708 C2AccessValue value(val, val_type);
1709 C2ParseAccess access(this, decorators | C2_WRITE_ACCESS, bt, obj, addr, nullptr, vt);
1710 if (access.is_raw()) {
1711 return _barrier_set->BarrierSetC2::store_at(access, value);
1712 } else {
1713 return _barrier_set->store_at(access, value);
1714 }
1715 }
1716
1717 Node* GraphKit::access_load_at(Node* obj, // containing obj
1718 Node* adr, // actual address to store val at
1719 const TypePtr* adr_type,
1720 const Type* val_type,
1721 BasicType bt,
1722 DecoratorSet decorators,
1723 Node* ctl) {
1724 if (stopped()) {
1725 return top(); // Dead path ?
1726 }
1727
1728 C2AccessValuePtr addr(adr, adr_type);
1729 C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, obj, addr, ctl);
1730 if (access.is_raw()) {
1731 return _barrier_set->BarrierSetC2::load_at(access, val_type);
1732 } else {
1733 return _barrier_set->load_at(access, val_type);
1734 }
1735 }
1736
1737 Node* GraphKit::access_load(Node* adr, // actual address to load val at
1738 const Type* val_type,
1739 BasicType bt,
1740 DecoratorSet decorators) {
1741 if (stopped()) {
1742 return top(); // Dead path ?
1743 }
1744
1745 C2AccessValuePtr addr(adr, adr->bottom_type()->is_ptr());
1746 C2ParseAccess access(this, decorators | C2_READ_ACCESS, bt, nullptr, addr);
1747 if (access.is_raw()) {
1748 return _barrier_set->BarrierSetC2::load_at(access, val_type);
1749 } else {
1814 Node* new_val,
1815 const Type* value_type,
1816 BasicType bt,
1817 DecoratorSet decorators) {
1818 C2AccessValuePtr addr(adr, adr_type);
1819 C2AtomicParseAccess access(this, decorators | C2_READ_ACCESS | C2_WRITE_ACCESS, bt, obj, addr, alias_idx);
1820 if (access.is_raw()) {
1821 return _barrier_set->BarrierSetC2::atomic_add_at(access, new_val, value_type);
1822 } else {
1823 return _barrier_set->atomic_add_at(access, new_val, value_type);
1824 }
1825 }
1826
1827 void GraphKit::access_clone(Node* src, Node* dst, Node* size, bool is_array) {
1828 return _barrier_set->clone(this, src, dst, size, is_array);
1829 }
1830
1831 //-------------------------array_element_address-------------------------
1832 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
1833 const TypeInt* sizetype, Node* ctrl) {
1834 const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr();
1835 uint shift;
1836 if (arytype->is_flat() && arytype->klass_is_exact()) {
1837 // We can only determine the flat array layout statically if the klass is exact. Otherwise, we could have different
1838 // value classes at runtime with a potentially different layout. The caller needs to fall back to call
1839 // load/store_unknown_inline_type() at runtime. We could return a sentinel node for the non-exact case but that
1840 // might mess with other GVN transformations in between. Thus, we just continue in the else branch normally, even
1841 // though we don't need the address node in this case and throw it away again.
1842 shift = arytype->flat_log_elem_size();
1843 } else {
1844 shift = exact_log2(type2aelembytes(elembt));
1845 }
1846 uint header = arrayOopDesc::base_offset_in_bytes(elembt);
1847
1848 // short-circuit a common case (saves lots of confusing waste motion)
1849 jint idx_con = find_int_con(idx, -1);
1850 if (idx_con >= 0) {
1851 intptr_t offset = header + ((intptr_t)idx_con << shift);
1852 return basic_plus_adr(ary, offset);
1853 }
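// Worked example of the constant fast path, assuming a 16-byte T_INT header:
// ary[3] with shift == 2 folds to the single offset 16 + (3 << 2) == 28,
// emitted as one AddP by basic_plus_adr.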
1854
1855 // must be correct type for alignment purposes
1856 Node* base = basic_plus_adr(ary, header);
1857 idx = Compile::conv_I2X_index(&_gvn, idx, sizetype, ctrl);
1858 Node* scale = _gvn.transform( new LShiftXNode(idx, intcon(shift)) );
1859 return basic_plus_adr(ary, base, scale);
1860 }
1861
1862 Node* GraphKit::flat_array_element_address(Node*& array, Node* idx, ciInlineKlass* vk, bool is_null_free,
1863 bool is_not_null_free, bool is_atomic) {
1864 ciArrayKlass* array_klass = ciArrayKlass::make(vk, /* flat */ true, is_null_free, is_atomic);
1865 const TypeAryPtr* arytype = TypeOopPtr::make_from_klass(array_klass)->isa_aryptr();
1866 arytype = arytype->cast_to_exactness(true);
1867 arytype = arytype->cast_to_not_null_free(is_not_null_free);
1868 array = _gvn.transform(new CheckCastPPNode(control(), array, arytype));
1869 return array_element_address(array, idx, T_FLAT_ELEMENT, arytype->size(), control());
1870 }
1871
1872 //-------------------------load_array_element-------------------------
1873 Node* GraphKit::load_array_element(Node* ary, Node* idx, const TypeAryPtr* arytype, bool set_ctrl) {
1874 const Type* elemtype = arytype->elem();
1875 BasicType elembt = elemtype->array_element_basic_type();
1876 Node* adr = array_element_address(ary, idx, elembt, arytype->size());
1877 if (elembt == T_NARROWOOP) {
1878 elembt = T_OBJECT; // To satisfy switch in LoadNode::make()
1879 }
1880 Node* ld = access_load_at(ary, adr, arytype, elemtype, elembt,
1881 IN_HEAP | IS_ARRAY | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0));
1882 return ld;
1883 }
1884
1885 //-------------------------set_arguments_for_java_call-------------------------
1886 // Arguments (pre-popped from the stack) are taken from the JVMS.
1887 void GraphKit::set_arguments_for_java_call(CallJavaNode* call, bool is_late_inline) {
1888 PreserveReexecuteState preexecs(this);
1889 if (EnableValhalla) {
1890 // Make sure the call is "re-executed", if buffering of inline type arguments triggers deoptimization.
1891 // At this point, the call hasn't been executed yet, so we will only ever execute the call once.
1892 jvms()->set_should_reexecute(true);
1893 int arg_size = method()->get_declared_signature_at_bci(bci())->arg_size_for_bc(java_bc());
1894 inc_sp(arg_size);
1895 }
1896 // Add the call arguments
1897 const TypeTuple* domain = call->tf()->domain_sig();
1898 uint nargs = domain->cnt();
1899 int arg_num = 0;
1900 for (uint i = TypeFunc::Parms, idx = TypeFunc::Parms; i < nargs; i++) {
1901 Node* arg = argument(i-TypeFunc::Parms);
1902 const Type* t = domain->field_at(i);
1903 // TODO 8284443 A static call to a mismatched method should still be scalarized
1904 if (t->is_inlinetypeptr() && !call->method()->get_Method()->mismatch() && call->method()->is_scalarized_arg(arg_num)) {
1905 // We don't pass inline type arguments by reference but instead pass each field of the inline type
1906 if (!arg->is_InlineType()) {
1907 assert(_gvn.type(arg)->is_zero_type() && !t->inline_klass()->is_null_free(), "Unexpected argument type");
1908 arg = InlineTypeNode::make_from_oop(this, arg, t->inline_klass());
1909 }
1910 InlineTypeNode* vt = arg->as_InlineType();
1911 vt->pass_fields(this, call, idx, true, !t->maybe_null());
1912 // If an inline type argument is passed as fields, attach the Method* to the call site
1913 // to be able to access the extended signature later via attached_method_before_pc().
1914 // For example, see CompiledMethod::preserve_callee_argument_oops().
1915 call->set_override_symbolic_info(true);
1916 // Register an evol dependency on the callee method to make sure that this method is deoptimized and
1917 // re-compiled with a non-scalarized calling convention if the callee method is later marked as mismatched.
1918 C->dependencies()->assert_evol_method(call->method());
1919 arg_num++;
1920 continue;
1921 } else if (arg->is_InlineType()) {
1922 // Pass inline type argument via oop to callee
1923 InlineTypeNode* inline_type = arg->as_InlineType();
1924 const ciMethod* method = call->method();
1925 ciInstanceKlass* holder = method->holder();
1926 const bool is_receiver = (i == TypeFunc::Parms);
1927 const bool is_abstract_or_object_klass_constructor = method->is_object_constructor() &&
1928 (holder->is_abstract() || holder->is_java_lang_Object());
1929 const bool is_larval_receiver_on_super_constructor = is_receiver && is_abstract_or_object_klass_constructor;
1930 bool must_init_buffer = true;
1931 // We always need to buffer inline types when they are escaping. However, we can skip the actual initialization
1932 // of the buffer if the inline type is a larval because we are going to update the buffer anyway which requires
1933 // us to create a new one. But there is one special case where we are still required to initialize the buffer:
1934 // When we have a larval receiver invoked on an abstract (value class) constructor or the Object constructor (that
1935 // is not going to be inlined). After this call, the larval is completely initialized and thus not a larval anymore.
1936 // We therefore need to force an initialization of the buffer to not lose all the field writes so far in case the
1937 // buffer needs to be used (e.g. to read from when deoptimizing at runtime) or further updated in abstract super
1938 // value class constructors which could have more fields to be initialized. Note that we do not need to
1939 // initialize the buffer when invoking another constructor in the same class on a larval receiver because we
1940 // have not initialized any fields, yet (this is done completely by the other constructor call).
1941 if (inline_type->is_larval() && !is_larval_receiver_on_super_constructor) {
1942 must_init_buffer = false;
1943 }
1944 arg = inline_type->buffer(this, true, must_init_buffer);
1945 }
1946 if (t != Type::HALF) {
1947 arg_num++;
1948 }
1949 call->init_req(idx++, arg);
1950 }
1951 }
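// Sketch of the two argument shapes produced above: a scalarized inline type
// argument occupies one call input per field (via pass_fields), while a
// buffered one occupies a single oop input, like any ordinary reference
// argument; 'idx' advances accordingly in both cases.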
1952
1953 //---------------------------set_edges_for_java_call---------------------------
1954 // Connect a newly created call into the current JVMS.
1955 // A return value node (if any) is returned from set_edges_for_java_call.
1956 void GraphKit::set_edges_for_java_call(CallJavaNode* call, bool must_throw, bool separate_io_proj) {
1957
1958 // Add the predefined inputs:
1959 call->init_req( TypeFunc::Control, control() );
1960 call->init_req( TypeFunc::I_O , i_o() );
1961 call->init_req( TypeFunc::Memory , reset_memory() );
1962 call->init_req( TypeFunc::FramePtr, frameptr() );
1963 call->init_req( TypeFunc::ReturnAdr, top() );
1964
1965 add_safepoint_edges(call, must_throw);
1966
1967 Node* xcall = _gvn.transform(call);
1968
1969 if (xcall == top()) {
1970 set_control(top());
1971 return;
1972 }
1973 assert(xcall == call, "call identity is stable");
1974
1975 // Re-use the current map to produce the result.
1976
1977 set_control(_gvn.transform(new ProjNode(call, TypeFunc::Control)));
1978 set_i_o( _gvn.transform(new ProjNode(call, TypeFunc::I_O , separate_io_proj)));
1979 set_all_memory_call(xcall, separate_io_proj);
1980
1981 //return xcall; // no need, caller already has it
1982 }
1983
1984 Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj, bool deoptimize) {
1985 if (stopped()) return top(); // maybe the call folded up?
1986
1987 // Note: Since any out-of-line call can produce an exception,
1988 // we always insert an I_O projection from the call into the result.
1989
1990 make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj, deoptimize);
1991
1992 if (separate_io_proj) {
1993 // The caller requested separate projections be used by the fall
1994 // through and exceptional paths, so replace the projections for
1995 // the fall through path.
1996 set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) ));
1997 set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) ));
1998 }
1999
2000 // Capture the return value, if any.
2001 Node* ret;
2002 if (call->method() == nullptr || call->method()->return_type()->basic_type() == T_VOID) {
2003 ret = top();
2004 } else if (call->tf()->returns_inline_type_as_fields()) {
// Return of multiple values (inline type fields): we create an
// InlineType node, and each field is a projection from the call.
2007 ciInlineKlass* vk = call->method()->return_type()->as_inline_klass();
2008 uint base_input = TypeFunc::Parms;
2009 ret = InlineTypeNode::make_from_multi(this, call, vk, base_input, false, false);
2010 } else {
2011 ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms));
2012 ciType* t = call->method()->return_type();
2013 if (t->is_klass()) {
2014 const Type* type = TypeOopPtr::make_from_klass(t->as_klass());
2015 if (type->is_inlinetypeptr()) {
2016 ret = InlineTypeNode::make_from_oop(this, ret, type->inline_klass());
2017 }
2018 }
2019 }
2020
// If we just called a constructor on a value type receiver, reload the receiver from the buffer.
2022 ciMethod* method = call->method();
2023 if (method->is_object_constructor() && !method->holder()->is_java_lang_Object()) {
2024 InlineTypeNode* inline_type_receiver = call->in(TypeFunc::Parms)->isa_InlineType();
2025 if (inline_type_receiver != nullptr) {
2026 assert(inline_type_receiver->is_larval(), "must be larval");
2027 assert(inline_type_receiver->is_allocated(&gvn()), "larval must be buffered");
2028 InlineTypeNode* reloaded = InlineTypeNode::make_from_oop(this, inline_type_receiver->get_oop(),
2029 inline_type_receiver->bottom_type()->inline_klass());
2030 assert(!reloaded->is_larval(), "should not be larval anymore");
2031 replace_in_map(inline_type_receiver, reloaded);
2032 }
2033 }
2034
2035 return ret;
2036 }
2037
2038 //--------------------set_predefined_input_for_runtime_call--------------------
2039 // Reading and setting the memory state is way conservative here.
2040 // The real problem is that I am not doing real Type analysis on memory,
2041 // so I cannot distinguish card mark stores from other stores. Across a GC
// point the Store Barrier and the card mark memory have to agree. I cannot
2043 // have a card mark store and its barrier split across the GC point from
2044 // either above or below. Here I get that to happen by reading ALL of memory.
2045 // A better answer would be to separate out card marks from other memory.
2046 // For now, return the input memory state, so that it can be reused
2047 // after the call, if this call has restricted memory effects.
2048 Node* GraphKit::set_predefined_input_for_runtime_call(SafePointNode* call, Node* narrow_mem) {
2049 // Set fixed predefined input arguments
2050 Node* memory = reset_memory();
2051 Node* m = narrow_mem == nullptr ? memory : narrow_mem;
2052 call->init_req( TypeFunc::Control, control() );
2053 call->init_req( TypeFunc::I_O, top() ); // does no i/o
2054 call->init_req( TypeFunc::Memory, m ); // may gc ptrs
2105 if (use->is_MergeMem()) {
2106 wl.push(use);
2107 }
2108 }
2109 }
2110
2111 // Replace the call with the current state of the kit.
2112 void GraphKit::replace_call(CallNode* call, Node* result, bool do_replaced_nodes, bool do_asserts) {
2113 JVMState* ejvms = nullptr;
2114 if (has_exceptions()) {
2115 ejvms = transfer_exceptions_into_jvms();
2116 }
2117
2118 ReplacedNodes replaced_nodes = map()->replaced_nodes();
2119 ReplacedNodes replaced_nodes_exception;
2120 Node* ex_ctl = top();
2121
2122 SafePointNode* final_state = stop();
2123
2124 // Find all the needed outputs of this call
2125 CallProjections* callprojs = call->extract_projections(true, do_asserts);
2126
2127 Unique_Node_List wl;
2128 Node* init_mem = call->in(TypeFunc::Memory);
2129 Node* final_mem = final_state->in(TypeFunc::Memory);
2130 Node* final_ctl = final_state->in(TypeFunc::Control);
2131 Node* final_io = final_state->in(TypeFunc::I_O);
2132
2133 // Replace all the old call edges with the edges from the inlining result
2134 if (callprojs->fallthrough_catchproj != nullptr) {
2135 C->gvn_replace_by(callprojs->fallthrough_catchproj, final_ctl);
2136 }
2137 if (callprojs->fallthrough_memproj != nullptr) {
2138 if (final_mem->is_MergeMem()) {
// The parser's exit MergeMem was not transformed, but it may still be optimized
2140 final_mem = _gvn.transform(final_mem);
2141 }
2142 C->gvn_replace_by(callprojs->fallthrough_memproj, final_mem);
2143 add_mergemem_users_to_worklist(wl, final_mem);
2144 }
2145 if (callprojs->fallthrough_ioproj != nullptr) {
2146 C->gvn_replace_by(callprojs->fallthrough_ioproj, final_io);
2147 }
2148
2149 // Replace the result with the new result if it exists and is used
2150 if (callprojs->resproj[0] != nullptr && result != nullptr) {
2151 // If the inlined code is dead, the result projections for an inline type returned as
2152 // fields have not been replaced. They will go away once the call is replaced by TOP below.
2153 assert(callprojs->nb_resproj == 1 || (call->tf()->returns_inline_type_as_fields() && stopped()),
2154 "unexpected number of results");
2155 C->gvn_replace_by(callprojs->resproj[0], result);
2156 }
2157
2158 if (ejvms == nullptr) {
// No exception edges; simply kill off the exception paths
2160 if (callprojs->catchall_catchproj != nullptr) {
2161 C->gvn_replace_by(callprojs->catchall_catchproj, C->top());
2162 }
2163 if (callprojs->catchall_memproj != nullptr) {
2164 C->gvn_replace_by(callprojs->catchall_memproj, C->top());
2165 }
2166 if (callprojs->catchall_ioproj != nullptr) {
2167 C->gvn_replace_by(callprojs->catchall_ioproj, C->top());
2168 }
2169 // Replace the old exception object with top
2170 if (callprojs->exobj != nullptr) {
2171 C->gvn_replace_by(callprojs->exobj, C->top());
2172 }
2173 } else {
2174 GraphKit ekit(ejvms);
2175
2176 // Load my combined exception state into the kit, with all phis transformed:
2177 SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states();
2178 replaced_nodes_exception = ex_map->replaced_nodes();
2179
2180 Node* ex_oop = ekit.use_exception_state(ex_map);
2181
2182 if (callprojs->catchall_catchproj != nullptr) {
2183 C->gvn_replace_by(callprojs->catchall_catchproj, ekit.control());
2184 ex_ctl = ekit.control();
2185 }
2186 if (callprojs->catchall_memproj != nullptr) {
2187 Node* ex_mem = ekit.reset_memory();
2188 C->gvn_replace_by(callprojs->catchall_memproj, ex_mem);
2189 add_mergemem_users_to_worklist(wl, ex_mem);
2190 }
2191 if (callprojs->catchall_ioproj != nullptr) {
2192 C->gvn_replace_by(callprojs->catchall_ioproj, ekit.i_o());
2193 }
2194
2195 // Replace the old exception object with the newly created one
2196 if (callprojs->exobj != nullptr) {
2197 C->gvn_replace_by(callprojs->exobj, ex_oop);
2198 }
2199 }
2200
2201 // Disconnect the call from the graph
2202 call->disconnect_inputs(C);
2203 C->gvn_replace_by(call, C->top());
2204
2205 // Clean up any MergeMems that feed other MergeMems since the
2206 // optimizer doesn't like that.
2207 while (wl.size() > 0) {
2208 _gvn.transform(wl.pop());
2209 }
2210
2211 if (callprojs->fallthrough_catchproj != nullptr && !final_ctl->is_top() && do_replaced_nodes) {
2212 replaced_nodes.apply(C, final_ctl);
2213 }
2214 if (!ex_ctl->is_top() && do_replaced_nodes) {
2215 replaced_nodes_exception.apply(C, ex_ctl);
2216 }
2217 }
2218
2219
2220 //------------------------------increment_counter------------------------------
2221 // for statistics: increment a VM counter by 1
2222
2223 void GraphKit::increment_counter(address counter_addr) {
2224 Node* adr1 = makecon(TypeRawPtr::make(counter_addr));
2225 increment_counter(adr1);
2226 }
2227
2228 void GraphKit::increment_counter(Node* counter_addr) {
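// The unordered load/add/store sequence below is deliberately non-atomic: these
// counters are for statistics only, so occasional lost updates are acceptable.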
2229 Node* ctrl = control();
2230 Node* cnt = make_load(ctrl, counter_addr, TypeLong::LONG, T_LONG, MemNode::unordered);
2231 Node* incr = _gvn.transform(new AddLNode(cnt, _gvn.longcon(1)));
2391 *
2392 * @param n node that the type applies to
2393 * @param exact_kls type from profiling
* @param ptr_kind  what profiling saw about nullness (maybe/never/always null)
2395 *
2396 * @return node with improved type
2397 */
2398 Node* GraphKit::record_profile_for_speculation(Node* n, ciKlass* exact_kls, ProfilePtrKind ptr_kind) {
2399 const Type* current_type = _gvn.type(n);
2400 assert(UseTypeSpeculation, "type speculation must be on");
2401
2402 const TypePtr* speculative = current_type->speculative();
2403
2404 // Should the klass from the profile be recorded in the speculative type?
2405 if (current_type->would_improve_type(exact_kls, jvms()->depth())) {
2406 const TypeKlassPtr* tklass = TypeKlassPtr::make(exact_kls, Type::trust_interfaces);
2407 const TypeOopPtr* xtype = tklass->as_instance_type();
2408 assert(xtype->klass_is_exact(), "Should be exact");
2409 // Any reason to believe n is not null (from this profiling or a previous one)?
2410 assert(ptr_kind != ProfileAlwaysNull, "impossible here");
2411 const TypePtr* ptr = (ptr_kind != ProfileNeverNull && current_type->speculative_maybe_null()) ? TypePtr::BOTTOM : TypePtr::NOTNULL;
2412 // record the new speculative type's depth
2413 speculative = xtype->cast_to_ptr_type(ptr->ptr())->is_ptr();
2414 speculative = speculative->with_inline_depth(jvms()->depth());
2415 } else if (current_type->would_improve_ptr(ptr_kind)) {
// Profiling reports that null was never (or always) seen, so we can
// sharpen the speculative type accordingly.
2418 if (ptr_kind == ProfileAlwaysNull) {
2419 speculative = TypePtr::NULL_PTR;
2420 } else {
2421 assert(ptr_kind == ProfileNeverNull, "nothing else is an improvement");
2422 const TypePtr* ptr = TypePtr::NOTNULL;
2423 if (speculative != nullptr) {
2424 speculative = speculative->cast_to_ptr_type(ptr->ptr())->is_ptr();
2425 } else {
2426 speculative = ptr;
2427 }
2428 }
2429 }
2430
2431 if (speculative != current_type->speculative()) {
2432 // Build a type with a speculative type (what we think we know
2433 // about the type but will need a guard when we use it)
2434 const TypeOopPtr* spec_type = TypeOopPtr::make(TypePtr::BotPTR, Type::Offset::bottom, TypeOopPtr::InstanceBot, speculative);
// We're changing the type, so we need a new CheckCastPP node to carry
// the new type. The new type depends on the control: what profiling
// tells us is only valid from here on, as far as we can tell.
2439 Node* cast = new CheckCastPPNode(control(), n, current_type->remove_speculative()->join_speculative(spec_type));
2440 cast = _gvn.transform(cast);
2441 replace_in_map(n, cast);
2442 n = cast;
2443 }
2444
2445 return n;
2446 }
2447
2448 /**
2449 * Record profiling data from receiver profiling at an invoke with the
2450 * type system so that it can propagate it (speculation)
2451 *
2452 * @param n receiver node
2453 *
2454 * @return node with improved type
2455 */
2456 Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
2457 if (!UseTypeSpeculation) {
2458 return n;
2459 }
2460 ciKlass* exact_kls = profile_has_unique_klass();
2461 ProfilePtrKind ptr_kind = ProfileMaybeNull;
2462 if ((java_bc() == Bytecodes::_checkcast ||
2463 java_bc() == Bytecodes::_instanceof ||
2464 java_bc() == Bytecodes::_aastore) &&
2465 method()->method_data()->is_mature()) {
2466 ciProfileData* data = method()->method_data()->bci_to_data(bci());
2467 if (data != nullptr) {
2468 if (java_bc() == Bytecodes::_aastore) {
2469 ciKlass* array_type = nullptr;
2470 ciKlass* element_type = nullptr;
2471 ProfilePtrKind element_ptr = ProfileMaybeNull;
2472 bool flat_array = true;
2473 bool null_free_array = true;
2474 method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
2475 exact_kls = element_type;
2476 ptr_kind = element_ptr;
2477 } else {
2478 if (!data->as_BitData()->null_seen()) {
2479 ptr_kind = ProfileNeverNull;
2480 } else {
2481 assert(data->is_ReceiverTypeData(), "bad profile data type");
2482 ciReceiverTypeData* call = (ciReceiverTypeData*)data->as_ReceiverTypeData();
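// null was seen: scan the receiver rows; if no receiver type was ever
// recorded, the profile says the value was always null.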
2483 uint i = 0;
2484 for (; i < call->row_limit(); i++) {
2485 ciKlass* receiver = call->receiver(i);
2486 if (receiver != nullptr) {
2487 break;
2488 }
2489 }
2490 ptr_kind = (i == call->row_limit()) ? ProfileAlwaysNull : ProfileMaybeNull;
2491 }
2492 }
2493 }
2494 }
2495 return record_profile_for_speculation(n, exact_kls, ptr_kind);
2496 }
2497
2498 /**
2499 * Record profiling data from argument profiling at an invoke with the
2500 * type system so that it can propagate it (speculation)
2501 *
2502 * @param dest_method target method for the call
2503 * @param bc what invoke bytecode is this?
2504 */
2505 void GraphKit::record_profiled_arguments_for_speculation(ciMethod* dest_method, Bytecodes::Code bc) {
2506 if (!UseTypeSpeculation) {
2507 return;
2508 }
2509 const TypeFunc* tf = TypeFunc::make(dest_method);
2510 int nargs = tf->domain_sig()->cnt() - TypeFunc::Parms;
2511 int skip = Bytecodes::has_receiver(bc) ? 1 : 0;
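// j walks the declared argument slots while i counts only the reference-typed
// arguments, which are the ones the profiler records (up to TypeProfileArgsLimit).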
2512 for (int j = skip, i = 0; j < nargs && i < TypeProfileArgsLimit; j++) {
2513 const Type *targ = tf->domain_sig()->field_at(j + TypeFunc::Parms);
2514 if (is_reference_type(targ->basic_type())) {
2515 ProfilePtrKind ptr_kind = ProfileMaybeNull;
2516 ciKlass* better_type = nullptr;
2517 if (method()->argument_profiled_type(bci(), i, better_type, ptr_kind)) {
2518 record_profile_for_speculation(argument(j), better_type, ptr_kind);
2519 }
2520 i++;
2521 }
2522 }
2523 }
2524
2525 /**
2526 * Record profiling data from parameter profiling at an invoke with
2527 * the type system so that it can propagate it (speculation)
2528 */
2529 void GraphKit::record_profiled_parameters_for_speculation() {
2530 if (!UseTypeSpeculation) {
2531 return;
2532 }
2533 for (int i = 0, j = 0; i < method()->arg_size() ; i++) {
2653 // The first null ends the list.
2654 Node* parm0, Node* parm1,
2655 Node* parm2, Node* parm3,
2656 Node* parm4, Node* parm5,
2657 Node* parm6, Node* parm7) {
2658 assert(call_addr != nullptr, "must not call null targets");
2659
2660 // Slow-path call
2661 bool is_leaf = !(flags & RC_NO_LEAF);
2662 bool has_io = (!is_leaf && !(flags & RC_NO_IO));
2663 if (call_name == nullptr) {
2664 assert(!is_leaf, "must supply name for leaf");
2665 call_name = OptoRuntime::stub_name(call_addr);
2666 }
2667 CallNode* call;
2668 if (!is_leaf) {
2669 call = new CallStaticJavaNode(call_type, call_addr, call_name, adr_type);
2670 } else if (flags & RC_NO_FP) {
2671 call = new CallLeafNoFPNode(call_type, call_addr, call_name, adr_type);
} else if (flags & RC_VECTOR) {
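// Derive the vector width in bits from the call's vector return type.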
2673 uint num_bits = call_type->range_sig()->field_at(TypeFunc::Parms)->is_vect()->length_in_bytes() * BitsPerByte;
2674 call = new CallLeafVectorNode(call_type, call_addr, call_name, adr_type, num_bits);
2675 } else {
2676 call = new CallLeafNode(call_type, call_addr, call_name, adr_type);
2677 }
2678
2679 // The following is similar to set_edges_for_java_call,
2680 // except that the memory effects of the call are restricted to AliasIdxRaw.
2681
2682 // Slow path call has no side-effects, uses few values
2683 bool wide_in = !(flags & RC_NARROW_MEM);
2684 bool wide_out = (C->get_alias_index(adr_type) == Compile::AliasIdxBot);
2685
2686 Node* prev_mem = nullptr;
2687 if (wide_in) {
2688 prev_mem = set_predefined_input_for_runtime_call(call);
2689 } else {
2690 assert(!wide_out, "narrow in => narrow out");
2691 Node* narrow_mem = memory(adr_type);
2692 prev_mem = set_predefined_input_for_runtime_call(call, narrow_mem);
2693 }
2733
2734 if (has_io) {
2735 set_i_o(_gvn.transform(new ProjNode(call, TypeFunc::I_O)));
2736 }
2737 return call;
2738
2739 }
2740
2741 // i2b
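// Sign-extend the low 8 bits of 'in' by shifting them into the high bits and
// arithmetically shifting back down, mirroring the i2b bytecode.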
2742 Node* GraphKit::sign_extend_byte(Node* in) {
2743 Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(24)));
2744 return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(24)));
2745 }
2746
2747 // i2s
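// Sign-extend the low 16 bits of 'in', mirroring the i2s bytecode.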
2748 Node* GraphKit::sign_extend_short(Node* in) {
2749 Node* tmp = _gvn.transform(new LShiftINode(in, _gvn.intcon(16)));
2750 return _gvn.transform(new RShiftINode(tmp, _gvn.intcon(16)));
2751 }
2752
2753
2754 //------------------------------merge_memory-----------------------------------
2755 // Merge memory from one path into the current memory state.
2756 void GraphKit::merge_memory(Node* new_mem, Node* region, int new_path) {
2757 for (MergeMemStream mms(merged_memory(), new_mem->as_MergeMem()); mms.next_non_empty2(); ) {
2758 Node* old_slice = mms.force_memory();
2759 Node* new_slice = mms.memory2();
2760 if (old_slice != new_slice) {
2761 PhiNode* phi;
2762 if (old_slice->is_Phi() && old_slice->as_Phi()->region() == region) {
2763 if (mms.is_empty()) {
2764 // clone base memory Phi's inputs for this memory slice
2765 assert(old_slice == mms.base_memory(), "sanity");
2766 phi = PhiNode::make(region, nullptr, Type::MEMORY, mms.adr_type(C));
2767 _gvn.set_type(phi, Type::MEMORY);
2768 for (uint i = 1; i < phi->req(); i++) {
2769 phi->init_req(i, old_slice->in(i));
2770 }
2771 } else {
2772 phi = old_slice->as_Phi(); // Phi was generated already
2773 }
3036
3037 // Now do a linear scan of the secondary super-klass array. Again, no real
3038 // performance impact (too rare) but it's gotta be done.
3039 // Since the code is rarely used, there is no penalty for moving it
3040 // out of line, and it can only improve I-cache density.
3041 // The decision to inline or out-of-line this final check is platform
3042 // dependent, and is found in the AD file definition of PartialSubtypeCheck.
3043 Node* psc = gvn.transform(
3044 new PartialSubtypeCheckNode(*ctrl, subklass, superklass));
3045
3046 IfNode *iff4 = gen_subtype_check_compare(*ctrl, psc, gvn.zerocon(T_OBJECT), BoolTest::ne, PROB_FAIR, gvn, T_ADDRESS);
3047 r_not_subtype->init_req(2, gvn.transform(new IfTrueNode (iff4)));
3048 r_ok_subtype ->init_req(3, gvn.transform(new IfFalseNode(iff4)));
3049
3050 // Return false path; set default control to true path.
3051 *ctrl = gvn.transform(r_ok_subtype);
3052 return gvn.transform(r_not_subtype);
3053 }
3054
3055 Node* GraphKit::gen_subtype_check(Node* obj_or_subklass, Node* superklass) {
3056 const Type* sub_t = _gvn.type(obj_or_subklass);
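// Inline types are final and thus have an exactly known klass; fold the input
// to a constant klass pointer so the subtype check can be decided statically.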
3057 if (sub_t->make_oopptr() != nullptr && sub_t->make_oopptr()->is_inlinetypeptr()) {
3058 sub_t = TypeKlassPtr::make(sub_t->inline_klass());
3059 obj_or_subklass = makecon(sub_t);
3060 }
3061 bool expand_subtype_check = C->post_loop_opts_phase(); // macro node expansion is over
3062 if (expand_subtype_check) {
3063 MergeMemNode* mem = merged_memory();
3064 Node* ctrl = control();
3065 Node* subklass = obj_or_subklass;
3066 if (!sub_t->isa_klassptr()) {
3067 subklass = load_object_klass(obj_or_subklass);
3068 }
3069
3070 Node* n = Phase::gen_subtype_check(subklass, superklass, &ctrl, mem, _gvn, method(), bci());
3071 set_control(ctrl);
3072 return n;
3073 }
3074
3075 Node* check = _gvn.transform(new SubTypeCheckNode(C, obj_or_subklass, superklass, method(), bci()));
3076 Node* bol = _gvn.transform(new BoolNode(check, BoolTest::eq));
3077 IfNode* iff = create_and_xform_if(control(), bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN);
3078 set_control(_gvn.transform(new IfTrueNode(iff)));
3079 return _gvn.transform(new IfFalseNode(iff));
3080 }
3081
3082 // Profile-driven exact type check:
3083 Node* GraphKit::type_check_receiver(Node* receiver, ciKlass* klass,
3084 float prob, Node* *casted_receiver) {
3085 assert(!klass->is_interface(), "no exact type check on interfaces");
3086 Node* fail = top();
3087 const Type* rec_t = _gvn.type(receiver);
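// Inline type receivers have an exactly known klass, so the profile-driven
// exact type check is decided at compile time.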
3088 if (rec_t->is_inlinetypeptr()) {
3089 if (klass->equals(rec_t->inline_klass())) {
3090 (*casted_receiver) = receiver; // Always passes
3091 } else {
3092 (*casted_receiver) = top(); // Always fails
3093 fail = control();
3094 set_control(top());
3095 }
3096 return fail;
3097 }
3098 const TypeKlassPtr* tklass = TypeKlassPtr::make(klass, Type::trust_interfaces);
3099 Node* recv_klass = load_object_klass(receiver);
3100 fail = type_check(recv_klass, tklass, prob);
3101
3102 if (!stopped()) {
3103 const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
3104 const TypeOopPtr* recv_xtype = tklass->as_instance_type();
3105 assert(recv_xtype->klass_is_exact(), "");
3106
3107 if (!receiver_type->higher_equal(recv_xtype)) { // ignore redundant casts
3108 // Subsume downstream occurrences of receiver with a cast to
3109 // recv_xtype, since now we know what the type will be.
3110 Node* cast = new CheckCastPPNode(control(), receiver, recv_xtype);
3111 Node* res = _gvn.transform(cast);
3112 if (recv_xtype->is_inlinetypeptr()) {
3113 assert(!gvn().type(res)->maybe_null(), "receiver should never be null");
3114 res = InlineTypeNode::make_from_oop(this, res, recv_xtype->inline_klass());
3115 }
3116 (*casted_receiver) = res;
3117 assert(!(*casted_receiver)->is_top(), "that path should be unreachable");
3118 // (User must make the replace_in_map call.)
3119 }
3120 }
3121
3122 return fail;
3123 }
3124
3125 Node* GraphKit::type_check(Node* recv_klass, const TypeKlassPtr* tklass,
3126 float prob) {
3127 Node* want_klass = makecon(tklass);
3128 Node* cmp = _gvn.transform(new CmpPNode(recv_klass, want_klass));
3129 Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::eq));
3130 IfNode* iff = create_and_xform_if(control(), bol, prob, COUNT_UNKNOWN);
3131 set_control(_gvn.transform(new IfTrueNode (iff)));
3132 Node* fail = _gvn.transform(new IfFalseNode(iff));
3133 return fail;
3134 }
3135
3136 //------------------------------subtype_check_receiver-------------------------
3137 Node* GraphKit::subtype_check_receiver(Node* receiver, ciKlass* klass,
3138 Node** casted_receiver) {
3139 const TypeKlassPtr* tklass = TypeKlassPtr::make(klass, Type::trust_interfaces)->try_improve();
3140 Node* want_klass = makecon(tklass);
3141
3142 Node* slow_ctl = gen_subtype_check(receiver, want_klass);
3143
3144 // Ignore interface type information until interface types are properly tracked.
3145 if (!stopped() && !klass->is_interface()) {
3146 const TypeOopPtr* receiver_type = _gvn.type(receiver)->isa_oopptr();
3147 const TypeOopPtr* recv_type = tklass->cast_to_exactness(false)->is_klassptr()->as_instance_type();
3148 if (receiver_type != nullptr && !receiver_type->higher_equal(recv_type)) { // ignore redundant casts
3149 Node* cast = _gvn.transform(new CheckCastPPNode(control(), receiver, recv_type));
3150 if (recv_type->is_inlinetypeptr()) {
3151 cast = InlineTypeNode::make_from_oop(this, cast, recv_type->inline_klass());
3152 }
3153 (*casted_receiver) = cast;
3154 }
3155 }
3156
3157 return slow_ctl;
3158 }
3159
3160 //------------------------------seems_never_null-------------------------------
3161 // Use null_seen information if it is available from the profile.
3162 // If we see an unexpected null at a type check we record it and force a
3163 // recompile; the offending check will be recompiled to handle nulls.
3164 // If we see several offending BCIs, then all checks in the
3165 // method will be recompiled.
3166 bool GraphKit::seems_never_null(Node* obj, ciProfileData* data, bool& speculating) {
3167 speculating = !_gvn.type(obj)->speculative_maybe_null();
3168 Deoptimization::DeoptReason reason = Deoptimization::reason_null_check(speculating);
3169 if (UncommonNullCast // Cutout for this technique
3170 && obj != null() // And not the -Xcomp stupid case?
3171 && !too_many_traps(reason)
3172 ) {
3173 if (speculating) {
3242
3243 //------------------------maybe_cast_profiled_receiver-------------------------
3244 // If the profile has seen exactly one type, narrow to exactly that type.
3245 // Subsequent type checks will always fold up.
3246 Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
3247 const TypeKlassPtr* require_klass,
3248 ciKlass* spec_klass,
3249 bool safe_for_replace) {
3250 if (!UseTypeProfile || !TypeProfileCasts) return nullptr;
3251
3252 Deoptimization::DeoptReason reason = Deoptimization::reason_class_check(spec_klass != nullptr);
3253
3254 // Make sure we haven't already deoptimized from this tactic.
3255 if (too_many_traps_or_recompiles(reason))
3256 return nullptr;
3257
3258 // (No, this isn't a call, but it's enough like a virtual call
3259 // to use the same ciMethod accessor to get the profile info...)
3260 // If we have a speculative type use it instead of profiling (which
3261 // may not help us)
3262 ciKlass* exact_kls = spec_klass;
3263 if (exact_kls == nullptr) {
3264 if (java_bc() == Bytecodes::_aastore) {
3265 ciKlass* array_type = nullptr;
3266 ciKlass* element_type = nullptr;
3267 ProfilePtrKind element_ptr = ProfileMaybeNull;
3268 bool flat_array = true;
3269 bool null_free_array = true;
3270 method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
3271 exact_kls = element_type;
3272 } else {
3273 exact_kls = profile_has_unique_klass();
3274 }
3275 }
if (exact_kls != nullptr) { // no cast failures here
3277 if (require_klass == nullptr ||
3278 C->static_subtype_check(require_klass, TypeKlassPtr::make(exact_kls, Type::trust_interfaces)) == Compile::SSC_always_true) {
3279 // If we narrow the type to match what the type profile sees or
3280 // the speculative type, we can then remove the rest of the
3281 // cast.
3282 // This is a win, even if the exact_kls is very specific,
3283 // because downstream operations, such as method calls,
3284 // will often benefit from the sharper type.
3285 Node* exact_obj = not_null_obj; // will get updated in place...
3286 Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0,
3287 &exact_obj);
3288 { PreserveJVMState pjvms(this);
3289 set_control(slow_ctl);
3290 uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
3291 }
3292 if (safe_for_replace) {
3293 replace_in_map(not_null_obj, exact_obj);
3294 }
3295 return exact_obj;
3385 // If not_null_obj is dead, only null-path is taken
3386 if (stopped()) { // Doing instance-of on a null?
3387 set_control(null_ctl);
3388 return intcon(0);
3389 }
3390 region->init_req(_null_path, null_ctl);
3391 phi ->init_req(_null_path, intcon(0)); // Set null path value
3392 if (null_ctl == top()) {
3393 // Do this eagerly, so that pattern matches like is_diamond_phi
3394 // will work even during parsing.
3395 assert(_null_path == PATH_LIMIT-1, "delete last");
3396 region->del_req(_null_path);
3397 phi ->del_req(_null_path);
3398 }
3399
// Do we know the type check always succeeds?
3401 bool known_statically = false;
3402 if (_gvn.type(superklass)->singleton()) {
3403 const TypeKlassPtr* superk = _gvn.type(superklass)->is_klassptr();
3404 const TypeKlassPtr* subk = _gvn.type(obj)->is_oopptr()->as_klass_type();
3405 if (subk != nullptr && subk->is_loaded()) {
3406 int static_res = C->static_subtype_check(superk, subk);
3407 known_statically = (static_res == Compile::SSC_always_true || static_res == Compile::SSC_always_false);
3408 }
3409 }
3410
3411 if (!known_statically) {
3412 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
3413 // We may not have profiling here or it may not help us. If we
3414 // have a speculative type use it to perform an exact cast.
3415 ciKlass* spec_obj_type = obj_type->speculative_type();
3416 if (spec_obj_type != nullptr || (ProfileDynamicTypes && data != nullptr)) {
3417 Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, nullptr, spec_obj_type, safe_for_replace);
3418 if (stopped()) { // Profile disagrees with this path.
3419 set_control(null_ctl); // Null is the only remaining possibility.
3420 return intcon(0);
3421 }
3422 if (cast_obj != nullptr) {
3423 not_null_obj = cast_obj;
3424 }
3425 }
3441 record_for_igvn(region);
3442
3443 // If we know the type check always succeeds then we don't use the
3444 // profiling data at this bytecode. Don't lose it, feed it to the
3445 // type system as a speculative type.
3446 if (safe_for_replace) {
3447 Node* casted_obj = record_profiled_receiver_for_speculation(obj);
3448 replace_in_map(obj, casted_obj);
3449 }
3450
3451 return _gvn.transform(phi);
3452 }
3453
3454 //-------------------------------gen_checkcast---------------------------------
3455 // Generate a checkcast idiom. Used by both the checkcast bytecode and the
3456 // array store bytecode. Stack must be as-if BEFORE doing the bytecode so the
3457 // uncommon-trap paths work. Adjust stack after this call.
3458 // If failure_control is supplied and not null, it is filled in with
3459 // the control edge for the cast failure. Otherwise, an appropriate
3460 // uncommon trap or exception is thrown.
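// For example, both the Java cast "(String) obj" and the implicit element store
// check in "arr[i] = obj" for a String[] array reach this code.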
3461 Node* GraphKit::gen_checkcast(Node* obj, Node* superklass, Node* *failure_control, bool null_free) {
3462 kill_dead_locals(); // Benefit all the uncommon traps
3463 const TypeKlassPtr* klass_ptr_type = _gvn.type(superklass)->is_klassptr();
3464 const Type* obj_type = _gvn.type(obj);
3465 if (obj_type->is_inlinetypeptr() && !obj_type->maybe_null() && klass_ptr_type->klass_is_exact() && obj_type->inline_klass() == klass_ptr_type->exact_klass(true)) {
// Special case: larval inline objects must not be scalarized. They are also generally not
// allowed to participate in most operations, except as the first operand of a putfield, as
// the receiver of a constructor invocation, as the first argument of Unsafe::putXXX, or as
// the argument of Unsafe::finishPrivateBuffer. This allows us to aggressively scalarize
// value objects everywhere else. This special case stems from a limitation of the Java
// language: Unsafe::makePrivateBuffer returns an Object that is then checkcast-ed to the
// concrete value type. We must handle this case first because C->static_subtype_check may
// do nothing when StressReflectiveCode is set.
3474 return obj;
3475 }
3476
3477 const TypeKlassPtr* improved_klass_ptr_type = klass_ptr_type->try_improve();
3478 const TypeOopPtr* toop = improved_klass_ptr_type->cast_to_exactness(false)->as_instance_type();
3479 bool safe_for_replace = (failure_control == nullptr);
3480 assert(!null_free || toop->can_be_inline_type(), "must be an inline type pointer");
3481
3482 // Fast cutout: Check the case that the cast is vacuously true.
3483 // This detects the common cases where the test will short-circuit
3484 // away completely. We do this before we perform the null check,
3485 // because if the test is going to turn into zero code, we don't
3486 // want a residual null check left around. (Causes a slowdown,
3487 // for example, in some objArray manipulations, such as a[i]=a[j].)
3488 if (improved_klass_ptr_type->singleton()) {
3489 const TypeKlassPtr* kptr = nullptr;
3490 if (obj_type->isa_oop_ptr()) {
3491 kptr = obj_type->is_oopptr()->as_klass_type();
3492 } else if (obj->is_InlineType()) {
3493 ciInlineKlass* vk = obj_type->inline_klass();
3494 kptr = TypeInstKlassPtr::make(TypePtr::NotNull, vk, Type::Offset(0));
3495 }
3496
3497 if (kptr != nullptr) {
3498 switch (C->static_subtype_check(improved_klass_ptr_type, kptr)) {
3499 case Compile::SSC_always_true:
// If we know the type check always succeeds then we don't use
3501 // the profiling data at this bytecode. Don't lose it, feed it
3502 // to the type system as a speculative type.
3503 obj = record_profiled_receiver_for_speculation(obj);
3504 if (null_free) {
3505 assert(safe_for_replace, "must be");
3506 obj = null_check(obj);
3507 }
3508 assert(stopped() || !toop->is_inlinetypeptr() || obj->is_InlineType(), "should have been scalarized");
3509 return obj;
3510 case Compile::SSC_always_false:
3511 if (null_free) {
3512 assert(safe_for_replace, "must be");
3513 obj = null_check(obj);
3514 }
3515 // It needs a null check because a null will *pass* the cast check.
3516 if (obj_type->isa_oopptr() != nullptr && !obj_type->is_oopptr()->maybe_null()) {
3517 bool is_aastore = (java_bc() == Bytecodes::_aastore);
3518 Deoptimization::DeoptReason reason = is_aastore ?
3519 Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
3520 builtin_throw(reason);
3521 return top();
3522 } else if (!too_many_traps_or_recompiles(Deoptimization::Reason_null_assert)) {
3523 return null_assert(obj);
3524 }
3525 break; // Fall through to full check
3526 default:
3527 break;
3528 }
3529 }
3530 }
3531
3532 ciProfileData* data = nullptr;
3533 if (failure_control == nullptr) { // use MDO in regular case only
3534 assert(java_bc() == Bytecodes::_aastore ||
3535 java_bc() == Bytecodes::_checkcast,
3536 "interpreter profiles type checks only for these BCs");
3537 if (method()->method_data()->is_mature()) {
3538 data = method()->method_data()->bci_to_data(bci());
3539 }
3540 }
3541
3542 // Make the merge point
3543 enum { _obj_path = 1, _null_path, PATH_LIMIT };
3544 RegionNode* region = new RegionNode(PATH_LIMIT);
3545 Node* phi = new PhiNode(region, toop);
3546 _gvn.set_type(region, Type::CONTROL);
3547 _gvn.set_type(phi, toop);
3548
3549 C->set_has_split_ifs(true); // Has chance for split-if optimization
3550
3551 // Use null-cast information if it is available
3552 bool speculative_not_null = false;
3553 bool never_see_null = ((failure_control == nullptr) // regular case only
3554 && seems_never_null(obj, data, speculative_not_null));
3555
3556 if (obj->is_InlineType()) {
// Re-execute if buffering triggers deoptimization
3558 PreserveReexecuteState preexecs(this);
3559 jvms()->set_should_reexecute(true);
3560 obj = obj->as_InlineType()->buffer(this, safe_for_replace);
3561 }
3562
// Null check; get casted pointer; set the null-path region slot
3564 Node* null_ctl = top();
3565 Node* not_null_obj = nullptr;
3566 if (null_free) {
3567 assert(safe_for_replace, "must be");
3568 not_null_obj = null_check(obj);
3569 } else {
3570 not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace, speculative_not_null);
3571 }
3572
3573 // If not_null_obj is dead, only null-path is taken
3574 if (stopped()) { // Doing instance-of on a null?
3575 set_control(null_ctl);
3576 if (toop->is_inlinetypeptr()) {
3577 return InlineTypeNode::make_null(_gvn, toop->inline_klass());
3578 }
3579 return null();
3580 }
3581 region->init_req(_null_path, null_ctl);
3582 phi ->init_req(_null_path, null()); // Set null path value
3583 if (null_ctl == top()) {
3584 // Do this eagerly, so that pattern matches like is_diamond_phi
3585 // will work even during parsing.
3586 assert(_null_path == PATH_LIMIT-1, "delete last");
3587 region->del_req(_null_path);
3588 phi ->del_req(_null_path);
3589 }
3590
3591 Node* cast_obj = nullptr;
3592 if (improved_klass_ptr_type->klass_is_exact()) {
3593 // The following optimization tries to statically cast the speculative type of the object
3594 // (for example obtained during profiling) to the type of the superklass and then do a
3595 // dynamic check that the type of the object is what we expect. To work correctly
3596 // for checkcast and aastore the type of superklass should be exact.
3597 const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
3598 // We may not have profiling here or it may not help us. If we have
3599 // a speculative type use it to perform an exact cast.
3600 ciKlass* spec_obj_type = obj_type->speculative_type();
3601 if (spec_obj_type != nullptr || data != nullptr) {
3602 cast_obj = maybe_cast_profiled_receiver(not_null_obj, improved_klass_ptr_type, spec_obj_type, safe_for_replace);
3603 if (cast_obj != nullptr) {
3604 if (failure_control != nullptr) // failure is now impossible
3605 (*failure_control) = top();
3606 // adjust the type of the phi to the exact klass:
3607 phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
3608 }
3609 }
3610 }
3611
3612 if (cast_obj == nullptr) {
3613 // Generate the subtype check
3614 Node* improved_superklass = superklass;
3615 if (improved_klass_ptr_type != klass_ptr_type && improved_klass_ptr_type->singleton()) {
3616 // Only improve the super class for constants which allows subsequent sub type checks to possibly be commoned up.
3617 // The other non-constant cases cannot be improved with a cast node here since they could be folded to top.
3618 // Additionally, the benefit would only be minor in non-constant cases.
3619 improved_superklass = makecon(improved_klass_ptr_type);
3620 }
3621 Node* not_subtype_ctrl = gen_subtype_check(not_null_obj, improved_superklass);
3622 // Plug in success path into the merge
3623 cast_obj = _gvn.transform(new CheckCastPPNode(control(), not_null_obj, toop));
3624 // Failure path ends in uncommon trap (or may be dead - failure impossible)
3625 if (failure_control == nullptr) {
3626 if (not_subtype_ctrl != top()) { // If failure is possible
3627 PreserveJVMState pjvms(this);
3628 set_control(not_subtype_ctrl);
3629 Node* obj_klass = nullptr;
3630 if (not_null_obj->is_InlineType()) {
3631 obj_klass = makecon(TypeKlassPtr::make(_gvn.type(not_null_obj)->inline_klass()));
3632 } else {
3633 obj_klass = load_object_klass(not_null_obj);
3634 }
3635 bool is_aastore = (java_bc() == Bytecodes::_aastore);
3636 Deoptimization::DeoptReason reason = is_aastore ?
3637 Deoptimization::Reason_array_check : Deoptimization::Reason_class_check;
3638 builtin_throw(reason);
3639 }
3640 } else {
3641 (*failure_control) = not_subtype_ctrl;
3642 }
3643 }
3644
3645 region->init_req(_obj_path, control());
3646 phi ->init_req(_obj_path, cast_obj);
3647
3648 // A merge of null or Casted-NotNull obj
3649 Node* res = _gvn.transform(phi);
3650
3651 // Note I do NOT always 'replace_in_map(obj,result)' here.
3652 // if( tk->klass()->can_be_primary_super() )
3653 // This means that if I successfully store an Object into an array-of-String
3654 // I 'forget' that the Object is really now known to be a String. I have to
3655 // do this because we don't have true union types for interfaces - if I store
3656 // a Baz into an array-of-Interface and then tell the optimizer it's an
3657 // Interface, I forget that it's also a Baz and cannot do Baz-like field
3658 // references to it. FIX THIS WHEN UNION TYPES APPEAR!
3659 // replace_in_map( obj, res );
3660
3661 // Return final merged results
3662 set_control( _gvn.transform(region) );
3663 record_for_igvn(region);
3664
3665 bool not_inline = !toop->can_be_inline_type();
3666 bool not_flat_in_array = !UseArrayFlattening || not_inline || (toop->is_inlinetypeptr() && !toop->inline_klass()->flat_in_array());
3667 if (EnableValhalla && (not_inline || not_flat_in_array)) {
3668 // Check if obj has been loaded from an array
3669 obj = obj->isa_DecodeN() ? obj->in(1) : obj;
3670 Node* array = nullptr;
3671 if (obj->isa_Load()) {
3672 Node* address = obj->in(MemNode::Address);
3673 if (address->isa_AddP()) {
3674 array = address->as_AddP()->in(AddPNode::Base);
3675 }
3676 } else if (obj->is_Phi()) {
3677 Node* region = obj->in(0);
3678 // TODO make this more robust (see JDK-8231346)
3679 if (region->req() == 3 && region->in(2) != nullptr && region->in(2)->in(0) != nullptr) {
3680 IfNode* iff = region->in(2)->in(0)->isa_If();
3681 if (iff != nullptr) {
3682 iff->is_flat_array_check(&_gvn, &array);
3683 }
3684 }
3685 }
3686 if (array != nullptr) {
3687 const TypeAryPtr* ary_t = _gvn.type(array)->isa_aryptr();
3688 if (ary_t != nullptr) {
3689 if (!ary_t->is_not_null_free() && !ary_t->is_null_free() && not_inline) {
3690 // Casting array element to a non-inline-type, mark array as not null-free.
3691 Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, ary_t->cast_to_not_null_free()));
3692 replace_in_map(array, cast);
3693 array = cast;
3694 }
3695 if (!ary_t->is_not_flat() && !ary_t->is_flat() && not_flat_in_array) {
3696 // Casting array element to a non-flat-in-array type, mark array as not flat.
3697 Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, ary_t->cast_to_not_flat()));
3698 replace_in_map(array, cast);
3699 array = cast;
3700 }
3701 }
3702 }
3703 }
3704
3705 if (!stopped() && !res->is_InlineType()) {
3706 res = record_profiled_receiver_for_speculation(res);
3707 if (toop->is_inlinetypeptr()) {
3708 Node* vt = InlineTypeNode::make_from_oop(this, res, toop->inline_klass());
3709 res = vt;
3710 if (safe_for_replace) {
3711 replace_in_map(obj, vt);
3712 replace_in_map(not_null_obj, vt);
3713 replace_in_map(res, vt);
3714 }
3715 }
3716 }
3717 return res;
3718 }
3719
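//------------------------------mark_word_test---------------------------------
// Produce a Bool that tests whether the given mask bits are all set in obj's
// mark word ('eq' selects the polarity of the test). If check_lock is set,
// handle locked objects: their mark word is displaced, so the pattern is
// loaded from the klass' prototype header instead.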
3720 Node* GraphKit::mark_word_test(Node* obj, uintptr_t mask_val, bool eq, bool check_lock) {
3721 // Load markword
3722 Node* mark_adr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
3723 Node* mark = make_load(nullptr, mark_adr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
3724 if (check_lock) {
3725 // Check if obj is locked
3726 Node* locked_bit = MakeConX(markWord::unlocked_value);
3727 locked_bit = _gvn.transform(new AndXNode(locked_bit, mark));
3728 Node* cmp = _gvn.transform(new CmpXNode(locked_bit, MakeConX(0)));
3729 Node* is_unlocked = _gvn.transform(new BoolNode(cmp, BoolTest::ne));
3730 IfNode* iff = new IfNode(control(), is_unlocked, PROB_MAX, COUNT_UNKNOWN);
3731 _gvn.transform(iff);
3732 Node* locked_region = new RegionNode(3);
3733 Node* mark_phi = new PhiNode(locked_region, TypeX_X);
3734
3735 // Unlocked: Use bits from mark word
3736 locked_region->init_req(1, _gvn.transform(new IfTrueNode(iff)));
3737 mark_phi->init_req(1, mark);
3738
3739 // Locked: Load prototype header from klass
3740 set_control(_gvn.transform(new IfFalseNode(iff)));
// Make the loads control dependent to make sure they are only executed if the object is locked
3742 Node* klass_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
3743 Node* klass = _gvn.transform(LoadKlassNode::make(_gvn, C->immutable_memory(), klass_adr, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
3744 Node* proto_adr = basic_plus_adr(klass, in_bytes(Klass::prototype_header_offset()));
3745 Node* proto = _gvn.transform(LoadNode::make(_gvn, control(), C->immutable_memory(), proto_adr, proto_adr->bottom_type()->is_ptr(), TypeX_X, TypeX_X->basic_type(), MemNode::unordered));
3746
3747 locked_region->init_req(2, control());
3748 mark_phi->init_req(2, proto);
3749 set_control(_gvn.transform(locked_region));
3750 record_for_igvn(locked_region);
3751
3752 mark = mark_phi;
3753 }
3754
3755 // Now check if mark word bits are set
3756 Node* mask = MakeConX(mask_val);
3757 Node* masked = _gvn.transform(new AndXNode(_gvn.transform(mark), mask));
3758 record_for_igvn(masked); // Give it a chance to be optimized out by IGVN
3759 Node* cmp = _gvn.transform(new CmpXNode(masked, mask));
3760 return _gvn.transform(new BoolNode(cmp, eq ? BoolTest::eq : BoolTest::ne));
3761 }
3762
3763 Node* GraphKit::inline_type_test(Node* obj, bool is_inline) {
3764 return mark_word_test(obj, markWord::inline_type_pattern, is_inline, /* check_lock = */ false);
3765 }
3766
3767 Node* GraphKit::flat_array_test(Node* array_or_klass, bool flat) {
3768 // We can't use immutable memory here because the mark word is mutable.
3769 // PhaseIdealLoop::move_flat_array_check_out_of_loop will make sure the
3770 // check is moved out of loops (mainly to enable loop unswitching).
3771 Node* cmp = _gvn.transform(new FlatArrayCheckNode(C, memory(Compile::AliasIdxRaw), array_or_klass));
3772 record_for_igvn(cmp); // Give it a chance to be optimized out by IGVN
3773 return _gvn.transform(new BoolNode(cmp, flat ? BoolTest::eq : BoolTest::ne));
3774 }
3775
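//--------------------------null_free_array_test-------------------------------
// Test the null-free bit in the array's mark word (falling back to the klass'
// prototype header if the array is locked).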
3776 Node* GraphKit::null_free_array_test(Node* array, bool null_free) {
3777 return mark_word_test(array, markWord::null_free_array_bit_in_place, null_free);
3778 }
3779
3780 Node* GraphKit::null_free_atomic_array_test(Node* array, ciInlineKlass* vk) {
assert(vk->has_atomic_layout() || vk->has_non_atomic_layout(), "null-free flat array must have a layout");
3782
3783 // TODO 8350865 Add a stress flag to always access atomic if layout exists?
3784 if (!vk->has_non_atomic_layout()) {
3785 return intcon(1); // Always atomic
3786 } else if (!vk->has_atomic_layout()) {
3787 return intcon(0); // Never atomic
3788 }
3789
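// Both layouts exist: decide at runtime by loading the layout kind from the
// array's FlatArrayKlass and comparing it against ATOMIC_FLAT.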
3790 Node* array_klass = load_object_klass(array);
3791 int layout_kind_offset = in_bytes(FlatArrayKlass::layout_kind_offset());
3792 Node* layout_kind_addr = basic_plus_adr(array_klass, array_klass, layout_kind_offset);
3793 Node* layout_kind = make_load(nullptr, layout_kind_addr, TypeInt::INT, T_INT, MemNode::unordered);
3794 Node* cmp = _gvn.transform(new CmpINode(layout_kind, intcon((int)LayoutKind::ATOMIC_FLAT)));
3795 return _gvn.transform(new BoolNode(cmp, BoolTest::eq));
3796 }
3797
3798 // Deoptimize if 'ary' is a null-free inline type array and 'val' is null
3799 Node* GraphKit::inline_array_null_guard(Node* ary, Node* val, int nargs, bool safe_for_replace) {
3800 RegionNode* region = new RegionNode(3);
3801 Node* null_ctl = top();
3802 null_check_oop(val, &null_ctl);
3803 if (null_ctl != top()) {
3804 PreserveJVMState pjvms(this);
3805 set_control(null_ctl);
3806 {
3807 // Deoptimize if null-free array
3808 BuildCutout unless(this, null_free_array_test(ary, /* null_free = */ false), PROB_MAX);
3809 inc_sp(nargs);
3810 uncommon_trap(Deoptimization::Reason_null_check,
3811 Deoptimization::Action_none);
3812 }
3813 region->init_req(1, control());
3814 }
3815 region->init_req(2, control());
3816 set_control(_gvn.transform(region));
3817 record_for_igvn(region);
3818 if (_gvn.type(val) == TypePtr::NULL_PTR) {
// Since we just successfully stored null, the array can't be null-free.
3820 const TypeAryPtr* ary_t = _gvn.type(ary)->is_aryptr();
3821 ary_t = ary_t->cast_to_not_null_free();
3822 Node* cast = _gvn.transform(new CheckCastPPNode(control(), ary, ary_t));
3823 if (safe_for_replace) {
3824 replace_in_map(ary, cast);
3825 }
3826 ary = cast;
3827 }
3828 return ary;
3829 }
3830
3831 //------------------------------next_monitor-----------------------------------
3832 // What number should be given to the next monitor?
3833 int GraphKit::next_monitor() {
3834 int current = jvms()->monitor_depth()* C->sync_stack_slots();
3835 int next = current + C->sync_stack_slots();
3836 // Keep the toplevel high water mark current:
3837 if (C->fixed_slots() < next) C->set_fixed_slots(next);
3838 return current;
3839 }
3840
3841 //------------------------------insert_mem_bar---------------------------------
3842 // Memory barrier to avoid floating things around
3843 // The membar serves as a pinch point between both control and all memory slices.
3844 Node* GraphKit::insert_mem_bar(int opcode, Node* precedent) {
3845 MemBarNode* mb = MemBarNode::make(C, opcode, Compile::AliasIdxBot, precedent);
3846 mb->init_req(TypeFunc::Control, control());
3847 mb->init_req(TypeFunc::Memory, reset_memory());
3848 Node* membar = _gvn.transform(mb);
3876 }
3877 Node* membar = _gvn.transform(mb);
3878 set_control(_gvn.transform(new ProjNode(membar, TypeFunc::Control)));
3879 if (alias_idx == Compile::AliasIdxBot) {
3880 merged_memory()->set_base_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)));
3881 } else {
3882 set_memory(_gvn.transform(new ProjNode(membar, TypeFunc::Memory)),alias_idx);
3883 }
3884 return membar;
3885 }
3886
3887 //------------------------------shared_lock------------------------------------
3888 // Emit locking code.
3889 FastLockNode* GraphKit::shared_lock(Node* obj) {
3890 // bci is either a monitorenter bc or InvocationEntryBci
3891 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3892 assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3893
3894 if( !GenerateSynchronizationCode )
3895 return nullptr; // Not locking things?
3896
3897 if (stopped()) // Dead monitor?
3898 return nullptr;
3899
3900 assert(dead_locals_are_killed(), "should kill locals before sync. point");
3901
3902 // Box the stack location
3903 Node* box = new BoxLockNode(next_monitor());
3904 // Check for bailout after new BoxLockNode
3905 if (failing()) { return nullptr; }
3906 box = _gvn.transform(box);
3907 Node* mem = reset_memory();
3908
3909 FastLockNode * flock = _gvn.transform(new FastLockNode(nullptr, obj, box) )->as_FastLock();
3910
3911 // Add monitor to debug info for the slow path. If we block inside the
3912 // slow path and de-opt, we need the monitor hanging around
3913 map()->push_monitor( flock );
3914
3915 const TypeFunc *tf = LockNode::lock_type();
3916 LockNode *lock = new LockNode(C, tf);
3945 }
3946 #endif
3947
3948 return flock;
3949 }
3950
3951
3952 //------------------------------shared_unlock----------------------------------
3953 // Emit unlocking code.
3954 void GraphKit::shared_unlock(Node* box, Node* obj) {
3955 // bci is either a monitorenter bc or InvocationEntryBci
3956 // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
3957 assert(SynchronizationEntryBCI == InvocationEntryBci, "");
3958
3959 if( !GenerateSynchronizationCode )
3960 return;
3961 if (stopped()) { // Dead monitor?
3962 map()->pop_monitor(); // Kill monitor from debug info
3963 return;
3964 }
3965 assert(!obj->is_InlineType(), "should not unlock on inline type");
3966
3967 // Memory barrier to avoid floating things down past the locked region
3968 insert_mem_bar(Op_MemBarReleaseLock);
3969
3970 const TypeFunc *tf = OptoRuntime::complete_monitor_exit_Type();
3971 UnlockNode *unlock = new UnlockNode(C, tf);
3972 #ifdef ASSERT
3973 unlock->set_dbg_jvms(sync_jvms());
3974 #endif
3975 uint raw_idx = Compile::AliasIdxRaw;
3976 unlock->init_req( TypeFunc::Control, control() );
3977 unlock->init_req( TypeFunc::Memory , memory(raw_idx) );
3978 unlock->init_req( TypeFunc::I_O , top() ) ; // does no i/o
3979 unlock->init_req( TypeFunc::FramePtr, frameptr() );
3980 unlock->init_req( TypeFunc::ReturnAdr, top() );
3981
3982 unlock->init_req(TypeFunc::Parms + 0, obj);
3983 unlock->init_req(TypeFunc::Parms + 1, box);
3984 unlock = _gvn.transform(unlock)->as_Unlock();
3985
3986 Node* mem = reset_memory();
3987
3988 // unlock has no side-effects, sets few values
3989 set_predefined_output_for_runtime_call(unlock, mem, TypeRawPtr::BOTTOM);
3990
3991 // Kill monitor from debug info
3992 map()->pop_monitor( );
3993 }
3994
3995 //-------------------------------get_layout_helper-----------------------------
3996 // If the given klass is a constant or known to be an array,
3997 // fetch the constant layout helper value into constant_value
3998 // and return null. Otherwise, load the non-constant
3999 // layout helper value, and return the node which represents it.
4000 // This two-faced routine is useful because allocation sites
4001 // almost always feature constant types.
4002 Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
4003 const TypeKlassPtr* klass_t = _gvn.type(klass_node)->isa_klassptr();
4004 if (!StressReflectiveCode && klass_t != nullptr) {
4005 bool xklass = klass_t->klass_is_exact();
4006 bool can_be_flat = false;
4007 const TypeAryPtr* ary_type = klass_t->as_instance_type()->isa_aryptr();
4008 if (UseArrayFlattening && !xklass && ary_type != nullptr && !ary_type->is_null_free()) {
4009 // Don't constant fold if the runtime type might be a flat array but the static type is not.
4010 const TypeOopPtr* elem = ary_type->elem()->make_oopptr();
4011 can_be_flat = ary_type->can_be_inline_array() && (!elem->is_inlinetypeptr() || elem->inline_klass()->flat_in_array());
4012 }
4013 if (!can_be_flat && (xklass || (klass_t->isa_aryklassptr() && klass_t->is_aryklassptr()->elem() != Type::BOTTOM))) {
4014 jint lhelper;
4015 if (klass_t->is_flat()) {
4016 lhelper = ary_type->flat_layout_helper();
4017 } else if (klass_t->isa_aryklassptr()) {
4018 BasicType elem = ary_type->elem()->array_element_basic_type();
4019 if (is_reference_type(elem, true)) {
4020 elem = T_OBJECT;
4021 }
4022 lhelper = Klass::array_layout_helper(elem);
4023 } else {
4024 lhelper = klass_t->is_instklassptr()->exact_klass()->layout_helper();
4025 }
4026 if (lhelper != Klass::_lh_neutral_value) {
4027 constant_value = lhelper;
4028 return (Node*) nullptr;
4029 }
4030 }
4031 }
4032 constant_value = Klass::_lh_neutral_value; // put in a known value
4033 Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
4034 return make_load(nullptr, lhp, TypeInt::INT, T_INT, MemNode::unordered);
4035 }
4036
4037 // We just put in an allocate/initialize with a big raw-memory effect.
4038 // Hook selected additional alias categories on the initialization.
4039 static void hook_memory_on_init(GraphKit& kit, int alias_idx,
4040 MergeMemNode* init_in_merge,
4041 Node* init_out_raw) {
4042 DEBUG_ONLY(Node* init_in_raw = init_in_merge->base_memory());
4043 assert(init_in_merge->memory_at(alias_idx) == init_in_raw, "");
4044
4045 Node* prevmem = kit.memory(alias_idx);
4046 init_in_merge->set_memory_at(alias_idx, prevmem);
4047 if (init_out_raw != nullptr) {
4048 kit.set_memory(init_out_raw, alias_idx);
4049 }
4050 }
4051
4052 //---------------------------set_output_for_allocation-------------------------
4053 Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
4054 const TypeOopPtr* oop_type,
4055 bool deoptimize_on_exception) {
4056 int rawidx = Compile::AliasIdxRaw;
4057 alloc->set_req( TypeFunc::FramePtr, frameptr() );
4058 add_safepoint_edges(alloc);
4059 Node* allocx = _gvn.transform(alloc);
4060 set_control( _gvn.transform(new ProjNode(allocx, TypeFunc::Control) ) );
// create a separate memory projection for the exceptional path used by make_slow_call_ex
4062 set_memory ( _gvn.transform( new ProjNode(allocx, TypeFunc::Memory, true) ), rawidx );
4063 make_slow_call_ex(allocx, env()->Throwable_klass(), true, deoptimize_on_exception);
4064
4065 // create a memory projection as for the normal control path
4066 Node* malloc = _gvn.transform(new ProjNode(allocx, TypeFunc::Memory));
4067 set_memory(malloc, rawidx);
4068
4069 // a normal slow-call doesn't change i_o, but an allocation does
4070 // we create a separate i_o projection for the normal control path
4071 set_i_o(_gvn.transform( new ProjNode(allocx, TypeFunc::I_O, false) ) );
4072 Node* rawoop = _gvn.transform( new ProjNode(allocx, TypeFunc::Parms) );
4073
4074 // put in an initialization barrier
4075 InitializeNode* init = insert_mem_bar_volatile(Op_Initialize, rawidx,
4076 rawoop)->as_Initialize();
4077 assert(alloc->initialization() == init, "2-way macro link must work");
4078 assert(init ->allocation() == alloc, "2-way macro link must work");
4079 {
4080 // Extract memory strands which may participate in the new object's
4081 // initialization, and source them from the new InitializeNode.
4082 // This will allow us to observe initializations when they occur,
4083 // and link them properly (as a group) to the InitializeNode.
4084 assert(init->in(InitializeNode::Memory) == malloc, "");
4085 MergeMemNode* minit_in = MergeMemNode::make(malloc);
4086 init->set_req(InitializeNode::Memory, minit_in);
4087 record_for_igvn(minit_in); // fold it up later, if possible
4088 _gvn.set_type(minit_in, Type::MEMORY);
4089 Node* minit_out = memory(rawidx);
4090 assert(minit_out->is_Proj() && minit_out->in(0) == init, "");
4091 // Add an edge in the MergeMem for the header fields so an access
4092 // to one of those has correct memory state
4093 set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::mark_offset_in_bytes())));
4094 set_memory(minit_out, C->get_alias_index(oop_type->add_offset(oopDesc::klass_offset_in_bytes())));
    if (oop_type->isa_aryptr()) {
      const TypeAryPtr* arytype = oop_type->is_aryptr();
      if (arytype->is_flat()) {
        // Initially, all flat array accesses share a single slice, but that
        // changes after parsing. Prepare the memory graph so that it can
        // optimize flat array accesses properly once they no longer share a
        // single slice.
        assert(C->flat_accesses_share_alias(), "should be set at parse time");
        C->set_flat_accesses_share_alias(false);
        ciInlineKlass* vk = arytype->elem()->inline_klass();
        for (int i = 0, len = vk->nof_nonstatic_fields(); i < len; i++) {
          ciField* field = vk->nonstatic_field_at(i);
          if (field->offset_in_bytes() >= TrackedInitializationLimit * HeapWordSize)
            continue;  // do not bother to track really large numbers of fields
          int off_in_vt = field->offset_in_bytes() - vk->payload_offset();
          const TypePtr* adr_type = arytype->with_field_offset(off_in_vt)->add_offset(Type::OffsetBot);
          int fieldidx = C->get_alias_index(adr_type, true);
          // Pass nullptr for init_out. Having per-field memory edges of flat
          // array elements as uses of the Initialize node can result in
          // per-field Phis being created, which confuses the logic of
          // Compile::adjust_flat_array_access_aliases().
          hook_memory_on_init(*this, fieldidx, minit_in, nullptr);
        }
        C->set_flat_accesses_share_alias(true);
        hook_memory_on_init(*this, C->get_alias_index(TypeAryPtr::INLINES), minit_in, minit_out);
      } else {
        const TypePtr* telemref = oop_type->add_offset(Type::OffsetBot);
        int            elemidx  = C->get_alias_index(telemref);
        hook_memory_on_init(*this, elemidx, minit_in, minit_out);
      }
    } else if (oop_type->isa_instptr()) {
      set_memory(minit_out, C->get_alias_index(oop_type)); // mark word
      ciInstanceKlass* ik = oop_type->is_instptr()->instance_klass();
      for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
        ciField* field = ik->nonstatic_field_at(i);
        if (field->offset_in_bytes() >= TrackedInitializationLimit * HeapWordSize)
          continue;  // do not bother to track really large numbers of fields
        // Find (or create) the alias category for this field:
        int fieldidx = C->alias_type(field)->index();
        hook_memory_on_init(*this, fieldidx, minit_in, minit_out);
      }
    }
  }

  // Cast raw oop to the real thing...
  Node* javaoop = new CheckCastPPNode(control(), rawoop, oop_type);
  javaoop = _gvn.transform(javaoop);
  C->set_recent_alloc(control(), javaoop);
  assert(just_allocated_object(control()) == javaoop, "just allocated");

#ifdef ASSERT
  { // Verify that the AllocateNode::Ideal_allocation recognizers work:
    assert(AllocateNode::Ideal_allocation(rawoop) == alloc,
           "Ideal_allocation works");
    assert(AllocateNode::Ideal_allocation(javaoop) == alloc,
           "Ideal_allocation works");
    if (alloc->is_AllocateArray()) {
      assert(AllocateNode::Ideal_allocation(rawoop)->as_AllocateArray() == alloc->as_AllocateArray(),
             "Ideal_allocation works");
      assert(AllocateNode::Ideal_allocation(javaoop)->as_AllocateArray() == alloc->as_AllocateArray(),
             "Ideal_allocation works");
    } else {
      assert(alloc->in(AllocateNode::ALength)->is_top(), "no length, please");
    }
  }
#endif //ASSERT

  return javaoop;
}

//---------------------------new_instance--------------------------------------
// This routine takes a klass_node which may be constant (for a static type)
// or may be non-constant (for reflective code).  It will work equally well
// for either, and the graph will fold nicely if the optimizer later reduces
// the type to a constant.
// The optional arguments are for specialized use by intrinsics:
//  - If 'extra_slow_test' is not null, it is an extra condition for the slow path.
//  - If 'return_size_val' is not null, report the total object size to the caller.
//  - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
Node* GraphKit::new_instance(Node* klass_node,
                             Node* extra_slow_test,
                             Node* *return_size_val,
                             bool deoptimize_on_exception,
                             InlineTypeNode* inline_type_node) {
  // Compute size in doublewords.
  // The size is always an integral number of doublewords, represented
  // as a positive bytewise size stored in the klass's layout_helper.
  // The layout_helper also encodes (in a low bit) the need for a slow path.
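  // For example (with the usual encoding from klass.hpp): an instance klass
  // whose objects occupy 24 bytes has layout_helper == 24; if allocation must
  // go slow (e.g. the class has a finalizer), the low
  // _lh_instance_slow_path_bit is also set, making the value 25.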
  jint  layout_con = Klass::_lh_neutral_value;
  Node* layout_val = get_layout_helper(klass_node, layout_con);
  bool  layout_is_con = (layout_val == nullptr);

  if (extra_slow_test == nullptr)  extra_slow_test = intcon(0);
  // Generate the initial go-slow test.  It's either ALWAYS (return a
  // Node for 1) or NEVER (return nullptr) or perhaps (in the reflective
  // case) a computed value derived from the layout_helper.
  Node* initial_slow_test = nullptr;
  if (layout_is_con) {
    assert(!StressReflectiveCode, "stress mode does not use these paths");
    bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
    initial_slow_test = must_go_slow ? intcon(1) : extra_slow_test;
  } else {   // reflective case
    // This reflective path is used by Unsafe.allocateInstance.
    // (It may be stress-tested by specifying StressReflectiveCode.)
    // Basically, we want to get into the VM if there's an illegal argument.
    Node* bit = intcon(Klass::_lh_instance_slow_path_bit);
    initial_slow_test = _gvn.transform( new AndINode(layout_val, bit) );
    if (extra_slow_test != intcon(0)) {
      initial_slow_test = _gvn.transform( new OrINode(initial_slow_test, extra_slow_test) );
    }
    // (Macro-expander will further convert this to a Bool, if necessary.)
  }

  // Find the size in bytes.  This is easy; it's the layout_helper.
  // The size value must be valid even if the slow path is taken.
  Node* size = nullptr;
  if (layout_is_con) {
    size = MakeConX(Klass::layout_helper_size_in_bytes(layout_con));
  } else {   // reflective case
    // In the reflective case, derive the size from the layout_helper value.
    size = ConvI2X(layout_val);

    // Clear the low bits to extract layout_helper_size_in_bytes:
    assert((int)Klass::_lh_instance_slow_path_bit < BytesPerLong, "clear bit");
    Node* mask = MakeConX(~ (intptr_t)right_n_bits(LogBytesPerLong));
    size = _gvn.transform( new AndXNode(size, mask) );
  }
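  // Worked example of the masking above: with LogBytesPerLong == 3 the mask
  // is ~7, so a reflective layout_helper of 25 (24-byte instance plus the
  // slow-path bit) yields size == 25 & ~7 == 24.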
  if (return_size_val != nullptr) {
    (*return_size_val) = size;
  }

  // This is a precise notnull oop of the klass.
  // (Actually, it need not be precise if this is a reflective allocation.)
  // It's what we cast the result to.
  const TypeKlassPtr* tklass = _gvn.type(klass_node)->isa_klassptr();
  if (tklass == nullptr) tklass = TypeInstKlassPtr::OBJECT;
  const TypeOopPtr* oop_type = tklass->as_instance_type();

  // Now generate allocation code

  // The entire memory state is needed for the slow path of the allocation
  // since GC and deoptimization can happen.
  Node* mem = reset_memory();
  set_all_memory(mem); // Create new memory state

  AllocateNode* alloc = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
                                         control(), mem, i_o(),
                                         size, klass_node,
                                         initial_slow_test, inline_type_node);

  return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
}

//-------------------------------new_array-------------------------------------
// Helper for newarray and anewarray.
// The 'length' parameter is (obviously) the length of the array.
// The optional arguments are for specialized use by intrinsics:
//  - If 'return_size_val' is not null, report the non-padded array size
//    (sum of header size and array body) to the caller.
//  - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
Node* GraphKit::new_array(Node* klass_node,     // array klass (maybe variable)
                          Node* length,         // number of array elements
                          int   nargs,          // number of arguments to push back for uncommon trap
                          Node* *return_size_val,
                          bool deoptimize_on_exception,
                          Node* init_val) {
  jint  layout_con = Klass::_lh_neutral_value;
  Node* layout_val = get_layout_helper(klass_node, layout_con);
  bool  layout_is_con = (layout_val == nullptr);

  if (!layout_is_con && !StressReflectiveCode &&
      !too_many_traps(Deoptimization::Reason_class_check)) {
    // This is a reflective array creation site.
    // Optimistically assume that it is a subtype of Object[],
    // so that we can fold up all the address arithmetic.
    layout_con = Klass::array_layout_helper(T_OBJECT);
    Node* cmp_lh = _gvn.transform( new CmpINode(layout_val, intcon(layout_con)) );
    Node* bol_lh = _gvn.transform( new BoolNode(cmp_lh, BoolTest::eq) );
    { BuildCutout unless(this, bol_lh, PROB_MAX);
      inc_sp(nargs);
      uncommon_trap(Deoptimization::Reason_class_check,
                    Deoptimization::Action_maybe_recompile);
    }
    layout_val = nullptr;
    layout_is_con = true;
  }

  // Generate the initial go-slow test.  Make sure we do not overflow
  // if length is huge (near 2Gig) or negative!  We do not need
  // exact double-words here, just a close approximation of needed
  // double-words.  We can't add any offset or rounding bits, lest we
  // take a size of -1 bytes and make it positive.  Use an unsigned
  // compare, so negative sizes look hugely positive.
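  // E.g. a length of -1 reads as 0xFFFFFFFF under the unsigned compare below,
  // far above any FastAllocateSizeLimit, so it takes the slow path and the
  // runtime throws NegativeArraySizeException.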
  int fast_size_limit = FastAllocateSizeLimit;
  if (layout_is_con) {
    assert(!StressReflectiveCode, "stress mode does not use these paths");
    // Increase the size limit if we have exact knowledge of array type.
    int log2_esize = Klass::layout_helper_log2_element_size(layout_con);
    fast_size_limit <<= MAX2(LogBytesPerLong - log2_esize, 0);
  }

  Node* initial_slow_cmp  = _gvn.transform( new CmpUNode( length, intcon( fast_size_limit ) ) );
  Node* initial_slow_test = _gvn.transform( new BoolNode( initial_slow_cmp, BoolTest::gt ) );

  // --- Size Computation ---
  // array_size = round_to_heap(array_header + (length << elem_shift));
  // where round_to_heap(x) == align_to(x, MinObjAlignmentInBytes)
  // and align_to(x, y) == ((x + y-1) & ~(y-1))
  // The rounding mask is strength-reduced, if possible.
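  // Worked example (64-bit VM, 8-byte alignment, assuming a 16-byte array
  // header): an int[10] needs 16 + (10 << 2) = 56 bytes, and
  // align_to(56, 8) == (56 + 7) & ~7 == 56, so no padding is added;
  // a byte[13] needs 16 + 13 = 29 bytes, padded to 32.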
  int round_mask = MinObjAlignmentInBytes - 1;
  Node* header_size = nullptr;
  // (T_BYTE has the weakest alignment and size restrictions...)
  if (layout_is_con) {
    int       hsize  = Klass::layout_helper_header_size(layout_con);
    int       eshift = Klass::layout_helper_log2_element_size(layout_con);
    bool is_flat_array = Klass::layout_helper_is_flatArray(layout_con);
    if ((round_mask & ~right_n_bits(eshift)) == 0)
      round_mask = 0;  // strength-reduce it if it goes away completely
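    // E.g. for a long[] array eshift == 3, so right_n_bits(3) == 7 already
    // covers the whole 8-byte rounding mask: header and body are then both
    // multiples of 8, and the explicit rounding can be dropped.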
    assert(is_flat_array || (hsize & right_n_bits(eshift)) == 0, "hsize is pre-rounded");
    int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
    assert(header_size_min <= hsize, "generic minimum is smallest");
    header_size = intcon(hsize);
  } else {
    Node* hss   = intcon(Klass::_lh_header_size_shift);
    Node* hsm   = intcon(Klass::_lh_header_size_mask);
    header_size = _gvn.transform(new URShiftINode(layout_val, hss));
    header_size = _gvn.transform(new AndINode(header_size, hsm));
  }

  Node* elem_shift = nullptr;
  if (layout_is_con) {
    int eshift = Klass::layout_helper_log2_element_size(layout_con);
    if (eshift != 0)
      elem_shift = intcon(eshift);
  } else {
    // There is no need to mask or shift this value.
    // The semantics of LShiftINode include an implicit mask to 0x1F.
    assert(Klass::_lh_log2_element_size_shift == 0, "use shift in place");
    elem_shift = layout_val;
  // ... (elided) ...
  }
  Node* non_rounded_size = _gvn.transform(new AddXNode(headerx, abody));

  if (return_size_val != nullptr) {
    // This is the size
    (*return_size_val) = non_rounded_size;
  }

  Node* size = non_rounded_size;
  if (round_mask != 0) {
    Node* mask1 = MakeConX(round_mask);
    size = _gvn.transform(new AddXNode(size, mask1));
    Node* mask2 = MakeConX(~round_mask);
    size = _gvn.transform(new AndXNode(size, mask2));
  }
  // else if round_mask == 0, the size computation is self-rounding

  // Now generate allocation code

  // The entire memory state is needed for the slow path of the allocation
  // since GC and deoptimization can happen.
  Node* mem = reset_memory();
  set_all_memory(mem); // Create new memory state

  if (initial_slow_test->is_Bool()) {
    // Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
    initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
  }

  const TypeKlassPtr* ary_klass = _gvn.type(klass_node)->isa_klassptr();
  const TypeOopPtr* ary_type = ary_klass->as_instance_type();

  Node* raw_init_value = nullptr;
  if (init_val != nullptr) {
    // TODO 8350865 Fast non-zero init not implemented yet for flat, null-free arrays
    if (ary_type->is_flat()) {
      initial_slow_test = intcon(1);
    }

    if (UseCompressedOops) {
      // With compressed oops, the 64-bit init value is built from two 32-bit compressed oops
      init_val = _gvn.transform(new EncodePNode(init_val, init_val->bottom_type()->make_narrowoop()));
      Node* lower = _gvn.transform(new CastP2XNode(control(), init_val));
      Node* upper = _gvn.transform(new LShiftLNode(lower, intcon(32)));
      raw_init_value = _gvn.transform(new OrLNode(lower, upper));
    } else {
      raw_init_value = _gvn.transform(new CastP2XNode(control(), init_val));
    }
  }
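  // E.g. if the encoded init value is 0x11223344, the raw pattern becomes
  // 0x1122334411223344, so a single 64-bit store initializes two adjacent
  // narrow-oop element slots at once.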

  Node* valid_length_test = _gvn.intcon(1);
  if (ary_type->isa_aryptr()) {
    BasicType bt = ary_type->isa_aryptr()->elem()->array_element_basic_type();
    jint max = TypeAryPtr::max_array_length(bt);
    Node* valid_length_cmp  = _gvn.transform(new CmpUNode(length, intcon(max)));
    valid_length_test = _gvn.transform(new BoolNode(valid_length_cmp, BoolTest::le));
  }
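  // Lengths above max_array_length for the element type cannot be allocated;
  // this test lets the expanded allocation route such lengths to the slow
  // path, where the runtime reports the error.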

  // Create the AllocateArrayNode and its result projections
  AllocateArrayNode* alloc = new AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
                                                   control(), mem, i_o(),
                                                   size, klass_node,
                                                   initial_slow_test,
                                                   length, valid_length_test,
                                                   init_val, raw_init_value);
  // Cast to correct type.  Note that the klass_node may be constant or not,
  // and in the latter case the actual array type will be inexact also.
  // (This happens via a non-constant argument to inline_native_newArray.)
  // In any case, the value of klass_node provides the desired array type.
  const TypeInt* length_type = _gvn.find_int_type(length);
  if (ary_type->isa_aryptr() && length_type != nullptr) {
    // Try to get a better type than POS for the size
    ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
  }

  Node* javaoop = set_output_for_allocation(alloc, ary_type, deoptimize_on_exception);

  array_ideal_length(alloc, ary_type, true);
  return javaoop;
}

// The following "Ideal_foo" functions are placed here because they recognize
// the graph shapes created by the functions immediately above.

//---------------------------Ideal_allocation----------------------------------
// ... (elided) ...

  set_all_memory(ideal.merged_memory());
  set_i_o(ideal.i_o());
  set_control(ideal.ctrl());
}

void GraphKit::final_sync(IdealKit& ideal) {
  // Final sync IdealKit and graphKit.
  sync_kit(ideal);
}

Node* GraphKit::load_String_length(Node* str, bool set_ctrl) {
  Node* len = load_array_length(load_String_value(str, set_ctrl));
  Node* coder = load_String_coder(str, set_ctrl);
  // Divide length by 2 if coder is UTF16
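  // (CODER_LATIN1 == 0 and CODER_UTF16 == 1, so the shift leaves a Latin-1
  // length unchanged and halves the byte length for UTF-16.)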
  return _gvn.transform(new RShiftINode(len, coder));
}

Node* GraphKit::load_String_value(Node* str, bool set_ctrl) {
  int value_offset = java_lang_String::value_offset();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, nullptr, Type::Offset(0));
  const TypePtr* value_field_type = string_type->add_offset(value_offset);
  const TypeAryPtr* value_type = TypeAryPtr::make(TypePtr::NotNull,
                                                  TypeAry::make(TypeInt::BYTE, TypeInt::POS, false, false, true, true),
                                                  ciTypeArrayKlass::make(T_BYTE), true, Type::Offset(0));
  Node* p = basic_plus_adr(str, str, value_offset);
  Node* load = access_load_at(str, p, value_field_type, value_type, T_OBJECT,
                              IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
  return load;
}

Node* GraphKit::load_String_coder(Node* str, bool set_ctrl) {
  if (!CompactStrings) {
    return intcon(java_lang_String::CODER_UTF16);
  }
  int coder_offset = java_lang_String::coder_offset();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, nullptr, Type::Offset(0));
  const TypePtr* coder_field_type = string_type->add_offset(coder_offset);

  Node* p = basic_plus_adr(str, str, coder_offset);
  Node* load = access_load_at(str, p, coder_field_type, TypeInt::BYTE, T_BYTE,
                              IN_HEAP | (set_ctrl ? C2_CONTROL_DEPENDENT_LOAD : 0) | MO_UNORDERED);
  return load;
}

void GraphKit::store_String_value(Node* str, Node* value) {
  int value_offset = java_lang_String::value_offset();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, nullptr, Type::Offset(0));
  const TypePtr* value_field_type = string_type->add_offset(value_offset);

  access_store_at(str, basic_plus_adr(str, value_offset), value_field_type,
                  value, TypeAryPtr::BYTES, T_OBJECT, IN_HEAP | MO_UNORDERED);
}

void GraphKit::store_String_coder(Node* str, Node* value) {
  int coder_offset = java_lang_String::coder_offset();
  const TypeInstPtr* string_type = TypeInstPtr::make(TypePtr::NotNull, C->env()->String_klass(),
                                                     false, nullptr, Type::Offset(0));
  const TypePtr* coder_field_type = string_type->add_offset(coder_offset);

  access_store_at(str, basic_plus_adr(str, coder_offset), coder_field_type,
                  value, TypeInt::BYTE, T_BYTE, IN_HEAP | MO_UNORDERED);
}

// Capture src and dst memory state with a MergeMemNode
Node* GraphKit::capture_memory(const TypePtr* src_type, const TypePtr* dst_type) {
  if (src_type == dst_type) {
    // Types are equal, we don't need a MergeMemNode
    return memory(src_type);
  }
  MergeMemNode* merge = MergeMemNode::make(map()->memory());
  record_for_igvn(merge); // fold it up later, if possible
  int src_idx = C->get_alias_index(src_type);
  int dst_idx = C->get_alias_index(dst_type);
  merge->set_memory_at(src_idx, memory(src_idx));
  merge->set_memory_at(dst_idx, memory(dst_idx));
  return merge;
}
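// A hypothetical use, sketched: a node that reads the src slice and writes the
// dst slice can take capture_memory(src_type, dst_type) as its memory input;
// the MergeMem keeps both slices' most recent state visible to that node.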

// ... (elided) ...

  i_char->init_req(2, AddI(i_char, intcon(2)));

  set_control(IfFalse(iff));
  set_memory(st, TypeAryPtr::BYTES);
}

Node* GraphKit::make_constant_from_field(ciField* field, Node* obj) {
  if (!field->is_constant()) {
    return nullptr; // Field not marked as constant.
  }
  ciInstance* holder = nullptr;
  if (!field->is_static()) {
    ciObject* const_oop = obj->bottom_type()->is_oopptr()->const_oop();
    if (const_oop != nullptr && const_oop->is_instance()) {
      holder = const_oop->as_instance();
    }
  }
  const Type* con_type = Type::make_constant_from_field(field, holder, field->layout_type(),
                                                        /*is_unsigned_load=*/false);
  if (con_type != nullptr) {
    Node* con = makecon(con_type);
    if (field->type()->is_inlinetype()) {
      con = InlineTypeNode::make_from_oop(this, con, field->type()->as_inline_klass());
    } else if (con_type->is_inlinetypeptr()) {
      con = InlineTypeNode::make_from_oop(this, con, con_type->inline_klass());
    }
    return con;
  }
  return nullptr;
}

//---------------------------load_mirror_from_klass----------------------------
// Given a klass oop, load its java mirror (a java.lang.Class oop).
Node* GraphKit::load_mirror_from_klass(Node* klass) {
  Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
  Node* load = make_load(nullptr, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
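  // The raw load above produces the OopHandle itself (a native pointer to an
  // oop slot); the IN_NATIVE access below dereferences that slot through the
  // GC barriers appropriate for a root outside the Java heap.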
  // mirror = ((OopHandle)mirror)->resolve();
  return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE);
}

Node* GraphKit::maybe_narrow_object_type(Node* obj, ciKlass* type) {
  const Type* obj_type = obj->bottom_type();
  const TypeOopPtr* sig_type = TypeOopPtr::make_from_klass(type);
  if (obj_type->isa_oopptr() && sig_type->is_loaded() && !obj_type->higher_equal(sig_type)) {
    const Type* narrow_obj_type = obj_type->filter_speculative(sig_type); // keep speculative part
    Node* casted_obj = gvn().transform(new CheckCastPPNode(control(), obj, narrow_obj_type));
    obj = casted_obj;
  }
  if (sig_type->is_inlinetypeptr()) {
    obj = InlineTypeNode::make_from_oop(this, obj, sig_type->inline_klass());
  }
  return obj;
}