40 #include "gc/shared/c2/barrierSetC2.hpp"
41 #include "jfr/jfrEvents.hpp"
42 #include "jvm_io.h"
43 #include "memory/allocation.hpp"
44 #include "memory/arena.hpp"
45 #include "memory/resourceArea.hpp"
46 #include "opto/addnode.hpp"
47 #include "opto/block.hpp"
48 #include "opto/c2compiler.hpp"
49 #include "opto/callGenerator.hpp"
50 #include "opto/callnode.hpp"
51 #include "opto/castnode.hpp"
52 #include "opto/cfgnode.hpp"
53 #include "opto/chaitin.hpp"
54 #include "opto/compile.hpp"
55 #include "opto/connode.hpp"
56 #include "opto/convertnode.hpp"
57 #include "opto/divnode.hpp"
58 #include "opto/escape.hpp"
59 #include "opto/idealGraphPrinter.hpp"
60 #include "opto/locknode.hpp"
61 #include "opto/loopnode.hpp"
62 #include "opto/machnode.hpp"
63 #include "opto/macro.hpp"
64 #include "opto/matcher.hpp"
65 #include "opto/mathexactnode.hpp"
66 #include "opto/memnode.hpp"
67 #include "opto/mulnode.hpp"
68 #include "opto/narrowptrnode.hpp"
69 #include "opto/node.hpp"
70 #include "opto/opaquenode.hpp"
71 #include "opto/opcodes.hpp"
72 #include "opto/output.hpp"
73 #include "opto/parse.hpp"
74 #include "opto/phaseX.hpp"
75 #include "opto/rootnode.hpp"
76 #include "opto/runtime.hpp"
77 #include "opto/stringopts.hpp"
78 #include "opto/type.hpp"
79 #include "opto/vector.hpp"
80 #include "opto/vectornode.hpp"
81 #include "runtime/globals_extension.hpp"
82 #include "runtime/sharedRuntime.hpp"
83 #include "runtime/signature.hpp"
84 #include "runtime/stubRoutines.hpp"
85 #include "runtime/timer.hpp"
86 #include "utilities/align.hpp"
386 // as dead to be conservative about the dead node count at any
387 // given time.
388 if (!dead->is_Con()) {
389 record_dead_node(dead->_idx);
390 }
391 if (dead->is_macro()) {
392 remove_macro_node(dead);
393 }
394 if (dead->is_expensive()) {
395 remove_expensive_node(dead);
396 }
397 if (dead->is_OpaqueTemplateAssertionPredicate()) {
398 remove_template_assertion_predicate_opaque(dead->as_OpaqueTemplateAssertionPredicate());
399 }
400 if (dead->is_ParsePredicate()) {
401 remove_parse_predicate(dead->as_ParsePredicate());
402 }
403 if (dead->for_post_loop_opts_igvn()) {
404 remove_from_post_loop_opts_igvn(dead);
405 }
406 if (dead->for_merge_stores_igvn()) {
407 remove_from_merge_stores_igvn(dead);
408 }
409 if (dead->is_Call()) {
410 remove_useless_late_inlines( &_late_inlines, dead);
411 remove_useless_late_inlines( &_string_late_inlines, dead);
412 remove_useless_late_inlines( &_boxing_late_inlines, dead);
413 remove_useless_late_inlines(&_vector_reboxing_late_inlines, dead);
414
415 if (dead->is_CallStaticJava()) {
416 remove_unstable_if_trap(dead->as_CallStaticJava(), false);
417 }
418 }
419 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
420 bs->unregister_potential_barrier_node(dead);
421 }
422
423 // Disconnect all useless nodes by disconnecting those at the boundary.
424 void Compile::disconnect_useless_nodes(Unique_Node_List& useful, Unique_Node_List& worklist, const Unique_Node_List* root_and_safepoints) {
425 uint next = 0;
433 // Use raw traversal of out edges since this code removes out edges
434 int max = n->outcnt();
435 for (int j = 0; j < max; ++j) {
436 Node* child = n->raw_out(j);
437 if (!useful.member(child)) {
438 assert(!child->is_top() || child != top(),
439 "If top is cached in Compile object it is in useful list");
440 // Only need to remove this out-edge to the useless node
441 n->raw_del_out(j);
442 --j;
443 --max;
444 if (child->is_data_proj_of_pure_function(n)) {
445 worklist.push(n);
446 }
447 }
448 }
449 if (n->outcnt() == 1 && n->has_special_unique_user()) {
450 assert(useful.member(n->unique_out()), "do not push a useless node");
451 worklist.push(n->unique_out());
452 }
453 }
454
455 remove_useless_nodes(_macro_nodes, useful); // remove useless macro nodes
456 remove_useless_nodes(_parse_predicates, useful); // remove useless Parse Predicate nodes
457 // Remove useless Template Assertion Predicate opaque nodes
458 remove_useless_nodes(_template_assertion_predicate_opaques, useful);
459 remove_useless_nodes(_expensive_nodes, useful); // remove useless expensive nodes
460 remove_useless_nodes(_for_post_loop_igvn, useful); // remove useless node recorded for post loop opts IGVN pass
461 remove_useless_nodes(_for_merge_stores_igvn, useful); // remove useless node recorded for merge stores IGVN pass
462 remove_useless_unstable_if_traps(useful); // remove useless unstable_if traps
463 remove_useless_coarsened_locks(useful); // remove useless coarsened locks nodes
464 #ifdef ASSERT
465 if (_modified_nodes != nullptr) {
466 _modified_nodes->remove_useless_nodes(useful.member_set());
467 }
468 #endif
469
470 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
471 bs->eliminate_useless_gc_barriers(useful, this);
472 // clean up the late inline lists
473 remove_useless_late_inlines( &_late_inlines, useful);
474 remove_useless_late_inlines( &_string_late_inlines, useful);
475 remove_useless_late_inlines( &_boxing_late_inlines, useful);
476 remove_useless_late_inlines(&_vector_reboxing_late_inlines, useful);
477 debug_only(verify_graph_edges(true /*check for no_dead_code*/, root_and_safepoints);)
478 }
479
480 // ============================================================================
622
623 Compile::Compile(ciEnv* ci_env, ciMethod* target, int osr_bci,
624 Options options, DirectiveSet* directive)
625 : Phase(Compiler),
626 _compile_id(ci_env->compile_id()),
627 _options(options),
628 _method(target),
629 _entry_bci(osr_bci),
630 _ilt(nullptr),
631 _stub_function(nullptr),
632 _stub_name(nullptr),
633 _stub_entry_point(nullptr),
634 _max_node_limit(MaxNodeLimit),
635 _post_loop_opts_phase(false),
636 _merge_stores_phase(false),
637 _allow_macro_nodes(true),
638 _inlining_progress(false),
639 _inlining_incrementally(false),
640 _do_cleanup(false),
641 _has_reserved_stack_access(target->has_reserved_stack_access()),
642 #ifndef PRODUCT
643 _igv_idx(0),
644 _trace_opto_output(directive->TraceOptoOutputOption),
645 #endif
646 _has_method_handle_invokes(false),
647 _clinit_barrier_on_entry(false),
648 _stress_seed(0),
649 _comp_arena(mtCompiler, Arena::Tag::tag_comp),
650 _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
651 _env(ci_env),
652 _directive(directive),
653 _log(ci_env->log()),
654 _first_failure_details(nullptr),
655 _intrinsics(comp_arena(), 0, 0, nullptr),
656 _macro_nodes(comp_arena(), 8, 0, nullptr),
657 _parse_predicates(comp_arena(), 8, 0, nullptr),
658 _template_assertion_predicate_opaques(comp_arena(), 8, 0, nullptr),
659 _expensive_nodes(comp_arena(), 8, 0, nullptr),
660 _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
661 _for_merge_stores_igvn(comp_arena(), 8, 0, nullptr),
662 _unstable_if_traps(comp_arena(), 8, 0, nullptr),
663 _coarsened_locks(comp_arena(), 8, 0, nullptr),
664 _congraph(nullptr),
665 NOT_PRODUCT(_igv_printer(nullptr) COMMA)
666 _unique(0),
667 _dead_node_count(0),
668 _dead_node_list(comp_arena()),
669 _node_arena_one(mtCompiler, Arena::Tag::tag_node),
670 _node_arena_two(mtCompiler, Arena::Tag::tag_node),
671 _node_arena(&_node_arena_one),
672 _mach_constant_base_node(nullptr),
673 _Compile_types(mtCompiler, Arena::Tag::tag_type),
674 _initial_gvn(nullptr),
675 _igvn_worklist(nullptr),
676 _types(nullptr),
677 _node_hash(nullptr),
678 _late_inlines(comp_arena(), 2, 0, nullptr),
679 _string_late_inlines(comp_arena(), 2, 0, nullptr),
680 _boxing_late_inlines(comp_arena(), 2, 0, nullptr),
745 #define MINIMUM_NODE_HASH 1023
746
747 // GVN that will be run immediately on new nodes
748 uint estimated_size = method()->code_size()*4+64;
749 estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
750 _igvn_worklist = new (comp_arena()) Unique_Node_List(comp_arena());
751 _types = new (comp_arena()) Type_Array(comp_arena());
752 _node_hash = new (comp_arena()) NodeHash(comp_arena(), estimated_size);
753 PhaseGVN gvn;
754 set_initial_gvn(&gvn);
755
756 { // Scope for timing the parser
757 TracePhase tp(_t_parser);
758
759 // Put top into the hash table ASAP.
760 initial_gvn()->transform(top());
761
762 // Set up tf(), start(), and find a CallGenerator.
763 CallGenerator* cg = nullptr;
764 if (is_osr_compilation()) {
765 const TypeTuple *domain = StartOSRNode::osr_domain();
766 const TypeTuple *range = TypeTuple::make_range(method()->signature());
767 init_tf(TypeFunc::make(domain, range));
768 StartNode* s = new StartOSRNode(root(), domain);
769 initial_gvn()->set_type_bottom(s);
770 verify_start(s);
771 cg = CallGenerator::for_osr(method(), entry_bci());
772 } else {
773 // Normal case.
774 init_tf(TypeFunc::make(method()));
775 StartNode* s = new StartNode(root(), tf()->domain());
776 initial_gvn()->set_type_bottom(s);
777 verify_start(s);
778 if (method()->intrinsic_id() == vmIntrinsics::_Reference_get) {
779 // With java.lang.ref.Reference.get() we must go through the
780 // intrinsic - even when get() is the root
781 // method of the compile - so that, if necessary, the value in
782 // the referent field of the reference object gets recorded by
783 // the pre-barrier code.
784 cg = find_intrinsic(method(), false);
785 }
786 if (cg == nullptr) {
787 float past_uses = method()->interpreter_invocation_count();
788 float expected_uses = past_uses;
789 cg = CallGenerator::for_inline(method(), expected_uses);
790 }
791 }
792 if (failing()) return;
793 if (cg == nullptr) {
794 const char* reason = InlineTree::check_can_parse(method());
795 assert(reason != nullptr, "expect reason for parse failure");
866 print_ideal_ir("print_ideal");
867 }
868 #endif
869
870 #ifdef ASSERT
871 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
872 bs->verify_gc_barriers(this, BarrierSetC2::BeforeCodeGen);
873 #endif
874
875 // Dump compilation data to replay it.
876 if (directive->DumpReplayOption) {
877 env()->dump_replay_data(_compile_id);
878 }
879 if (directive->DumpInlineOption && (ilt() != nullptr)) {
880 env()->dump_inline_data(_compile_id);
881 }
882
883 // Now that we know the size of all the monitors we can add a fixed slot
884 // for the original deopt pc.
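// (Illustrative note: VMReg stack slots are 32 bits wide, so on 64-bit platforms
// sizeof(address) / VMRegImpl::stack_slot_size is 2, i.e. this reserves one
// address-sized word for the saved deopt pc.)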
885 int next_slot = fixed_slots() + (sizeof(address) / VMRegImpl::stack_slot_size);
886 set_fixed_slots(next_slot);
887
888 // Compute when to use implicit null checks. Used by matching trap based
889 // nodes and NullCheck optimization.
890 set_allowed_deopt_reasons();
891
892 // Now generate code
893 Code_Gen();
894 }
895
896 //------------------------------Compile----------------------------------------
897 // Compile a runtime stub
898 Compile::Compile(ciEnv* ci_env,
899 TypeFunc_generator generator,
900 address stub_function,
901 const char* stub_name,
902 int is_fancy_jump,
903 bool pass_tls,
904 bool return_pc,
905 DirectiveSet* directive)
906 : Phase(Compiler),
907 _compile_id(0),
908 _options(Options::for_runtime_stub()),
909 _method(nullptr),
910 _entry_bci(InvocationEntryBci),
911 _stub_function(stub_function),
912 _stub_name(stub_name),
913 _stub_entry_point(nullptr),
914 _max_node_limit(MaxNodeLimit),
915 _post_loop_opts_phase(false),
916 _merge_stores_phase(false),
917 _allow_macro_nodes(true),
918 _inlining_progress(false),
919 _inlining_incrementally(false),
920 _has_reserved_stack_access(false),
921 #ifndef PRODUCT
922 _igv_idx(0),
923 _trace_opto_output(directive->TraceOptoOutputOption),
924 #endif
925 _has_method_handle_invokes(false),
926 _clinit_barrier_on_entry(false),
927 _stress_seed(0),
928 _comp_arena(mtCompiler, Arena::Tag::tag_comp),
929 _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
930 _env(ci_env),
931 _directive(directive),
932 _log(ci_env->log()),
933 _first_failure_details(nullptr),
934 _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
935 _for_merge_stores_igvn(comp_arena(), 8, 0, nullptr),
936 _congraph(nullptr),
937 NOT_PRODUCT(_igv_printer(nullptr) COMMA)
938 _unique(0),
939 _dead_node_count(0),
940 _dead_node_list(comp_arena()),
1044
1045 _fixed_slots = 0;
1046 set_has_split_ifs(false);
1047 set_has_loops(false); // first approximation
1048 set_has_stringbuilder(false);
1049 set_has_boxed_value(false);
1050 _trap_can_recompile = false; // no traps emitted yet
1051 _major_progress = true; // start out assuming good things will happen
1052 set_has_unsafe_access(false);
1053 set_max_vector_size(0);
1054 set_clear_upper_avx(false); //false as default for clear upper bits of ymm registers
1055 Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
1056 set_decompile_count(0);
1057
1058 #ifndef PRODUCT
1059 Copy::zero_to_bytes(_igv_phase_iter, sizeof(_igv_phase_iter));
1060 #endif
1061
1062 set_do_freq_based_layout(_directive->BlockLayoutByFrequencyOption);
1063 _loop_opts_cnt = LoopOptsCount;
1064 set_do_inlining(Inline);
1065 set_max_inline_size(MaxInlineSize);
1066 set_freq_inline_size(FreqInlineSize);
1067 set_do_scheduling(OptoScheduling);
1068
1069 set_do_vector_loop(false);
1070 set_has_monitors(false);
1071 set_has_scoped_access(false);
1072
1073 if (AllowVectorizeOnDemand) {
1074 if (has_method() && _directive->VectorizeOption) {
1075 set_do_vector_loop(true);
1076 NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n", method()->name()->as_quoted_ascii());})
1077 } else if (has_method() && method()->name() != nullptr &&
1078 method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
1079 set_do_vector_loop(true);
1080 }
1081 }
1082 set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); // TODO: consider whether do_vector_loop() should mandate use_cmove unconditionally
1083 NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n", method()->name()->as_quoted_ascii());})
1328
1329 // A known instance (scalarizable allocation) aliases only with itself.
1330 bool is_known_inst = tj->isa_oopptr() != nullptr &&
1331 tj->is_oopptr()->is_known_instance();
1332
1333 // Process weird unsafe references.
1334 if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
1335 assert(InlineUnsafeOps || StressReflectiveCode, "indeterminate pointers come only from unsafe ops");
1336 assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
1337 tj = TypeOopPtr::BOTTOM;
1338 ptr = tj->ptr();
1339 offset = tj->offset();
1340 }
1341
1342 // Array pointers need some flattening
1343 const TypeAryPtr* ta = tj->isa_aryptr();
1344 if (ta && ta->is_stable()) {
1345 // Erase stability property for alias analysis.
1346 tj = ta = ta->cast_to_stable(false);
1347 }
1348 if( ta && is_known_inst ) {
1349 if ( offset != Type::OffsetBot &&
1350 offset > arrayOopDesc::length_offset_in_bytes() ) {
1351 offset = Type::OffsetBot; // Flatten constant access into array body only
1352 tj = ta = ta->
1353 remove_speculative()->
1354 cast_to_ptr_type(ptr)->
1355 with_offset(offset);
1356 }
1357 } else if (ta) {
1358 // For arrays indexed by constant indices, we flatten the alias
1359 // space to include all of the array body. Only the header, klass
1360 // and array length can be accessed un-aliased.
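// For example (illustrative): a load at a constant body offset (e.g. a[2]) and a load
// at a variable index (offset == Type::OffsetBot, e.g. a[i]) can reach the same memory
// state, so constant body offsets are widened to OffsetBot below and all body accesses
// end up in a single alias class.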
1361 if( offset != Type::OffsetBot ) {
1362 if( ta->const_oop() ) { // MethodData* or Method*
1363 offset = Type::OffsetBot; // Flatten constant access into array body
1364 tj = ta = ta->
1365 remove_speculative()->
1366 cast_to_ptr_type(ptr)->
1367 cast_to_exactness(false)->
1368 with_offset(offset);
1369 } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
1370 // range is OK as-is.
1371 tj = ta = TypeAryPtr::RANGE;
1372 } else if( offset == oopDesc::klass_offset_in_bytes() ) {
1373 tj = TypeInstPtr::KLASS; // all klass loads look alike
1374 ta = TypeAryPtr::RANGE; // generic ignored junk
1375 ptr = TypePtr::BotPTR;
1376 } else if( offset == oopDesc::mark_offset_in_bytes() ) {
1377 tj = TypeInstPtr::MARK;
1378 ta = TypeAryPtr::RANGE; // generic ignored junk
1379 ptr = TypePtr::BotPTR;
1380 } else { // Random constant offset into array body
1381 offset = Type::OffsetBot; // Flatten constant access into array body
1382 tj = ta = ta->
1383 remove_speculative()->
1384 cast_to_ptr_type(ptr)->
1385 cast_to_exactness(false)->
1386 with_offset(offset);
1387 }
1388 }
1389 // Arrays of fixed size alias with arrays of unknown size.
1390 if (ta->size() != TypeInt::POS) {
1391 const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
1392 tj = ta = ta->
1393 remove_speculative()->
1394 cast_to_ptr_type(ptr)->
1395 with_ary(tary)->
1396 cast_to_exactness(false);
1397 }
1398 // Arrays of known objects become arrays of unknown objects.
1399 if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
1400 const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
1401 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,nullptr,false,offset);
1402 }
1403 if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
1404 const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
1405 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,nullptr,false,offset);
1406 }
1407 // Arrays of bytes and of booleans both use 'bastore' and 'baload', so they
1408 // cannot be distinguished by bytecode alone.
1409 if (ta->elem() == TypeInt::BOOL) {
1410 const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
1411 ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
1412 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,offset);
1413 }
1414 // During the 2nd round of IterGVN, NotNull castings are removed.
1415 // Make sure the Bottom and NotNull variants alias the same.
1416 // Also, make sure exact and non-exact variants alias the same.
1417 if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != nullptr) {
1418 tj = ta = ta->
1419 remove_speculative()->
1420 cast_to_ptr_type(TypePtr::BotPTR)->
1421 cast_to_exactness(false)->
1422 with_offset(offset);
1423 }
1424 }
1425
1426 // Oop pointers need some flattening
1427 const TypeInstPtr *to = tj->isa_instptr();
1428 if (to && to != TypeOopPtr::BOTTOM) {
1429 ciInstanceKlass* ik = to->instance_klass();
1430 if( ptr == TypePtr::Constant ) {
1431 if (ik != ciEnv::current()->Class_klass() ||
1432 offset < ik->layout_helper_size_in_bytes()) {
1442 } else if( is_known_inst ) {
1443 tj = to; // Keep NotNull and klass_is_exact for instance type
1444 } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
1445 // During the 2nd round of IterGVN, NotNull castings are removed.
1446 // Make sure the Bottom and NotNull variants alias the same.
1447 // Also, make sure exact and non-exact variants alias the same.
1448 tj = to = to->
1449 remove_speculative()->
1450 cast_to_instance_id(TypeOopPtr::InstanceBot)->
1451 cast_to_ptr_type(TypePtr::BotPTR)->
1452 cast_to_exactness(false);
1453 }
1454 if (to->speculative() != nullptr) {
1455 tj = to = to->remove_speculative();
1456 }
1457 // Canonicalize the holder of this field
1458 if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
1459 // First handle header references such as a LoadKlassNode, even if the
1460 // object's klass is unloaded at compile time (4965979).
1461 if (!is_known_inst) { // Do it only for non-instance types
1462 tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, nullptr, offset);
1463 }
1464 } else if (offset < 0 || offset >= ik->layout_helper_size_in_bytes()) {
1465 // Static fields are in the space above the normal instance
1466 // fields in the java.lang.Class instance.
1467 if (ik != ciEnv::current()->Class_klass()) {
1468 to = nullptr;
1469 tj = TypeOopPtr::BOTTOM;
1470 offset = tj->offset();
1471 }
1472 } else {
1473 ciInstanceKlass *canonical_holder = ik->get_canonical_holder(offset);
1474 assert(offset < canonical_holder->layout_helper_size_in_bytes(), "");
1475 assert(tj->offset() == offset, "no change to offset expected");
1476 bool xk = to->klass_is_exact();
1477 int instance_id = to->instance_id();
1478
1479 // If the input type's class is the holder: if exact, the type only includes interfaces implemented by the holder
1480 // but if not exact, it may include extra interfaces: build new type from the holder class to make sure only
1481 // its interfaces are included.
1482 if (xk && ik->equals(canonical_holder)) {
1483 assert(tj == TypeInstPtr::make(to->ptr(), canonical_holder, is_known_inst, nullptr, offset, instance_id), "exact type should be canonical type");
1484 } else {
1485 assert(xk || !is_known_inst, "Known instance should be exact type");
1486 tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, is_known_inst, nullptr, offset, instance_id);
1487 }
1488 }
1489 }
1490
1491 // Klass pointers to object array klasses need some flattening
1492 const TypeKlassPtr *tk = tj->isa_klassptr();
1493 if( tk ) {
1494 // If we are referencing a field within a Klass, we need
1495 // to assume the worst case of an Object. Both exact and
1496 // inexact types must flatten to the same alias class so
1497 // use NotNull as the PTR.
1498 if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
1499 tj = tk = TypeInstKlassPtr::make(TypePtr::NotNull,
1500 env()->Object_klass(),
1501 offset);
1502 }
1503
1504 if (tk->isa_aryklassptr() && tk->is_aryklassptr()->elem()->isa_klassptr()) {
1505 ciKlass* k = ciObjArrayKlass::make(env()->Object_klass());
1506 if (!k || !k->is_loaded()) { // Only fails for some -Xcomp runs
1507 tj = tk = TypeInstKlassPtr::make(TypePtr::NotNull, env()->Object_klass(), offset);
1508 } else {
1509 tj = tk = TypeAryKlassPtr::make(TypePtr::NotNull, tk->is_aryklassptr()->elem(), k, offset);
1510 }
1511 }
1512
1513 // Check for precise loads from the primary supertype array and force them
1514 // to the supertype cache alias index. Check for generic array loads from
1515 // the primary supertype array and also force them to the supertype cache
1516 // alias index. Since the same load can reach both, we need to merge
1517 // these 2 disparate memories into the same alias class. Since the
1518 // primary supertype array is read-only, there's no chance of confusion
1519 // where we bypass an array load and an array store.
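// For example (illustrative): a constant-offset load from primary_supers[2] and a
// variable-index load from the same array (offset == OffsetBot) can feed the same use,
// so both, together with the secondary_super_cache slot itself, are forced into the
// secondary_super_cache alias index below.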
1520 int primary_supers_offset = in_bytes(Klass::primary_supers_offset());
1521 if (offset == Type::OffsetBot ||
1522 (offset >= primary_supers_offset &&
1523 offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
1524 offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
1525 offset = in_bytes(Klass::secondary_super_cache_offset());
1526 tj = tk = tk->with_offset(offset);
1527 }
1528 }
1529
1530 // Flatten all Raw pointers together.
1531 if (tj->base() == Type::RawPtr)
1532 tj = TypeRawPtr::BOTTOM;
1622 intptr_t key = (intptr_t) adr_type;
1623 key ^= key >> logAliasCacheSize;
1624 return &_alias_cache[key & right_n_bits(logAliasCacheSize)];
1625 }
1626
1627
1628 //-----------------------------grow_alias_types--------------------------------
1629 void Compile::grow_alias_types() {
1630 const int old_ats = _max_alias_types; // how many before?
1631 const int new_ats = old_ats; // how many more?
1632 const int grow_ats = old_ats+new_ats; // how many now?
1633 _max_alias_types = grow_ats;
1634 _alias_types = REALLOC_ARENA_ARRAY(comp_arena(), AliasType*, _alias_types, old_ats, grow_ats);
1635 AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType, new_ats);
1636 Copy::zero_to_bytes(ats, sizeof(AliasType)*new_ats);
1637 for (int i = 0; i < new_ats; i++) _alias_types[old_ats+i] = &ats[i];
1638 }
1639
1640
1641 //--------------------------------find_alias_type------------------------------
1642 Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field) {
1643 if (!do_aliasing()) {
1644 return alias_type(AliasIdxBot);
1645 }
1646
1647 AliasCacheEntry* ace = probe_alias_cache(adr_type);
1648 if (ace->_adr_type == adr_type) {
1649 return alias_type(ace->_index);
1650 }
1651
1652 // Handle special cases.
1653 if (adr_type == nullptr) return alias_type(AliasIdxTop);
1654 if (adr_type == TypePtr::BOTTOM) return alias_type(AliasIdxBot);
1655
1656 // Do it the slow way.
1657 const TypePtr* flat = flatten_alias_type(adr_type);
1658
1659 #ifdef ASSERT
1660 {
1661 ResourceMark rm;
1662 assert(flat == flatten_alias_type(flat), "not idempotent: adr_type = %s; flat = %s => %s",
1663 Type::str(adr_type), Type::str(flat), Type::str(flatten_alias_type(flat)));
1664 assert(flat != TypePtr::BOTTOM, "cannot alias-analyze an untyped ptr: adr_type = %s",
1665 Type::str(adr_type));
1666 if (flat->isa_oopptr() && !flat->isa_klassptr()) {
1667 const TypeOopPtr* foop = flat->is_oopptr();
1668 // Scalarizable allocations have exact klass always.
1669 bool exact = !foop->klass_is_exact() || foop->is_known_instance();
1679 if (alias_type(i)->adr_type() == flat) {
1680 idx = i;
1681 break;
1682 }
1683 }
1684
1685 if (idx == AliasIdxTop) {
1686 if (no_create) return nullptr;
1687 // Grow the array if necessary.
1688 if (_num_alias_types == _max_alias_types) grow_alias_types();
1689 // Add a new alias type.
1690 idx = _num_alias_types++;
1691 _alias_types[idx]->Init(idx, flat);
1692 if (flat == TypeInstPtr::KLASS) alias_type(idx)->set_rewritable(false);
1693 if (flat == TypeAryPtr::RANGE) alias_type(idx)->set_rewritable(false);
1694 if (flat->isa_instptr()) {
1695 if (flat->offset() == java_lang_Class::klass_offset()
1696 && flat->is_instptr()->instance_klass() == env()->Class_klass())
1697 alias_type(idx)->set_rewritable(false);
1698 }
1699 if (flat->isa_aryptr()) {
1700 #ifdef ASSERT
1701 const int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1702 // (T_BYTE has the weakest alignment and size restrictions...)
1703 assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
1704 #endif
1705 if (flat->offset() == TypePtr::OffsetBot) {
1706 alias_type(idx)->set_element(flat->is_aryptr()->elem());
1707 }
1708 }
1709 if (flat->isa_klassptr()) {
1710 if (UseCompactObjectHeaders) {
1711 if (flat->offset() == in_bytes(Klass::prototype_header_offset()))
1712 alias_type(idx)->set_rewritable(false);
1713 }
1714 if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
1715 alias_type(idx)->set_rewritable(false);
1716 if (flat->offset() == in_bytes(Klass::access_flags_offset()))
1717 alias_type(idx)->set_rewritable(false);
1718 if (flat->offset() == in_bytes(Klass::misc_flags_offset()))
1719 alias_type(idx)->set_rewritable(false);
1720 if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
1721 alias_type(idx)->set_rewritable(false);
1722 if (flat->offset() == in_bytes(Klass::secondary_super_cache_offset()))
1723 alias_type(idx)->set_rewritable(false);
1724 }
1725 // %%% (We would like to finalize JavaThread::threadObj_offset(),
1726 // but the base pointer type is not distinctive enough to identify
1727 // references into JavaThread.)
1728
1729 // Check for final fields.
1730 const TypeInstPtr* tinst = flat->isa_instptr();
1731 if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
1732 ciField* field;
1733 if (tinst->const_oop() != nullptr &&
1734 tinst->instance_klass() == ciEnv::current()->Class_klass() &&
1735 tinst->offset() >= (tinst->instance_klass()->layout_helper_size_in_bytes())) {
1736 // static field
1737 ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
1738 field = k->get_field_by_offset(tinst->offset(), true);
1739 } else {
1740 ciInstanceKlass *k = tinst->instance_klass();
1741 field = k->get_field_by_offset(tinst->offset(), false);
1742 }
1743 assert(field == nullptr ||
1744 original_field == nullptr ||
1745 (field->holder() == original_field->holder() &&
1746 field->offset_in_bytes() == original_field->offset_in_bytes() &&
1747 field->is_static() == original_field->is_static()), "wrong field?");
1748 // Set field() and is_rewritable() attributes.
1749 if (field != nullptr) alias_type(idx)->set_field(field);
1750 }
1751 }
1752
1753 // Fill the cache for next time.
1754 ace->_adr_type = adr_type;
1755 ace->_index = idx;
1756 assert(alias_type(adr_type) == alias_type(idx), "type must be installed");
1757
1758 // Might as well try to fill the cache for the flattened version, too.
1759 AliasCacheEntry* face = probe_alias_cache(flat);
1760 if (face->_adr_type == nullptr) {
1761 face->_adr_type = flat;
1762 face->_index = idx;
1763 assert(alias_type(flat) == alias_type(idx), "flat type must work too");
1764 }
1765
1766 return alias_type(idx);
1767 }
1768
1769
1770 Compile::AliasType* Compile::alias_type(ciField* field) {
1771 const TypeOopPtr* t;
1772 if (field->is_static())
1773 t = TypeInstPtr::make(field->holder()->java_mirror());
1774 else
1775 t = TypeOopPtr::make_from_klass_raw(field->holder());
1776 AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
1777 assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
1778 return atp;
1779 }
1780
1781
1782 //------------------------------have_alias_type--------------------------------
1783 bool Compile::have_alias_type(const TypePtr* adr_type) {
1862 assert(!C->major_progress(), "not cleared");
1863
1864 if (_for_post_loop_igvn.length() > 0) {
1865 while (_for_post_loop_igvn.length() > 0) {
1866 Node* n = _for_post_loop_igvn.pop();
1867 n->remove_flag(Node::NodeFlags::Flag_for_post_loop_opts_igvn);
1868 igvn._worklist.push(n);
1869 }
1870 igvn.optimize();
1871 if (failing()) return;
1872 assert(_for_post_loop_igvn.length() == 0, "no more delayed nodes allowed");
1873 assert(C->parse_predicate_count() == 0, "all parse predicates should have been removed now");
1874
1875 // Sometimes IGVN sets major progress (e.g., when processing loop nodes).
1876 if (C->major_progress()) {
1877 C->clear_major_progress(); // ensure that major progress is now clear
1878 }
1879 }
1880 }
1881
1882 void Compile::record_for_merge_stores_igvn(Node* n) {
1883 if (!n->for_merge_stores_igvn()) {
1884 assert(!_for_merge_stores_igvn.contains(n), "duplicate");
1885 n->add_flag(Node::NodeFlags::Flag_for_merge_stores_igvn);
1886 _for_merge_stores_igvn.append(n);
1887 }
1888 }
1889
1890 void Compile::remove_from_merge_stores_igvn(Node* n) {
1891 n->remove_flag(Node::NodeFlags::Flag_for_merge_stores_igvn);
1892 _for_merge_stores_igvn.remove(n);
1893 }
1894
1895 // We need to delay merging stores until RangeCheck smearing has removed the RangeChecks during
1896 // the post loop opts IGVN phase. If we merge earlier, there may still be some RangeChecks between
1897 // the stores, and we would merge the wrong sequence of stores.
1898 // Example:
1899 // StoreI RangeCheck StoreI StoreI RangeCheck StoreI
1900 // Apply MergeStores:
1901 // StoreI RangeCheck [ StoreL ] RangeCheck StoreI
1980 assert(next_bci == iter.next_bci() || next_bci == iter.get_dest(), "wrong next_bci at unstable_if");
1981 Bytecodes::Code c = iter.cur_bc();
1982 Node* lhs = nullptr;
1983 Node* rhs = nullptr;
1984 if (c == Bytecodes::_if_acmpeq || c == Bytecodes::_if_acmpne) {
1985 lhs = unc->peek_operand(0);
1986 rhs = unc->peek_operand(1);
1987 } else if (c == Bytecodes::_ifnull || c == Bytecodes::_ifnonnull) {
1988 lhs = unc->peek_operand(0);
1989 }
1990
1991 ResourceMark rm;
1992 const MethodLivenessResult& live_locals = method->liveness_at_bci(next_bci);
1993 assert(live_locals.is_valid(), "broken liveness info");
1994 int len = (int)live_locals.size();
1995
1996 for (int i = 0; i < len; i++) {
1997 Node* local = unc->local(jvms, i);
1998 // Kill the local using the liveness at next_bci.
1999 // Give up when the local looks like an operand, so that re-execution stays safe.
2000 if (!live_locals.at(i) && !local->is_top() && local != lhs && local!= rhs) {
2001 uint idx = jvms->locoff() + i;
2002 #ifdef ASSERT
2003 if (PrintOpto && Verbose) {
2004 tty->print("[unstable_if] kill local#%d: ", idx);
2005 local->dump();
2006 tty->cr();
2007 }
2008 #endif
2009 igvn.replace_input_of(unc, idx, top());
2010 modified = true;
2011 }
2012 }
2013 }
2014
2015 // keep the modified trap for late query
2016 if (modified) {
2017 trap->set_modified();
2018 } else {
2019 _unstable_if_traps.delete_at(i);
2020 }
2021 }
2022 igvn.optimize();
2023 }
2024
2025 // StringOpts and late inlining of string methods
2026 void Compile::inline_string_calls(bool parse_time) {
2027 {
2028 // remove useless nodes to make the usage analysis simpler
2029 ResourceMark rm;
2030 PhaseRemoveUseless pru(initial_gvn(), *igvn_worklist());
2031 }
2032
2033 {
2034 ResourceMark rm;
2035 print_method(PHASE_BEFORE_STRINGOPTS, 3);
2190
2191 if (_string_late_inlines.length() > 0) {
2192 assert(has_stringbuilder(), "inconsistent");
2193
2194 inline_string_calls(false);
2195
2196 if (failing()) return;
2197
2198 inline_incrementally_cleanup(igvn);
2199 }
2200
2201 set_inlining_incrementally(false);
2202 }
2203
2204 void Compile::process_late_inline_calls_no_inline(PhaseIterGVN& igvn) {
2205 // "inlining_incrementally() == false" is used to signal that no inlining is allowed
2206 // (see LateInlineVirtualCallGenerator::do_late_inline_check() for details).
2207 // Tracking and verification of modified nodes is disabled by setting "_modified_nodes == nullptr"
2208 // as if "inlining_incrementally() == true" were set.
2209 assert(inlining_incrementally() == false, "not allowed");
2210 assert(_modified_nodes == nullptr, "not allowed");
2211 assert(_late_inlines.length() > 0, "sanity");
2212
2213 while (_late_inlines.length() > 0) {
2214 igvn_worklist()->ensure_empty(); // should be done with igvn
2215
2216 while (inline_incrementally_one()) {
2217 assert(!failing_internal() || failure_is_artificial(), "inconsistent");
2218 }
2219 if (failing()) return;
2220
2221 inline_incrementally_cleanup(igvn);
2222 }
2223 }
2224
2225 bool Compile::optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode) {
2226 if (_loop_opts_cnt > 0) {
2227 while (major_progress() && (_loop_opts_cnt > 0)) {
2228 TracePhase tp(_t_idealLoop);
2229 PhaseIdealLoop::optimize(igvn, mode);
2230 _loop_opts_cnt--;
2231 if (failing()) return false;
2232 if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2233 }
2234 }
2235 return true;
2236 }
2237
2238 // Remove edges from "root" to each SafePoint at a backward branch.
2239 // They were inserted during parsing (see add_safepoint()) to make
2240 // infinite loops without calls or exceptions visible to root, i.e.,
2241 // useful.
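// For example (illustrative): "while (true) {}" compiles to a loop with no calls or
// exceptions; without the Root->SafePoint edge the entire loop body would appear dead
// and be removed by PhaseRemoveUseless.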
2242 void Compile::remove_root_to_sfpts_edges(PhaseIterGVN& igvn) {
2347 print_method(PHASE_ITER_GVN_AFTER_VECTOR, 2);
2348 }
2349 assert(!has_vbox_nodes(), "sanity");
2350
2351 if (!failing() && RenumberLiveNodes && live_nodes() + NodeLimitFudgeFactor < unique()) {
2352 Compile::TracePhase tp(_t_renumberLive);
2353 igvn_worklist()->ensure_empty(); // should be done with igvn
2354 {
2355 ResourceMark rm;
2356 PhaseRenumberLive prl(initial_gvn(), *igvn_worklist());
2357 }
2358 igvn.reset_from_gvn(initial_gvn());
2359 igvn.optimize();
2360 if (failing()) return;
2361 }
2362
2363 // Now that all inlining is over and no PhaseRemoveUseless will run, cut edge from root to loop
2364 // safepoints
2365 remove_root_to_sfpts_edges(igvn);
2366
2367 if (failing()) return;
2368
2369 // Perform escape analysis
2370 if (do_escape_analysis() && ConnectionGraph::has_candidates(this)) {
2371 if (has_loops()) {
2372 // Cleanup graph (remove dead nodes).
2373 TracePhase tp(_t_idealLoop);
2374 PhaseIdealLoop::optimize(igvn, LoopOptsMaxUnroll);
2375 if (failing()) return;
2376 }
2377 bool progress;
2378 print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
2379 do {
2380 ConnectionGraph::do_analysis(this, &igvn);
2381
2382 if (failing()) return;
2383
2384 int mcount = macro_count(); // Record number of allocations and locks before IGVN
2385
2386 // Optimize out fields loads from scalar replaceable allocations.
2472 // Loop transforms on the ideal graph. Range Check Elimination,
2473 // peeling, unrolling, etc.
2474 if (!optimize_loops(igvn, LoopOptsDefault)) {
2475 return;
2476 }
2477
2478 if (failing()) return;
2479
2480 C->clear_major_progress(); // ensure that major progress is now clear
2481
2482 process_for_post_loop_opts_igvn(igvn);
2483
2484 process_for_merge_stores_igvn(igvn);
2485
2486 if (failing()) return;
2487
2488 #ifdef ASSERT
2489 bs->verify_gc_barriers(this, BarrierSetC2::BeforeMacroExpand);
2490 #endif
2491
2492 {
2493 TracePhase tp(_t_macroExpand);
2494 print_method(PHASE_BEFORE_MACRO_EXPANSION, 3);
2495 PhaseMacroExpand mex(igvn);
2496 if (mex.expand_macro_nodes()) {
2497 assert(failing(), "must bail out w/ explicit message");
2498 return;
2499 }
2500 print_method(PHASE_AFTER_MACRO_EXPANSION, 2);
2501 }
2502
2503 {
2504 TracePhase tp(_t_barrierExpand);
2505 if (bs->expand_barriers(this, igvn)) {
2506 assert(failing(), "must bail out w/ explicit message");
2507 return;
2508 }
2509 print_method(PHASE_BARRIER_EXPANSION, 2);
2510 }
2511
2512 if (C->max_vector_size() > 0) {
2513 C->optimize_logic_cones(igvn);
2514 igvn.optimize();
2515 if (failing()) return;
2516 }
2517
2518 DEBUG_ONLY( _modified_nodes = nullptr; )
2519
2520 assert(igvn._worklist.size() == 0, "not empty");
2521
2522 assert(_late_inlines.length() == 0 || IncrementalInlineMH || IncrementalInlineVirtual, "not empty");
2523
2524 if (_late_inlines.length() > 0) {
2525 // More opportunities to optimize virtual and MH calls.
2526 // Though it's maybe too late to perform inlining, strength-reducing them to direct calls is still an option.
2527 process_late_inline_calls_no_inline(igvn);
2528 if (failing()) return;
2529 }
2530 } // (End scope of igvn; run destructor if necessary for asserts.)
2531
2532 check_no_dead_use();
2533
2534 // We will never use the NodeHash table any more. Clear it so that final_graph_reshaping does not have
2535 // to remove hashes to unlock nodes for modifications.
2536 C->node_hash()->clear();
2537
2538 // A method with only infinite loops has no edges entering loops from root
2539 {
2540 TracePhase tp(_t_graphReshaping);
2541 if (final_graph_reshaping()) {
2542 assert(failing(), "must bail out w/ explicit message");
2543 return;
2544 }
2545 }
2546
2547 print_method(PHASE_OPTIMIZE_FINISHED, 2);
2548 DEBUG_ONLY(set_phase_optimize_finished();)
2549 }
3282 int nop = n->Opcode();
3283 // Clone shared simple arguments to uncommon calls, item (1).
3284 if (n->outcnt() > 1 &&
3285 !n->is_Proj() &&
3286 nop != Op_CreateEx &&
3287 nop != Op_CheckCastPP &&
3288 nop != Op_DecodeN &&
3289 nop != Op_DecodeNKlass &&
3290 !n->is_Mem() &&
3291 !n->is_Phi()) {
3292 Node *x = n->clone();
3293 call->set_req(TypeFunc::Parms, x);
3294 }
3295 }
3296 break;
3297 }
3298 case Op_StoreB:
3299 case Op_StoreC:
3300 case Op_StoreI:
3301 case Op_StoreL:
3302 case Op_CompareAndSwapB:
3303 case Op_CompareAndSwapS:
3304 case Op_CompareAndSwapI:
3305 case Op_CompareAndSwapL:
3306 case Op_CompareAndSwapP:
3307 case Op_CompareAndSwapN:
3308 case Op_WeakCompareAndSwapB:
3309 case Op_WeakCompareAndSwapS:
3310 case Op_WeakCompareAndSwapI:
3311 case Op_WeakCompareAndSwapL:
3312 case Op_WeakCompareAndSwapP:
3313 case Op_WeakCompareAndSwapN:
3314 case Op_CompareAndExchangeB:
3315 case Op_CompareAndExchangeS:
3316 case Op_CompareAndExchangeI:
3317 case Op_CompareAndExchangeL:
3318 case Op_CompareAndExchangeP:
3319 case Op_CompareAndExchangeN:
3320 case Op_GetAndAddS:
3321 case Op_GetAndAddB:
3825 k->subsume_by(m, this);
3826 }
3827 }
3828 }
3829 break;
3830 }
3831 case Op_CmpUL: {
3832 if (!Matcher::has_match_rule(Op_CmpUL)) {
3833 // No support for unsigned long comparisons
3834 ConINode* sign_pos = new ConINode(TypeInt::make(BitsPerLong - 1));
3835 Node* sign_bit_mask = new RShiftLNode(n->in(1), sign_pos);
3836 Node* orl = new OrLNode(n->in(1), sign_bit_mask);
3837 ConLNode* remove_sign_mask = new ConLNode(TypeLong::make(max_jlong));
3838 Node* andl = new AndLNode(orl, remove_sign_mask);
3839 Node* cmp = new CmpLNode(andl, n->in(2));
3840 n->subsume_by(cmp, this);
3841 }
3842 break;
3843 }
3844 #ifdef ASSERT
3845 case Op_ConNKlass: {
3846 const TypePtr* tp = n->as_Type()->type()->make_ptr();
3847 ciKlass* klass = tp->is_klassptr()->exact_klass();
3848 assert(klass->is_in_encoding_range(), "klass cannot be compressed");
3849 break;
3850 }
3851 #endif
3852 default:
3853 assert(!n->is_Call(), "");
3854 assert(!n->is_Mem(), "");
3855 assert(nop != Op_ProfileBoolean, "should be eliminated during IGVN");
3856 break;
3857 }
3858 }
3859
3860 //------------------------------final_graph_reshaping_walk---------------------
3861 // Replacing Opaque nodes with their input in final_graph_reshaping_impl(),
3862 // requires that the walk visits a node's inputs before visiting the node.
3863 void Compile::final_graph_reshaping_walk(Node_Stack& nstack, Node* root, Final_Reshape_Counts& frc, Unique_Node_List& dead_nodes) {
3864 Unique_Node_List sfpt;
4211 }
4212 }
4213
4214 bool Compile::needs_clinit_barrier(ciMethod* method, ciMethod* accessing_method) {
4215 return method->is_static() && needs_clinit_barrier(method->holder(), accessing_method);
4216 }
4217
4218 bool Compile::needs_clinit_barrier(ciField* field, ciMethod* accessing_method) {
4219 return field->is_static() && needs_clinit_barrier(field->holder(), accessing_method);
4220 }
4221
4222 bool Compile::needs_clinit_barrier(ciInstanceKlass* holder, ciMethod* accessing_method) {
4223 if (holder->is_initialized()) {
4224 return false;
4225 }
4226 if (holder->is_being_initialized()) {
4227 if (accessing_method->holder() == holder) {
4228 // Access inside a class. The barrier can be elided when access happens in <clinit>,
4229 // <init>, or a static method. In all those cases, an initialization barrier for the
4230 // holder klass has already been passed.
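// For example (illustrative): while C is being initialized, a static method C::helper()
// needs no extra barrier for accesses to C, because helper() could only have been entered
// after C's initialization barrier was passed.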
4231 if (accessing_method->is_static_initializer() ||
4232 accessing_method->is_object_initializer() ||
4233 accessing_method->is_static()) {
4234 return false;
4235 }
4236 } else if (accessing_method->holder()->is_subclass_of(holder)) {
4237 // Access from a subclass. The barrier can be elided only when access happens in <clinit>.
4238 // In case of <init> or a static method, a barrier on the subclass is not enough: the
4239 // child class can become fully initialized while its parent class is still being initialized.
4240 if (accessing_method->is_static_initializer()) {
4241 return false;
4242 }
4243 }
4244 ciMethod* root = method(); // the root method of compilation
4245 if (root != accessing_method) {
4246 return needs_clinit_barrier(holder, root); // check access in the context of compilation root
4247 }
4248 }
4249 return true;
4250 }
4251
4252 #ifndef PRODUCT
4253 //------------------------------verify_bidirectional_edges---------------------
4254 // For each input edge to a node (i.e., for each Use-Def edge), verify that
4255 // there is a corresponding Def-Use edge.
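// For example (illustrative): if m appears k times among n's inputs, then n must appear
// exactly k times in m's out array; the counting loops below check both directions.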
4256 void Compile::verify_bidirectional_edges(Unique_Node_List& visited, const Unique_Node_List* root_and_safepoints) const {
4257 // Allocate stack of size C->live_nodes()/16 to avoid frequent realloc
4258 uint stack_size = live_nodes() >> 4;
4259 Node_List nstack(MAX2(stack_size, (uint) OptoNodeListSize));
4260 if (root_and_safepoints != nullptr) {
4290 if (in != nullptr && !in->is_top()) {
4291 // Count instances of `next`
4292 int cnt = 0;
4293 for (uint idx = 0; idx < in->_outcnt; idx++) {
4294 if (in->_out[idx] == n) {
4295 cnt++;
4296 }
4297 }
4298 assert(cnt > 0, "Failed to find Def-Use edge.");
4299 // Check for duplicate edges
4300 // walk the input array downcounting the input edges to n
4301 for (uint j = 0; j < length; j++) {
4302 if (n->in(j) == in) {
4303 cnt--;
4304 }
4305 }
4306 assert(cnt == 0, "Mismatched edge count.");
4307 } else if (in == nullptr) {
4308 assert(i == 0 || i >= n->req() ||
4309 n->is_Region() || n->is_Phi() || n->is_ArrayCopy() ||
4310 (n->is_Unlock() && i == (n->req() - 1)) ||
4311 (n->is_MemBar() && i == 5), // the precedence edge to a membar can be removed during macro node expansion
4312 "only region, phi, arraycopy, unlock or membar nodes have null data edges");
4313 } else {
4314 assert(in->is_top(), "sanity");
4315 // Nothing to check.
4316 }
4317 }
4318 }
4319 }
4320
4321 //------------------------------verify_graph_edges---------------------------
4322 // Walk the Graph and verify that there is a one-to-one correspondence
4323 // between Use-Def edges and Def-Use edges in the graph.
4324 void Compile::verify_graph_edges(bool no_dead_code, const Unique_Node_List* root_and_safepoints) const {
4325 if (VerifyGraphEdges) {
4326 Unique_Node_List visited;
4327
4328 // Call graph walk to check edges
4329 verify_bidirectional_edges(visited, root_and_safepoints);
4330 if (no_dead_code) {
4331 // Now make sure that no visited node is used by an unvisited node.
4332 bool dead_nodes = false;
4443 // (1) subklass is already limited to a subtype of superklass => always ok
4444 // (2) subklass does not overlap with superklass => always fail
4445 // (3) superklass has NO subtypes and we can check with a simple compare.
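// For example (illustrative): with superk == String (a final class with no subklasses),
// case (3) applies and the caller can emit a single klass-pointer compare; if subk is
// already known to be String or a subtype of it, case (1) applies and no runtime test
// is needed.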
4446 Compile::SubTypeCheckResult Compile::static_subtype_check(const TypeKlassPtr* superk, const TypeKlassPtr* subk, bool skip) {
4447 if (skip) {
4448 return SSC_full_test; // Let caller generate the general case.
4449 }
4450
4451 if (subk->is_java_subtype_of(superk)) {
4452 return SSC_always_true; // (0) and (1) this test cannot fail
4453 }
4454
4455 if (!subk->maybe_java_subtype_of(superk)) {
4456 return SSC_always_false; // (2) true path dead; no dynamic test needed
4457 }
4458
4459 const Type* superelem = superk;
4460 if (superk->isa_aryklassptr()) {
4461 int ignored;
4462 superelem = superk->is_aryklassptr()->base_element_type(ignored);
4463 }
4464
4465 if (superelem->isa_instklassptr()) {
4466 ciInstanceKlass* ik = superelem->is_instklassptr()->instance_klass();
4467 if (!ik->has_subklass()) {
4468 if (!ik->is_final()) {
4469 // Add a dependency if there is a chance of a later subclass.
4470 dependencies()->assert_leaf_type(ik);
4471 }
4472 if (!superk->maybe_java_subtype_of(subk)) {
4473 return SSC_always_false;
4474 }
4475 return SSC_easy_test; // (3) caller can do a simple ptr comparison
4476 }
4477 } else {
4478 // A primitive array type has no subtypes.
4479 return SSC_easy_test; // (3) caller can do a simple ptr comparison
4480 }
4481
4482 return SSC_full_test;
4924 const Type* t = igvn.type_or_null(n);
4925 assert((t == nullptr) || (t == t->remove_speculative()), "no more speculative types");
4926 if (n->is_Type()) {
4927 t = n->as_Type()->type();
4928 assert(t == t->remove_speculative(), "no more speculative types");
4929 }
4930 // Iterate over outs - endless loops are unreachable from below
4931 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
4932 Node *m = n->fast_out(i);
4933 if (not_a_node(m)) {
4934 continue;
4935 }
4936 worklist.push(m);
4937 }
4938 }
4939 igvn.check_no_speculative_types();
4940 #endif
4941 }
4942 }
4943
4944 // Auxiliary methods to support randomized stressing/fuzzing.
4945
4946 void Compile::initialize_stress_seed(const DirectiveSet* directive) {
4947 if (FLAG_IS_DEFAULT(StressSeed) || (FLAG_IS_ERGO(StressSeed) && directive->RepeatCompilationOption)) {
4948 _stress_seed = static_cast<uint>(Ticks::now().nanoseconds());
4949 FLAG_SET_ERGO(StressSeed, _stress_seed);
4950 } else {
4951 _stress_seed = StressSeed;
4952 }
4953 if (_log != nullptr) {
4954 _log->elem("stress_test seed='%u'", _stress_seed);
4955 }
4956 }
4957
4958 int Compile::random() {
4959 _stress_seed = os::next_random(_stress_seed);
4960 return static_cast<int>(_stress_seed);
4961 }
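// Illustrative usage sketch: stress code typically derives bounded choices from random(),
// e.g. "uint(random()) % n". Since the sequence is fully determined by the seed, a failure
// can be replayed by passing the logged value back via -XX:StressSeed=<seed>.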
4962
4963 // This method can be called an arbitrary number of times, with current count
5269 } else {
5270 _debug_network_printer->update_compiled_method(C->method());
5271 }
5272 tty->print_cr("Method printed over network stream to IGV");
5273 _debug_network_printer->print(name, C->root(), visible_nodes);
5274 }
5275 #endif
5276
5277 Node* Compile::narrow_value(BasicType bt, Node* value, const Type* type, PhaseGVN* phase, bool transform_res) {
5278 if (type != nullptr && phase->type(value)->higher_equal(type)) {
5279 return value;
5280 }
5281 Node* result = nullptr;
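// Illustrative semantics: only the low bits of 'value' are kept. T_BYTE and T_SHORT are
// sign-extended with a shift pair (e.g. for T_BYTE, 0x1FF becomes ((0x1FF << 24) >> 24) == -1),
// while T_BOOLEAN and T_CHAR are zero-extended by masking.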
5282 if (bt == T_BYTE) {
5283 result = phase->transform(new LShiftINode(value, phase->intcon(24)));
5284 result = new RShiftINode(result, phase->intcon(24));
5285 } else if (bt == T_BOOLEAN) {
5286 result = new AndINode(value, phase->intcon(0xFF));
5287 } else if (bt == T_CHAR) {
5288 result = new AndINode(value, phase->intcon(0xFFFF));
5289 } else {
5290 assert(bt == T_SHORT, "unexpected narrow type");
5291 result = phase->transform(new LShiftINode(value, phase->intcon(16)));
5292 result = new RShiftINode(result, phase->intcon(16));
5293 }
5294 if (transform_res) {
5295 result = phase->transform(result);
5296 }
5297 return result;
5298 }
5299
5300 void Compile::record_method_not_compilable_oom() {
5301 record_method_not_compilable(CompilationMemoryStatistic::failure_reason_memlimit());
5302 }
|
40 #include "gc/shared/c2/barrierSetC2.hpp"
41 #include "jfr/jfrEvents.hpp"
42 #include "jvm_io.h"
43 #include "memory/allocation.hpp"
44 #include "memory/arena.hpp"
45 #include "memory/resourceArea.hpp"
46 #include "opto/addnode.hpp"
47 #include "opto/block.hpp"
48 #include "opto/c2compiler.hpp"
49 #include "opto/callGenerator.hpp"
50 #include "opto/callnode.hpp"
51 #include "opto/castnode.hpp"
52 #include "opto/cfgnode.hpp"
53 #include "opto/chaitin.hpp"
54 #include "opto/compile.hpp"
55 #include "opto/connode.hpp"
56 #include "opto/convertnode.hpp"
57 #include "opto/divnode.hpp"
58 #include "opto/escape.hpp"
59 #include "opto/idealGraphPrinter.hpp"
60 #include "opto/inlinetypenode.hpp"
61 #include "opto/locknode.hpp"
62 #include "opto/loopnode.hpp"
63 #include "opto/machnode.hpp"
64 #include "opto/macro.hpp"
65 #include "opto/matcher.hpp"
66 #include "opto/mathexactnode.hpp"
67 #include "opto/memnode.hpp"
68 #include "opto/movenode.hpp"
69 #include "opto/mulnode.hpp"
70 #include "opto/narrowptrnode.hpp"
71 #include "opto/node.hpp"
72 #include "opto/opaquenode.hpp"
73 #include "opto/opcodes.hpp"
74 #include "opto/output.hpp"
75 #include "opto/parse.hpp"
76 #include "opto/phaseX.hpp"
77 #include "opto/rootnode.hpp"
78 #include "opto/runtime.hpp"
79 #include "opto/stringopts.hpp"
80 #include "opto/type.hpp"
81 #include "opto/vector.hpp"
82 #include "opto/vectornode.hpp"
83 #include "runtime/globals_extension.hpp"
84 #include "runtime/sharedRuntime.hpp"
85 #include "runtime/signature.hpp"
86 #include "runtime/stubRoutines.hpp"
87 #include "runtime/timer.hpp"
88 #include "utilities/align.hpp"
388 // as dead to be conservative about the dead node count at any
389 // given time.
390 if (!dead->is_Con()) {
391 record_dead_node(dead->_idx);
392 }
393 if (dead->is_macro()) {
394 remove_macro_node(dead);
395 }
396 if (dead->is_expensive()) {
397 remove_expensive_node(dead);
398 }
399 if (dead->is_OpaqueTemplateAssertionPredicate()) {
400 remove_template_assertion_predicate_opaque(dead->as_OpaqueTemplateAssertionPredicate());
401 }
402 if (dead->is_ParsePredicate()) {
403 remove_parse_predicate(dead->as_ParsePredicate());
404 }
405 if (dead->for_post_loop_opts_igvn()) {
406 remove_from_post_loop_opts_igvn(dead);
407 }
408 if (dead->is_InlineType()) {
409 remove_inline_type(dead);
410 }
411 if (dead->for_merge_stores_igvn()) {
412 remove_from_merge_stores_igvn(dead);
413 }
414 if (dead->is_Call()) {
415 remove_useless_late_inlines( &_late_inlines, dead);
416 remove_useless_late_inlines( &_string_late_inlines, dead);
417 remove_useless_late_inlines( &_boxing_late_inlines, dead);
418 remove_useless_late_inlines(&_vector_reboxing_late_inlines, dead);
419
420 if (dead->is_CallStaticJava()) {
421 remove_unstable_if_trap(dead->as_CallStaticJava(), false);
422 }
423 }
424 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
425 bs->unregister_potential_barrier_node(dead);
426 }
427
428 // Disconnect all useless nodes by disconnecting those at the boundary.
429 void Compile::disconnect_useless_nodes(Unique_Node_List& useful, Unique_Node_List& worklist, const Unique_Node_List* root_and_safepoints) {
430 uint next = 0;
438 // Use raw traversal of out edges since this code removes out edges
439 int max = n->outcnt();
440 for (int j = 0; j < max; ++j) {
441 Node* child = n->raw_out(j);
442 if (!useful.member(child)) {
443 assert(!child->is_top() || child != top(),
444 "If top is cached in Compile object it is in useful list");
445 // Only need to remove this out-edge to the useless node
446 n->raw_del_out(j);
447 --j;
448 --max;
449 if (child->is_data_proj_of_pure_function(n)) {
450 worklist.push(n);
451 }
452 }
453 }
454 if (n->outcnt() == 1 && n->has_special_unique_user()) {
455 assert(useful.member(n->unique_out()), "do not push a useless node");
456 worklist.push(n->unique_out());
457 }
458 if (n->outcnt() == 0) {
459 worklist.push(n);
460 }
461 }
462
463 remove_useless_nodes(_macro_nodes, useful); // remove useless macro nodes
464 remove_useless_nodes(_parse_predicates, useful); // remove useless Parse Predicate nodes
465 // Remove useless Template Assertion Predicate opaque nodes
466 remove_useless_nodes(_template_assertion_predicate_opaques, useful);
467 remove_useless_nodes(_expensive_nodes, useful); // remove useless expensive nodes
468 remove_useless_nodes(_for_post_loop_igvn, useful); // remove useless node recorded for post loop opts IGVN pass
469 remove_useless_nodes(_inline_type_nodes, useful); // remove useless inline type nodes
470 #ifdef ASSERT
471 if (_modified_nodes != nullptr) {
472 _modified_nodes->remove_useless_nodes(useful.member_set());
473 }
474 #endif
475 remove_useless_nodes(_for_merge_stores_igvn, useful); // remove useless node recorded for merge stores IGVN pass
476 remove_useless_unstable_if_traps(useful); // remove useless unstable_if traps
477 remove_useless_coarsened_locks(useful); // remove useless coarsened locks nodes
478 #ifdef ASSERT
479 if (_modified_nodes != nullptr) {
480 _modified_nodes->remove_useless_nodes(useful.member_set());
481 }
482 #endif
483
484 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
485 bs->eliminate_useless_gc_barriers(useful, this);
486 // clean up the late inline lists
487 remove_useless_late_inlines( &_late_inlines, useful);
488 remove_useless_late_inlines( &_string_late_inlines, useful);
489 remove_useless_late_inlines( &_boxing_late_inlines, useful);
490 remove_useless_late_inlines(&_vector_reboxing_late_inlines, useful);
491 debug_only(verify_graph_edges(true /*check for no_dead_code*/, root_and_safepoints);)
492 }
493
494 // ============================================================================
636
637 Compile::Compile(ciEnv* ci_env, ciMethod* target, int osr_bci,
638 Options options, DirectiveSet* directive)
639 : Phase(Compiler),
640 _compile_id(ci_env->compile_id()),
641 _options(options),
642 _method(target),
643 _entry_bci(osr_bci),
644 _ilt(nullptr),
645 _stub_function(nullptr),
646 _stub_name(nullptr),
647 _stub_entry_point(nullptr),
648 _max_node_limit(MaxNodeLimit),
649 _post_loop_opts_phase(false),
650 _merge_stores_phase(false),
651 _allow_macro_nodes(true),
652 _inlining_progress(false),
653 _inlining_incrementally(false),
654 _do_cleanup(false),
655 _has_reserved_stack_access(target->has_reserved_stack_access()),
656 _has_circular_inline_type(false),
657 #ifndef PRODUCT
658 _igv_idx(0),
659 _trace_opto_output(directive->TraceOptoOutputOption),
660 #endif
661 _has_method_handle_invokes(false),
662 _clinit_barrier_on_entry(false),
663 _stress_seed(0),
664 _comp_arena(mtCompiler, Arena::Tag::tag_comp),
665 _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
666 _env(ci_env),
667 _directive(directive),
668 _log(ci_env->log()),
669 _first_failure_details(nullptr),
670 _intrinsics(comp_arena(), 0, 0, nullptr),
671 _macro_nodes(comp_arena(), 8, 0, nullptr),
672 _parse_predicates(comp_arena(), 8, 0, nullptr),
673 _template_assertion_predicate_opaques(comp_arena(), 8, 0, nullptr),
674 _expensive_nodes(comp_arena(), 8, 0, nullptr),
675 _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
676 _inline_type_nodes(comp_arena(), 8, 0, nullptr),
677 _for_merge_stores_igvn(comp_arena(), 8, 0, nullptr),
678 _unstable_if_traps(comp_arena(), 8, 0, nullptr),
679 _coarsened_locks(comp_arena(), 8, 0, nullptr),
680 _congraph(nullptr),
681 NOT_PRODUCT(_igv_printer(nullptr) COMMA)
682 _unique(0),
683 _dead_node_count(0),
684 _dead_node_list(comp_arena()),
685 _node_arena_one(mtCompiler, Arena::Tag::tag_node),
686 _node_arena_two(mtCompiler, Arena::Tag::tag_node),
687 _node_arena(&_node_arena_one),
688 _mach_constant_base_node(nullptr),
689 _Compile_types(mtCompiler, Arena::Tag::tag_type),
690 _initial_gvn(nullptr),
691 _igvn_worklist(nullptr),
692 _types(nullptr),
693 _node_hash(nullptr),
694 _late_inlines(comp_arena(), 2, 0, nullptr),
695 _string_late_inlines(comp_arena(), 2, 0, nullptr),
696 _boxing_late_inlines(comp_arena(), 2, 0, nullptr),
761 #define MINIMUM_NODE_HASH 1023
762
763 // GVN that will be run immediately on new nodes
764 uint estimated_size = method()->code_size()*4+64;
765 estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size);
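// The initial GVN hash table is sized from the bytecode size (roughly four slots
// per bytecode plus some slack), but never below MINIMUM_NODE_HASH so that small
// methods still get a usable table.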
766 _igvn_worklist = new (comp_arena()) Unique_Node_List(comp_arena());
767 _types = new (comp_arena()) Type_Array(comp_arena());
768 _node_hash = new (comp_arena()) NodeHash(comp_arena(), estimated_size);
769 PhaseGVN gvn;
770 set_initial_gvn(&gvn);
771
772 { // Scope for timing the parser
773 TracePhase tp(_t_parser);
774
775 // Put top into the hash table ASAP.
776 initial_gvn()->transform(top());
777
778 // Set up tf(), start(), and find a CallGenerator.
779 CallGenerator* cg = nullptr;
780 if (is_osr_compilation()) {
781 init_tf(TypeFunc::make(method(), /* is_osr_compilation = */ true));
782 StartNode* s = new StartOSRNode(root(), tf()->domain_sig());
783 initial_gvn()->set_type_bottom(s);
784 verify_start(s);
785 cg = CallGenerator::for_osr(method(), entry_bci());
786 } else {
787 // Normal case.
788 init_tf(TypeFunc::make(method()));
789 StartNode* s = new StartNode(root(), tf()->domain_cc());
790 initial_gvn()->set_type_bottom(s);
791 verify_start(s);
792 if (method()->intrinsic_id() == vmIntrinsics::_Reference_get) {
793 // With java.lang.ref.Reference.get() we must go through the
794 // intrinsic - even when get() is the root
795 // method of the compile - so that, if necessary, the value in
796 // the referent field of the reference object gets recorded by
797 // the pre-barrier code.
798 cg = find_intrinsic(method(), false);
799 }
800 if (cg == nullptr) {
801 float past_uses = method()->interpreter_invocation_count();
802 float expected_uses = past_uses;
803 cg = CallGenerator::for_inline(method(), expected_uses);
804 }
805 }
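// At this point, cg (if non-null) describes how the root method is entered:
// via an OSR entry, an intrinsic, or a plain parse of the whole method.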
806 if (failing()) return;
807 if (cg == nullptr) {
808 const char* reason = InlineTree::check_can_parse(method());
809 assert(reason != nullptr, "expect reason for parse failure");
880 print_ideal_ir("print_ideal");
881 }
882 #endif
883
884 #ifdef ASSERT
885 BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
886 bs->verify_gc_barriers(this, BarrierSetC2::BeforeCodeGen);
887 #endif
888
889 // Dump compilation data to replay it.
890 if (directive->DumpReplayOption) {
891 env()->dump_replay_data(_compile_id);
892 }
893 if (directive->DumpInlineOption && (ilt() != nullptr)) {
894 env()->dump_inline_data(_compile_id);
895 }
896
897 // Now that we know the size of all the monitors we can add a fixed slot
898 // for the original deopt pc.
899 int next_slot = fixed_slots() + (sizeof(address) / VMRegImpl::stack_slot_size);
900 if (needs_stack_repair()) {
901 // One extra slot for the special stack increment value
902 next_slot += 2;
903 }
904 // TODO 8284443 Only reserve extra slot if needed
905 if (InlineTypeReturnedAsFields) {
906 // One extra slot to hold the IsInit information for a nullable
907 // inline type return if we run out of registers.
908 next_slot += 2;
909 }
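// Note: fixed slots are counted in 32-bit stack-slot units (VMRegImpl::stack_slot_size),
// which is why each extra address-sized value reserved above accounts for two slots
// on 64-bit platforms.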
910 set_fixed_slots(next_slot);
911
912 // Compute when to use implicit null checks. Used by matching trap based
913 // nodes and NullCheck optimization.
914 set_allowed_deopt_reasons();
915
916 // Now generate code
917 Code_Gen();
918 }
919
920 //------------------------------Compile----------------------------------------
921 // Compile a runtime stub
922 Compile::Compile(ciEnv* ci_env,
923 TypeFunc_generator generator,
924 address stub_function,
925 const char *stub_name,
926 int is_fancy_jump,
927 bool pass_tls,
928 bool return_pc,
929 DirectiveSet* directive)
930 : Phase(Compiler),
931 _compile_id(0),
932 _options(Options::for_runtime_stub()),
933 _method(nullptr),
934 _entry_bci(InvocationEntryBci),
935 _stub_function(stub_function),
936 _stub_name(stub_name),
937 _stub_entry_point(nullptr),
938 _max_node_limit(MaxNodeLimit),
939 _post_loop_opts_phase(false),
940 _merge_stores_phase(false),
941 _allow_macro_nodes(true),
942 _inlining_progress(false),
943 _inlining_incrementally(false),
944 _has_reserved_stack_access(false),
945 _has_circular_inline_type(false),
946 #ifndef PRODUCT
947 _igv_idx(0),
948 _trace_opto_output(directive->TraceOptoOutputOption),
949 #endif
950 _has_method_handle_invokes(false),
951 _clinit_barrier_on_entry(false),
952 _stress_seed(0),
953 _comp_arena(mtCompiler, Arena::Tag::tag_comp),
954 _barrier_set_state(BarrierSet::barrier_set()->barrier_set_c2()->create_barrier_state(comp_arena())),
955 _env(ci_env),
956 _directive(directive),
957 _log(ci_env->log()),
958 _first_failure_details(nullptr),
959 _for_post_loop_igvn(comp_arena(), 8, 0, nullptr),
960 _for_merge_stores_igvn(comp_arena(), 8, 0, nullptr),
961 _congraph(nullptr),
962 NOT_PRODUCT(_igv_printer(nullptr) COMMA)
963 _unique(0),
964 _dead_node_count(0),
965 _dead_node_list(comp_arena()),
1069
1070 _fixed_slots = 0;
1071 set_has_split_ifs(false);
1072 set_has_loops(false); // first approximation
1073 set_has_stringbuilder(false);
1074 set_has_boxed_value(false);
1075 _trap_can_recompile = false; // no traps emitted yet
1076 _major_progress = true; // start out assuming good things will happen
1077 set_has_unsafe_access(false);
1078 set_max_vector_size(0);
1079 set_clear_upper_avx(false); // false by default; set when the upper bits of ymm registers must be cleared
1080 Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist));
1081 set_decompile_count(0);
1082
1083 #ifndef PRODUCT
1084 Copy::zero_to_bytes(_igv_phase_iter, sizeof(_igv_phase_iter));
1085 #endif
1086
1087 set_do_freq_based_layout(_directive->BlockLayoutByFrequencyOption);
1088 _loop_opts_cnt = LoopOptsCount;
1089 _has_flat_accesses = false;
1090 _flat_accesses_share_alias = true;
1091 _scalarize_in_safepoints = false;
1092
1093 set_do_inlining(Inline);
1094 set_max_inline_size(MaxInlineSize);
1095 set_freq_inline_size(FreqInlineSize);
1096 set_do_scheduling(OptoScheduling);
1097
1098 set_do_vector_loop(false);
1099 set_has_monitors(false);
1100 set_has_scoped_access(false);
1101
1102 if (AllowVectorizeOnDemand) {
1103 if (has_method() && _directive->VectorizeOption) {
1104 set_do_vector_loop(true);
1105 NOT_PRODUCT(if (do_vector_loop() && Verbose) {tty->print("Compile::Init: do vectorized loops (SIMD like) for method %s\n", method()->name()->as_quoted_ascii());})
1106 } else if (has_method() && method()->name() != nullptr &&
1107 method()->intrinsic_id() == vmIntrinsics::_forEachRemaining) {
1108 set_do_vector_loop(true);
1109 }
1110 }
1111 set_use_cmove(UseCMoveUnconditionally /* || do_vector_loop()*/); //TODO: consider having do_vector_loop() mandate use_cmove unconditionally
1112 NOT_PRODUCT(if (use_cmove() && Verbose && has_method()) {tty->print("Compile::Init: use CMove without profitability tests for method %s\n", method()->name()->as_quoted_ascii());})
1357
1358 // Known instance (scalarizable allocation) alias only with itself.
1359 bool is_known_inst = tj->isa_oopptr() != nullptr &&
1360 tj->is_oopptr()->is_known_instance();
1361
1362 // Process weird unsafe references.
1363 if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) {
1364 assert(InlineUnsafeOps || StressReflectiveCode, "indeterminate pointers come only from unsafe ops");
1365 assert(!is_known_inst, "scalarizable allocation should not have unsafe references");
1366 tj = TypeOopPtr::BOTTOM;
1367 ptr = tj->ptr();
1368 offset = tj->offset();
1369 }
1370
1371 // Array pointers need some flattening
1372 const TypeAryPtr* ta = tj->isa_aryptr();
1373 if (ta && ta->is_stable()) {
1374 // Erase stability property for alias analysis.
1375 tj = ta = ta->cast_to_stable(false);
1376 }
1377 if (ta && ta->is_not_flat()) {
1378 // Erase not flat property for alias analysis.
1379 tj = ta = ta->cast_to_not_flat(false);
1380 }
1381 if (ta && ta->is_not_null_free()) {
1382 // Erase not null free property for alias analysis.
1383 tj = ta = ta->cast_to_not_null_free(false);
1384 }
1385
1386 if( ta && is_known_inst ) {
1387 if ( offset != Type::OffsetBot &&
1388 offset > arrayOopDesc::length_offset_in_bytes() ) {
1389 offset = Type::OffsetBot; // Flatten constant access into array body only
1390 tj = ta = ta->
1391 remove_speculative()->
1392 cast_to_ptr_type(ptr)->
1393 with_offset(offset);
1394 }
1395 } else if (ta) {
1396 // For arrays indexed by constant indices, we flatten the alias
1397 // space to include all of the array body. Only the header, klass
1398 // and array length can be accessed un-aliased.
1399 // For flat inline type arrays, each field has its own slice so
1400 // we must include the field offset.
1401 if( offset != Type::OffsetBot ) {
1402 if( ta->const_oop() ) { // MethodData* or Method*
1403 offset = Type::OffsetBot; // Flatten constant access into array body
1404 tj = ta = ta->
1405 remove_speculative()->
1406 cast_to_ptr_type(ptr)->
1407 cast_to_exactness(false)->
1408 with_offset(offset);
1409 } else if( offset == arrayOopDesc::length_offset_in_bytes() ) {
1410 // range is OK as-is.
1411 tj = ta = TypeAryPtr::RANGE;
1412 } else if( offset == oopDesc::klass_offset_in_bytes() ) {
1413 tj = TypeInstPtr::KLASS; // all klass loads look alike
1414 ta = TypeAryPtr::RANGE; // generic ignored junk
1415 ptr = TypePtr::BotPTR;
1416 } else if( offset == oopDesc::mark_offset_in_bytes() ) {
1417 tj = TypeInstPtr::MARK;
1418 ta = TypeAryPtr::RANGE; // generic ignored junk
1419 ptr = TypePtr::BotPTR;
1420 } else { // Random constant offset into array body
1421 offset = Type::OffsetBot; // Flatten constant access into array body
1422 tj = ta = ta->
1423 remove_speculative()->
1424 cast_to_ptr_type(ptr)->
1425 cast_to_exactness(false)->
1426 with_offset(offset);
1427 }
1428 }
1429 // Arrays of fixed size alias with arrays of unknown size.
1430 if (ta->size() != TypeInt::POS) {
1431 const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS);
1432 tj = ta = ta->
1433 remove_speculative()->
1434 cast_to_ptr_type(ptr)->
1435 with_ary(tary)->
1436 cast_to_exactness(false);
1437 }
1438 // Arrays of known objects become arrays of unknown objects.
1439 if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) {
1440 const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size());
1441 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,nullptr,false,Type::Offset(offset), ta->field_offset());
1442 }
1443 if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) {
1444 const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size());
1445 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,nullptr,false,Type::Offset(offset), ta->field_offset());
1446 }
1447 // Initially all flattened array accesses share a single slice
1448 if (ta->is_flat() && ta->elem() != TypeInstPtr::BOTTOM && _flat_accesses_share_alias) {
1449 const TypeAry* tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size(), /* stable= */ false, /* flat= */ true);
1450 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,nullptr,false,Type::Offset(offset), Type::Offset(Type::OffsetBot));
1451 }
1452 // Arrays of bytes and of booleans both use 'bastore' and 'baload' so
1453 // cannot be distinguished by bytecode alone.
1454 if (ta->elem() == TypeInt::BOOL) {
1455 const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size());
1456 ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE);
1457 tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,Type::Offset(offset), ta->field_offset());
1458 }
1459 // During the 2nd round of IterGVN, NotNull castings are removed.
1460 // Make sure the Bottom and NotNull variants alias the same.
1461 // Also, make sure exact and non-exact variants alias the same.
1462 if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != nullptr) {
1463 tj = ta = ta->
1464 remove_speculative()->
1465 cast_to_ptr_type(TypePtr::BotPTR)->
1466 cast_to_exactness(false)->
1467 with_offset(offset);
1468 }
1469 }
1470
1471 // Oop pointers need some flattening
1472 const TypeInstPtr *to = tj->isa_instptr();
1473 if (to && to != TypeOopPtr::BOTTOM) {
1474 ciInstanceKlass* ik = to->instance_klass();
1475 if( ptr == TypePtr::Constant ) {
1476 if (ik != ciEnv::current()->Class_klass() ||
1477 offset < ik->layout_helper_size_in_bytes()) {
1487 } else if( is_known_inst ) {
1488 tj = to; // Keep NotNull and klass_is_exact for instance type
1489 } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) {
1490 // During the 2nd round of IterGVN, NotNull castings are removed.
1491 // Make sure the Bottom and NotNull variants alias the same.
1492 // Also, make sure exact and non-exact variants alias the same.
1493 tj = to = to->
1494 remove_speculative()->
1495 cast_to_instance_id(TypeOopPtr::InstanceBot)->
1496 cast_to_ptr_type(TypePtr::BotPTR)->
1497 cast_to_exactness(false);
1498 }
1499 if (to->speculative() != nullptr) {
1500 tj = to = to->remove_speculative();
1501 }
1502 // Canonicalize the holder of this field
1503 if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) {
1504 // First handle header references such as a LoadKlassNode, even if the
1505 // object's klass is unloaded at compile time (4965979).
1506 if (!is_known_inst) { // Do it only for non-instance types
1507 tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, nullptr, Type::Offset(offset));
1508 }
1509 } else if (offset < 0 || offset >= ik->layout_helper_size_in_bytes()) {
1510 // Static fields are in the space above the normal instance
1511 // fields in the java.lang.Class instance.
1512 if (ik != ciEnv::current()->Class_klass()) {
1513 to = nullptr;
1514 tj = TypeOopPtr::BOTTOM;
1515 offset = tj->offset();
1516 }
1517 } else {
1518 ciInstanceKlass *canonical_holder = ik->get_canonical_holder(offset);
1519 assert(offset < canonical_holder->layout_helper_size_in_bytes(), "");
1520 assert(tj->offset() == offset, "no change to offset expected");
1521 bool xk = to->klass_is_exact();
1522 int instance_id = to->instance_id();
1523
1524 // If the input type's class is the holder: if exact, the type only includes interfaces implemented by the holder
1525 // but if not exact, it may include extra interfaces: build new type from the holder class to make sure only
1526 // its interfaces are included.
1527 if (xk && ik->equals(canonical_holder)) {
1528 assert(tj == TypeInstPtr::make(to->ptr(), canonical_holder, is_known_inst, nullptr, Type::Offset(offset), instance_id), "exact type should be canonical type");
1529 } else {
1530 assert(xk || !is_known_inst, "Known instance should be exact type");
1531 tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, is_known_inst, nullptr, Type::Offset(offset), instance_id);
1532 }
1533 }
1534 }
1535
1536 // Klass pointers to object array klasses need some flattening
1537 const TypeKlassPtr *tk = tj->isa_klassptr();
1538 if( tk ) {
1539 // If we are referencing a field within a Klass, we need
1540 // to assume the worst case of an Object. Both exact and
1541 // inexact types must flatten to the same alias class so
1542 // use NotNull as the PTR.
1543 if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) {
1544 tj = tk = TypeInstKlassPtr::make(TypePtr::NotNull,
1545 env()->Object_klass(),
1546 Type::Offset(offset));
1547 }
1548
1549 if (tk->isa_aryklassptr() && tk->is_aryklassptr()->elem()->isa_klassptr()) {
1550 ciKlass* k = ciObjArrayKlass::make(env()->Object_klass());
1551 if (!k || !k->is_loaded()) { // Only fails for some -Xcomp runs
1552 tj = tk = TypeInstKlassPtr::make(TypePtr::NotNull, env()->Object_klass(), Type::Offset(offset));
1553 } else {
1554 tj = tk = TypeAryKlassPtr::make(TypePtr::NotNull, tk->is_aryklassptr()->elem(), k, Type::Offset(offset), tk->is_not_flat(), tk->is_not_null_free(), tk->is_flat(), tk->is_null_free());
1555 }
1556 }
1557 // Check for precise loads from the primary supertype array and force them
1558 // to the supertype cache alias index. Check for generic array loads from
1559 // the primary supertype array and also force them to the supertype cache
1560 // alias index. Since the same load can reach both, we need to merge
1561 // these 2 disparate memories into the same alias class. Since the
1562 // primary supertype array is read-only, there's no chance of confusion
1563 // where we bypass an array load and an array store.
1564 int primary_supers_offset = in_bytes(Klass::primary_supers_offset());
1565 if (offset == Type::OffsetBot ||
1566 (offset >= primary_supers_offset &&
1567 offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) ||
1568 offset == (int)in_bytes(Klass::secondary_super_cache_offset())) {
1569 offset = in_bytes(Klass::secondary_super_cache_offset());
1570 tj = tk = tk->with_offset(offset);
1571 }
1572 }
1573
1574 // Flatten all Raw pointers together.
1575 if (tj->base() == Type::RawPtr)
1576 tj = TypeRawPtr::BOTTOM;
1666 intptr_t key = (intptr_t) adr_type;
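// Fold the high bits of the pointer into the low bits and mask down to an index
// into the direct-mapped cache. A stale or colliding entry is harmless because
// find_alias_type() re-checks the stored adr_type and falls back to the slow path.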
1667 key ^= key >> logAliasCacheSize;
1668 return &_alias_cache[key & right_n_bits(logAliasCacheSize)];
1669 }
1670
1671
1672 //-----------------------------grow_alias_types--------------------------------
1673 void Compile::grow_alias_types() {
1674 const int old_ats = _max_alias_types; // how many before?
1675 const int new_ats = old_ats; // how many more?
1676 const int grow_ats = old_ats+new_ats; // how many now?
1677 _max_alias_types = grow_ats;
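// The table doubles each time it fills up. The AliasType objects themselves are
// arena-allocated and stay put; only the pointer array is reallocated.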
1678 _alias_types = REALLOC_ARENA_ARRAY(comp_arena(), AliasType*, _alias_types, old_ats, grow_ats);
1679 AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType, new_ats);
1680 Copy::zero_to_bytes(ats, sizeof(AliasType)*new_ats);
1681 for (int i = 0; i < new_ats; i++) _alias_types[old_ats+i] = &ats[i];
1682 }
1683
1684
1685 //--------------------------------find_alias_type------------------------------
1686 Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field, bool uncached) {
1687 if (!do_aliasing()) {
1688 return alias_type(AliasIdxBot);
1689 }
1690
1691 AliasCacheEntry* ace = nullptr;
1692 if (!uncached) {
1693 ace = probe_alias_cache(adr_type);
1694 if (ace->_adr_type == adr_type) {
1695 return alias_type(ace->_index);
1696 }
1697 }
1698
1699 // Handle special cases.
1700 if (adr_type == nullptr) return alias_type(AliasIdxTop);
1701 if (adr_type == TypePtr::BOTTOM) return alias_type(AliasIdxBot);
1702
1703 // Do it the slow way.
1704 const TypePtr* flat = flatten_alias_type(adr_type);
1705
1706 #ifdef ASSERT
1707 {
1708 ResourceMark rm;
1709 assert(flat == flatten_alias_type(flat), "not idempotent: adr_type = %s; flat = %s => %s",
1710 Type::str(adr_type), Type::str(flat), Type::str(flatten_alias_type(flat)));
1711 assert(flat != TypePtr::BOTTOM, "cannot alias-analyze an untyped ptr: adr_type = %s",
1712 Type::str(adr_type));
1713 if (flat->isa_oopptr() && !flat->isa_klassptr()) {
1714 const TypeOopPtr* foop = flat->is_oopptr();
1715 // Scalarizable allocations have exact klass always.
1716 bool exact = !foop->klass_is_exact() || foop->is_known_instance();
1726 if (alias_type(i)->adr_type() == flat) {
1727 idx = i;
1728 break;
1729 }
1730 }
1731
1732 if (idx == AliasIdxTop) {
1733 if (no_create) return nullptr;
1734 // Grow the array if necessary.
1735 if (_num_alias_types == _max_alias_types) grow_alias_types();
1736 // Add a new alias type.
1737 idx = _num_alias_types++;
1738 _alias_types[idx]->Init(idx, flat);
1739 if (flat == TypeInstPtr::KLASS) alias_type(idx)->set_rewritable(false);
1740 if (flat == TypeAryPtr::RANGE) alias_type(idx)->set_rewritable(false);
1741 if (flat->isa_instptr()) {
1742 if (flat->offset() == java_lang_Class::klass_offset()
1743 && flat->is_instptr()->instance_klass() == env()->Class_klass())
1744 alias_type(idx)->set_rewritable(false);
1745 }
1746 ciField* field = nullptr;
1747 if (flat->isa_aryptr()) {
1748 #ifdef ASSERT
1749 const int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE);
1750 // (T_BYTE has the weakest alignment and size restrictions...)
1751 assert(flat->offset() < header_size_min, "array body reference must be OffsetBot");
1752 #endif
1753 const Type* elemtype = flat->is_aryptr()->elem();
1754 if (flat->offset() == TypePtr::OffsetBot) {
1755 alias_type(idx)->set_element(elemtype);
1756 }
1757 int field_offset = flat->is_aryptr()->field_offset().get();
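// For a flat array slice that names a specific field, translate the offset
// (relative to the element) into the inline klass layout before looking up
// the ciField.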
1758 if (flat->is_flat() &&
1759 field_offset != Type::OffsetBot) {
1760 ciInlineKlass* vk = elemtype->inline_klass();
1761 field_offset += vk->payload_offset();
1762 field = vk->get_field_by_offset(field_offset, false);
1763 }
1764 }
1765 if (flat->isa_klassptr()) {
1766 if (UseCompactObjectHeaders) {
1767 if (flat->offset() == in_bytes(Klass::prototype_header_offset()))
1768 alias_type(idx)->set_rewritable(false);
1769 }
1770 if (flat->offset() == in_bytes(Klass::super_check_offset_offset()))
1771 alias_type(idx)->set_rewritable(false);
1772 if (flat->offset() == in_bytes(Klass::access_flags_offset()))
1773 alias_type(idx)->set_rewritable(false);
1774 if (flat->offset() == in_bytes(Klass::misc_flags_offset()))
1775 alias_type(idx)->set_rewritable(false);
1776 if (flat->offset() == in_bytes(Klass::java_mirror_offset()))
1777 alias_type(idx)->set_rewritable(false);
1778 if (flat->offset() == in_bytes(Klass::layout_helper_offset()))
1779 alias_type(idx)->set_rewritable(false);
1780 if (flat->offset() == in_bytes(Klass::secondary_super_cache_offset()))
1781 alias_type(idx)->set_rewritable(false);
1782 }
1783 // %%% (We would like to finalize JavaThread::threadObj_offset(),
1784 // but the base pointer type is not distinctive enough to identify
1785 // references into JavaThread.)
1786
1787 // Check for final fields.
1788 const TypeInstPtr* tinst = flat->isa_instptr();
1789 if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) {
1790 if (tinst->const_oop() != nullptr &&
1791 tinst->instance_klass() == ciEnv::current()->Class_klass() &&
1792 tinst->offset() >= (tinst->instance_klass()->layout_helper_size_in_bytes())) {
1793 // static field
1794 ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass();
1795 field = k->get_field_by_offset(tinst->offset(), true);
1796 } else if (tinst->is_inlinetypeptr()) {
1797 // Inline type field
1798 ciInlineKlass* vk = tinst->inline_klass();
1799 field = vk->get_field_by_offset(tinst->offset(), false);
1800 } else {
1801 ciInstanceKlass *k = tinst->instance_klass();
1802 field = k->get_field_by_offset(tinst->offset(), false);
1803 }
1804 }
1805 assert(field == nullptr ||
1806 original_field == nullptr ||
1807 (field->holder() == original_field->holder() &&
1808 field->offset_in_bytes() == original_field->offset_in_bytes() &&
1809 field->is_static() == original_field->is_static()), "wrong field?");
1810 // Set field() and is_rewritable() attributes.
1811 if (field != nullptr) {
1812 alias_type(idx)->set_field(field);
1813 if (flat->isa_aryptr()) {
1814 // Fields of flat arrays are rewritable although they are declared final
1815 assert(flat->is_flat(), "must be a flat array");
1816 alias_type(idx)->set_rewritable(true);
1817 }
1818 }
1819 }
1820
1821 // Fill the cache for next time.
1822 if (!uncached) {
1823 ace->_adr_type = adr_type;
1824 ace->_index = idx;
1825 assert(alias_type(adr_type) == alias_type(idx), "type must be installed");
1826
1827 // Might as well try to fill the cache for the flattened version, too.
1828 AliasCacheEntry* face = probe_alias_cache(flat);
1829 if (face->_adr_type == nullptr) {
1830 face->_adr_type = flat;
1831 face->_index = idx;
1832 assert(alias_type(flat) == alias_type(idx), "flat type must work too");
1833 }
1834 }
1835
1836 return alias_type(idx);
1837 }
1838
1839
1840 Compile::AliasType* Compile::alias_type(ciField* field) {
1841 const TypeOopPtr* t;
1842 if (field->is_static())
1843 t = TypeInstPtr::make(field->holder()->java_mirror());
1844 else
1845 t = TypeOopPtr::make_from_klass_raw(field->holder());
1846 AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field);
1847 assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct");
1848 return atp;
1849 }
1850
1851
1852 //------------------------------have_alias_type--------------------------------
1853 bool Compile::have_alias_type(const TypePtr* adr_type) {
1932 assert(!C->major_progress(), "not cleared");
1933
1934 if (_for_post_loop_igvn.length() > 0) {
1935 while (_for_post_loop_igvn.length() > 0) {
1936 Node* n = _for_post_loop_igvn.pop();
1937 n->remove_flag(Node::NodeFlags::Flag_for_post_loop_opts_igvn);
1938 igvn._worklist.push(n);
1939 }
1940 igvn.optimize();
1941 if (failing()) return;
1942 assert(_for_post_loop_igvn.length() == 0, "no more delayed nodes allowed");
1943 assert(C->parse_predicate_count() == 0, "all parse predicates should have been removed now");
1944
1945 // Sometimes IGVN sets major progress (e.g., when processing loop nodes).
1946 if (C->major_progress()) {
1947 C->clear_major_progress(); // ensure that major progress is now clear
1948 }
1949 }
1950 }
1951
1952 void Compile::add_inline_type(Node* n) {
1953 assert(n->is_InlineType(), "unexpected node");
1954 _inline_type_nodes.push(n);
1955 }
1956
1957 void Compile::remove_inline_type(Node* n) {
1958 assert(n->is_InlineType(), "unexpected node");
1959 if (_inline_type_nodes.contains(n)) {
1960 _inline_type_nodes.remove(n);
1961 }
1962 }
1963
1964 // Does the return value keep otherwise useless inline type allocations alive?
1965 static bool return_val_keeps_allocations_alive(Node* ret_val) {
1966 ResourceMark rm;
1967 Unique_Node_List wq;
1968 wq.push(ret_val);
1969 bool some_allocations = false;
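// Walk the value graph feeding the return: through InlineType oop inputs, Phi
// inputs and CheckCastPP chains. Bail out if any node on the way has another
// use; otherwise remember whether a direct allocation was reached.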
1970 for (uint i = 0; i < wq.size(); i++) {
1971 Node* n = wq.at(i);
1972 if (n->outcnt() > 1) {
1973 // Some other use for the allocation
1974 return false;
1975 } else if (n->is_InlineType()) {
1976 wq.push(n->in(1));
1977 } else if (n->is_Phi()) {
1978 for (uint j = 1; j < n->req(); j++) {
1979 wq.push(n->in(j));
1980 }
1981 } else if (n->is_CheckCastPP() &&
1982 n->in(1)->is_Proj() &&
1983 n->in(1)->in(0)->is_Allocate()) {
1984 some_allocations = true;
1985 } else if (n->is_CheckCastPP()) {
1986 wq.push(n->in(1));
1987 }
1988 }
1989 return some_allocations;
1990 }
1991
1992 void Compile::process_inline_types(PhaseIterGVN &igvn, bool remove) {
1993 // Make sure that the return value does not keep an otherwise unused allocation alive
1994 if (tf()->returns_inline_type_as_fields()) {
1995 Node* ret = nullptr;
1996 for (uint i = 1; i < root()->req(); i++) {
1997 Node* in = root()->in(i);
1998 if (in->Opcode() == Op_Return) {
1999 assert(ret == nullptr, "only one return");
2000 ret = in;
2001 }
2002 }
2003 if (ret != nullptr) {
2004 Node* ret_val = ret->in(TypeFunc::Parms);
2005 if (igvn.type(ret_val)->isa_oopptr() &&
2006 return_val_keeps_allocations_alive(ret_val)) {
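// When an inline type is returned as fields, the caller only needs the
// tagged klass pointer here, so substituting it lets the otherwise unused
// buffer allocation become dead and be removed.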
2007 igvn.replace_input_of(ret, TypeFunc::Parms, InlineTypeNode::tagged_klass(igvn.type(ret_val)->inline_klass(), igvn));
2008 assert(ret_val->outcnt() == 0, "should be dead now");
2009 igvn.remove_dead_node(ret_val);
2010 }
2011 }
2012 }
2013 if (_inline_type_nodes.length() == 0) {
2014 return;
2015 }
2016 // Scalarize inline types in safepoint debug info.
2017 // Delay this until all inlining is over to avoid getting inconsistent debug info.
2018 set_scalarize_in_safepoints(true);
2019 for (int i = _inline_type_nodes.length()-1; i >= 0; i--) {
2020 InlineTypeNode* vt = _inline_type_nodes.at(i)->as_InlineType();
2021 vt->make_scalar_in_safepoints(&igvn);
2022 igvn.record_for_igvn(vt);
2023 }
2024 if (remove) {
2025 // Remove inline type nodes by replacing them with their oop input
2026 while (_inline_type_nodes.length() > 0) {
2027 InlineTypeNode* vt = _inline_type_nodes.pop()->as_InlineType();
2028 if (vt->outcnt() == 0) {
2029 igvn.remove_dead_node(vt);
2030 continue;
2031 }
2032 for (DUIterator i = vt->outs(); vt->has_out(i); i++) {
2033 DEBUG_ONLY(bool must_be_buffered = false);
2034 Node* u = vt->out(i);
2035 // Check if any users are blackholes. If so, rewrite them to use either the
2036 // allocated buffer, or individual components, instead of the inline type node
2037 // that goes away.
2038 if (u->is_Blackhole()) {
2039 BlackholeNode* bh = u->as_Blackhole();
2040
2041 // Unlink the old input
2042 int idx = bh->find_edge(vt);
2043 assert(idx != -1, "The edge should be there");
2044 bh->del_req(idx);
2045 --i;
2046
2047 if (vt->is_allocated(&igvn)) {
2048 // Already has the allocated instance, blackhole that
2049 bh->add_req(vt->get_oop());
2050 } else {
2051 // Not allocated yet, blackhole the components
2052 for (uint c = 0; c < vt->field_count(); c++) {
2053 bh->add_req(vt->field_value(c));
2054 }
2055 }
2056
2057 // Node modified, record for IGVN
2058 igvn.record_for_igvn(bh);
2059 }
2060 #ifdef ASSERT
2061 // Verify that inline type is buffered when replacing by oop
2062 else if (u->is_InlineType()) {
2063 // InlineType uses don't need buffering because they are about to be replaced as well
2064 } else if (u->is_Phi()) {
2065 // TODO 8302217 Remove this once InlineTypeNodes are reliably pushed through
2066 } else {
2067 must_be_buffered = true;
2068 }
2069 if (must_be_buffered && !vt->is_allocated(&igvn)) {
2070 vt->dump(0);
2071 u->dump(0);
2072 assert(false, "Should have been buffered");
2073 }
2074 #endif
2075 }
2076 igvn.replace_node(vt, vt->get_oop());
2077 }
2078 }
2079 igvn.optimize();
2080 }
2081
2082 void Compile::adjust_flat_array_access_aliases(PhaseIterGVN& igvn) {
2083 if (!_has_flat_accesses) {
2084 return;
2085 }
2086 // Initially, all flat array accesses share the same slice to
2087 // keep dependencies with Object[] array accesses (that could be
2088 // to a flat array) correct. We're done with parsing so we
2089 // now know all flat array accesses in this compile
2090 // unit. Let's move flat array accesses to their own slice,
2091 // one per element field. This should help memory access
2092 // optimizations.
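// Illustrative example (hypothetical types): for a flat Point[] with fields x
// and y, a StoreI to p[i].x initially lives on the shared TypeAryPtr::INLINES
// slice; after this pass it gets its own alias index and no longer interferes
// with accesses to p[i].y.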
2093 ResourceMark rm;
2094 Unique_Node_List wq;
2095 wq.push(root());
2096
2097 Node_List mergememnodes;
2098 Node_List memnodes;
2099
2100 // Alias index currently shared by all flat memory accesses
2101 int index = get_alias_index(TypeAryPtr::INLINES);
2102
2103 // Find MergeMem nodes and flat array accesses
2104 for (uint i = 0; i < wq.size(); i++) {
2105 Node* n = wq.at(i);
2106 if (n->is_Mem()) {
2107 const TypePtr* adr_type = nullptr;
2108 adr_type = get_adr_type(get_alias_index(n->adr_type()));
2109 if (adr_type == TypeAryPtr::INLINES) {
2110 memnodes.push(n);
2111 }
2112 } else if (n->is_MergeMem()) {
2113 MergeMemNode* mm = n->as_MergeMem();
2114 if (mm->memory_at(index) != mm->base_memory()) {
2115 mergememnodes.push(n);
2116 }
2117 }
2118 for (uint j = 0; j < n->req(); j++) {
2119 Node* m = n->in(j);
2120 if (m != nullptr) {
2121 wq.push(m);
2122 }
2123 }
2124 }
2125
2126 if (memnodes.size() > 0) {
2127 _flat_accesses_share_alias = false;
2128
2129 // We are going to change the slice for the flat array
2130 // accesses so we need to clear the cache entries that refer to
2131 // them.
2132 for (uint i = 0; i < AliasCacheSize; i++) {
2133 AliasCacheEntry* ace = &_alias_cache[i];
2134 if (ace->_adr_type != nullptr &&
2135 ace->_adr_type->is_flat()) {
2136 ace->_adr_type = nullptr;
2137 ace->_index = (i != 0) ? 0 : AliasIdxTop; // Make sure the nullptr adr_type resolves to AliasIdxTop
2138 }
2139 }
2140
2141 // Find what aliases we are going to add
2142 int start_alias = num_alias_types()-1;
2143 int stop_alias = 0;
2144
2145 for (uint i = 0; i < memnodes.size(); i++) {
2146 Node* m = memnodes.at(i);
2147 const TypePtr* adr_type = nullptr;
2148 adr_type = m->adr_type();
2149 #ifdef ASSERT
2150 m->as_Mem()->set_adr_type(adr_type);
2151 #endif
2152 int idx = get_alias_index(adr_type);
2153 start_alias = MIN2(start_alias, idx);
2154 stop_alias = MAX2(stop_alias, idx);
2155 }
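// Calling get_alias_index() on each access's precise address type creates the
// new per-field alias classes as a side effect; [start_alias, stop_alias] now
// brackets the indexes whose memory state must be materialized below.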
2156
2157 assert(stop_alias >= start_alias, "should have expanded aliases");
2158
2159 Node_Stack stack(0);
2160 #ifdef ASSERT
2161 VectorSet seen(Thread::current()->resource_area());
2162 #endif
2163 // Now let's fix the memory graph so each flat array access
2164 // is moved to the right slice. Start from the MergeMem nodes.
2165 uint last = unique();
2166 for (uint i = 0; i < mergememnodes.size(); i++) {
2167 MergeMemNode* current = mergememnodes.at(i)->as_MergeMem();
2168 Node* n = current->memory_at(index);
2169 MergeMemNode* mm = nullptr;
2170 do {
2171 // Follow memory edges through memory accesses, phis and
2172 // narrow membars and push nodes on the stack. Once we hit
2173 // bottom memory, we pop elements off the stack one at a
2174 // time, in reverse order, and move them to the right slice
2175 // by changing their memory edges.
2176 if ((n->is_Phi() && n->adr_type() != TypePtr::BOTTOM) || n->is_Mem() || n->adr_type() == TypeAryPtr::INLINES) {
2177 assert(!seen.test_set(n->_idx), "");
2178 // Uses (a load for instance) will need to be moved to the
2179 // right slice as well and will get a new memory state
2180 // that we don't know yet. The use could also be the
2181 // backedge of a loop. We put a place holder node between
2182 // the memory node and its uses. We replace that place
2183 // holder with the correct memory state once we know it,
2184 // i.e. when nodes are popped off the stack. Using the
2185 // place holder makes the logic work in the presence of
2186 // loops.
2187 if (n->outcnt() > 1) {
2188 Node* place_holder = nullptr;
2189 assert(!n->has_out_with(Op_Node), "");
2190 for (DUIterator k = n->outs(); n->has_out(k); k++) {
2191 Node* u = n->out(k);
2192 if (u != current && u->_idx < last) {
2193 bool success = false;
2194 for (uint l = 0; l < u->req(); l++) {
2195 if (!stack.is_empty() && u == stack.node() && l == stack.index()) {
2196 continue;
2197 }
2198 Node* in = u->in(l);
2199 if (in == n) {
2200 if (place_holder == nullptr) {
2201 place_holder = new Node(1);
2202 place_holder->init_req(0, n);
2203 }
2204 igvn.replace_input_of(u, l, place_holder);
2205 success = true;
2206 }
2207 }
2208 if (success) {
2209 --k;
2210 }
2211 }
2212 }
2213 }
2214 if (n->is_Phi()) {
2215 stack.push(n, 1);
2216 n = n->in(1);
2217 } else if (n->is_Mem()) {
2218 stack.push(n, n->req());
2219 n = n->in(MemNode::Memory);
2220 } else {
2221 assert(n->is_Proj() && n->in(0)->Opcode() == Op_MemBarCPUOrder, "");
2222 stack.push(n, n->req());
2223 n = n->in(0)->in(TypeFunc::Memory);
2224 }
2225 } else {
2226 assert(n->adr_type() == TypePtr::BOTTOM || (n->Opcode() == Op_Node && n->_idx >= last) || (n->is_Proj() && n->in(0)->is_Initialize()), "");
2227 // Build a new MergeMem node to carry the new memory state
2228 // as we build it. IGVN should fold extraneous MergeMem
2229 // nodes.
2230 mm = MergeMemNode::make(n);
2231 igvn.register_new_node_with_optimizer(mm);
2232 while (stack.size() > 0) {
2233 Node* m = stack.node();
2234 uint idx = stack.index();
2235 if (m->is_Mem()) {
2236 // Move memory node to its new slice
2237 const TypePtr* adr_type = m->adr_type();
2238 int alias = get_alias_index(adr_type);
2239 Node* prev = mm->memory_at(alias);
2240 igvn.replace_input_of(m, MemNode::Memory, prev);
2241 mm->set_memory_at(alias, m);
2242 } else if (m->is_Phi()) {
2243 // We need as many new phis as there are new aliases
2244 igvn.replace_input_of(m, idx, mm);
2245 if (idx == m->req()-1) {
2246 Node* r = m->in(0);
2247 for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
2248 const TypePtr* adr_type = get_adr_type(j);
2249 if (!adr_type->isa_aryptr() || !adr_type->is_flat() || j == (uint)index) {
2250 continue;
2251 }
2252 Node* phi = new PhiNode(r, Type::MEMORY, get_adr_type(j));
2253 igvn.register_new_node_with_optimizer(phi);
2254 for (uint k = 1; k < m->req(); k++) {
2255 phi->init_req(k, m->in(k)->as_MergeMem()->memory_at(j));
2256 }
2257 mm->set_memory_at(j, phi);
2258 }
2259 Node* base_phi = new PhiNode(r, Type::MEMORY, TypePtr::BOTTOM);
2260 igvn.register_new_node_with_optimizer(base_phi);
2261 for (uint k = 1; k < m->req(); k++) {
2262 base_phi->init_req(k, m->in(k)->as_MergeMem()->base_memory());
2263 }
2264 mm->set_base_memory(base_phi);
2265 }
2266 } else {
2267 // This is a MemBarCPUOrder node from
2268 // Parse::array_load()/Parse::array_store(), in the
2269 // branch that handles flat arrays hidden under
2270 // an Object[] array. We also need one new membar per
2271 // new alias to keep the unknown access that the
2272 // membars protect properly ordered with accesses to
2273 // known flat arrays.
2274 assert(m->is_Proj(), "projection expected");
2275 Node* ctrl = m->in(0)->in(TypeFunc::Control);
2276 igvn.replace_input_of(m->in(0), TypeFunc::Control, top());
2277 for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
2278 const TypePtr* adr_type = get_adr_type(j);
2279 if (!adr_type->isa_aryptr() || !adr_type->is_flat() || j == (uint)index) {
2280 continue;
2281 }
2282 MemBarNode* mb = new MemBarCPUOrderNode(this, j, nullptr);
2283 igvn.register_new_node_with_optimizer(mb);
2284 Node* mem = mm->memory_at(j);
2285 mb->init_req(TypeFunc::Control, ctrl);
2286 mb->init_req(TypeFunc::Memory, mem);
2287 ctrl = new ProjNode(mb, TypeFunc::Control);
2288 igvn.register_new_node_with_optimizer(ctrl);
2289 mem = new ProjNode(mb, TypeFunc::Memory);
2290 igvn.register_new_node_with_optimizer(mem);
2291 mm->set_memory_at(j, mem);
2292 }
2293 igvn.replace_node(m->in(0)->as_Multi()->proj_out(TypeFunc::Control), ctrl);
2294 }
2295 if (idx < m->req()-1) {
2296 idx += 1;
2297 stack.set_index(idx);
2298 n = m->in(idx);
2299 break;
2300 }
2301 // Take care of place holder nodes
2302 if (m->has_out_with(Op_Node)) {
2303 Node* place_holder = m->find_out_with(Op_Node);
2304 if (place_holder != nullptr) {
2305 Node* mm_clone = mm->clone();
2306 igvn.register_new_node_with_optimizer(mm_clone);
2307 Node* hook = new Node(1);
2308 hook->init_req(0, mm);
2309 igvn.replace_node(place_holder, mm_clone);
2310 hook->destruct(&igvn);
2311 }
2312 assert(!m->has_out_with(Op_Node), "place holder should be gone now");
2313 }
2314 stack.pop();
2315 }
2316 }
2317 } while(stack.size() > 0);
2318 // Fix the memory state at the MergeMem we started from
2319 igvn.rehash_node_delayed(current);
2320 for (uint j = (uint)start_alias; j <= (uint)stop_alias; j++) {
2321 const TypePtr* adr_type = get_adr_type(j);
2322 if (!adr_type->isa_aryptr() || !adr_type->is_flat()) {
2323 continue;
2324 }
2325 current->set_memory_at(j, mm);
2326 }
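// The shared INLINES slice is no longer needed on this MergeMem; fold it
// back into base memory.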
2327 current->set_memory_at(index, current->base_memory());
2328 }
2329 igvn.optimize();
2330 }
2331 print_method(PHASE_SPLIT_INLINES_ARRAY, 2);
2332 #ifdef ASSERT
2333 if (!_flat_accesses_share_alias) {
2334 wq.clear();
2335 wq.push(root());
2336 for (uint i = 0; i < wq.size(); i++) {
2337 Node* n = wq.at(i);
2338 assert(n->adr_type() != TypeAryPtr::INLINES, "should have been removed from the graph");
2339 for (uint j = 0; j < n->req(); j++) {
2340 Node* m = n->in(j);
2341 if (m != nullptr) {
2342 wq.push(m);
2343 }
2344 }
2345 }
2346 }
2347 #endif
2348 }
2349
2350 void Compile::record_for_merge_stores_igvn(Node* n) {
2351 if (!n->for_merge_stores_igvn()) {
2352 assert(!_for_merge_stores_igvn.contains(n), "duplicate");
2353 n->add_flag(Node::NodeFlags::Flag_for_merge_stores_igvn);
2354 _for_merge_stores_igvn.append(n);
2355 }
2356 }
2357
2358 void Compile::remove_from_merge_stores_igvn(Node* n) {
2359 n->remove_flag(Node::NodeFlags::Flag_for_merge_stores_igvn);
2360 _for_merge_stores_igvn.remove(n);
2361 }
2362
2363 // We need to delay merging stores until RangeCheck smearing has removed the RangeChecks during
2364 // the post loops IGVN phase. If we do it earlier, then there may still be some RangeChecks between
2365 // the stores, and we merge the wrong sequence of stores.
2366 // Example:
2367 // StoreI RangeCheck StoreI StoreI RangeCheck StoreI
2368 // Apply MergeStores:
2369 // StoreI RangeCheck [ StoreL ] RangeCheck StoreI
2448 assert(next_bci == iter.next_bci() || next_bci == iter.get_dest(), "wrong next_bci at unstable_if");
2449 Bytecodes::Code c = iter.cur_bc();
2450 Node* lhs = nullptr;
2451 Node* rhs = nullptr;
2452 if (c == Bytecodes::_if_acmpeq || c == Bytecodes::_if_acmpne) {
2453 lhs = unc->peek_operand(0);
2454 rhs = unc->peek_operand(1);
2455 } else if (c == Bytecodes::_ifnull || c == Bytecodes::_ifnonnull) {
2456 lhs = unc->peek_operand(0);
2457 }
2458
2459 ResourceMark rm;
2460 const MethodLivenessResult& live_locals = method->liveness_at_bci(next_bci);
2461 assert(live_locals.is_valid(), "broken liveness info");
2462 int len = (int)live_locals.size();
2463
2464 for (int i = 0; i < len; i++) {
2465 Node* local = unc->local(jvms, i);
2466 // Kill the local using the liveness at next_bci.
2467 // Give up (keep the local) when it looks like an operand, to keep re-execution safe.
2468 if (!live_locals.at(i) && !local->is_top() && local != lhs && local != rhs) {
2469 uint idx = jvms->locoff() + i;
2470 #ifdef ASSERT
2471 if (PrintOpto && Verbose) {
2472 tty->print("[unstable_if] kill local#%d: ", idx);
2473 local->dump();
2474 tty->cr();
2475 }
2476 #endif
2477 igvn.replace_input_of(unc, idx, top());
2478 modified = true;
2479 }
2480 }
2481 }
2482
2483 // keep the modified trap for late query
2484 if (modified) {
2485 trap->set_modified();
2486 } else {
2487 _unstable_if_traps.delete_at(i);
2488 }
2489 }
2490 igvn.optimize();
2491 }
2492
2493 // StringOpts and late inlining of string methods
2494 void Compile::inline_string_calls(bool parse_time) {
2495 {
2496 // remove useless nodes to make the usage analysis simpler
2497 ResourceMark rm;
2498 PhaseRemoveUseless pru(initial_gvn(), *igvn_worklist());
2499 }
2500
2501 {
2502 ResourceMark rm;
2503 print_method(PHASE_BEFORE_STRINGOPTS, 3);
2658
2659 if (_string_late_inlines.length() > 0) {
2660 assert(has_stringbuilder(), "inconsistent");
2661
2662 inline_string_calls(false);
2663
2664 if (failing()) return;
2665
2666 inline_incrementally_cleanup(igvn);
2667 }
2668
2669 set_inlining_incrementally(false);
2670 }
2671
2672 void Compile::process_late_inline_calls_no_inline(PhaseIterGVN& igvn) {
2673 // "inlining_incrementally() == false" is used to signal that no inlining is allowed
2674 // (see LateInlineVirtualCallGenerator::do_late_inline_check() for details).
2675 // Tracking and verification of modified nodes is disabled by setting "_modified_nodes == nullptr"
2676 // as if "inlining_incrementally() == true" were set.
2677 assert(inlining_incrementally() == false, "not allowed");
2678 #ifdef ASSERT
2679 Unique_Node_List* modified_nodes = _modified_nodes;
2680 _modified_nodes = nullptr;
2681 #endif
2682 assert(_late_inlines.length() > 0, "sanity");
2683
2684 while (_late_inlines.length() > 0) {
2685 igvn_worklist()->ensure_empty(); // should be done with igvn
2686
2687 while (inline_incrementally_one()) {
2688 assert(!failing_internal() || failure_is_artificial(), "inconsistent");
2689 }
2690 if (failing()) return;
2691
2692 inline_incrementally_cleanup(igvn);
2693 }
2694 DEBUG_ONLY( _modified_nodes = modified_nodes; )
2695 }
2696
2697 bool Compile::optimize_loops(PhaseIterGVN& igvn, LoopOptsMode mode) {
2698 if (_loop_opts_cnt > 0) {
2699 while (major_progress() && (_loop_opts_cnt > 0)) {
2700 TracePhase tp(_t_idealLoop);
2701 PhaseIdealLoop::optimize(igvn, mode);
2702 _loop_opts_cnt--;
2703 if (failing()) return false;
2704 if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
2705 }
2706 }
2707 return true;
2708 }
2709
2710 // Remove edges from "root" to each SafePoint at a backward branch.
2711 // They were inserted during parsing (see add_safepoint()) to make
2712 // infinite loops without calls or exceptions visible to root, i.e.,
2713 // useful.
2714 void Compile::remove_root_to_sfpts_edges(PhaseIterGVN& igvn) {
2819 print_method(PHASE_ITER_GVN_AFTER_VECTOR, 2);
2820 }
2821 assert(!has_vbox_nodes(), "sanity");
2822
2823 if (!failing() && RenumberLiveNodes && live_nodes() + NodeLimitFudgeFactor < unique()) {
2824 Compile::TracePhase tp(_t_renumberLive);
2825 igvn_worklist()->ensure_empty(); // should be done with igvn
2826 {
2827 ResourceMark rm;
2828 PhaseRenumberLive prl(initial_gvn(), *igvn_worklist());
2829 }
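// PhaseRenumberLive has compacted the live node indexes, so rebuild the IGVN
// state from the initial GVN before optimizing further.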
2830 igvn.reset_from_gvn(initial_gvn());
2831 igvn.optimize();
2832 if (failing()) return;
2833 }
2834
2835 // Now that all inlining is over and no PhaseRemoveUseless will run, cut edge from root to loop
2836 // safepoints
2837 remove_root_to_sfpts_edges(igvn);
2838
2839 // Process inline type nodes now that all inlining is over
2840 process_inline_types(igvn);
2841
2842 adjust_flat_array_access_aliases(igvn);
2843
2844 if (failing()) return;
2845
2846 // Perform escape analysis
2847 if (do_escape_analysis() && ConnectionGraph::has_candidates(this)) {
2848 if (has_loops()) {
2849 // Cleanup graph (remove dead nodes).
2850 TracePhase tp(_t_idealLoop);
2851 PhaseIdealLoop::optimize(igvn, LoopOptsMaxUnroll);
2852 if (failing()) return;
2853 }
2854 bool progress;
2855 print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
2856 do {
2857 ConnectionGraph::do_analysis(this, &igvn);
2858
2859 if (failing()) return;
2860
2861 int mcount = macro_count(); // Record number of allocations and locks before IGVN
2862
2863 // Optimize out field loads from scalar replaceable allocations.
2949 // Loop transforms on the ideal graph. Range Check Elimination,
2950 // peeling, unrolling, etc.
2951 if (!optimize_loops(igvn, LoopOptsDefault)) {
2952 return;
2953 }
2954
2955 if (failing()) return;
2956
2957 C->clear_major_progress(); // ensure that major progress is now clear
2958
2959 process_for_post_loop_opts_igvn(igvn);
2960
2961 process_for_merge_stores_igvn(igvn);
2962
2963 if (failing()) return;
2964
2965 #ifdef ASSERT
2966 bs->verify_gc_barriers(this, BarrierSetC2::BeforeMacroExpand);
2967 #endif
2968
2969 assert(_late_inlines.length() == 0 || IncrementalInlineMH || IncrementalInlineVirtual, "not empty");
2970
2971 if (_late_inlines.length() > 0) {
2972 // More opportunities to optimize virtual and MH calls.
2973 // Though it's maybe too late to perform inlining, strength-reducing them to direct calls is still an option.
2974 process_late_inline_calls_no_inline(igvn);
2975 }
2976
2977 {
2978 TracePhase tp(_t_macroExpand);
2979 print_method(PHASE_BEFORE_MACRO_EXPANSION, 3);
2980 PhaseMacroExpand mex(igvn);
2981 if (mex.expand_macro_nodes()) {
2982 assert(failing(), "must bail out w/ explicit message");
2983 return;
2984 }
2985 print_method(PHASE_AFTER_MACRO_EXPANSION, 2);
2986 }
2987
2988 // Process inline type nodes again and remove them. From here
2989 // on we don't need to keep track of field values anymore.
2990 process_inline_types(igvn, /* remove= */ true);
2991
2992 {
2993 TracePhase tp(_t_barrierExpand);
2994 if (bs->expand_barriers(this, igvn)) {
2995 assert(failing(), "must bail out w/ explicit message");
2996 return;
2997 }
2998 print_method(PHASE_BARRIER_EXPANSION, 2);
2999 }
3000
3001 if (C->max_vector_size() > 0) {
3002 C->optimize_logic_cones(igvn);
3003 igvn.optimize();
3004 if (failing()) return;
3005 }
3006
3007 DEBUG_ONLY( _modified_nodes = nullptr; )
3008 DEBUG_ONLY( _late_inlines.clear(); )
3009
3010 assert(igvn._worklist.size() == 0, "not empty");
3011 } // (End scope of igvn; run destructor if necessary for asserts.)
3012
3013 check_no_dead_use();
3014
3015 // We will never use the NodeHash table any more. Clear it so that final_graph_reshaping does not have
3016 // to remove hashes to unlock nodes for modifications.
3017 C->node_hash()->clear();
3018
3019 // A method with only infinite loops has no edges entering loops from root
3020 {
3021 TracePhase tp(_t_graphReshaping);
3022 if (final_graph_reshaping()) {
3023 assert(failing(), "must bail out w/ explicit message");
3024 return;
3025 }
3026 }
3027
3028 print_method(PHASE_OPTIMIZE_FINISHED, 2);
3029 DEBUG_ONLY(set_phase_optimize_finished();)
3030 }
3763 int nop = n->Opcode();
3764 // Clone shared simple arguments to uncommon calls, item (1).
3765 if (n->outcnt() > 1 &&
3766 !n->is_Proj() &&
3767 nop != Op_CreateEx &&
3768 nop != Op_CheckCastPP &&
3769 nop != Op_DecodeN &&
3770 nop != Op_DecodeNKlass &&
3771 !n->is_Mem() &&
3772 !n->is_Phi()) {
3773 Node *x = n->clone();
3774 call->set_req(TypeFunc::Parms, x);
3775 }
3776 }
3777 break;
3778 }
3779 case Op_StoreB:
3780 case Op_StoreC:
3781 case Op_StoreI:
3782 case Op_StoreL:
3783 case Op_StoreLSpecial:
3784 case Op_CompareAndSwapB:
3785 case Op_CompareAndSwapS:
3786 case Op_CompareAndSwapI:
3787 case Op_CompareAndSwapL:
3788 case Op_CompareAndSwapP:
3789 case Op_CompareAndSwapN:
3790 case Op_WeakCompareAndSwapB:
3791 case Op_WeakCompareAndSwapS:
3792 case Op_WeakCompareAndSwapI:
3793 case Op_WeakCompareAndSwapL:
3794 case Op_WeakCompareAndSwapP:
3795 case Op_WeakCompareAndSwapN:
3796 case Op_CompareAndExchangeB:
3797 case Op_CompareAndExchangeS:
3798 case Op_CompareAndExchangeI:
3799 case Op_CompareAndExchangeL:
3800 case Op_CompareAndExchangeP:
3801 case Op_CompareAndExchangeN:
3802 case Op_GetAndAddS:
3803 case Op_GetAndAddB:
4307 k->subsume_by(m, this);
4308 }
4309 }
4310 }
4311 break;
4312 }
4313 case Op_CmpUL: {
4314 if (!Matcher::has_match_rule(Op_CmpUL)) {
4315 // No support for unsigned long comparisons
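// Emulate CmpUL with signed nodes: in(1) >> 63 is 0 for non-negative values and
// all ones otherwise, so (in(1) | sign_bit_mask) & max_jlong clamps every value
// with the sign bit set to max_jlong before the signed CmpL. This gives the same
// verdict for unsigned-lower (lt/ge) tests whenever the right-hand operand is
// non-negative, which appears to hold for the shapes that reach here.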
4316 ConINode* sign_pos = new ConINode(TypeInt::make(BitsPerLong - 1));
4317 Node* sign_bit_mask = new RShiftLNode(n->in(1), sign_pos);
4318 Node* orl = new OrLNode(n->in(1), sign_bit_mask);
4319 ConLNode* remove_sign_mask = new ConLNode(TypeLong::make(max_jlong));
4320 Node* andl = new AndLNode(orl, remove_sign_mask);
4321 Node* cmp = new CmpLNode(andl, n->in(2));
4322 n->subsume_by(cmp, this);
4323 }
4324 break;
4325 }
4326 #ifdef ASSERT
4327 case Op_InlineType: {
4328 n->dump(-1);
4329 assert(false, "inline type node was not removed");
4330 break;
4331 }
4332 case Op_ConNKlass: {
4333 const TypePtr* tp = n->as_Type()->type()->make_ptr();
4334 ciKlass* klass = tp->is_klassptr()->exact_klass();
4335 assert(klass->is_in_encoding_range(), "klass cannot be compressed");
4336 break;
4337 }
4338 #endif
4339 default:
4340 assert(!n->is_Call(), "");
4341 assert(!n->is_Mem(), "");
4342 assert(nop != Op_ProfileBoolean, "should be eliminated during IGVN");
4343 break;
4344 }
4345 }
4346
4347 //------------------------------final_graph_reshaping_walk---------------------
4348 // Replacing Opaque nodes with their input in final_graph_reshaping_impl()
4349 // requires that the walk visits a node's inputs before visiting the node.
4350 void Compile::final_graph_reshaping_walk(Node_Stack& nstack, Node* root, Final_Reshape_Counts& frc, Unique_Node_List& dead_nodes) {
4351 Unique_Node_List sfpt;
4698 }
4699 }
4700
4701 bool Compile::needs_clinit_barrier(ciMethod* method, ciMethod* accessing_method) {
4702 return method->is_static() && needs_clinit_barrier(method->holder(), accessing_method);
4703 }
4704
4705 bool Compile::needs_clinit_barrier(ciField* field, ciMethod* accessing_method) {
4706 return field->is_static() && needs_clinit_barrier(field->holder(), accessing_method);
4707 }
4708
4709 bool Compile::needs_clinit_barrier(ciInstanceKlass* holder, ciMethod* accessing_method) {
4710 if (holder->is_initialized()) {
4711 return false;
4712 }
4713 if (holder->is_being_initialized()) {
4714 if (accessing_method->holder() == holder) {
4715 // Access inside a class. The barrier can be elided when access happens in <clinit>,
4716 // <init>, or a static method. In all those cases, an initialization
4717 // barrier on the holder klass has already been passed.
4718 if (accessing_method->is_class_initializer() ||
4719 accessing_method->is_object_constructor() ||
4720 accessing_method->is_static()) {
4721 return false;
4722 }
4723 } else if (accessing_method->holder()->is_subclass_of(holder)) {
4724 // Access from a subclass. The barrier can be elided only when access happens in <clinit>.
4725 // In case of <init> or a static method, a barrier on the subclass is not enough:
4726 // child class can become fully initialized while its parent class is still being initialized.
4727 if (accessing_method->is_class_initializer()) {
4728 return false;
4729 }
4730 }
4731 ciMethod* root = method(); // the root method of compilation
4732 if (root != accessing_method) {
4733 return needs_clinit_barrier(holder, root); // check access in the context of compilation root
4734 }
4735 }
4736 return true;
4737 }
4738
4739 #ifndef PRODUCT
4740 //------------------------------verify_bidirectional_edges---------------------
4741 // For each input edge to a node (ie - for each Use-Def edge), verify that
4742 // there is a corresponding Def-Use edge.
4743 void Compile::verify_bidirectional_edges(Unique_Node_List& visited, const Unique_Node_List* root_and_safepoints) const {
4744 // Allocate stack of size C->live_nodes()/16 to avoid frequent realloc
4745 uint stack_size = live_nodes() >> 4;
4746 Node_List nstack(MAX2(stack_size, (uint) OptoNodeListSize));
4747 if (root_and_safepoints != nullptr) {
4777 if (in != nullptr && !in->is_top()) {
4779 // Count how many times `n` appears among the out-edges of `in`
4779 int cnt = 0;
4780 for (uint idx = 0; idx < in->_outcnt; idx++) {
4781 if (in->_out[idx] == n) {
4782 cnt++;
4783 }
4784 }
4785 assert(cnt > 0, "Failed to find Def-Use edge.");
4786 // Check for duplicate edges
4787 // walk the input array downcounting the input edges to n
4788 for (uint j = 0; j < length; j++) {
4789 if (n->in(j) == in) {
4790 cnt--;
4791 }
4792 }
4793 assert(cnt == 0, "Mismatched edge count.");
4794 } else if (in == nullptr) {
4795 assert(i == 0 || i >= n->req() ||
4796 n->is_Region() || n->is_Phi() || n->is_ArrayCopy() ||
4797 (n->is_Allocate() && i >= AllocateNode::InlineType) ||
4798 (n->is_Unlock() && i == (n->req() - 1)) ||
4799 (n->is_MemBar() && i == 5), // the precedence edge to a membar can be removed during macro node expansion
4800 "only region, phi, arraycopy, allocate, unlock or membar nodes have null data edges");
4801 } else {
4802 assert(in->is_top(), "sanity");
4803 // Nothing to check.
4804 }
4805 }
4806 }
4807 }
4808
4809 //------------------------------verify_graph_edges---------------------------
4810 // Walk the Graph and verify that there is a one-to-one correspondence
4811 // between Use-Def edges and Def-Use edges in the graph.
4812 void Compile::verify_graph_edges(bool no_dead_code, const Unique_Node_List* root_and_safepoints) const {
4813 if (VerifyGraphEdges) {
4814 Unique_Node_List visited;
4815
4816 // Call graph walk to check edges
4817 verify_bidirectional_edges(visited, root_and_safepoints);
4818 if (no_dead_code) {
4819 // Now make sure that no visited node is used by an unvisited node.
4820 bool dead_nodes = false;
4931 // (1) subklass is already limited to a subtype of superklass => always ok
4932 // (2) subklass does not overlap with superklass => always fail
4933 // (3) superklass has NO subtypes and we can check with a simple compare.
4934 Compile::SubTypeCheckResult Compile::static_subtype_check(const TypeKlassPtr* superk, const TypeKlassPtr* subk, bool skip) {
4935 if (skip) {
4936 return SSC_full_test; // Let caller generate the general case.
4937 }
4938
4939 if (subk->is_java_subtype_of(superk)) {
4940 return SSC_always_true; // (0) and (1) this test cannot fail
4941 }
4942
4943 if (!subk->maybe_java_subtype_of(superk)) {
4944 return SSC_always_false; // (2) true path dead; no dynamic test needed
4945 }
4946
4947 const Type* superelem = superk;
4948 if (superk->isa_aryklassptr()) {
4949 int ignored;
4950 superelem = superk->is_aryklassptr()->base_element_type(ignored);
4951
4952 // Do not fold the subtype check to an array klass pointer comparison for nullable inline type arrays,
4953 // because null-free [LMyValue <: nullable [LMyValue even though the klasses differ. Perform a full test.
4954 if (!superk->is_aryklassptr()->is_null_free() && superk->is_aryklassptr()->elem()->isa_instklassptr() &&
4955 superk->is_aryklassptr()->elem()->is_instklassptr()->instance_klass()->is_inlinetype()) {
4956 return SSC_full_test;
4957 }
4958 }
4959
4960 if (superelem->isa_instklassptr()) {
4961 ciInstanceKlass* ik = superelem->is_instklassptr()->instance_klass();
4962 if (!ik->has_subklass()) {
4963 if (!ik->is_final()) {
4964 // Add a dependency if there is a chance of a later subclass.
4965 dependencies()->assert_leaf_type(ik);
4966 }
4967 if (!superk->maybe_java_subtype_of(subk)) {
4968 return SSC_always_false;
4969 }
4970 return SSC_easy_test; // (3) caller can do a simple ptr comparison
4971 }
4972 } else {
4973 // A primitive array type has no subtypes.
4974 return SSC_easy_test; // (3) caller can do a simple ptr comparison
4975 }
4976
4977 return SSC_full_test;
5419 const Type* t = igvn.type_or_null(n);
5420 assert((t == nullptr) || (t == t->remove_speculative()), "no more speculative types");
5421 if (n->is_Type()) {
5422 t = n->as_Type()->type();
5423 assert(t == t->remove_speculative(), "no more speculative types");
5424 }
5425 // Iterate over outs - endless loops are unreachable from below
5426 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
5427 Node *m = n->fast_out(i);
5428 if (not_a_node(m)) {
5429 continue;
5430 }
5431 worklist.push(m);
5432 }
5433 }
5434 igvn.check_no_speculative_types();
5435 #endif
5436 }
5437 }
5438
5439 Node* Compile::optimize_acmp(PhaseGVN* phase, Node* a, Node* b) {
5440 const TypeInstPtr* ta = phase->type(a)->isa_instptr();
5441 const TypeInstPtr* tb = phase->type(b)->isa_instptr();
5442 if (!EnableValhalla || ta == nullptr || tb == nullptr ||
5443 ta->is_zero_type() || tb->is_zero_type() ||
5444 !ta->can_be_inline_type() || !tb->can_be_inline_type()) {
5445 // Use old acmp if one operand is null or not an inline type
5446 return new CmpPNode(a, b);
5447 } else if (ta->is_inlinetypeptr() || tb->is_inlinetypeptr()) {
5448 // We know that one operand is an inline type. Therefore,
5449 // new acmp will only return true if both operands are nullptr.
5450 // Check if both operands are null by or'ing the oops.
5451 a = phase->transform(new CastP2XNode(nullptr, a));
5452 b = phase->transform(new CastP2XNode(nullptr, b));
5453 a = phase->transform(new OrXNode(a, b));
5454 return new CmpXNode(a, phase->MakeConX(0));
5455 }
5456 // Use new acmp
5457 return nullptr;
5458 }
5459
5460 // Auxiliary methods to support randomized stressing/fuzzing.
5461
5462 void Compile::initialize_stress_seed(const DirectiveSet* directive) {
5463 if (FLAG_IS_DEFAULT(StressSeed) || (FLAG_IS_ERGO(StressSeed) && directive->RepeatCompilationOption)) {
5464 _stress_seed = static_cast<uint>(Ticks::now().nanoseconds());
5465 FLAG_SET_ERGO(StressSeed, _stress_seed);
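// Publish the generated seed back into the StressSeed flag (and log it below)
// so the compilation can be reproduced by passing the same seed explicitly.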
5466 } else {
5467 _stress_seed = StressSeed;
5468 }
5469 if (_log != nullptr) {
5470 _log->elem("stress_test seed='%u'", _stress_seed);
5471 }
5472 }
5473
5474 int Compile::random() {
5475 _stress_seed = os::next_random(_stress_seed);
5476 return static_cast<int>(_stress_seed);
5477 }
5478
5479 // This method can be called an arbitrary number of times, with current count
5785 } else {
5786 _debug_network_printer->update_compiled_method(C->method());
5787 }
5788 tty->print_cr("Method printed over network stream to IGV");
5789 _debug_network_printer->print(name, C->root(), visible_nodes);
5790 }
5791 #endif
5792
5793 Node* Compile::narrow_value(BasicType bt, Node* value, const Type* type, PhaseGVN* phase, bool transform_res) {
5794 if (type != nullptr && phase->type(value)->higher_equal(type)) {
5795 return value;
5796 }
5797 Node* result = nullptr;
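// Narrow an int-sized value to the requested subword type: byte and short are
// sign-extended with a shift-left/arithmetic-shift-right pair, boolean and char
// are zero-extended with a mask, and float reinterprets the raw int bits.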
5798 if (bt == T_BYTE) {
5799 result = phase->transform(new LShiftINode(value, phase->intcon(24)));
5800 result = new RShiftINode(result, phase->intcon(24));
5801 } else if (bt == T_BOOLEAN) {
5802 result = new AndINode(value, phase->intcon(0xFF));
5803 } else if (bt == T_CHAR) {
5804 result = new AndINode(value,phase->intcon(0xFFFF));
5805 } else if (bt == T_FLOAT) {
5806 result = new MoveI2FNode(value);
5807 } else {
5808 assert(bt == T_SHORT, "unexpected narrow type");
5809 result = phase->transform(new LShiftINode(value, phase->intcon(16)));
5810 result = new RShiftINode(result, phase->intcon(16));
5811 }
5812 if (transform_res) {
5813 result = phase->transform(result);
5814 }
5815 return result;
5816 }
5817
5818 void Compile::record_method_not_compilable_oom() {
5819 record_method_not_compilable(CompilationMemoryStatistic::failure_reason_memlimit());
5820 }
|