1 /*
   2  * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "ci/bcEscapeAnalyzer.hpp"
  26 #include "ci/ciCallSite.hpp"
  27 #include "ci/ciObjArray.hpp"
  28 #include "ci/ciMemberName.hpp"
  29 #include "ci/ciMethodHandle.hpp"
  30 #include "classfile/javaClasses.hpp"
  31 #include "compiler/compileLog.hpp"
  32 #include "opto/addnode.hpp"
  33 #include "opto/callGenerator.hpp"
  34 #include "opto/callnode.hpp"
  35 #include "opto/castnode.hpp"
  36 #include "opto/cfgnode.hpp"
  37 #include "opto/inlinetypenode.hpp"
  38 #include "opto/parse.hpp"
  39 #include "opto/rootnode.hpp"
  40 #include "opto/runtime.hpp"
  41 #include "opto/subnode.hpp"
  42 #include "runtime/os.inline.hpp"
  43 #include "runtime/sharedRuntime.hpp"
  44 #include "utilities/debug.hpp"
  45 
  46 // Utility function: build the TypeFunc signature for this generator's method.
  47 const TypeFunc* CallGenerator::tf() const {
  48   return TypeFunc::make(method());
  49 }
  50 
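     // A call site is treated as an inlined method handle intrinsic when the symbolic
     // reference at the caller's bci is a MH intrinsic (invokeBasic/linkTo*) but the
     // method actually being invoked is not, i.e. the linker adapter was bypassed and
     // its target is called directly.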
  51 bool CallGenerator::is_inlined_method_handle_intrinsic(JVMState* jvms, ciMethod* m) {
  52   return is_inlined_method_handle_intrinsic(jvms->method(), jvms->bci(), m);
  53 }
  54 
  55 bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* caller, int bci, ciMethod* m) {
  56   ciMethod* symbolic_info = caller->get_method_at_bci(bci);
  57   return is_inlined_method_handle_intrinsic(symbolic_info, m);
  58 }
  59 
  60 bool CallGenerator::is_inlined_method_handle_intrinsic(ciMethod* symbolic_info, ciMethod* m) {
  61   return symbolic_info->is_method_handle_intrinsic() && !m->is_method_handle_intrinsic();
  62 }
  63 
  64 //-----------------------------ParseGenerator---------------------------------
  65 // Internal class which handles all direct bytecode traversal.
  66 class ParseGenerator : public InlineCallGenerator {
  67 private:
  68   bool  _is_osr;
  69   float _expected_uses;
  70 
  71 public:
  72   ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false)
  73     : InlineCallGenerator(method)
  74   {
  75     _is_osr        = is_osr;
  76     _expected_uses = expected_uses;
  77     assert(InlineTree::check_can_parse(method) == nullptr, "parse must be possible");
  78   }
  79 
  80   virtual bool      is_parse() const           { return true; }
  81   virtual JVMState* generate(JVMState* jvms);
  82   bool is_osr() const { return _is_osr; }
  83 
  84 };
  85 
  86 JVMState* ParseGenerator::generate(JVMState* jvms) {
  87   Compile* C = Compile::current();
  88 
  89   if (is_osr()) {
  90     // The JVMS for an OSR has a single argument (see its TypeFunc).
  91     assert(jvms->depth() == 1, "no inline OSR");
  92   }
  93 
  94   if (C->failing()) {
  95     return nullptr;  // bailing out of the compile; do not try to parse
  96   }
  97 
  98   Parse parser(jvms, method(), _expected_uses);
  99   if (C->failing()) return nullptr;
 100 
 101   // Grab signature for matching/allocation
 102   GraphKit& exits = parser.exits();
 103 
 104   if (C->failing()) {
 105     while (exits.pop_exception_state() != nullptr) ;  // discard pending exception states
 106     return nullptr;
 107   }
 108 
 109   assert(exits.jvms()->same_calls_as(jvms), "sanity");
 110 
 111   // Simply return the exit state of the parser,
 112   // augmented by any exceptional states.
 113   return exits.transfer_exceptions_into_jvms();
 114 }
 115 
 116 //---------------------------DirectCallGenerator------------------------------
 117 // Internal class which handles all out-of-line calls w/o receiver type checks.
 118 class DirectCallGenerator : public CallGenerator {
 119  private:
 120   CallStaticJavaNode* _call_node;
 121   // Force separate memory and I/O projections for the exceptional
 122   // paths to facilitate late inlining.
 123   bool                _separate_io_proj;
 124 
 125 protected:
 126   void set_call_node(CallStaticJavaNode* call) { _call_node = call; }
 127 
 128  public:
 129   DirectCallGenerator(ciMethod* method, bool separate_io_proj)
 130     : CallGenerator(method),
 131       _call_node(nullptr),
 132       _separate_io_proj(separate_io_proj)
 133   {
 134     if (InlineTypeReturnedAsFields && method->is_method_handle_intrinsic()) {
 135       // If this call has not been optimized by the time optimizations are over,
 136       // we'll need to add a call to create an inline type instance from the klass
 137       // returned by the call (see PhaseMacroExpand::expand_mh_intrinsic_return).
 138       // Separating memory and I/O projections for exceptions is required to
 139       // perform that graph transformation.
 140       _separate_io_proj = true;
 141     }
 142   }
 143   virtual JVMState* generate(JVMState* jvms);
 144 
 145   virtual CallNode* call_node() const { return _call_node; }
 146   virtual CallGenerator* with_call_node(CallNode* call) {
 147     DirectCallGenerator* dcg = new DirectCallGenerator(method(), _separate_io_proj);
 148     dcg->set_call_node(call->as_CallStaticJava());
 149     return dcg;
 150   }
 151 };
 152 
 153 JVMState* DirectCallGenerator::generate(JVMState* jvms) {
 154   GraphKit kit(jvms);
 155   PhaseGVN& gvn = kit.gvn();
 156   bool is_static = method()->is_static();
 157   address target = is_static ? SharedRuntime::get_resolve_static_call_stub()
 158                              : SharedRuntime::get_resolve_opt_virtual_call_stub();
 159 
 160   if (kit.C->log() != nullptr) {
 161     kit.C->log()->elem("direct_call bci='%d'", jvms->bci());
 162   }
 163 
 164   CallStaticJavaNode* call = new CallStaticJavaNode(kit.C, tf(), target, method());
 165   if (is_inlined_method_handle_intrinsic(jvms, method())) {
 166     // To issue a direct call and skip the MH.linkTo*/invokeBasic adapter, additional
 167     // information about the method being invoked must be attached to the call site
 168     // so that the resolution logic works (see SharedRuntime::resolve_static_call_C).
 170     call->set_override_symbolic_info(true);
 171   }
 172   _call_node = call;  // Save the call node in case we need it later
 173   if (!is_static) {
 174     // Make an explicit receiver null_check as part of this call.
 175     // Since we share a map with the caller, his JVMS gets adjusted.
 176     kit.null_check_receiver_before_call(method());
 177     if (kit.stopped()) {
 178       // And dump it back to the caller, decorated with any exceptions:
 179       return kit.transfer_exceptions_into_jvms();
 180     }
 181     // Mark the call node as virtual, sort of:
 182     call->set_optimized_virtual(true);
 183     if (method()->is_method_handle_intrinsic() ||
 184         method()->is_compiled_lambda_form()) {
 185       call->set_method_handle_invoke(true);
 186     }
 187   }
 188   kit.set_arguments_for_java_call(call, is_late_inline());
 189   if (kit.stopped()) {
 190     return kit.transfer_exceptions_into_jvms();
 191   }
 192   kit.set_edges_for_java_call(call, false, _separate_io_proj);
 193   Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
 194   kit.push_node(method()->return_type()->basic_type(), ret);
 195   return kit.transfer_exceptions_into_jvms();
 196 }
 197 
 198 //--------------------------VirtualCallGenerator------------------------------
 199 // Internal class which handles all out-of-line calls checking receiver type.
 200 class VirtualCallGenerator : public CallGenerator {
 201 private:
 202   int _vtable_index;
 203   bool _separate_io_proj;
 204   CallDynamicJavaNode* _call_node;
 205 
 206 protected:
 207   void set_call_node(CallDynamicJavaNode* call) { _call_node = call; }
 208 
 209 public:
 210   VirtualCallGenerator(ciMethod* method, int vtable_index, bool separate_io_proj)
 211     : CallGenerator(method), _vtable_index(vtable_index), _separate_io_proj(separate_io_proj), _call_node(nullptr)
 212   {
 213     assert(vtable_index == Method::invalid_vtable_index ||
 214            vtable_index >= 0, "either invalid or usable");
 215   }
 216   virtual bool      is_virtual() const          { return true; }
 217   virtual JVMState* generate(JVMState* jvms);
 218 
 219   virtual CallNode* call_node() const { return _call_node; }
 220   int vtable_index() const { return _vtable_index; }
 221 
 222   virtual CallGenerator* with_call_node(CallNode* call) {
 223     VirtualCallGenerator* cg = new VirtualCallGenerator(method(), _vtable_index, _separate_io_proj);
 224     cg->set_call_node(call->as_CallDynamicJava());
 225     return cg;
 226   }
 227 };
 228 
 229 JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
 230   GraphKit kit(jvms);
 231   Node* receiver = kit.argument(0);
 232   if (kit.C->log() != nullptr) {
 233     kit.C->log()->elem("virtual_call bci='%d'", jvms->bci());
 234   }
 235 
 236   // If the receiver is a constant null, do not torture the system
 237   // by attempting to call through it.  The compile will proceed
 238   // correctly, but may bail out in final_graph_reshaping, because
 239   // the call instruction will have a seemingly deficient out-count.
 240   // (The bailout says something misleading about an "infinite loop".)
 241   if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
 242     assert(Bytecodes::is_invoke(kit.java_bc()), "%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()));
 243     ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
 244     int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
 245     kit.inc_sp(arg_size);  // restore arguments
 246     kit.uncommon_trap(Deoptimization::Reason_null_check,
 247                       Deoptimization::Action_none,
 248                       nullptr, "null receiver");
 249     return kit.transfer_exceptions_into_jvms();
 250   }
 251 
 252   // Ideally we would unconditionally do a null check here and let it
 253   // be converted to an implicit check based on profile information.
 254   // However currently the conversion to implicit null checks in
 255   // Block::implicit_null_check() only looks for loads and stores, not calls.
 256   ciMethod *caller = kit.method();
 257   ciMethodData *caller_md = (caller == nullptr) ? nullptr : caller->method_data();
 258   if (!UseInlineCaches || !ImplicitNullChecks || !os::zero_page_read_protected() ||
 259        ((ImplicitNullCheckThreshold > 0) && caller_md &&
 260        (caller_md->trap_count(Deoptimization::Reason_null_check)
 261        >= (uint)ImplicitNullCheckThreshold))) {
 262     // Make an explicit receiver null_check as part of this call.
 263     // Since we share a map with the caller, his JVMS gets adjusted.
 264     receiver = kit.null_check_receiver_before_call(method());
 265     if (kit.stopped()) {
 266       // And dump it back to the caller, decorated with any exceptions:
 267       return kit.transfer_exceptions_into_jvms();
 268     }
 269   }
 270 
 271   assert(!method()->is_static(), "virtual call must not be to static");
 272   assert(!method()->is_final(), "virtual call should not be to final");
 273   assert(!method()->is_private(), "virtual call should not be to private");
 274   assert(_vtable_index == Method::invalid_vtable_index || !UseInlineCaches,
 275          "no vtable calls if +UseInlineCaches");
 276   address target = SharedRuntime::get_resolve_virtual_call_stub();
 277   // Normal inline cache used for call
 278   CallDynamicJavaNode* call = new CallDynamicJavaNode(tf(), target, method(), _vtable_index);
 279   if (is_inlined_method_handle_intrinsic(jvms, method())) {
 280     // To issue a direct call (optimized virtual or virtual) and skip the
 281     // MH.linkTo*/invokeBasic adapter, additional information about the method
 282     // being invoked must be attached to the call site so that the resolution
 283     // logic works (see SharedRuntime::resolve_{virtual,opt_virtual}_call_C).
 284     call->set_override_symbolic_info(true);
 285   }
 286   _call_node = call;  // Save the call node in case we need it later
 287 
 288   kit.set_arguments_for_java_call(call);
 289   if (kit.stopped()) {
 290     return kit.transfer_exceptions_into_jvms();
 291   }
 292   kit.set_edges_for_java_call(call, false /*must_throw*/, _separate_io_proj);
 293   Node* ret = kit.set_results_for_java_call(call, _separate_io_proj);
 294   kit.push_node(method()->return_type()->basic_type(), ret);
 295 
 296   // Represent the effect of an implicit receiver null_check
 297   // as part of this call.  Since we share a map with the caller,
 298   // his JVMS gets adjusted.
 299   kit.cast_not_null(receiver);
 300   return kit.transfer_exceptions_into_jvms();
 301 }
 302 
 303 CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) {
 304   if (InlineTree::check_can_parse(m) != nullptr)  return nullptr;
 305   return new ParseGenerator(m, expected_uses);
 306 }
 307 
 308 // As a special case, the JVMS passed to this CallGenerator is
 309 // for the method execution already in progress, not just the JVMS
 310 // of the caller.  Thus, this CallGenerator cannot be mixed with others!
 311 CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) {
 312   if (InlineTree::check_can_parse(m) != nullptr)  return nullptr;
 313   float past_uses = m->interpreter_invocation_count();
 314   float expected_uses = past_uses;
 315   return new ParseGenerator(m, expected_uses, true);
 316 }
 317 
 318 CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) {
 319   assert(!m->is_abstract(), "for_direct_call mismatch");
 320   return new DirectCallGenerator(m, separate_io_proj);
 321 }
 322 
 323 CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
 324   assert(!m->is_static(), "for_virtual_call mismatch");
 325   assert(!m->is_method_handle_intrinsic(), "should be a direct call");
 326   return new VirtualCallGenerator(m, vtable_index, false /*separate_io_projs*/);
 327 }
 328 
 329 // Allow inlining decisions to be delayed
 330 class LateInlineCallGenerator : public DirectCallGenerator {
 331  private:
 332   jlong _unique_id;   // unique id for log compilation
 333   bool _is_pure_call; // a hint that the call doesn't have important side effects to care about
 334 
 335  protected:
 336   CallGenerator* _inline_cg;
 337   virtual bool do_late_inline_check(Compile* C, JVMState* jvms) { return true; }
 338   virtual CallGenerator* inline_cg() const { return _inline_cg; }
 339   virtual bool is_pure_call() const { return _is_pure_call; }
 340 
 341  public:
 342   LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg, bool is_pure_call = false) :
 343     DirectCallGenerator(method, true), _unique_id(0), _is_pure_call(is_pure_call), _inline_cg(inline_cg) {}
 344 
 345   virtual bool is_late_inline() const { return true; }
 346 
 347   // Convert the CallStaticJava into an inline
 348   virtual void do_late_inline();
 349 
 350   virtual JVMState* generate(JVMState* jvms) {
 351     Compile *C = Compile::current();
 352 
 353     C->log_inline_id(this);
 354 
 355     // Record that this call site should be revisited once the main
 356     // parse is finished.
 357     if (!is_mh_late_inline()) {
 358       C->add_late_inline(this);
 359     }
 360 
 361     // Emit the CallStaticJava and request separate projections so
 362     // that the late inlining logic can distinguish between fall
 363     // through and exceptional uses of the memory and io projections
 364     // as is done for allocations and macro expansion.
 365     return DirectCallGenerator::generate(jvms);
 366   }
 367 
 368   virtual void set_unique_id(jlong id) {
 369     _unique_id = id;
 370   }
 371 
 372   virtual jlong unique_id() const {
 373     return _unique_id;
 374   }
 375 
 376   virtual CallGenerator* inline_cg() {
 377     return _inline_cg;
 378   }
 379 
 380   virtual CallGenerator* with_call_node(CallNode* call) {
 381     LateInlineCallGenerator* cg = new LateInlineCallGenerator(method(), _inline_cg, _is_pure_call);
 382     cg->set_call_node(call->as_CallStaticJava());
 383     return cg;
 384   }
 385 };
 386 
 387 CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 388   return new LateInlineCallGenerator(method, inline_cg);
 389 }
 390 
 391 class LateInlineMHCallGenerator : public LateInlineCallGenerator {
 392   ciMethod* _caller;
 393   bool _input_not_const;
 394 
 395   virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
 396 
 397  public:
 398   LateInlineMHCallGenerator(ciMethod* caller, ciMethod* callee, bool input_not_const) :
 399     LateInlineCallGenerator(callee, nullptr), _caller(caller), _input_not_const(input_not_const) {}
 400 
 401   virtual bool is_mh_late_inline() const { return true; }
 402 
 403   // Convert the CallStaticJava into an inline
 404   virtual void do_late_inline();
 405 
 406   virtual JVMState* generate(JVMState* jvms) {
 407     JVMState* new_jvms = LateInlineCallGenerator::generate(jvms);
 408 
 409     Compile* C = Compile::current();
 410     if (_input_not_const) {
 411       // Inlining won't be possible, so there is no need to enqueue right now.
 412       call_node()->set_generator(this);
 413     } else {
 414       C->add_late_inline(this);
 415     }
 416     return new_jvms;
 417   }
 418 
 419   virtual CallGenerator* with_call_node(CallNode* call) {
 420     LateInlineMHCallGenerator* cg = new LateInlineMHCallGenerator(_caller, method(), _input_not_const);
 421     cg->set_call_node(call->as_CallStaticJava());
 422     return cg;
 423   }
 424 };
 425 
 426 bool LateInlineMHCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
 427   // When inlining a virtual call, the null check at the call and the call itself can throw. These two paths have
 428   // different expression stacks, which breaks late inlining. The MH invoker is not expected to be called from a
 429   // method with exception handlers. When there is no exception handler, GraphKit::builtin_throw() pops the stack,
 430   // which avoids the issue of late inlining with exceptions.
 431   assert(!jvms->method()->has_exception_handlers() ||
 432          (method()->intrinsic_id() != vmIntrinsics::_linkToVirtual &&
 433           method()->intrinsic_id() != vmIntrinsics::_linkToInterface), "no exception handler expected");
 434   // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
 435   bool allow_inline = C->inlining_incrementally();
 436   bool input_not_const = true;
 437   CallGenerator* cg = for_method_handle_inline(jvms, _caller, method(), allow_inline, input_not_const);
 438   assert(!input_not_const, "sanity"); // shouldn't have been scheduled for inlining in the first place
 439 
 440   if (cg != nullptr) {
 441     // AlwaysIncrementalInline causes for_method_handle_inline() to
 442     // return a LateInlineCallGenerator. Extract the
 443     // InlineCallGenerator from it.
 444     if (AlwaysIncrementalInline && cg->is_late_inline() && !cg->is_virtual_late_inline()) {
 445       cg = cg->inline_cg();
 446       assert(cg != nullptr, "inline call generator expected");
 447     }
 448 
 449     if (!allow_inline) {
 450       C->inline_printer()->record(cg->method(), call_node()->jvms(), InliningResult::FAILURE,
 451                                   "late method handle call resolution");
 452     }
 453     assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
 454     _inline_cg = cg;
 455     C->dec_number_of_mh_late_inlines();
 456     return true;
 457   } else {
 458     // A method handle call with a constant appendix argument should be either inlined or replaced with a direct call,
 459     // unless there's a signature mismatch between caller and callee. If that fails, there's not much left to improve,
 460     // so don't reinstall the generator, to avoid bouncing it between IGVN and incremental inlining indefinitely.
 461     return false;
 462   }
 463 }
 464 
 465 CallGenerator* CallGenerator::for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const) {
 466   assert(IncrementalInlineMH, "required");
 467   Compile::current()->inc_number_of_mh_late_inlines();
 468   CallGenerator* cg = new LateInlineMHCallGenerator(caller, callee, input_not_const);
 469   return cg;
 470 }
 471 
 472 // Allow inlining decisions to be delayed
 473 class LateInlineVirtualCallGenerator : public VirtualCallGenerator {
 474  private:
 475   jlong          _unique_id;   // unique id for log compilation
 476   CallGenerator* _inline_cg;
 477   ciMethod*      _callee;
 478   bool           _is_pure_call;
 479   float          _prof_factor;
 480 
 481  protected:
 482   virtual bool do_late_inline_check(Compile* C, JVMState* jvms);
 483   virtual CallGenerator* inline_cg() const { return _inline_cg; }
 484   virtual bool is_pure_call() const { return _is_pure_call; }
 485 
 486  public:
 487   LateInlineVirtualCallGenerator(ciMethod* method, int vtable_index, float prof_factor)
 488   : VirtualCallGenerator(method, vtable_index, true /*separate_io_projs*/),
 489     _unique_id(0), _inline_cg(nullptr), _callee(nullptr), _is_pure_call(false), _prof_factor(prof_factor) {
 490     assert(IncrementalInlineVirtual, "required");
 491   }
 492 
 493   virtual bool is_late_inline() const { return true; }
 494 
 495   virtual bool is_virtual_late_inline() const { return true; }
 496 
 497   // Convert the CallDynamicJava into an inline
 498   virtual void do_late_inline();
 499 
 500   virtual void set_callee_method(ciMethod* m) {
 501     assert(_callee == nullptr, "repeated inlining attempt");
 502     _callee = m;
 503   }
 504 
 505   virtual JVMState* generate(JVMState* jvms) {
 506     // Emit the CallDynamicJava and request separate projections so
 507     // that the late inlining logic can distinguish between fall
 508     // through and exceptional uses of the memory and io projections
 509     // as is done for allocations and macro expansion.
 510     JVMState* new_jvms = VirtualCallGenerator::generate(jvms);
 511     if (call_node() != nullptr) {
 512       call_node()->set_generator(this);
 513     }
 514     return new_jvms;
 515   }
 516 
 517   virtual void set_unique_id(jlong id) {
 518     _unique_id = id;
 519   }
 520 
 521   virtual jlong unique_id() const {
 522     return _unique_id;
 523   }
 524 
 525   virtual CallGenerator* with_call_node(CallNode* call) {
 526     LateInlineVirtualCallGenerator* cg = new LateInlineVirtualCallGenerator(method(), vtable_index(), _prof_factor);
 527     cg->set_call_node(call->as_CallDynamicJava());
 528     return cg;
 529   }
 530 };
 531 
 532 bool LateInlineVirtualCallGenerator::do_late_inline_check(Compile* C, JVMState* jvms) {
 533   // Method handle linker case is handled in CallDynamicJavaNode::Ideal().
 534   // Unless inlining is performed, the _override_symbolic_info bit will be set in DirectCallGenerator::generate().
 535 
 536   // Implicit receiver null checks introduce problems when exception states are combined.
 537   Node* receiver = jvms->map()->argument(jvms, 0);
 538   const Type* recv_type = C->initial_gvn()->type(receiver);
 539   if (recv_type->maybe_null()) {
 540     C->inline_printer()->record(method(), call_node()->jvms(), InliningResult::FAILURE,
 541                                 "late call devirtualization failed (receiver may be null)");
 542     return false;
 543   }
 544   // Even if inlining is not allowed, a virtual call can be strength-reduced to a direct call.
 545   bool allow_inline = C->inlining_incrementally();
 546   if (!allow_inline && _callee->holder()->is_interface()) {
 547     // Don't convert the interface call to a direct call guarded by an interface subtype check.
 548     C->inline_printer()->record(method(), call_node()->jvms(), InliningResult::FAILURE,
 549                                 "late call devirtualization failed (interface call)");
 550     return false;
 551   }
 552   CallGenerator* cg = C->call_generator(_callee,
 553                                         vtable_index(),
 554                                         false /*call_does_dispatch*/,
 555                                         jvms,
 556                                         allow_inline,
 557                                         _prof_factor,
 558                                         nullptr /*speculative_receiver_type*/,
 559                                         true /*allow_intrinsics*/);
 560 
 561   if (cg != nullptr) {
 562     if (!allow_inline) {
 563       C->inline_printer()->record(cg->method(), call_node()->jvms(), InliningResult::FAILURE, "late call devirtualization");
 564     }
 565     assert(!cg->is_late_inline() || cg->is_mh_late_inline() || AlwaysIncrementalInline || StressIncrementalInlining, "we're doing late inlining");
 566     _inline_cg = cg;
 567     return true;
 568   } else {
 569     // Virtual call which provably doesn't dispatch should be either inlined or replaced with a direct call.
 570     assert(false, "no progress");
 571     return false;
 572   }
 573 }
 574 
 575 CallGenerator* CallGenerator::for_late_inline_virtual(ciMethod* m, int vtable_index, float prof_factor) {
 576   assert(IncrementalInlineVirtual, "required");
 577   assert(!m->is_static(), "for_virtual_call mismatch");
 578   assert(!m->is_method_handle_intrinsic(), "should be a direct call");
 579   return new LateInlineVirtualCallGenerator(m, vtable_index, prof_factor);
 580 }
 581 
 582 void LateInlineCallGenerator::do_late_inline() {
 583   CallGenerator::do_late_inline_helper();
 584 }
 585 
 586 void LateInlineMHCallGenerator::do_late_inline() {
 587   CallGenerator::do_late_inline_helper();
 588 }
 589 
 590 void LateInlineVirtualCallGenerator::do_late_inline() {
 591   assert(_callee != nullptr, "required"); // set up in CallDynamicJavaNode::Ideal
 592   CallGenerator::do_late_inline_helper();
 593 }
 594 
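     // Shared late inlining driver: verify that the saved call node is still live and
     // well-formed, rebuild a JVMState and map from its inputs, run the subclass's
     // do_late_inline_check(), and on success parse the callee in place of the call,
     // wiring its (possibly scalarized inline type) result back into the graph.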
 595 void CallGenerator::do_late_inline_helper() {
 596   assert(is_late_inline(), "only late inline allowed");
 597 
 598   // Can't inline it
 599   CallNode* call = call_node();
 600   if (call == nullptr || call->outcnt() == 0 ||
 601       call->in(0) == nullptr || call->in(0)->is_top()) {
 602     return;
 603   }
 604 
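       // Bail out if any argument (other than a HALF placeholder) is top: the call
       // sits on a path that is dead but has not been folded away yet.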
 605   const TypeTuple* r = call->tf()->domain_cc();
 606   for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
 607     if (call->in(i1)->is_top() && r->field_at(i1) != Type::HALF) {
 608       assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
 609       return;
 610     }
 611   }
 612 
 613   if (call->in(TypeFunc::Memory)->is_top()) {
 614     assert(Compile::current()->inlining_incrementally(), "shouldn't happen during parsing");
 615     return;
 616   }
 617   if (call->in(TypeFunc::Memory)->is_MergeMem()) {
 618     MergeMemNode* merge_mem = call->in(TypeFunc::Memory)->as_MergeMem();
 619     if (merge_mem->base_memory() == merge_mem->empty_memory()) {
 620       return; // dead path
 621     }
 622   }
 623 
 624   // check for unreachable loop
 625   // Similar to incremental inlining, don't assert that all call
 626   // projections are still there for post-parse call devirtualization.
 627   bool do_asserts = !is_mh_late_inline() && !is_virtual_late_inline();
 628   CallProjections* callprojs = call->extract_projections(true, do_asserts);
 629   if ((callprojs->fallthrough_catchproj == call->in(0)) ||
 630       (callprojs->catchall_catchproj    == call->in(0)) ||
 631       (callprojs->fallthrough_memproj   == call->in(TypeFunc::Memory)) ||
 632       (callprojs->catchall_memproj      == call->in(TypeFunc::Memory)) ||
 633       (callprojs->fallthrough_ioproj    == call->in(TypeFunc::I_O)) ||
 634       (callprojs->catchall_ioproj       == call->in(TypeFunc::I_O)) ||
 635       (callprojs->exobj != nullptr && call->find_edge(callprojs->exobj) != -1)) {
 636     return;
 637   }
 638 
 639   Compile* C = Compile::current();
 640   // Remove inlined methods from Compiler's lists.
 641   if (call->is_macro()) {
 642     C->remove_macro_node(call);
 643   }
 644 
 645 
 646   bool result_not_used = true;
 647   for (uint i = 0; i < callprojs->nb_resproj; i++) {
 648     if (callprojs->resproj[i] != nullptr) {
 649       if (callprojs->resproj[i]->outcnt() != 0) {
 650         result_not_used = false;
 651       }
 652       if (call->find_edge(callprojs->resproj[i]) != -1) {
 653         return;
 654       }
 655     }
 656   }
 657 
 658   if (is_pure_call() && result_not_used) {
 659     // The call is marked as pure (no important side effects) and its result isn't used,
 660     // so it's safe to remove the call.
 661     GraphKit kit(call->jvms());
 662     kit.replace_call(call, C->top(), true, do_asserts);
 663   } else {
 664     // Make a clone of the JVMState that is appropriate to use for driving a parse
 665     JVMState* old_jvms = call->jvms();
 666     JVMState* jvms = old_jvms->clone_shallow(C);
 667     uint size = call->req();
 668     SafePointNode* map = new SafePointNode(size, jvms);
 669     for (uint i1 = 0; i1 < size; i1++) {
 670       map->init_req(i1, call->in(i1));
 671     }
 672 
 673     PhaseGVN& gvn = *C->initial_gvn();
 674     // Make sure the state is a MergeMem for parsing.
 675     if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
 676       Node* mem = MergeMemNode::make(map->in(TypeFunc::Memory));
 677       gvn.set_type_bottom(mem);
 678       map->set_req(TypeFunc::Memory, mem);
 679     }
 680 
 681     // blow away old call arguments
 682     for (uint i1 = TypeFunc::Parms; i1 < r->cnt(); i1++) {
 683       map->set_req(i1, C->top());
 684     }
 685     jvms->set_map(map);
 686 
 687     // Make enough space in the expression stack to transfer
 688     // the incoming arguments and return value.
 689     map->ensure_stack(jvms, jvms->method()->max_stack());
 690     const TypeTuple* domain_sig = call->_tf->domain_sig();
 691     uint nargs = method()->arg_size();
 692     assert(domain_sig->cnt() - TypeFunc::Parms == nargs, "inconsistent signature");
 693 
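         // Transfer the incoming arguments into the new map: i1 walks the declared
         // signature, j walks the (possibly scalarized) inputs of the call node, and
         // arg_num counts declared arguments (skipping the upper halves of longs and
         // doubles) for the is_scalarized_arg() query below.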
 694     uint j = TypeFunc::Parms;
 695     int arg_num = 0;
 696     for (uint i1 = 0; i1 < nargs; i1++) {
 697       const Type* t = domain_sig->field_at(TypeFunc::Parms + i1);
 698       if (t->is_inlinetypeptr() && !method()->get_Method()->mismatch() && method()->is_scalarized_arg(arg_num)) {
 699         // Inline type arguments are not passed by reference: we get an argument per
 700         // field of the inline type. Build InlineTypeNodes from the inline type arguments.
 701         GraphKit arg_kit(jvms, &gvn);
 702         Node* vt = InlineTypeNode::make_from_multi(&arg_kit, call, t->inline_klass(), j, /* in= */ true, /* null_free= */ !t->maybe_null());
 703         map->set_control(arg_kit.control());
 704         map->set_argument(jvms, i1, vt);
 705       } else {
 706         map->set_argument(jvms, i1, call->in(j++));
 707       }
 708       if (t != Type::HALF) {
 709         arg_num++;
 710       }
 711     }
 712 
 713     C->log_late_inline(this);
 714 
 715     // JVMState is ready, so time to perform some checks and prepare for inlining attempt.
 716     if (!do_late_inline_check(C, jvms)) {
 717       map->disconnect_inputs(C);
 718       return;
 719     }
 720 
 721     // Check if we are late inlining a method handle call that returns an inline type as fields.
 722     Node* buffer_oop = nullptr;
 723     ciMethod* inline_method = inline_cg()->method();
 724     ciType* return_type = inline_method->return_type();
 725     if (!call->tf()->returns_inline_type_as_fields() && is_mh_late_inline() &&
 726         return_type->is_inlinetype() && return_type->as_inline_klass()->can_be_returned_as_fields()) {
 727       // Allocate a buffer for the inline type returned as fields because the caller expects an oop return.
 728       // Do this before the method handle call in case the buffer allocation triggers deoptimization and
 729       // we need to "re-execute" the call in the interpreter (to make sure the call is only executed once).
 730       GraphKit arg_kit(jvms, &gvn);
 731       {
 732         PreserveReexecuteState preexecs(&arg_kit);
 733         arg_kit.jvms()->set_should_reexecute(true);
 734         arg_kit.inc_sp(nargs);
 735         Node* klass_node = arg_kit.makecon(TypeKlassPtr::make(return_type->as_inline_klass()));
 736         buffer_oop = arg_kit.new_instance(klass_node, nullptr, nullptr, /* deoptimize_on_exception */ true);
 737       }
 738       jvms = arg_kit.transfer_exceptions_into_jvms();
 739     }
 740 
 741     // Set up default node notes to be picked up by the inlining
 742     Node_Notes* old_nn = C->node_notes_at(call->_idx);
 743     if (old_nn != nullptr) {
 744       Node_Notes* entry_nn = old_nn->clone(C);
 745       entry_nn->set_jvms(jvms);
 746       C->set_default_node_notes(entry_nn);
 747     }
 748 
 749     // Now perform the inlining using the synthesized JVMState
 750     JVMState* new_jvms = inline_cg()->generate(jvms);
 751     if (new_jvms == nullptr)  return;  // no change
 752     if (C->failing())      return;
 753 
 754     if (is_mh_late_inline()) {
 755       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (method handle)");
 756     } else if (is_string_late_inline()) {
 757       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (string method)");
 758     } else if (is_boxing_late_inline()) {
 759       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (boxing method)");
 760     } else if (is_vector_reboxing_late_inline()) {
 761       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded (vector reboxing method)");
 762     } else {
 763       C->inline_printer()->record(method(), jvms, InliningResult::SUCCESS, "late inline succeeded");
 764     }
 765 
 766     // Capture any exceptional control flow
 767     GraphKit kit(new_jvms);
 768 
 769     // Find the result object
 770     Node* result = C->top();
 771     int   result_size = method()->return_type()->size();
 772     if (result_size != 0 && !kit.stopped()) {
 773       result = (result_size == 1) ? kit.pop() : kit.pop_pair();
 774     }
 775 
 776     if (call->is_CallStaticJava() && call->as_CallStaticJava()->is_boxing_method()) {
 777       result = kit.must_be_not_null(result, false);
 778     }
 779 
 780     if (inline_cg()->is_inline()) {
 781       C->set_has_loops(C->has_loops() || inline_method->has_loops());
 782       C->env()->notice_inlined_method(inline_method);
 783     }
 784     C->set_inlining_progress(true);
 785     C->set_do_cleanup(kit.stopped()); // path is dead; needs cleanup
 786 
 787     // Handle inline type returns
 788     InlineTypeNode* vt = result->isa_InlineType();
 789     if (vt != nullptr) {
 790       if (call->tf()->returns_inline_type_as_fields()) {
 791         vt->replace_call_results(&kit, call, C);
 792       } else if (vt->is_InlineType()) {
 793         // Result might still be allocated (for example, if it has been stored to a non-flat field)
 794         if (!vt->is_allocated(&kit.gvn())) {
 795           assert(buffer_oop != nullptr, "should have allocated a buffer");
 796           RegionNode* region = new RegionNode(3);
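               // Path 1 of the region handles a null inline type result (keep a null oop);
               // path 2 stores the fields into the pre-allocated buffer and uses its oop.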
 797 
 798           // Check if result is null
 799           Node* null_ctl = kit.top();
 800           kit.null_check_common(vt->get_is_init(), T_INT, false, &null_ctl);
 801           region->init_req(1, null_ctl);
 802           PhiNode* oop = PhiNode::make(region, kit.gvn().zerocon(T_OBJECT), TypeInstPtr::make(TypePtr::BotPTR, vt->type()->inline_klass()));
 803           Node* init_mem = kit.reset_memory();
 804           PhiNode* mem = PhiNode::make(region, init_mem, Type::MEMORY, TypePtr::BOTTOM);
 805 
 806           // Not null, initialize the buffer
 807           kit.set_all_memory(init_mem);
 808           vt->store(&kit, buffer_oop, buffer_oop, vt->type()->inline_klass());
 809           // Do not let stores that initialize this buffer be reordered with a subsequent
 810           // store that would make this buffer accessible by other threads.
 811           AllocateNode* alloc = AllocateNode::Ideal_allocation(buffer_oop);
 812           assert(alloc != nullptr, "must have an allocation node");
 813           kit.insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
 814           region->init_req(2, kit.control());
 815           oop->init_req(2, buffer_oop);
 816           mem->init_req(2, kit.merged_memory());
 817 
 818           // Update oop input to buffer
 819           kit.gvn().hash_delete(vt);
 820           vt->set_oop(kit.gvn(), kit.gvn().transform(oop));
 821           vt->set_is_buffered(kit.gvn());
 822           vt = kit.gvn().transform(vt)->as_InlineType();
 823 
 824           kit.set_control(kit.gvn().transform(region));
 825           kit.set_all_memory(kit.gvn().transform(mem));
 826           kit.record_for_igvn(region);
 827           kit.record_for_igvn(oop);
 828           kit.record_for_igvn(mem);
 829         }
 830         result = vt;
 831       }
 832       DEBUG_ONLY(buffer_oop = nullptr);
 833     } else {
 834       assert(result->is_top() || !call->tf()->returns_inline_type_as_fields(), "Unexpected return value");
 835     }
 836     assert(buffer_oop == nullptr, "unused buffer allocation");
 837 
 838     kit.replace_call(call, result, true, do_asserts);
 839   }
 840 }
 841 
 842 class LateInlineStringCallGenerator : public LateInlineCallGenerator {
 843 
 844  public:
 845   LateInlineStringCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 846     LateInlineCallGenerator(method, inline_cg) {}
 847 
 848   virtual JVMState* generate(JVMState* jvms) {
 849     Compile *C = Compile::current();
 850 
 851     C->log_inline_id(this);
 852 
 853     C->add_string_late_inline(this);
 854 
 855     JVMState* new_jvms = DirectCallGenerator::generate(jvms);
 856     return new_jvms;
 857   }
 858 
 859   virtual bool is_string_late_inline() const { return true; }
 860 
 861   virtual CallGenerator* with_call_node(CallNode* call) {
 862     LateInlineStringCallGenerator* cg = new LateInlineStringCallGenerator(method(), _inline_cg);
 863     cg->set_call_node(call->as_CallStaticJava());
 864     return cg;
 865   }
 866 };
 867 
 868 CallGenerator* CallGenerator::for_string_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 869   return new LateInlineStringCallGenerator(method, inline_cg);
 870 }
 871 
 872 class LateInlineBoxingCallGenerator : public LateInlineCallGenerator {
 873 
 874  public:
 875   LateInlineBoxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 876     LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}
 877 
 878   virtual JVMState* generate(JVMState* jvms) {
 879     Compile *C = Compile::current();
 880 
 881     C->log_inline_id(this);
 882 
 883     C->add_boxing_late_inline(this);
 884 
 885     JVMState* new_jvms = DirectCallGenerator::generate(jvms);
 886     return new_jvms;
 887   }
 888 
 889   virtual bool is_boxing_late_inline() const { return true; }
 890 
 891   virtual CallGenerator* with_call_node(CallNode* call) {
 892     LateInlineBoxingCallGenerator* cg = new LateInlineBoxingCallGenerator(method(), _inline_cg);
 893     cg->set_call_node(call->as_CallStaticJava());
 894     return cg;
 895   }
 896 };
 897 
 898 CallGenerator* CallGenerator::for_boxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 899   return new LateInlineBoxingCallGenerator(method, inline_cg);
 900 }
 901 
 902 class LateInlineVectorReboxingCallGenerator : public LateInlineCallGenerator {
 903 
 904  public:
 905   LateInlineVectorReboxingCallGenerator(ciMethod* method, CallGenerator* inline_cg) :
 906     LateInlineCallGenerator(method, inline_cg, /*is_pure=*/true) {}
 907 
 908   virtual JVMState* generate(JVMState* jvms) {
 909     Compile *C = Compile::current();
 910 
 911     C->log_inline_id(this);
 912 
 913     C->add_vector_reboxing_late_inline(this);
 914 
 915     JVMState* new_jvms = DirectCallGenerator::generate(jvms);
 916     return new_jvms;
 917   }
 918 
 919   virtual bool is_vector_reboxing_late_inline() const { return true; }
 920 
 921   virtual CallGenerator* with_call_node(CallNode* call) {
 922     LateInlineVectorReboxingCallGenerator* cg = new LateInlineVectorReboxingCallGenerator(method(), _inline_cg);
 923     cg->set_call_node(call->as_CallStaticJava());
 924     return cg;
 925   }
 926 };
 927 
 929 CallGenerator* CallGenerator::for_vector_reboxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
 930   return new LateInlineVectorReboxingCallGenerator(method, inline_cg);
 931 }
 932 
 933 //------------------------PredictedCallGenerator------------------------------
 934 // Internal class which handles calls with a predicted receiver type. It emits a
     // receiver type check and dispatches to the if_hit or if_missed generator.
 935 class PredictedCallGenerator : public CallGenerator {
 936   ciKlass*       _predicted_receiver;
 937   CallGenerator* _if_missed;
 938   CallGenerator* _if_hit;
 939   float          _hit_prob;
 940   bool           _exact_check;
 941 
 942 public:
 943   PredictedCallGenerator(ciKlass* predicted_receiver,
 944                          CallGenerator* if_missed,
 945                          CallGenerator* if_hit, bool exact_check,
 946                          float hit_prob)
 947     : CallGenerator(if_missed->method())
 948   {
 949     // The call profile data may predict the hit_prob as extreme as 0 or 1.
 950     // Clamp such extreme values to the (PROB_MIN, PROB_MAX) range.
 951     if (hit_prob > PROB_MAX)   hit_prob = PROB_MAX;
 952     if (hit_prob < PROB_MIN)   hit_prob = PROB_MIN;
 953 
 954     _predicted_receiver = predicted_receiver;
 955     _if_missed          = if_missed;
 956     _if_hit             = if_hit;
 957     _hit_prob           = hit_prob;
 958     _exact_check        = exact_check;
 959   }
 960 
 961   virtual bool      is_virtual()   const    { return true; }
 962   virtual bool      is_inline()    const    { return _if_hit->is_inline(); }
 963   virtual bool      is_deferred()  const    { return _if_hit->is_deferred(); }
 964 
 965   virtual JVMState* generate(JVMState* jvms);
 966 };
 967 
 968 
 969 CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver,
 970                                                  CallGenerator* if_missed,
 971                                                  CallGenerator* if_hit,
 972                                                  float hit_prob) {
 973   return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit,
 974                                     /*exact_check=*/true, hit_prob);
 975 }
 976 
 977 CallGenerator* CallGenerator::for_guarded_call(ciKlass* guarded_receiver,
 978                                                CallGenerator* if_missed,
 979                                                CallGenerator* if_hit) {
 980   return new PredictedCallGenerator(guarded_receiver, if_missed, if_hit,
 981                                     /*exact_check=*/false, PROB_ALWAYS);
 982 }
 983 
 984 JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
 985   GraphKit kit(jvms);
 986   PhaseGVN& gvn = kit.gvn();
 987   // We need an explicit receiver null_check before checking its type.
 988   // We share a map with the caller, so his JVMS gets adjusted.
 989   Node* receiver = kit.argument(0);
 990   CompileLog* log = kit.C->log();
 991   if (log != nullptr) {
 992     log->elem("predicted_call bci='%d' exact='%d' klass='%d'",
 993               jvms->bci(), (_exact_check ? 1 : 0), log->identify(_predicted_receiver));
 994   }
 995 
 996   receiver = kit.null_check_receiver_before_call(method());
 997   if (kit.stopped()) {
 998     return kit.transfer_exceptions_into_jvms();
 999   }
1000 
1001   // Make a copy of the replaced nodes in case we need to restore them
1002   ReplacedNodes replaced_nodes = kit.map()->replaced_nodes();
1003   replaced_nodes.clone();
1004 
1005   Node* casted_receiver = receiver;  // will get updated in place...
1006   Node* slow_ctl = nullptr;
1007   if (_exact_check) {
1008     slow_ctl = kit.type_check_receiver(receiver, _predicted_receiver, _hit_prob,
1009                                        &casted_receiver);
1010   } else {
1011     slow_ctl = kit.subtype_check_receiver(receiver, _predicted_receiver,
1012                                           &casted_receiver);
1013   }
1014 
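       // slow_ctl is the control path on which the receiver failed the (sub)type check:
       // generate the if_missed call there. The fall-through path, where the check
       // succeeded, takes the if_hit call with the receiver cast to the predicted type.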
1015   SafePointNode* slow_map = nullptr;
1016   JVMState* slow_jvms = nullptr;
1017   { PreserveJVMState pjvms(&kit);
1018     kit.set_control(slow_ctl);
1019     if (!kit.stopped()) {
1020       slow_jvms = _if_missed->generate(kit.sync_jvms());
1021       if (kit.failing())
1022         return nullptr;  // might happen because of NodeCountInliningCutoff
1023       assert(slow_jvms != nullptr, "must be");
1024       kit.add_exception_states_from(slow_jvms);
1025       kit.set_map(slow_jvms->map());
1026       if (!kit.stopped())
1027         slow_map = kit.stop();
1028     }
1029   }
1030 
1031   if (kit.stopped()) {
1032     // Instance does not match the predicted type.
1033     kit.set_jvms(slow_jvms);
1034     return kit.transfer_exceptions_into_jvms();
1035   }
1036 
1037   // Fall through if the instance matches the desired type.
1038   kit.replace_in_map(receiver, casted_receiver);
1039 
1040   // Make the hot call:
1041   JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
1042   if (kit.failing()) {
1043     return nullptr;
1044   }
1045   if (new_jvms == nullptr) {
1046     // Inline failed, so make a direct call.
1047     assert(_if_hit->is_inline(), "must have been a failed inline");
1048     CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
1049     new_jvms = cg->generate(kit.sync_jvms());
1050   }
1051   kit.add_exception_states_from(new_jvms);
1052   kit.set_jvms(new_jvms);
1053 
1054   // Need to merge slow and fast?
1055   if (slow_map == nullptr) {
1056     // The fast path is the only path remaining.
1057     return kit.transfer_exceptions_into_jvms();
1058   }
1059 
1060   if (kit.stopped()) {
1061     // Inlined method threw an exception, so it's just the slow path after all.
1062     kit.set_jvms(slow_jvms);
1063     return kit.transfer_exceptions_into_jvms();
1064   }
1065 
1066   // Allocate inline types if they are merged with objects (similar to Parse::merge_common())
1067   uint tos = kit.jvms()->stkoff() + kit.sp();
1068   uint limit = slow_map->req();
1069   for (uint i = TypeFunc::Parms; i < limit; i++) {
1070     Node* m = kit.map()->in(i);
1071     Node* n = slow_map->in(i);
1072     const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
1073     // TODO 8284443 still needed?
1074     if (m->is_InlineType() && !t->is_inlinetypeptr()) {
1075       // Allocate inline type in fast path
1076       m = m->as_InlineType()->buffer(&kit);
1077       kit.map()->set_req(i, m);
1078     }
1079     if (n->is_InlineType() && !t->is_inlinetypeptr()) {
1080       // Allocate inline type in slow path
1081       PreserveJVMState pjvms(&kit);
1082       kit.set_map(slow_map);
1083       n = n->as_InlineType()->buffer(&kit);
1084       kit.map()->set_req(i, n);
1085       slow_map = kit.stop();
1086     }
1087   }
1088 
1089   // There are 2 branches and the replaced nodes are only valid on
1090   // one: restore the replaced nodes to what they were before the
1091   // branch.
1092   kit.map()->set_replaced_nodes(replaced_nodes);
1093 
1094   // Finish the diamond.
1095   kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
1096   RegionNode* region = new RegionNode(3);
1097   region->init_req(1, kit.control());
1098   region->init_req(2, slow_map->control());
1099   kit.set_control(gvn.transform(region));
1100   Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
1101   iophi->set_req(2, slow_map->i_o());
1102   kit.set_i_o(gvn.transform(iophi));
1103   // Merge memory
1104   kit.merge_memory(slow_map->merged_memory(), region, 2);
1105   // Transform new memory Phis.
1106   for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
1107     Node* phi = mms.memory();
1108     if (phi->is_Phi() && phi->in(0) == region) {
1109       mms.set_memory(gvn.transform(phi));
1110     }
1111   }
1112   for (uint i = TypeFunc::Parms; i < limit; i++) {
1113     // Skip unused stack slots; fast forward to monoff().
1114     if (i == tos) {
1115       i = kit.jvms()->monoff();
1116       if (i >= limit) break;
1117     }
1118     Node* m = kit.map()->in(i);
1119     Node* n = slow_map->in(i);
1120     if (m != n) {
1121       const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
1122       Node* phi = PhiNode::make(region, m, t);
1123       phi->set_req(2, n);
1124       kit.map()->set_req(i, gvn.transform(phi));
1125     }
1126   }
1127   return kit.transfer_exceptions_into_jvms();
1128 }
1129 
1130 
1131 CallGenerator* CallGenerator::for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline) {
1132   assert(callee->is_method_handle_intrinsic(), "for_method_handle_call mismatch");
1133   bool input_not_const;
1134   CallGenerator* cg = CallGenerator::for_method_handle_inline(jvms, caller, callee, allow_inline, input_not_const);
1135   Compile* C = Compile::current();
1136   bool should_delay = C->should_delay_inlining();
1137   if (cg != nullptr) {
1138     if (should_delay) {
1139       return CallGenerator::for_late_inline(callee, cg);
1140     } else {
1141       return cg;
1142     }
1143   }
1144   int bci = jvms->bci();
1145   ciCallProfile profile = caller->call_profile_at_bci(bci);
1146   int call_site_count = caller->scale_count(profile.count());
1147 
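       // Prefer a late-bound MH inline when the call site has actually been reached and
       // resolution is better done after parsing (for example, the MH/MemberName input
       // is not yet constant or inlining is being deferred); otherwise emit a plain
       // out-of-line direct call.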
1148   if (IncrementalInlineMH && (AlwaysIncrementalInline ||
1149                             (call_site_count > 0 && (should_delay || input_not_const || !C->inlining_incrementally() || C->over_inlining_cutoff())))) {
1150     return CallGenerator::for_mh_late_inline(caller, callee, input_not_const);
1151   } else {
1152     // Out-of-line call.
1153     return CallGenerator::for_direct_call(callee);
1154   }
1155 }
1156 
1157 
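     // Try to replace a method handle intrinsic call (invokeBasic or linkTo*) with a call
     // generator for its actual target. This only succeeds when the MethodHandle receiver
     // or the trailing MemberName argument is a compile-time constant; otherwise
     // input_not_const stays true and nullptr is returned.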
1158 CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool allow_inline, bool& input_not_const) {
1159   GraphKit kit(jvms);
1160   PhaseGVN& gvn = kit.gvn();
1161   Compile* C = kit.C;
1162   vmIntrinsics::ID iid = callee->intrinsic_id();
1163   input_not_const = true;
1164   if (StressMethodHandleLinkerInlining) {
1165     allow_inline = false;
1166   }
1167   switch (iid) {
1168   case vmIntrinsics::_invokeBasic:
1169     {
1170       // Get MethodHandle receiver:
1171       Node* receiver = kit.argument(0);
1172       if (receiver->Opcode() == Op_ConP) {
1173         input_not_const = false;
1174         const TypeOopPtr* recv_toop = receiver->bottom_type()->isa_oopptr();
1175         if (recv_toop != nullptr) {
1176           ciMethod* target = recv_toop->const_oop()->as_method_handle()->get_vmtarget();
1177           const int vtable_index = Method::invalid_vtable_index;
1178 
1179           if (!ciMethod::is_consistent_info(callee, target)) {
1180             print_inlining_failure(C, callee, jvms, "signatures mismatch");
1181             return nullptr;
1182           }
1183 
1184           CallGenerator *cg = C->call_generator(target, vtable_index,
1185                                                 false /* call_does_dispatch */,
1186                                                 jvms,
1187                                                 allow_inline,
1188                                                 PROB_ALWAYS);
1189           return cg;
1190         } else {
1191           assert(receiver->bottom_type() == TypePtr::NULL_PTR, "not a null: %s",
1192                  Type::str(receiver->bottom_type()));
1193           print_inlining_failure(C, callee, jvms, "receiver is always null");
1194         }
1195       } else {
1196         print_inlining_failure(C, callee, jvms, "receiver not constant");
1197       }
1198   } break;
1199 
1200   case vmIntrinsics::_linkToVirtual:
1201   case vmIntrinsics::_linkToStatic:
1202   case vmIntrinsics::_linkToSpecial:
1203   case vmIntrinsics::_linkToInterface:
1204     {
1205       int nargs = callee->arg_size();
1206       // Get MemberName argument:
1207       Node* member_name = kit.argument(nargs - 1);
1208       if (member_name->Opcode() == Op_ConP) {
1209         input_not_const = false;
1210         const TypeOopPtr* oop_ptr = member_name->bottom_type()->is_oopptr();
1211         ciMethod* target = oop_ptr->const_oop()->as_member_name()->get_vmtarget();
1212 
1213         if (!ciMethod::is_consistent_info(callee, target)) {
1214           print_inlining_failure(C, callee, jvms, "signatures mismatch");
1215           return nullptr;
1216         }
1217 
1218         // In lambda forms we erase signature types to avoid resolution issues
1219         // involving class loaders.  When we optimize a method handle invoke
1220         // to a direct call, we must cast the receiver and arguments to their
1221         // actual types.
1222         ciSignature* signature = target->signature();
1223         const int receiver_skip = target->is_static() ? 0 : 1;
1224         // Cast receiver to its type.
1225         if (!target->is_static()) {
1226           Node* recv = kit.argument(0);
1227           Node* casted_recv = kit.maybe_narrow_object_type(recv, signature->accessing_klass());
1228           if (casted_recv->is_top()) {
1229             print_inlining_failure(C, callee, jvms, "argument types mismatch");
1230             return nullptr; // FIXME: effectively dead; issue a halt node instead
1231           } else if (casted_recv != recv) {
1232             kit.set_argument(0, casted_recv);
1233           }
1234         }
1235         // Cast reference arguments to their types.
1236         for (int i = 0, j = 0; i < signature->count(); i++) {
1237           ciType* t = signature->type_at(i);
1238           if (t->is_klass()) {
1239             Node* arg = kit.argument(receiver_skip + j);
1240             Node* casted_arg = kit.maybe_narrow_object_type(arg, t->as_klass());
1241             if (casted_arg->is_top()) {
1242               print_inlining_failure(C, callee, jvms, "argument types mismatch");
1243               return nullptr; // FIXME: effectively dead; issue a halt node instead
1244             } else if (casted_arg != arg) {
1245               kit.set_argument(receiver_skip + j, casted_arg);
1246             }
1247           }
1248           j += t->size();  // long and double take two slots
1249         }
1250 
1251         // Try to get the most accurate receiver type
1252         const bool is_virtual              = (iid == vmIntrinsics::_linkToVirtual);
1253         const bool is_virtual_or_interface = (is_virtual || iid == vmIntrinsics::_linkToInterface);
1254         int  vtable_index       = Method::invalid_vtable_index;
1255         bool call_does_dispatch = false;
1256 
1257         ciKlass* speculative_receiver_type = nullptr;
1258         if (is_virtual_or_interface) {
1259           ciInstanceKlass* klass = target->holder();
1260           Node*             receiver_node = kit.argument(0);
1261           const TypeOopPtr* receiver_type = gvn.type(receiver_node)->isa_oopptr();
1262           // call_does_dispatch and vtable_index are out-parameters which
1263           // optimize_virtual_call() may update.  It takes two different holder
1264           // arguments only for a corner case that does not apply here
1265           // (see Parse::do_call()).
1266           target = C->optimize_virtual_call(caller, klass, klass,
1267                                             target, receiver_type, is_virtual,
1268                                             call_does_dispatch, vtable_index, // out-parameters
1269                                             false /* check_access */);
1270           // We lack profiling at this call, but type speculation may
1271           // provide us with a type.
1272           speculative_receiver_type = (receiver_type != nullptr) ? receiver_type->speculative_type() : nullptr;
1273         }
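             // Now that the target (and, for virtual calls, the dispatch
             // information) is known, let the regular machinery pick the best
             // call generator for it.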
1274         CallGenerator* cg = C->call_generator(target, vtable_index, call_does_dispatch, jvms,
1275                                               allow_inline,
1276                                               PROB_ALWAYS,
1277                                               speculative_receiver_type,
1278                                               true);
1279         return cg;
1280       } else {
1281         print_inlining_failure(C, callee, jvms, "member_name not constant");
1282       }
1283   } break;
1284 
1285   case vmIntrinsics::_linkToNative:
1286     print_inlining_failure(C, callee, jvms, "native call");
1287     break;
1288 
1289   default:
1290     fatal("unexpected intrinsic %d: %s", vmIntrinsics::as_int(iid), vmIntrinsics::name_at(iid));
1291     break;
1292   }
1293   return nullptr;
1294 }
1295 
1296 //------------------------PredicatedIntrinsicGenerator------------------------------
1297 // Internal class which handles all predicated intrinsic calls.
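     // _intrinsic produces the predicates and the fast (intrinsic) paths;
     // _cg produces the normal call that is taken when every predicate fails.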
1298 class PredicatedIntrinsicGenerator : public CallGenerator {
1299   CallGenerator* _intrinsic;
1300   CallGenerator* _cg;
1301 
1302 public:
1303   PredicatedIntrinsicGenerator(CallGenerator* intrinsic,
1304                                CallGenerator* cg)
1305     : CallGenerator(cg->method())
1306   {
1307     _intrinsic = intrinsic;
1308     _cg        = cg;
1309   }
1310 
1311   virtual bool      is_virtual()   const    { return true; }
1312   virtual bool      is_inline()    const    { return true; }
1313   virtual bool      is_intrinsic() const    { return true; }
1314 
1315   virtual JVMState* generate(JVMState* jvms);
1316 };
1317 
1318 
1319 CallGenerator* CallGenerator::for_predicated_intrinsic(CallGenerator* intrinsic,
1320                                                        CallGenerator* cg) {
1321   return new PredicatedIntrinsicGenerator(intrinsic, cg);
1322 }
1323 
1324 
1325 JVMState* PredicatedIntrinsicGenerator::generate(JVMState* jvms) {
1326   // The code we want to generate here is:
1327   //    if (receiver == nullptr)
1328   //        uncommon_trap
1329   //    if (predicate(0))
1330   //        do_intrinsic(0)
1331   //    else
1332   //    if (predicate(1))
1333   //        do_intrinsic(1)
1334   //    ...
1335   //    else
1336   //        do_java_call   (normal compilation path)
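       //
       // A typical user is the AES/CBC crypto intrinsic: its predicate checks that
       // the embedded cipher object really is an AESCrypt instance, do_intrinsic()
       // emits the stub call, and the final else falls back to the bytecode
       // implementation.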
1337 
1338   GraphKit kit(jvms);
1339   PhaseGVN& gvn = kit.gvn();
1340 
1341   CompileLog* log = kit.C->log();
1342   if (log != nullptr) {
1343     log->elem("predicated_intrinsic bci='%d' method='%d'",
1344               jvms->bci(), log->identify(method()));
1345   }
1346 
1347   if (!method()->is_static()) {
1348     // We need an explicit receiver null_check before checking its type in the predicate.
1349     // We share a map with the caller, so its JVMS gets adjusted.
1350     kit.null_check_receiver_before_call(method());
1351     if (kit.stopped()) {
1352       return kit.transfer_exceptions_into_jvms();
1353     }
1354   }
1355 
1356   int n_predicates = _intrinsic->predicates_count();
1357   assert(n_predicates > 0, "sanity");
1358 
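       // One result JVMState per intrinsic path, plus one slot for the slow
       // (normal compilation) path.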
1359   JVMState** result_jvms = NEW_RESOURCE_ARRAY(JVMState*, (n_predicates+1));
1360 
1361   // Region for normal compilation code if intrinsic failed.
1362   Node* slow_region = new RegionNode(1);
1363 
1364   int results = 0;
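       // Emit the predicates in order.  generate_predicate() returns the control
       // on which the predicate failed; that path becomes the entry control for
       // the next predicate or, after the last one, for the slow region.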
1365   for (int predicate = 0; (predicate < n_predicates) && !kit.stopped(); predicate++) {
1366 #ifdef ASSERT
1367     JVMState* old_jvms = kit.jvms();
1368     SafePointNode* old_map = kit.map();
1369     Node* old_io  = old_map->i_o();
1370     Node* old_mem = old_map->memory();
1371     Node* old_exc = old_map->next_exception();
1372 #endif
1373     Node* else_ctrl = _intrinsic->generate_predicate(kit.sync_jvms(), predicate);
1374 #ifdef ASSERT
1375     // Assert(no_new_memory && no_new_io && no_new_exceptions) after generate_predicate.
1376     assert(old_jvms == kit.jvms(), "generate_predicate should not change jvm state");
1377     SafePointNode* new_map = kit.map();
1378     assert(old_io  == new_map->i_o(), "generate_predicate should not change i_o");
1379     assert(old_mem == new_map->memory(), "generate_predicate should not change memory");
1380     assert(old_exc == new_map->next_exception(), "generate_predicate should not add exceptions");
1381 #endif
1382     if (!kit.stopped()) {
1383       PreserveJVMState pjvms(&kit);
1384       // Generate intrinsic code:
1385       JVMState* new_jvms = _intrinsic->generate(kit.sync_jvms());
1386       if (kit.failing()) {
1387         return nullptr;
1388       }
1389       if (new_jvms == nullptr) {
1390         // Intrinsic failed, use normal compilation path for this predicate.
1391         slow_region->add_req(kit.control());
1392       } else {
1393         kit.add_exception_states_from(new_jvms);
1394         kit.set_jvms(new_jvms);
1395         if (!kit.stopped()) {
1396           result_jvms[results++] = kit.jvms();
1397         }
1398       }
1399     }
1400     if (else_ctrl == nullptr) {
1401       else_ctrl = kit.C->top();
1402     }
1403     kit.set_control(else_ctrl);
1404   }
1405   if (!kit.stopped()) {
1406     // Final 'else' after predicates.
1407     slow_region->add_req(kit.control());
1408   }
1409   if (slow_region->req() > 1) {
1410     PreserveJVMState pjvms(&kit);
1411     // Generate normal compilation code:
1412     kit.set_control(gvn.transform(slow_region));
1413     JVMState* new_jvms = _cg->generate(kit.sync_jvms());
1414     if (kit.failing())
1415       return nullptr;  // might happen because of NodeCountInliningCutoff
1416     assert(new_jvms != nullptr, "must be");
1417     kit.add_exception_states_from(new_jvms);
1418     kit.set_jvms(new_jvms);
1419     if (!kit.stopped()) {
1420       result_jvms[results++] = kit.jvms();
1421     }
1422   }
1423 
1424   if (results == 0) {
1425     // All paths ended in uncommon traps.
1426     (void) kit.stop();
1427     return kit.transfer_exceptions_into_jvms();
1428   }
1429 
1430   if (results == 1) { // Only one path
1431     kit.set_jvms(result_jvms[0]);
1432     return kit.transfer_exceptions_into_jvms();
1433   }
1434 
1435   // Merge all paths.
1436   kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
1437   RegionNode* region = new RegionNode(results + 1);
1438   Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
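       // Adopt the first path's JVM state as the base and merge control, i/o and
       // memory from the remaining paths into it.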
1439   for (int i = 0; i < results; i++) {
1440     JVMState* jvms = result_jvms[i];
1441     int path = i + 1;
1442     SafePointNode* map = jvms->map();
1443     region->init_req(path, map->control());
1444     iophi->set_req(path, map->i_o());
1445     if (i == 0) {
1446       kit.set_jvms(jvms);
1447     } else {
1448       kit.merge_memory(map->merged_memory(), region, path);
1449     }
1450   }
1451   kit.set_control(gvn.transform(region));
1452   kit.set_i_o(gvn.transform(iophi));
1453   // Transform new memory Phis.
1454   for (MergeMemStream mms(kit.merged_memory()); mms.next_non_empty();) {
1455     Node* phi = mms.memory();
1456     if (phi->is_Phi() && phi->in(0) == region) {
1457       mms.set_memory(gvn.transform(phi));
1458     }
1459   }
1460 
1461   // Merge debug info.
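       // For each live local, stack slot and monitor, compare the value across
       // all merged paths and install a Phi where they differ; inputs that are
       // identical on every path are left untouched.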
1462   Node** ins = NEW_RESOURCE_ARRAY(Node*, results);
1463   uint tos = kit.jvms()->stkoff() + kit.sp();
1464   Node* map = kit.map();
1465   uint limit = map->req();
1466   for (uint i = TypeFunc::Parms; i < limit; i++) {
1467     // Skip unused stack slots; fast forward to monoff().
1468     if (i == tos) {
1469       i = kit.jvms()->monoff();
1470       if (i >= limit) break;
1471     }
1472     Node* n = map->in(i);
1473     ins[0] = n;
1474     const Type* t = gvn.type(n);
1475     bool needs_phi = false;
1476     for (int j = 1; j < results; j++) {
1477       JVMState* jvms = result_jvms[j];
1478       Node* jmap = jvms->map();
1479       Node* m = nullptr;
1480       if (jmap->req() > i) {
1481         m = jmap->in(i);
1482         if (m != n) {
1483           needs_phi = true;
1484           t = t->meet_speculative(gvn.type(m));
1485         }
1486       }
1487       ins[j] = m;
1488     }
1489     if (needs_phi) {
1490       Node* phi = PhiNode::make(region, n, t);
1491       for (int j = 1; j < results; j++) {
1492         phi->set_req(j + 1, ins[j]);
1493       }
1494       map->set_req(i, gvn.transform(phi));
1495     }
1496   }
1497 
1498   return kit.transfer_exceptions_into_jvms();
1499 }
1500 
1501 //-------------------------UncommonTrapCallGenerator-----------------------------
1502 // Internal class which replaces the call with an uncommon trap.
1503 class UncommonTrapCallGenerator : public CallGenerator {
1504   Deoptimization::DeoptReason _reason;
1505   Deoptimization::DeoptAction _action;
1506 
1507 public:
1508   UncommonTrapCallGenerator(ciMethod* m,
1509                             Deoptimization::DeoptReason reason,
1510                             Deoptimization::DeoptAction action)
1511     : CallGenerator(m)
1512   {
1513     _reason = reason;
1514     _action = action;
1515   }
1516 
1517   virtual bool      is_virtual() const          { ShouldNotReachHere(); return false; }
1518   virtual bool      is_trap() const             { return true; }
1519 
1520   virtual JVMState* generate(JVMState* jvms);
1521 };
1522 
1523 
1524 CallGenerator*
1525 CallGenerator::for_uncommon_trap(ciMethod* m,
1526                                  Deoptimization::DeoptReason reason,
1527                                  Deoptimization::DeoptAction action) {
1528   return new UncommonTrapCallGenerator(m, reason, action);
1529 }
1530 
1531 
1532 JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
1533   GraphKit kit(jvms);
1534   // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
1535   // The call site signature can differ from that of the method actually being called (e.g. at _linkTo* sites).
1536   // Always use the call site signature.
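       // Pushing the arguments restores the interpreter's view of the expression
       // stack at this bci, so the debug info captured by the trap describes a
       // state from which the call can be re-executed.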
1537   ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
1538   int nargs = declared_method->arg_size();
1539   kit.inc_sp(nargs);
1540   assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
1541   if (_reason == Deoptimization::Reason_class_check &&
1542       _action == Deoptimization::Action_maybe_recompile) {
1543     // Temp fix for 6529811
1544     // Don't allow uncommon_trap to override our decision to recompile in the event
1545     // of a class cast failure for a monomorphic call, as it would never let us convert
1546     // the call to either bi-morphic or megamorphic and can lead to uncommon-trap loops.
1547     bool keep_exact_action = true;
1548     kit.uncommon_trap(_reason, _action, nullptr, "monomorphic vcall checkcast", false, keep_exact_action);
1549   } else {
1550     kit.uncommon_trap(_reason, _action);
1551   }
1552   return kit.transfer_exceptions_into_jvms();
1553 }
1554 
1555 // (Note:  Moved hook_up_call to GraphKit::set_edges_for_java_call.)
1556 
1557 // (Note:  Merged hook_up_exits into ParseGenerator::generate.)