1 /*
   2  * Copyright (c) 2010, 2025, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "cds/aotLinkedClassBulkLoader.hpp"
  26 #include "code/aotCodeCache.hpp"
  27 #include "code/scopeDesc.hpp"
  28 #include "compiler/compilationPolicy.hpp"
  29 #include "compiler/compileBroker.hpp"
  30 #include "compiler/compilerDefinitions.inline.hpp"
  31 #include "compiler/compilerOracle.hpp"
  32 #include "compiler/recompilationPolicy.hpp"
  33 #include "memory/resourceArea.hpp"
  34 #include "oops/method.inline.hpp"
  35 #include "oops/methodData.hpp"
  36 #include "oops/oop.inline.hpp"
  37 #include "oops/trainingData.hpp"
  38 #include "prims/jvmtiExport.hpp"
  39 #include "runtime/arguments.hpp"
  40 #include "runtime/deoptimization.hpp"
  41 #include "runtime/frame.hpp"
  42 #include "runtime/frame.inline.hpp"
  43 #include "runtime/globals_extension.hpp"
  44 #include "runtime/handles.inline.hpp"
  45 #include "runtime/safepoint.hpp"
  46 #include "runtime/safepointVerifiers.hpp"
  47 #ifdef COMPILER1
  48 #include "c1/c1_Compiler.hpp"
  49 #endif
  50 #ifdef COMPILER2
  51 #include "opto/c2compiler.hpp"
  52 #endif
  53 #if INCLUDE_JVMCI
  54 #include "jvmci/jvmci.hpp"
  55 #endif
  56 
  57 int64_t CompilationPolicy::_start_time = 0;
  58 int CompilationPolicy::_c1_count = 0;
  59 int CompilationPolicy::_c2_count = 0;
  60 int CompilationPolicy::_c3_count = 0;
  61 int CompilationPolicy::_ac_count = 0;
  62 double CompilationPolicy::_increase_threshold_at_ratio = 0;
  63 
  64 CompilationPolicy::TrainingReplayQueue CompilationPolicy::_training_replay_queue;
  65 
  66 void compilationPolicy_init() {
  67   CompilationPolicy::initialize();
  68 }
  69 
  70 int CompilationPolicy::compiler_count(CompLevel comp_level) {
  71   if (is_c1_compile(comp_level)) {
  72     return c1_count();
  73   } else if (is_c2_compile(comp_level)) {
  74     return c2_count();
  75   }
  76   return 0;
  77 }
  78 
  79 // Returns true if m must be compiled before executing it
  80 // This is intended to force compiles for methods (usually for
  81 // debugging) that would otherwise be interpreted for some reason.
  82 bool CompilationPolicy::must_be_compiled(const methodHandle& m, int comp_level) {
  83   // Don't allow Xcomp to cause compiles in replay mode
  84   if (ReplayCompiles) return false;
  85 
  86   if (m->has_compiled_code()) return false;       // already compiled
  87   if (!can_be_compiled(m, comp_level)) return false;
  88 
  89   return !UseInterpreter ||                                                                        // must compile all methods
  90          (AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods
  91 }
  92 
  93 void CompilationPolicy::maybe_compile_early(const methodHandle& m, TRAPS) {
  94   if (m->method_holder()->is_not_initialized()) {
  95     // 'is_not_initialized' means not only '!is_initialized', but also that
    // initialization has not been started yet ('!being_initialized').
  97     // Do not force compilation of methods in uninitialized classes.
  98     return;
  99   }
 100   if (!m->is_native() && MethodTrainingData::have_data()) {
 101     MethodTrainingData* mtd = MethodTrainingData::find_fast(m);
 102     if (mtd == nullptr) {
 103       return;              // there is no training data recorded for m
 104     }
 105     bool recompile = m->code_has_clinit_barriers();
 106     CompLevel cur_level = static_cast<CompLevel>(m->highest_comp_level());
 107     CompLevel next_level = trained_transition(m, cur_level, mtd, THREAD);
 108     if ((next_level != cur_level || recompile) && can_be_compiled(m, next_level) && !CompileBroker::compilation_is_in_queue(m)) {
 109       bool requires_online_compilation = false;
 110       CompileTrainingData* ctd = mtd->last_toplevel_compile(next_level);
 111       if (ctd != nullptr) {
 112         requires_online_compilation = (ctd->init_deps_left() > 0);
 113       }
 114       if (requires_online_compilation && recompile) {
 115         return;
 116       }
 117       if (PrintTieredEvents) {
 118         print_event(FORCE_COMPILE, m(), m(), InvocationEntryBci, next_level);
 119       }
 120       CompileBroker::compile_method(m, InvocationEntryBci, next_level, methodHandle(), 0, requires_online_compilation, CompileTask::Reason_MustBeCompiled, THREAD);
 121       if (HAS_PENDING_EXCEPTION) {
 122         CLEAR_PENDING_EXCEPTION;
 123       }
 124     }
 125   }
 126 }
 127 
 128 void CompilationPolicy::maybe_compile_early_after_init(const methodHandle& m, TRAPS) {
 129   assert(m->method_holder()->is_initialized(), "Should be called after class initialization");
 130   maybe_compile_early(m, THREAD);
 131 }
 132 
 133 void CompilationPolicy::compile_if_required(const methodHandle& m, TRAPS) {
 134   if (!THREAD->can_call_java() || THREAD->is_Compiler_thread()) {
 135     // don't force compilation, resolve was on behalf of compiler
 136     return;
 137   }
 138   if (m->method_holder()->is_not_initialized()) {
 139     // 'is_not_initialized' means not only '!is_initialized', but also that
    // initialization has not been started yet ('!being_initialized').
 141     // Do not force compilation of methods in uninitialized classes.
 142     // Note that doing this would throw an assert later,
 143     // in CompileBroker::compile_method.
 144     // We sometimes use the link resolver to do reflective lookups
 145     // even before classes are initialized.
 146     return;
 147   }
 148 
 149   if (must_be_compiled(m)) {
 150     // This path is unusual, mostly used by the '-Xcomp' stress test mode.
 151     CompLevel level = initial_compile_level(m);
 152     if (PrintTieredEvents) {
 153       print_event(FORCE_COMPILE, m(), m(), InvocationEntryBci, level);
 154     }
 155     CompileBroker::compile_method(m, InvocationEntryBci, level, methodHandle(), 0, false, CompileTask::Reason_MustBeCompiled, THREAD);
 156   }
 157 }
 158 
 159 void CompilationPolicy::replay_training_at_init_impl(InstanceKlass* klass, TRAPS) {
 160   if (!klass->has_init_deps_processed()) {
 161     ResourceMark rm;
 162     log_debug(training)("Replay training: %s", klass->external_name());
 163 
 164     KlassTrainingData* ktd = KlassTrainingData::find(klass);
 165     if (ktd != nullptr) {
 166       guarantee(ktd->has_holder(), "");
 167       ktd->notice_fully_initialized(); // sets klass->has_init_deps_processed bit
 168       assert(klass->has_init_deps_processed(), "");
 169 
 170       if (AOTCompileEagerly) {
 171         ktd->iterate_comp_deps([&](CompileTrainingData* ctd) {
 172           if (ctd->init_deps_left() == 0) {
 173             MethodTrainingData* mtd = ctd->method();
 174             if (mtd->has_holder()) {
 175               const methodHandle mh(THREAD, const_cast<Method*>(mtd->holder()));
 176               CompilationPolicy::maybe_compile_early(mh, THREAD);
 177             }
 178           }
 179         });
 180       }
 181     }
 182   }
 183 }
 184 
 185 void CompilationPolicy::flush_replay_training_at_init(TRAPS) {
  MonitorLocker locker(THREAD, TrainingReplayQueue_lock);
  while (!_training_replay_queue.is_empty_unlocked()) {
    locker.wait(); // let the replay training thread drain the queue
  }
 190 }
 191 
 192 void CompilationPolicy::replay_training_at_init(InstanceKlass* klass, TRAPS) {
 193   assert(klass->is_initialized(), "");
 194   if (TrainingData::have_data() && klass->is_shared()) {
 195     _training_replay_queue.push(klass, TrainingReplayQueue_lock, THREAD);
 196   }
 197 }
 198 
 199 // For TrainingReplayQueue
 200 template<>
 201 void CompilationPolicyUtils::Queue<InstanceKlass>::print_on(outputStream* st) {
 202   int pos = 0;
 203   for (QueueNode* cur = _head; cur != nullptr; cur = cur->next()) {
 204     ResourceMark rm;
 205     InstanceKlass* ik = cur->value();
 206     st->print_cr("%3d: " INTPTR_FORMAT " %s", ++pos, p2i(ik), ik->external_name());
 207   }
 208 }
 209 
 210 void CompilationPolicy::replay_training_at_init_loop(TRAPS) {
 211   while (!CompileBroker::is_compilation_disabled_forever() || AOTVerifyTrainingData) {
 212     InstanceKlass* ik = _training_replay_queue.pop(TrainingReplayQueue_lock, THREAD);
 213     replay_training_at_init_impl(ik, THREAD);
 214   }
 215 }
 216 
 217 static inline CompLevel adjust_level_for_compilability_query(CompLevel comp_level) {
 218   if (comp_level == CompLevel_any) {
 219      if (CompilerConfig::is_c1_only()) {
 220        comp_level = CompLevel_simple;
 221      } else if (CompilerConfig::is_c2_or_jvmci_compiler_only()) {
 222        comp_level = CompLevel_full_optimization;
 223      }
 224   }
 225   return comp_level;
 226 }
 227 
 228 // Returns true if m is allowed to be compiled
 229 bool CompilationPolicy::can_be_compiled(const methodHandle& m, int comp_level) {
 230   // allow any levels for WhiteBox
 231   assert(WhiteBoxAPI || comp_level == CompLevel_any || is_compile(comp_level), "illegal compilation level %d", comp_level);
 232 
 233   if (m->is_abstract()) return false;
 234   if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false;
 235 
 236   // Math intrinsics should never be compiled as this can lead to
 237   // monotonicity problems because the interpreter will prefer the
 238   // compiled code to the intrinsic version.  This can't happen in
 239   // production because the invocation counter can't be incremented
 240   // but we shouldn't expose the system to this problem in testing
 241   // modes.
 242   if (!AbstractInterpreter::can_be_compiled(m)) {
 243     return false;
 244   }
 245   comp_level = adjust_level_for_compilability_query((CompLevel) comp_level);
 246   if (comp_level == CompLevel_any || is_compile(comp_level)) {
 247     return !m->is_not_compilable(comp_level);
 248   }
 249   return false;
 250 }
 251 
 252 // Returns true if m is allowed to be osr compiled
 253 bool CompilationPolicy::can_be_osr_compiled(const methodHandle& m, int comp_level) {
 254   bool result = false;
 255   comp_level = adjust_level_for_compilability_query((CompLevel) comp_level);
 256   if (comp_level == CompLevel_any || is_compile(comp_level)) {
 257     result = !m->is_not_osr_compilable(comp_level);
 258   }
 259   return (result && can_be_compiled(m, comp_level));
 260 }
 261 
 262 bool CompilationPolicy::is_compilation_enabled() {
 263   // NOTE: CompileBroker::should_compile_new_jobs() checks for UseCompiler
 264   return CompileBroker::should_compile_new_jobs();
 265 }
 266 
 267 CompileTask* CompilationPolicy::select_task_helper(CompileQueue* compile_queue) {
 268   // Remove unloaded methods from the queue
 269   for (CompileTask* task = compile_queue->first(); task != nullptr; ) {
 270     CompileTask* next = task->next();
 271     if (task->is_unloaded()) {
 272       compile_queue->remove_and_mark_stale(task);
 273     }
 274     task = next;
 275   }
 276 #if INCLUDE_JVMCI
 277   if (UseJVMCICompiler && !BackgroundCompilation) {
 278     /*
 279      * In blocking compilation mode, the CompileBroker will make
 280      * compilations submitted by a JVMCI compiler thread non-blocking. These
 281      * compilations should be scheduled after all blocking compilations
 282      * to service non-compiler related compilations sooner and reduce the
 283      * chance of such compilations timing out.
 284      */
 285     for (CompileTask* task = compile_queue->first(); task != nullptr; task = task->next()) {
 286       if (task->is_blocking()) {
 287         return task;
 288       }
 289     }
 290   }
 291 #endif
 292   return compile_queue->first();
 293 }
 294 
// Simple methods are compiled with C1 just as effectively as with C2.
// Determine if a given method is such a case.
 297 bool CompilationPolicy::is_trivial(const methodHandle& method) {
 298   if (method->is_accessor() ||
 299       method->is_constant_getter()) {
 300     return true;
 301   }
 302   return false;
 303 }
 304 
 305 bool CompilationPolicy::force_comp_at_level_simple(const methodHandle& method) {
 306   if (CompilationModeFlag::quick_internal()) {
 307 #if INCLUDE_JVMCI
 308     if (UseJVMCICompiler) {
 309       AbstractCompiler* comp = CompileBroker::compiler(CompLevel_full_optimization);
 310       if (comp != nullptr && comp->is_jvmci() && ((JVMCICompiler*) comp)->force_comp_at_level_simple(method)) {
 311         return !AOTCodeCache::is_C3_on();
 312       }
 313     }
 314 #endif
 315   }
 316   return false;
 317 }
 318 
 319 CompLevel CompilationPolicy::comp_level(Method* method) {
 320   nmethod *nm = method->code();
 321   if (nm != nullptr && nm->is_in_use()) {
 322     return (CompLevel)nm->comp_level();
 323   }
 324   return CompLevel_none;
 325 }
 326 
 327 // Call and loop predicates determine whether a transition to a higher
 328 // compilation level should be performed (pointers to predicate functions
 329 // are passed to common()).
// Tier?LoadFeedback is basically a coefficient that determines how many
// methods per compiler thread can be in the queue before the threshold
// values double.
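// Illustrative example (numbers are not from this file): with a load feedback
// coefficient of 5, two compiler threads for the level, and 20 methods in that
// level's queue, threshold_scale() below yields 20 / (5 * 2) + 1 = 3, i.e. the
// invocation/back-edge thresholds effectively triple under that load.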
 333 class LoopPredicate : AllStatic {
 334 public:
 335   static bool apply_scaled(const methodHandle& method, CompLevel cur_level, int i, int b, double scale) {
 336     double threshold_scaling;
 337     if (CompilerOracle::has_option_value(method, CompileCommandEnum::CompileThresholdScaling, threshold_scaling)) {
 338       scale *= threshold_scaling;
 339     }
 340     switch(cur_level) {
 341     case CompLevel_none:
 342     case CompLevel_limited_profile:
 343       return b >= Tier3BackEdgeThreshold * scale;
 344     case CompLevel_full_profile:
 345       return b >= Tier4BackEdgeThreshold * scale;
 346     default:
 347       return true;
 348     }
 349   }
 350 
 351   static bool apply(const methodHandle& method, CompLevel cur_level, int i, int b) {
 352     double k = 1;
 353     switch(cur_level) {
 354     case CompLevel_none:
 355     // Fall through
 356     case CompLevel_limited_profile: {
 357       k = CompilationPolicy::threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
 358       break;
 359     }
 360     case CompLevel_full_profile: {
 361       k = CompilationPolicy::threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
 362       break;
 363     }
 364     default:
 365       return true;
 366     }
 367     return apply_scaled(method, cur_level, i, b, k);
 368   }
 369 };
 370 
 371 class CallPredicate : AllStatic {
 372 public:
 373   static bool apply_scaled(const methodHandle& method, CompLevel cur_level, int i, int b, double scale) {
 374     double threshold_scaling;
 375     if (CompilerOracle::has_option_value(method, CompileCommandEnum::CompileThresholdScaling, threshold_scaling)) {
 376       scale *= threshold_scaling;
 377     }
 378     switch(cur_level) {
 379     case CompLevel_none:
 380     case CompLevel_limited_profile:
 381       return (i >= Tier3InvocationThreshold * scale) ||
 382              (i >= Tier3MinInvocationThreshold * scale && i + b >= Tier3CompileThreshold * scale);
 383     case CompLevel_full_profile:
 384       return (i >= Tier4InvocationThreshold * scale) ||
 385              (i >= Tier4MinInvocationThreshold * scale && i + b >= Tier4CompileThreshold * scale);
 386     default:
 387      return true;
 388     }
 389   }
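  // Illustrative reading of the level 0/2 case above, assuming the usual defaults
  // (Tier3InvocationThreshold = 200, Tier3MinInvocationThreshold = 100,
  // Tier3CompileThreshold = 2000) and scale = 1: promote after 200 invocations, or
  // after 100 invocations once invocations plus back edges together reach 2000.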
 390 
 391   static bool apply(const methodHandle& method, CompLevel cur_level, int i, int b) {
 392     double k = 1;
 393     switch(cur_level) {
 394     case CompLevel_none:
 395     case CompLevel_limited_profile: {
 396       k = CompilationPolicy::threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
 397       break;
 398     }
 399     case CompLevel_full_profile: {
 400       k = CompilationPolicy::threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
 401       break;
 402     }
 403     default:
 404       return true;
 405     }
 406     return apply_scaled(method, cur_level, i, b, k);
 407   }
 408 };
 409 
 410 double CompilationPolicy::threshold_scale(CompLevel level, int feedback_k) {
 411   int comp_count = compiler_count(level);
 412   if (comp_count > 0) {
 413     double queue_size = CompileBroker::queue_size(level);
 414     double k = (double)queue_size / ((double)feedback_k * (double)comp_count) + 1;
 415 
 416     // Increase C1 compile threshold when the code cache is filled more
 417     // than specified by IncreaseFirstTierCompileThresholdAt percentage.
 418     // The main intention is to keep enough free space for C2 compiled code
 419     // to achieve peak performance if the code cache is under stress.
 420     if (CompilerConfig::is_tiered() && !CompilationModeFlag::disable_intermediate() && is_c1_compile(level))  {
 421       double current_reverse_free_ratio = CodeCache::reverse_free_ratio();
 422       if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
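        // The penalty grows exponentially with code cache pressure: exceeding the trigger
        // ratio by 1.0 multiplies the threshold scale by e (about 2.72).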
 423         k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
 424       }
 425     }
 426     return k;
 427   }
 428   return 1;
 429 }
 430 
 431 void CompilationPolicy::print_counters(const char* prefix, Method* m) {
 432   int invocation_count = m->invocation_count();
 433   int backedge_count = m->backedge_count();
 434   MethodData* mdh = m->method_data();
 435   int mdo_invocations = 0, mdo_backedges = 0;
 436   int mdo_invocations_start = 0, mdo_backedges_start = 0;
 437   if (mdh != nullptr) {
 438     mdo_invocations = mdh->invocation_count();
 439     mdo_backedges = mdh->backedge_count();
 440     mdo_invocations_start = mdh->invocation_count_start();
 441     mdo_backedges_start = mdh->backedge_count_start();
 442   }
 443   tty->print(" %stotal=%d,%d %smdo=%d(%d),%d(%d)", prefix,
 444       invocation_count, backedge_count, prefix,
 445       mdo_invocations, mdo_invocations_start,
 446       mdo_backedges, mdo_backedges_start);
 447   tty->print(" %smax levels=%d,%d", prefix,
 448       m->highest_comp_level(), m->highest_osr_comp_level());
 449 }
 450 
 451 void CompilationPolicy::print_training_data(const char* prefix, Method* method) {
 452   methodHandle m(Thread::current(), method);
 453   tty->print(" %smtd: ", prefix);
 454   MethodTrainingData* mtd = MethodTrainingData::find(m);
 455   if (mtd == nullptr) {
 456     tty->print("null");
 457   } else {
 458     MethodData* md = mtd->final_profile();
 459     tty->print("mdo=");
 460     if (md == nullptr) {
 461       tty->print("null");
 462     } else {
 463       int mdo_invocations = md->invocation_count();
 464       int mdo_backedges = md->backedge_count();
 465       int mdo_invocations_start = md->invocation_count_start();
 466       int mdo_backedges_start = md->backedge_count_start();
 467       tty->print("%d(%d), %d(%d)", mdo_invocations, mdo_invocations_start, mdo_backedges, mdo_backedges_start);
 468     }
 469     CompileTrainingData* ctd = mtd->last_toplevel_compile(CompLevel_full_optimization);
 470     tty->print(", deps=");
 471     if (ctd == nullptr) {
 472       tty->print("null");
 473     } else {
 474       tty->print("%d", ctd->init_deps_left());
 475     }
 476   }
 477 }
 478 
 479 // Print an event.
 480 void CompilationPolicy::print_event(EventType type, Method* m, Method* im, int bci, CompLevel level) {
 481   bool inlinee_event = m != im;
 482 
 483   ttyLocker tty_lock;
 484   tty->print("%lf: [", os::elapsedTime());
 485 
 486   switch(type) {
 487   case CALL:
 488     tty->print("call");
 489     break;
 490   case LOOP:
 491     tty->print("loop");
 492     break;
 493   case COMPILE:
 494     tty->print("compile");
 495     break;
 496   case FORCE_COMPILE:
 497     tty->print("force-compile");
 498     break;
 499   case FORCE_RECOMPILE:
 500     tty->print("force-recompile");
 501     break;
 502   case REMOVE_FROM_QUEUE:
 503     tty->print("remove-from-queue");
 504     break;
 505   case UPDATE_IN_QUEUE:
 506     tty->print("update-in-queue");
 507     break;
 508   case REPROFILE:
 509     tty->print("reprofile");
 510     break;
 511   case MAKE_NOT_ENTRANT:
 512     tty->print("make-not-entrant");
 513     break;
 514   default:
 515     tty->print("unknown");
 516   }
 517 
 518   tty->print(" level=%d ", level);
 519 
 520   ResourceMark rm;
 521   char *method_name = m->name_and_sig_as_C_string();
 522   tty->print("[%s", method_name);
 523   if (inlinee_event) {
 524     char *inlinee_name = im->name_and_sig_as_C_string();
 525     tty->print(" [%s]] ", inlinee_name);
 526   }
 527   else tty->print("] ");
 528   tty->print("@%d queues=%d,%d", bci, CompileBroker::queue_size(CompLevel_full_profile),
 529                                       CompileBroker::queue_size(CompLevel_full_optimization));
 530 
 531   tty->print(" rate=");
 532   if (m->prev_time() == 0) tty->print("n/a");
 533   else tty->print("%f", m->rate());
 534 
 535   RecompilationPolicy::print_load_average();
 536 
 537   tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
 538                                threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));
 539 
 540   if (type != COMPILE) {
 541     print_counters("", m);
 542     if (inlinee_event) {
 543       print_counters("inlinee ", im);
 544     }
 545     tty->print(" compilable=");
 546     bool need_comma = false;
 547     if (!m->is_not_compilable(CompLevel_full_profile)) {
 548       tty->print("c1");
 549       need_comma = true;
 550     }
 551     if (!m->is_not_osr_compilable(CompLevel_full_profile)) {
 552       if (need_comma) tty->print(",");
 553       tty->print("c1-osr");
 554       need_comma = true;
 555     }
 556     if (!m->is_not_compilable(CompLevel_full_optimization)) {
 557       if (need_comma) tty->print(",");
 558       tty->print("c2");
 559       need_comma = true;
 560     }
 561     if (!m->is_not_osr_compilable(CompLevel_full_optimization)) {
 562       if (need_comma) tty->print(",");
 563       tty->print("c2-osr");
 564     }
 565     tty->print(" status=");
 566     if (m->queued_for_compilation()) {
 567       tty->print("in-queue");
 568     } else tty->print("idle");
 569     print_training_data("", m);
 570     if (inlinee_event) {
 571       print_training_data("inlinee ", im);
 572     }
 573   }
 574   tty->print_cr("]");
 575 }
 576 
 577 void CompilationPolicy::initialize() {
 578   if (!CompilerConfig::is_interpreter_only()) {
 579     if (StoreCachedCode) {
 580       // Assembly phase runs C1 and C2 compilation in separate phases,
 581       // and can use all the CPU threads it can reach. Adjust the common
 582       // options before policy starts overwriting them. There is a block
 583       // at the very end that overrides final thread counts.
 584       if (FLAG_IS_DEFAULT(UseDynamicNumberOfCompilerThreads)) {
 585         FLAG_SET_ERGO(UseDynamicNumberOfCompilerThreads, false);
 586       }
 587       if (FLAG_IS_DEFAULT(CICompilerCount)) {
 588         FLAG_SET_ERGO(CICompilerCount, MAX2(2, os::active_processor_count()));
 589       }
 590     }
 591     int count = CICompilerCount;
 592     bool c1_only = CompilerConfig::is_c1_only();
 593     bool c2_only = CompilerConfig::is_c2_or_jvmci_compiler_only();
 594 
 595 #ifdef _LP64
 596     // Turn on ergonomic compiler count selection
 597     if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) {
 598       FLAG_SET_DEFAULT(CICompilerCountPerCPU, true);
 599     }
 600     if (CICompilerCountPerCPU) {
 601       // Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n
 602       int log_cpu = log2i(os::active_processor_count());
 603       int loglog_cpu = log2i(MAX2(log_cpu, 1));
 604       count = MAX2(log_cpu * loglog_cpu * 3 / 2, 2);
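      // Illustrative example (not from the source): on a 64-CPU machine log_cpu = 6 and
      // loglog_cpu = 2, giving count = MAX2(6 * 2 * 3 / 2, 2) = 18 before the code cache
      // capacity check below possibly lowers it.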
 605       // Make sure there is enough space in the code cache to hold all the compiler buffers
 606       size_t c1_size = 0;
 607 #ifdef COMPILER1
 608       c1_size = Compiler::code_buffer_size();
 609 #endif
 610       size_t c2_size = 0;
 611 #ifdef COMPILER2
 612       c2_size = C2Compiler::initial_code_buffer_size();
 613 #endif
 614       size_t buffer_size = c1_only ? c1_size : (c1_size/3 + 2*c2_size/3);
 615       int max_count = (ReservedCodeCacheSize - (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3))) / (int)buffer_size;
 616       if (count > max_count) {
 617         // Lower the compiler count such that all buffers fit into the code cache
 618         count = MAX2(max_count, c1_only ? 1 : 2);
 619       }
 620       FLAG_SET_ERGO(CICompilerCount, count);
 621     }
 622 #else
 623     // On 32-bit systems, the number of compiler threads is limited to 3.
 624     // On these systems, the virtual address space available to the JVM
 625     // is usually limited to 2-4 GB (the exact value depends on the platform).
 626     // As the compilers (especially C2) can consume a large amount of
 627     // memory, scaling the number of compiler threads with the number of
 628     // available cores can result in the exhaustion of the address space
    // available to the VM and thus cause the VM to crash.
 630     if (FLAG_IS_DEFAULT(CICompilerCount)) {
 631       count = 3;
 632       FLAG_SET_ERGO(CICompilerCount, count);
 633     }
 634 #endif
 635 
 636     if (c1_only) {
 637       // No C2 compiler thread required
 638       set_c1_count(count);
 639     } else if (c2_only) {
 640       set_c2_count(count);
 641     } else {
 642 #if INCLUDE_JVMCI
 643       if (UseJVMCICompiler && UseJVMCINativeLibrary) {
 644         int libjvmci_count = MAX2((int) (count * JVMCINativeLibraryThreadFraction), 1);
 645         int c1_count = MAX2(count - libjvmci_count, 1);
 646         set_c2_count(libjvmci_count);
 647         set_c1_count(c1_count);
 648       } else if (AOTCodeCache::is_C3_on()) {
 649         set_c1_count(MAX2(count / 3, 1));
 650         set_c2_count(MAX2(count - c1_count(), 1));
 651         set_c3_count(1);
 652       } else
 653 #endif
 654       {
 655         set_c1_count(MAX2(count / 3, 1));
 656         set_c2_count(MAX2(count - c1_count(), 1));
 657       }
 658     }
 659     if (StoreCachedCode) {
 660       set_c1_count(count);
 661       set_c2_count(count);
 662       count *= 2; // satisfy the assert below
 663     }
 664     if (AOTCodeCache::is_code_load_thread_on()) {
 665       set_ac_count((c1_only || c2_only) ? 1 : 2); // At minimum we need 2 threads to load C1 and C2 cached code in parallel
 666     }
 667     assert(count == c1_count() + c2_count(), "inconsistent compiler thread count");
 668     set_increase_threshold_at_ratio();
 669   }
 670 
 671   set_start_time(nanos_to_millis(os::javaTimeNanos()));
 672 }
 673 
 674 
 675 
 676 
 677 #ifdef ASSERT
 678 bool CompilationPolicy::verify_level(CompLevel level) {
 679   if (TieredCompilation && level > TieredStopAtLevel) {
 680     return false;
 681   }
 682   // Check if there is a compiler to process the requested level
 683   if (!CompilerConfig::is_c1_enabled() && is_c1_compile(level)) {
 684     return false;
 685   }
 686   if (!CompilerConfig::is_c2_or_jvmci_compiler_enabled() && is_c2_compile(level)) {
 687     return false;
 688   }
 689 
 690   // Interpreter level is always valid.
 691   if (level == CompLevel_none) {
 692     return true;
 693   }
 694   if (CompilationModeFlag::normal()) {
 695     return true;
 696   } else if (CompilationModeFlag::quick_only()) {
 697     return level == CompLevel_simple;
 698   } else if (CompilationModeFlag::high_only()) {
 699     return level == CompLevel_full_optimization;
 700   } else if (CompilationModeFlag::high_only_quick_internal()) {
 701     return level == CompLevel_full_optimization || level == CompLevel_simple;
 702   }
 703   return false;
 704 }
 705 #endif
 706 
 707 
 708 CompLevel CompilationPolicy::highest_compile_level() {
 709   CompLevel level = CompLevel_none;
 710   // Setup the maximum level available for the current compiler configuration.
 711   if (!CompilerConfig::is_interpreter_only()) {
 712     if (CompilerConfig::is_c2_or_jvmci_compiler_enabled()) {
 713       level = CompLevel_full_optimization;
 714     } else if (CompilerConfig::is_c1_enabled()) {
 715       if (CompilerConfig::is_c1_simple_only()) {
 716         level = CompLevel_simple;
 717       } else {
 718         level = CompLevel_full_profile;
 719       }
 720     }
 721   }
 722   // Clamp the maximum level with TieredStopAtLevel.
 723   if (TieredCompilation) {
 724     level = MIN2(level, (CompLevel) TieredStopAtLevel);
 725   }
 726 
 727   // Fix it up if after the clamping it has become invalid.
 728   // Bring it monotonically down depending on the next available level for
 729   // the compilation mode.
 730   if (!CompilationModeFlag::normal()) {
 731     // a) quick_only - levels 2,3,4 are invalid; levels -1,0,1 are valid;
 732     // b) high_only - levels 1,2,3 are invalid; levels -1,0,4 are valid;
 733     // c) high_only_quick_internal - levels 2,3 are invalid; levels -1,0,1,4 are valid.
 734     if (CompilationModeFlag::quick_only()) {
 735       if (level == CompLevel_limited_profile || level == CompLevel_full_profile || level == CompLevel_full_optimization) {
 736         level = CompLevel_simple;
 737       }
 738     } else if (CompilationModeFlag::high_only()) {
 739       if (level == CompLevel_simple || level == CompLevel_limited_profile || level == CompLevel_full_profile) {
 740         level = CompLevel_none;
 741       }
 742     } else if (CompilationModeFlag::high_only_quick_internal()) {
 743       if (level == CompLevel_limited_profile || level == CompLevel_full_profile) {
 744         level = CompLevel_simple;
 745       }
 746     }
 747   }
 748 
 749   assert(verify_level(level), "Invalid highest compilation level: %d", level);
 750   return level;
 751 }
 752 
 753 CompLevel CompilationPolicy::limit_level(CompLevel level) {
 754   level = MIN2(level, highest_compile_level());
 755   assert(verify_level(level), "Invalid compilation level: %d", level);
 756   return level;
 757 }
 758 
 759 CompLevel CompilationPolicy::initial_compile_level(const methodHandle& method) {
 760   CompLevel level = CompLevel_any;
 761   if (CompilationModeFlag::normal()) {
 762     level = CompLevel_full_profile;
 763   } else if (CompilationModeFlag::quick_only()) {
 764     level = CompLevel_simple;
 765   } else if (CompilationModeFlag::high_only()) {
 766     level = CompLevel_full_optimization;
 767   } else if (CompilationModeFlag::high_only_quick_internal()) {
 768     if (force_comp_at_level_simple(method)) {
 769       level = CompLevel_simple;
 770     } else {
 771       level = CompLevel_full_optimization;
 772     }
 773   }
 774   assert(level != CompLevel_any, "Unhandled compilation mode");
 775   return limit_level(level);
 776 }
 777 
 778 // Set carry flags on the counters if necessary
 779 void CompilationPolicy::handle_counter_overflow(const methodHandle& method) {
 780   MethodCounters *mcs = method->method_counters();
 781   if (mcs != nullptr) {
 782     mcs->invocation_counter()->set_carry_on_overflow();
 783     mcs->backedge_counter()->set_carry_on_overflow();
 784   }
 785   MethodData* mdo = method->method_data();
 786   if (mdo != nullptr) {
 787     mdo->invocation_counter()->set_carry_on_overflow();
 788     mdo->backedge_counter()->set_carry_on_overflow();
 789   }
 790 }
 791 
 792 // Called with the queue locked and with at least one element
 793 CompileTask* CompilationPolicy::select_task(CompileQueue* compile_queue, JavaThread* THREAD) {
 794   CompileTask *max_blocking_task = nullptr;
 795   CompileTask *max_task = nullptr;
 796   Method* max_method = nullptr;
 797 
 798   int64_t t = nanos_to_millis(os::javaTimeNanos());
 799   // Iterate through the queue and find a method with a maximum rate.
 800   for (CompileTask* task = compile_queue->first(); task != nullptr;) {
 801     CompileTask* next_task = task->next();
 802     // If a method was unloaded or has been stale for some time, remove it from the queue.
    // Blocking tasks and tasks submitted from the whitebox API don't become stale.
 804     if (task->is_unloaded()) {
 805       compile_queue->remove_and_mark_stale(task);
 806       task = next_task;
 807       continue;
 808     }
 809     if (task->is_aot()) {
      // AOTCodeCache tasks are on a separate queue, and they should load fast. There is no need to walk
 811       // the rest of the queue, just take the task and go.
 812       return task;
 813     }
 814     if (task->is_blocking() && task->compile_reason() == CompileTask::Reason_Whitebox) {
 815       // CTW tasks, submitted as blocking Whitebox requests, do not participate in rate
 816       // selection and/or any level adjustments. Just return them in order.
 817       return task;
 818     }
 819     Method* method = task->method();
 820     methodHandle mh(THREAD, method);
 821     if (task->can_become_stale() && is_stale(t, TieredCompileTaskTimeout, mh) && !is_old(mh)) {
 822       if (PrintTieredEvents) {
 823         print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel) task->comp_level());
 824       }
 825       method->clear_queued_for_compilation();
 826       method->set_pending_queue_processed(false);
 827       compile_queue->remove_and_mark_stale(task);
 828       task = next_task;
 829       continue;
 830     }
 831     update_rate(t, mh);
 832     if (max_task == nullptr || compare_methods(method, max_method) || compare_tasks(task, max_task)) {
 833       // Select a method with the highest rate
 834       max_task = task;
 835       max_method = method;
 836     }
 837 
 838     if (task->is_blocking()) {
 839       if (max_blocking_task == nullptr || compare_methods(method, max_blocking_task->method())) {
 840         max_blocking_task = task;
 841       }
 842     }
 843 
 844     task = next_task;
 845   }
 846 
 847   if (max_blocking_task != nullptr) {
 848     // In blocking compilation mode, the CompileBroker will make
 849     // compilations submitted by a JVMCI compiler thread non-blocking. These
 850     // compilations should be scheduled after all blocking compilations
 851     // to service non-compiler related compilations sooner and reduce the
 852     // chance of such compilations timing out.
 853     max_task = max_blocking_task;
 854     max_method = max_task->method();
 855   }
 856 
 857   methodHandle max_method_h(THREAD, max_method);
 858 
 859   if (max_task != nullptr && max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile &&
 860       max_method != nullptr && is_method_profiled(max_method_h) && !Arguments::is_compiler_only()) {
 861     max_task->set_comp_level(CompLevel_limited_profile);
 862 
 863     if (CompileBroker::compilation_is_complete(max_method_h(), max_task->osr_bci(), CompLevel_limited_profile,
 864                                                false /* requires_online_compilation */,
 865                                                CompileTask::Reason_None)) {
 866       if (PrintTieredEvents) {
 867         print_event(REMOVE_FROM_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
 868       }
 869       compile_queue->remove_and_mark_stale(max_task);
 870       max_method->clear_queued_for_compilation();
 871       return nullptr;
 872     }
 873 
 874     if (PrintTieredEvents) {
 875       print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
 876     }
 877   }
 878   return max_task;
 879 }
 880 
 881 void CompilationPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
 882   for (ScopeDesc* sd = trap_scope;; sd = sd->sender()) {
 883     if (PrintTieredEvents) {
 884       print_event(REPROFILE, sd->method(), sd->method(), InvocationEntryBci, CompLevel_none);
 885     }
 886     MethodData* mdo = sd->method()->method_data();
 887     if (mdo != nullptr) {
 888       mdo->reset_start_counters();
 889     }
 890     if (sd->is_top()) break;
 891   }
 892 }
 893 
 894 nmethod* CompilationPolicy::event(const methodHandle& method, const methodHandle& inlinee,
 895                                       int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS) {
 896   if (PrintTieredEvents) {
 897     print_event(bci == InvocationEntryBci ? CALL : LOOP, method(), inlinee(), bci, comp_level);
 898   }
 899 
 900 #if INCLUDE_JVMCI
 901   if (EnableJVMCI && UseJVMCICompiler &&
 902       comp_level == CompLevel_full_optimization CDS_ONLY(&& !AOTLinkedClassBulkLoader::class_preloading_finished())) {
 903     return nullptr;
 904   }
 905 #endif
 906 
 907   if (comp_level == CompLevel_none &&
 908       JvmtiExport::can_post_interpreter_events() &&
 909       THREAD->is_interp_only_mode()) {
 910     return nullptr;
 911   }
 912   if (ReplayCompiles) {
 913     // Don't trigger other compiles in testing mode
 914     return nullptr;
 915   }
 916 
 917   handle_counter_overflow(method);
 918   if (method() != inlinee()) {
 919     handle_counter_overflow(inlinee);
 920   }
 921 
 922   if (bci == InvocationEntryBci) {
 923     method_invocation_event(method, inlinee, comp_level, nm, THREAD);
 924   } else {
 925     // method == inlinee if the event originated in the main method
 926     method_back_branch_event(method, inlinee, bci, comp_level, nm, THREAD);
 927     // Check if event led to a higher level OSR compilation
 928     CompLevel expected_comp_level = MIN2(CompLevel_full_optimization, static_cast<CompLevel>(comp_level + 1));
 929     if (!CompilationModeFlag::disable_intermediate() && inlinee->is_not_osr_compilable(expected_comp_level)) {
 930       // It's not possible to reach the expected level so fall back to simple.
 931       expected_comp_level = CompLevel_simple;
 932     }
 933     CompLevel max_osr_level = static_cast<CompLevel>(inlinee->highest_osr_comp_level());
 934     if (max_osr_level >= expected_comp_level) { // fast check to avoid locking in a typical scenario
 935       nmethod* osr_nm = inlinee->lookup_osr_nmethod_for(bci, expected_comp_level, false);
 936       assert(osr_nm == nullptr || osr_nm->comp_level() >= expected_comp_level, "lookup_osr_nmethod_for is broken");
 937       if (osr_nm != nullptr && osr_nm->comp_level() != comp_level) {
 938         // Perform OSR with new nmethod
 939         return osr_nm;
 940       }
 941     }
 942   }
 943   return nullptr;
 944 }
 945 
 946 // Check if the method can be compiled, change level if necessary
 947 void CompilationPolicy::compile(const methodHandle& mh, int bci, CompLevel level, TRAPS) {
 948   assert(verify_level(level), "Invalid compilation level requested: %d", level);
 949 
 950   if (level == CompLevel_none) {
 951     if (mh->has_compiled_code()) {
 952       // Happens when we switch to interpreter to profile.
 953       MutexLocker ml(Compile_lock);
 954       NoSafepointVerifier nsv;
 955       if (mh->has_compiled_code()) {
 956         mh->code()->make_not_used();
 957       }
 958       // Deoptimize immediately (we don't have to wait for a compile).
 959       JavaThread* jt = THREAD;
 960       RegisterMap map(jt,
 961                       RegisterMap::UpdateMap::skip,
 962                       RegisterMap::ProcessFrames::include,
 963                       RegisterMap::WalkContinuation::skip);
 964       frame fr = jt->last_frame().sender(&map);
 965       Deoptimization::deoptimize_frame(jt, fr.id());
 966     }
 967     return;
 968   }
 969 
 970   if (!CompilationModeFlag::disable_intermediate()) {
 971     // Check if the method can be compiled. If it cannot be compiled with C1, continue profiling
 972     // in the interpreter and then compile with C2 (the transition function will request that,
    // see common()). If the method cannot be compiled with C2 but still can with C1, compile it with
 974     // pure C1.
 975     if ((bci == InvocationEntryBci && !can_be_compiled(mh, level))) {
 976       if (level == CompLevel_full_optimization && can_be_compiled(mh, CompLevel_simple)) {
 977         compile(mh, bci, CompLevel_simple, THREAD);
 978       }
 979       return;
 980     }
 981     if ((bci != InvocationEntryBci && !can_be_osr_compiled(mh, level))) {
 982       if (level == CompLevel_full_optimization && can_be_osr_compiled(mh, CompLevel_simple)) {
 983         nmethod* osr_nm = mh->lookup_osr_nmethod_for(bci, CompLevel_simple, false);
 984         if (osr_nm != nullptr && osr_nm->comp_level() > CompLevel_simple) {
 985           // Invalidate the existing OSR nmethod so that a compile at CompLevel_simple is permitted.
 986           osr_nm->make_not_entrant("OSR invalidation for compiling with C1");
 987         }
 988         compile(mh, bci, CompLevel_simple, THREAD);
 989       }
 990       return;
 991     }
 992   }
 993   if (bci != InvocationEntryBci && mh->is_not_osr_compilable(level)) {
 994     return;
 995   }
 996   if (!CompileBroker::compilation_is_in_queue(mh)) {
 997     if (PrintTieredEvents) {
 998       print_event(COMPILE, mh(), mh(), bci, level);
 999     }
1000     int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
1001     update_rate(nanos_to_millis(os::javaTimeNanos()), mh);
1002     bool requires_online_compilation = false;
1003     if (TrainingData::have_data()) {
1004       MethodTrainingData* mtd = MethodTrainingData::find_fast(mh);
1005       if (mtd != nullptr) {
1006         CompileTrainingData* ctd = mtd->last_toplevel_compile(level);
1007         if (ctd != nullptr) {
1008           requires_online_compilation = (ctd->init_deps_left() > 0);
1009         }
1010       }
1011     }
1012     CompileBroker::compile_method(mh, bci, level, mh, hot_count, requires_online_compilation, CompileTask::Reason_Tiered, THREAD);
1013   }
1014 }
1015 
1016 // update_rate() is called from select_task() while holding a compile queue lock.
1017 void CompilationPolicy::update_rate(int64_t t, const methodHandle& method) {
1018   // Skip update if counters are absent.
1019   // Can't allocate them since we are holding compile queue lock.
1020   if (method->method_counters() == nullptr)  return;
1021 
1022   if (is_old(method)) {
1023     // We don't remove old methods from the queue,
1024     // so we can just zero the rate.
1025     method->set_rate(0);
1026     return;
1027   }
1028 
  // We don't update the rate if we've just come out of a safepoint.
1030   // delta_s is the time since last safepoint in milliseconds.
1031   int64_t delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
1032   int64_t delta_t = t - (method->prev_time() != 0 ? method->prev_time() : start_time()); // milliseconds since the last measurement
1033   // How many events were there since the last time?
1034   int event_count = method->invocation_count() + method->backedge_count();
1035   int delta_e = event_count - method->prev_event_count();
1036 
1037   // We should be running for at least 1ms.
1038   if (delta_s >= TieredRateUpdateMinTime) {
1039     // And we must've taken the previous point at least 1ms before.
1040     if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) {
1041       method->set_prev_time(t);
1042       method->set_prev_event_count(event_count);
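      // Illustrative example: 500 new invocation/back-edge events over a 100 ms window
      // give a rate of 5 events per millisecond.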
1043       method->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond
1044     } else {
1045       if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) {
1046         // If nothing happened for 25ms, zero the rate. Don't modify prev values.
1047         method->set_rate(0);
1048       }
1049     }
1050   }
1051 }
1052 
1053 // Check if this method has been stale for a given number of milliseconds.
1054 // See select_task().
1055 bool CompilationPolicy::is_stale(int64_t t, int64_t timeout, const methodHandle& method) {
1056   int64_t delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
1057   int64_t delta_t = t - method->prev_time();
1058   if (delta_t > timeout && delta_s > timeout) {
1059     int event_count = method->invocation_count() + method->backedge_count();
1060     int delta_e = event_count - method->prev_event_count();
1061     // Return true if there were no events.
1062     return delta_e == 0;
1063   }
1064   return false;
1065 }
1066 
1067 // We don't remove old methods from the compile queue even if they have
1068 // very low activity. See select_task().
1069 bool CompilationPolicy::is_old(const methodHandle& method) {
1070   int i = method->invocation_count();
1071   int b = method->backedge_count();
1072   double k = TieredOldPercentage / 100.0;
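  // With the usual default TieredOldPercentage of 1000 this means a method is considered
  // old once it has accumulated roughly 10x the normal compile thresholds.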
1073 
1074   return CallPredicate::apply_scaled(method, CompLevel_none, i, b, k) || LoopPredicate::apply_scaled(method, CompLevel_none, i, b, k);
1075 }
1076 
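// Queue-ordering weight: combines the recent event rate with the total invocation and
// back-edge counts, so hotter and longer-running methods are compiled first (see
// compare_methods() and select_task()).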
1077 double CompilationPolicy::weight(Method* method) {
1078   return (double)(method->rate() + 1) * (method->invocation_count() + 1) * (method->backedge_count() + 1);
1079 }
1080 
1081 // Apply heuristics and return true if x should be compiled before y
1082 bool CompilationPolicy::compare_methods(Method* x, Method* y) {
1083   if (x->highest_comp_level() > y->highest_comp_level()) {
1084     // recompilation after deopt
1085     return true;
1086   } else
1087     if (x->highest_comp_level() == y->highest_comp_level()) {
1088       if (weight(x) > weight(y)) {
1089         return true;
1090       }
1091     }
1092   return false;
1093 }
1094 
1095 bool CompilationPolicy::compare_tasks(CompileTask* x, CompileTask* y) {
1096   assert(!x->is_aot() && !y->is_aot(), "AOT code caching tasks are not expected here");
1097   if (x->compile_reason() != y->compile_reason() && y->compile_reason() == CompileTask::Reason_MustBeCompiled) {
1098     return true;
1099   }
1100   return false;
1101 }
1102 
// Is the method profiled enough?
1104 bool CompilationPolicy::is_method_profiled(const methodHandle& method) {
1105   MethodData* mdo = method->method_data();
1106   if (mdo != nullptr) {
1107     int i = mdo->invocation_count_delta();
1108     int b = mdo->backedge_count_delta();
1109     return CallPredicate::apply_scaled(method, CompLevel_full_profile, i, b, 1);
1110   }
1111   return false;
1112 }
1113 
1114 
// Determine if a method is mature.
1116 bool CompilationPolicy::is_mature(MethodData* mdo) {
1117   if (Arguments::is_compiler_only()) {
1118     // Always report profiles as immature with -Xcomp
1119     return false;
1120   }
  if (mdo == nullptr) {
    // No profile data yet; treat the method as immature.
    return false;
  }
  methodHandle mh(Thread::current(), mdo->method());
  int i = mdo->invocation_count();
  int b = mdo->backedge_count();
  double k = ProfileMaturityPercentage / 100.0;
  return CallPredicate::apply_scaled(mh, CompLevel_full_profile, i, b, k) || LoopPredicate::apply_scaled(mh, CompLevel_full_profile, i, b, k);
}
1130 
1131 // If a method is old enough and is still in the interpreter we would want to
1132 // start profiling without waiting for the compiled method to arrive.
// We also take the load on the compilers into account.
1134 bool CompilationPolicy::should_create_mdo(const methodHandle& method, CompLevel cur_level) {
1135   if (cur_level != CompLevel_none || force_comp_at_level_simple(method) || CompilationModeFlag::quick_only() || !ProfileInterpreter) {
1136     return false;
1137   }
1138 
1139   if (TrainingData::have_data()) {
1140     MethodTrainingData* mtd = MethodTrainingData::find_fast(method);
1141     if (mtd != nullptr && mtd->saw_level(CompLevel_full_optimization)) {
1142       return true;
1143     }
1144   }
1145 
1146   if (is_old(method)) {
1147     return true;
1148   }
1149 
1150   int i = method->invocation_count();
1151   int b = method->backedge_count();
1152   double k = Tier0ProfilingStartPercentage / 100.0;
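  // With the usual default Tier0ProfilingStartPercentage of 200, k = 2, so profiling starts
  // once the counts reach about twice the thresholds that normally promote a method out of
  // the interpreter.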
1153 
1154   // If the top level compiler is not keeping up, delay profiling.
1155   if (CompileBroker::queue_size(CompLevel_full_optimization) <= Tier0Delay * compiler_count(CompLevel_full_optimization)) {
1156     return CallPredicate::apply_scaled(method, CompLevel_none, i, b, k) || LoopPredicate::apply_scaled(method, CompLevel_none, i, b, k);
1157   }
1158   return false;
1159 }
1160 
1161 // Inlining control: if we're compiling a profiled method with C1 and the callee
1162 // is known to have OSRed in a C2 version, don't inline it.
1163 bool CompilationPolicy::should_not_inline(ciEnv* env, ciMethod* callee) {
1164   CompLevel comp_level = (CompLevel)env->comp_level();
1165   if (comp_level == CompLevel_full_profile ||
1166       comp_level == CompLevel_limited_profile) {
1167     return callee->highest_osr_comp_level() == CompLevel_full_optimization;
1168   }
1169   return false;
1170 }
1171 
1172 // Create MDO if necessary.
1173 void CompilationPolicy::create_mdo(const methodHandle& mh, JavaThread* THREAD) {
1174   if (mh->is_native() ||
1175       mh->is_abstract() ||
1176       mh->is_accessor() ||
1177       mh->is_constant_getter()) {
1178     return;
1179   }
1180   if (mh->method_data() == nullptr) {
1181     Method::build_profiling_method_data(mh, CHECK_AND_CLEAR);
1182   }
1183   if (ProfileInterpreter && THREAD->has_last_Java_frame()) {
1184     MethodData* mdo = mh->method_data();
1185     if (mdo != nullptr) {
1186       frame last_frame = THREAD->last_frame();
1187       if (last_frame.is_interpreted_frame() && mh == last_frame.interpreter_frame_method()) {
1188         int bci = last_frame.interpreter_frame_bci();
1189         address dp = mdo->bci_to_dp(bci);
1190         last_frame.interpreter_frame_set_mdp(dp);
1191       }
1192     }
1193   }
1194 }
1195 
1196 CompLevel CompilationPolicy::trained_transition_from_none(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
1197   precond(mtd != nullptr);
1198   precond(cur_level == CompLevel_none);
1199 
1200   if (mtd->only_inlined() && !mtd->saw_level(CompLevel_full_optimization)) {
1201     return CompLevel_none;
1202   }
1203 
1204   bool training_has_profile = (mtd->final_profile() != nullptr);
1205   if (mtd->saw_level(CompLevel_full_optimization) && !training_has_profile) {
1206     return CompLevel_full_profile;
1207   }
1208 
1209   CompLevel highest_training_level = static_cast<CompLevel>(mtd->highest_top_level());
1210   switch (highest_training_level) {
1211     case CompLevel_limited_profile:
1212     case CompLevel_full_profile:
1213       return CompLevel_limited_profile;
1214     case CompLevel_simple:
1215       return CompLevel_simple;
1216     case CompLevel_none:
1217       return CompLevel_none;
1218     default:
1219       break;
1220   }
1221 
1222   // Now handle the case of level 4.
1223   assert(highest_training_level == CompLevel_full_optimization, "Unexpected compilation level: %d", highest_training_level);
1224   if (!training_has_profile) {
    // The method was part of a level 4 compile, but we don't have a stored profile,
    // so we need to profile it.
1227     return CompLevel_full_profile;
1228   }
1229   const bool deopt = (static_cast<CompLevel>(method->highest_comp_level()) == CompLevel_full_optimization);
1230   // If we deopted, then we reprofile
1231   if (deopt && !is_method_profiled(method)) {
1232     return CompLevel_full_profile;
1233   }
1234 
1235   CompileTrainingData* ctd = mtd->last_toplevel_compile(CompLevel_full_optimization);
1236   assert(ctd != nullptr, "Should have CTD for CompLevel_full_optimization");
1237   // With SkipTier2IfPossible and all deps satisfied, go to level 4 immediately
1238   if (SkipTier2IfPossible && ctd->init_deps_left() == 0) {
1239     if (method->method_data() == nullptr) {
1240       create_mdo(method, THREAD);
1241     }
1242     return CompLevel_full_optimization;
1243   }
1244 
1245   // Otherwise go to level 2
1246   return CompLevel_limited_profile;
1247 }
1248 
1249 
1250 CompLevel CompilationPolicy::trained_transition_from_limited_profile(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
1251   precond(mtd != nullptr);
1252   precond(cur_level == CompLevel_limited_profile);
1253 
1254   // One of the main reasons that we can get here is that we're waiting for the stored C2 code to become ready.
1255 
1256   // But first, check if we have a saved profile
1257   bool training_has_profile = (mtd->final_profile() != nullptr);
1258   if (!training_has_profile) {
1259     return CompLevel_full_profile;
1260   }
1261 
1262 
1263   assert(training_has_profile, "Have to have a profile to be here");
1264   // Check if the method is ready
1265   CompileTrainingData* ctd = mtd->last_toplevel_compile(CompLevel_full_optimization);
1266   if (ctd != nullptr && ctd->init_deps_left() == 0) {
1267     if (method->method_data() == nullptr) {
1268       create_mdo(method, THREAD);
1269     }
1270     return CompLevel_full_optimization;
1271   }
1272 
1273   // Otherwise stay at the current level
1274   return CompLevel_limited_profile;
1275 }
1276 
1277 
1278 CompLevel CompilationPolicy::trained_transition_from_full_profile(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
1279   precond(mtd != nullptr);
1280   precond(cur_level == CompLevel_full_profile);
1281 
1282   CompLevel highest_training_level = static_cast<CompLevel>(mtd->highest_top_level());
  // We have the method at the full profile level and we also know that it's possibly an important method.
1284   if (highest_training_level == CompLevel_full_optimization && !mtd->only_inlined()) {
1285     // Check if it is adequately profiled
1286     if (is_method_profiled(method)) {
1287       return CompLevel_full_optimization;
1288     }
1289   }
1290 
1291   // Otherwise stay at the current level
1292   return CompLevel_full_profile;
1293 }
1294 
1295 CompLevel CompilationPolicy::trained_transition(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
1296   precond(MethodTrainingData::have_data());
1297 
1298   // If there is no training data recorded for this method, bail out.
1299   if (mtd == nullptr) {
1300     return cur_level;
1301   }
1302 
1303   CompLevel next_level = cur_level;
1304   switch(cur_level) {
1305     default: break;
1306     case CompLevel_none:
1307       next_level = trained_transition_from_none(method, cur_level, mtd, THREAD);
1308       break;
1309     case CompLevel_limited_profile:
1310       next_level = trained_transition_from_limited_profile(method, cur_level, mtd, THREAD);
1311       break;
1312     case CompLevel_full_profile:
1313       next_level = trained_transition_from_full_profile(method, cur_level, mtd, THREAD);
1314       break;
1315   }
1316 
1317   // We don't have any special strategies for the C2-only compilation modes, so just fix up the levels for now.
1318   if (CompilationModeFlag::high_only_quick_internal() && CompLevel_simple < next_level && next_level < CompLevel_full_optimization) {
1319     return CompLevel_none;
1320   }
1321   if (CompilationModeFlag::high_only() && next_level < CompLevel_full_optimization) {
1322     return CompLevel_none;
1323   }
1324   return (cur_level != next_level) ? limit_level(next_level) : cur_level;
1325 }
1326 
1327 /*
1328  * Method states:
1329  *   0 - interpreter (CompLevel_none)
1330  *   1 - pure C1 (CompLevel_simple)
1331  *   2 - C1 with invocation and backedge counting (CompLevel_limited_profile)
1332  *   3 - C1 with full profiling (CompLevel_full_profile)
1333  *   4 - C2 or Graal (CompLevel_full_optimization)
1334  *
1335  * Common state transition patterns:
1336  * a. 0 -> 3 -> 4.
1337  *    The most common path. But note that even in this straightforward case
1338  *    profiling can start at level 0 and finish at level 3.
1339  *
1340  * b. 0 -> 2 -> 3 -> 4.
1341  *    This case occurs when the load on C2 is deemed too high. So, instead of transitioning
1342  *    into state 3 directly and over-profiling while the method sits in the C2 queue, we transition
1343  *    to state 2 and wait until the load on C2 decreases. This path is disabled for OSRs.
1344  *
1345  * c. 0 -> (3->2) -> 4.
1346  *    In this case we enqueue a method for compilation at level 3, but the C1 queue is long enough
1347  *    for profiling to fully complete at level 0 in the meantime. We then change the compilation level
1348  *    of the method to 2 while the request is still in the queue, because that lets it run much faster
1349  *    without full profiling while C2 is compiling.
1350  *
1351  * d. 0 -> 3 -> 1 or 0 -> 2 -> 1.
1352  *    After a method has been compiled with C1 once, it can be identified as trivial and recompiled at
1353  *    level 1. These transitions can also occur if a method can't be compiled with C2 but can be with C1.
1354  *
1355  * e. 0 -> 4.
1356  *    This can happen if a method fails C1 compilation (it will still be profiled in the interpreter)
1357  *    or because of a deopt that didn't require reprofiling (compilation won't happen in this case because
1358  *    the compiled version already exists).
1359  *
1360  * Note that since state 0 can be reached from any other state via deoptimization, different loops
1361  * are possible.
1362  *
1363  */
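     // As a rough illustration of pattern (a): a method runs in the interpreter (level 0) until its
     // invocation/backedge counters satisfy the call predicate, is compiled at level 3 to collect a
     // full profile, and is finally recompiled at level 4 by C2 once the profile counter deltas
     // satisfy the level 4 predicate (see transition_from_none() and transition_from_full_profile()
     // below).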
1364 
1365 // Common transition function. Given a predicate, determines whether a method should transition to another level.
1366 template<typename Predicate>
1367 CompLevel CompilationPolicy::common(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD, bool disable_feedback) {
1368   CompLevel next_level = cur_level;
1369 
1370   if (force_comp_at_level_simple(method)) {
1371     next_level = CompLevel_simple;
1372   } else if (is_trivial(method) || method->is_native()) {
1373     // We do not care whether there is profiling data for these methods; send them straight to the compiler.
1374     next_level = CompilationModeFlag::disable_intermediate() ? CompLevel_full_optimization : CompLevel_simple;
1375   } else if (MethodTrainingData::have_data()) {
1376     MethodTrainingData* mtd = MethodTrainingData::find_fast(method);
1377     if (mtd == nullptr) {
1378       // We haven't seen compilations of this method during training. It's either very cold or its behavior has changed.
1379       // Feed it to the standard TF with no profiling delay.
1380       next_level = standard_transition<Predicate>(method, cur_level, false /*delay_profiling*/, disable_feedback);
1381     } else {
1382       next_level = trained_transition(method, cur_level, mtd, THREAD);
1383       if (cur_level == next_level) {
1384         // trained_transition() is going to return the same level if no startup/warmup optimizations apply.
1385         // In order to catch possible pathologies due to behavior change we feed the event to the regular
1386         // TF but with profiling delay.
1387         next_level = standard_transition<Predicate>(method, cur_level, true /*delay_profiling*/, disable_feedback);
1388       }
1389     }
1390   } else {
1391     next_level = standard_transition<Predicate>(method, cur_level, false /*delay_profiling*/, disable_feedback);
1392   }
1393   return (next_level != cur_level) ? limit_level(next_level) : next_level;
1394 }
1395 
1396 
1397 template<typename Predicate>
1398 CompLevel CompilationPolicy::standard_transition(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback) {
1399   CompLevel next_level = cur_level;
1400   switch(cur_level) {
1401   default: break;
1402   case CompLevel_none:
1403     next_level = transition_from_none<Predicate>(method, cur_level, delay_profiling, disable_feedback);
1404     break;
1405   case CompLevel_limited_profile:
1406     next_level = transition_from_limited_profile<Predicate>(method, cur_level, delay_profiling, disable_feedback);
1407     break;
1408   case CompLevel_full_profile:
1409     next_level = transition_from_full_profile<Predicate>(method, cur_level);
1410     break;
1411   }
1412   return next_level;
1413 }
1414 
1415 template<typename Predicate>
1416 CompLevel CompilationPolicy::transition_from_none(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback) {
1417   precond(cur_level == CompLevel_none);
1418   CompLevel next_level = cur_level;
1419   int i = method->invocation_count();
1420   int b = method->backedge_count();
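       // When profiling is delayed (the method has training data but no trained transition applied),
       // the level-0 transition thresholds are scaled by Tier0ProfileDelayFactor so that the
       // transition happens later than it would on the standard path.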
1421   double scale = delay_profiling ? Tier0ProfileDelayFactor : 1.0;
1422   // If we were at full profile level, would we switch to full opt?
1423   if (transition_from_full_profile<Predicate>(method, CompLevel_full_profile) == CompLevel_full_optimization) {
1424     next_level = CompLevel_full_optimization;
1425   } else if (!CompilationModeFlag::disable_intermediate() && Predicate::apply_scaled(method, cur_level, i, b, scale)) {
1426     // C1-generated fully profiled code is about 30% slower than the limited profile
1427     // code that has only invocation and backedge counters. The observation is that
1428     // if the C2 queue is long enough we can spend too much time in the fully profiled code
1429     // while waiting for C2 to pick the method from the queue. To alleviate this problem
1430     // we introduce feedback based on the C2 queue size: if the C2 queue is sufficiently long
1431     // we choose to compile a limited-profile version first and then recompile with full profiling
1432     // when the load on C2 goes down.
1433     if (delay_profiling || (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) > Tier3DelayOn * compiler_count(CompLevel_full_optimization))) {
1434       next_level = CompLevel_limited_profile;
1435     } else {
1436       next_level = CompLevel_full_profile;
1437     }
1438   }
1439   return next_level;
1440 }
1441 
1442 template<typename Predicate>
1443 CompLevel CompilationPolicy::transition_from_full_profile(const methodHandle& method, CompLevel cur_level) {
1444   precond(cur_level == CompLevel_full_profile);
1445   CompLevel next_level = cur_level;
1446   MethodData* mdo = method->method_data();
1447   if (mdo != nullptr) {
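         // With an MDO present: if profiling is still useful (or intermediate levels are disabled),
         // wait until the profile counter deltas satisfy the predicate; otherwise there is nothing
         // more to profile and we go straight to full optimization.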
1448     if (mdo->would_profile() || CompilationModeFlag::disable_intermediate()) {
1449       int mdo_i = mdo->invocation_count_delta();
1450       int mdo_b = mdo->backedge_count_delta();
1451       if (Predicate::apply(method, cur_level, mdo_i, mdo_b)) {
1452         next_level = CompLevel_full_optimization;
1453       }
1454     } else {
1455       next_level = CompLevel_full_optimization;
1456     }
1457   }
1458   return next_level;
1459 }
1460 
1461 template<typename Predicate>
1462 CompLevel CompilationPolicy::transition_from_limited_profile(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback) {
1463   precond(cur_level == CompLevel_limited_profile);
1464   CompLevel next_level = cur_level;
1465   int i = method->invocation_count();
1466   int b = method->backedge_count();
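       // As at level 0, a delayed-profiling request scales the transition thresholds, here by
       // Tier2ProfileDelayFactor.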
1467   double scale = delay_profiling ? Tier2ProfileDelayFactor : 1.0;
1468   MethodData* mdo = method->method_data();
1469   if (mdo != nullptr) {
1470     if (mdo->would_profile()) {
1471       if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
1472                               Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
1473                               Predicate::apply_scaled(method, cur_level, i, b, scale))) {
1474         next_level = CompLevel_full_profile;
1475       }
1476     } else {
1477       next_level = CompLevel_full_optimization;
1478     }
1479   } else {
1480     // If there is no MDO we need to profile
1481     if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
1482                             Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
1483                             Predicate::apply_scaled(method, cur_level, i, b, scale))) {
1484       next_level = CompLevel_full_profile;
1485     }
1486   }
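       // If the method is already adequately profiled, skip the full-profiling step and let C2
       // compile it right away.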
1487   if (next_level == CompLevel_full_profile && is_method_profiled(method)) {
1488     next_level = CompLevel_full_optimization;
1489   }
1490   return next_level;
1491 }
1492 
1493 
1494 // Determine if a method should be compiled with a normal entry point at a different level.
1495 CompLevel CompilationPolicy::call_event(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD) {
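       // Determine the level the loop (OSR) predicate would choose, capped by the highest OSR
       // compilation that already exists for this method.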
1496   CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(), common<LoopPredicate>(method, cur_level, THREAD, true));
1497   CompLevel next_level = common<CallPredicate>(method, cur_level, THREAD, !TrainingData::have_data() && is_old(method));
1498 
1499   // If the OSR method level is greater than the regular method level, the levels should be
1500   // equalized by raising the regular method level in order to avoid OSRs during each
1501   // invocation of the method.
1502   if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
1503     MethodData* mdo = method->method_data();
1504     guarantee(mdo != nullptr, "MDO should not be nullptr");
1505     if (mdo->invocation_count() >= 1) {
1506       next_level = CompLevel_full_optimization;
1507     }
1508   } else {
1509     next_level = MAX2(osr_level, next_level);
1510   }
1511 #if INCLUDE_JVMCI
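       // With the JVMCI compiler, defer a level 4 transition while AOT-linked class preloading is
       // still in progress (the preloading check applies only to CDS builds).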
1512   if (EnableJVMCI && UseJVMCICompiler &&
1513       next_level == CompLevel_full_optimization CDS_ONLY(&& !AOTLinkedClassBulkLoader::class_preloading_finished())) {
1514     next_level = cur_level;
1515   }
1516 #endif
1517   return next_level;
1518 }
1519 
1520 // Determine if we should do an OSR compilation of a given method.
1521 CompLevel CompilationPolicy::loop_event(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD) {
1522   CompLevel next_level = common<LoopPredicate>(method, cur_level, THREAD, true);
1523   if (cur_level == CompLevel_none) {
1524     // If there is a live OSR method, it means that we deoptimized to the interpreter
1525     // for the transition.
1526     CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
1527     if (osr_level > CompLevel_none) {
1528       return osr_level;
1529     }
1530   }
1531   return next_level;
1532 }
1533 
1534 // Handle the invocation event.
1535 void CompilationPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh,
1536                                                       CompLevel level, nmethod* nm, TRAPS) {
1537   if (should_create_mdo(mh, level)) {
1538     create_mdo(mh, THREAD);
1539   }
1540   CompLevel next_level = call_event(mh, level, THREAD);
1541   if (next_level != level) {
1542     if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
1543       compile(mh, InvocationEntryBci, next_level, THREAD);
1544     }
1545   }
1546 }
1547 
1548 // Handle the back branch event. Notice that we can compile the method
1549 // with a regular entry from here.
1550 void CompilationPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh,
1551                                                      int bci, CompLevel level, nmethod* nm, TRAPS) {
1552   if (should_create_mdo(mh, level)) {
1553     create_mdo(mh, THREAD);
1554   }
1555   // Check if MDO should be created for the inlined method
1556   if (should_create_mdo(imh, level)) {
1557     create_mdo(imh, THREAD);
1558   }
1559 
1560   if (is_compilation_enabled()) {
1561     CompLevel next_osr_level = loop_event(imh, level, THREAD);
1562     CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
1563     // At the very least compile the OSR version
1564     if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) {
1565       compile(imh, bci, next_osr_level, CHECK);
1566     }
1567 
1568     // Use the loop event as an opportunity to also check whether there have been
1569     // enough calls.
1570     CompLevel cur_level, next_level;
1571     if (mh() != imh()) { // If there is an enclosing method
1572       {
1573         guarantee(nm != nullptr, "Should have nmethod here");
1574         cur_level = comp_level(mh());
1575         next_level = call_event(mh, cur_level, THREAD);
1576 
1577         if (max_osr_level == CompLevel_full_optimization) {
1578           // The inlinee OSRed to full opt; we need to modify the enclosing method to avoid deopts
1579           bool make_not_entrant = false;
1580           if (nm->is_osr_method()) {
1581             // This is an OSR method; just make it not entrant and recompile later if needed
1582             make_not_entrant = true;
1583           } else {
1584             if (next_level != CompLevel_full_optimization) {
1585               // next_level is not full opt, so we need to recompile the
1586               // enclosing method without the inlinee
1587               cur_level = CompLevel_none;
1588               make_not_entrant = true;
1589             }
1590           }
1591           if (make_not_entrant) {
1592             if (PrintTieredEvents) {
1593               int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
1594               print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
1595             }
1596             nm->make_not_entrant("OSR invalidation, back branch");
1597           }
1598         }
1599         // Fix up next_level if necessary to avoid deopts
1600         if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
1601           next_level = CompLevel_full_profile;
1602         }
1603         if (cur_level != next_level) {
1604           if (!CompileBroker::compilation_is_in_queue(mh)) {
1605             compile(mh, InvocationEntryBci, next_level, THREAD);
1606           }
1607         }
1608       }
1609     } else {
1610       cur_level = comp_level(mh());
1611       next_level = call_event(mh, cur_level, THREAD);
1612       if (next_level != cur_level) {
1613         if (!CompileBroker::compilation_is_in_queue(mh)) {
1614           compile(mh, InvocationEntryBci, next_level, THREAD);
1615         }
1616       }
1617     }
1618   }
1619 }
1620