/*
 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/ciMethodData.hpp"
#include "ci/ciSymbols.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileLog.hpp"
#include "interpreter/linkResolver.hpp"
#include "jvm_io.h"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/divnode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/idealKit.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/runtime.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/sharedRuntime.hpp"

#ifndef PRODUCT
extern uint explicit_null_checks_inserted,
            explicit_null_checks_elided;
#endif

Node* Parse::record_profile_for_speculation_at_array_load(Node* ld) {
  // Feed unused profile data to type speculation
  if (UseTypeSpeculation && UseArrayLoadStoreProfile) {
    ciKlass* array_type = nullptr;
    ciKlass* element_type = nullptr;
    ProfilePtrKind element_ptr = ProfileMaybeNull;
    bool flat_array = true;
    bool null_free_array = true;
    method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
    if (element_type != nullptr || element_ptr != ProfileMaybeNull) {
      ld = record_profile_for_speculation(ld, element_type, element_ptr);
    }
  }
  return ld;
}
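
// For example, if the profile at this bci has only ever seen one concrete
// element class, record_profile_for_speculation() attaches that class as a
// speculative type to the loaded value, so later type checks can speculate
// on it (and deoptimize if the speculation turns out to be wrong).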


//---------------------------------array_load----------------------------------
void Parse::array_load(BasicType bt) {
  const Type* elemtype = Type::TOP;
  Node* adr = array_addressing(bt, 0, elemtype);
  if (stopped())  return;     // guaranteed null or range check

  Node* array_index = pop();
  Node* array = pop();

  // Handle inline type arrays
  const TypeOopPtr* element_ptr = elemtype->make_oopptr();
  const TypeAryPtr* array_type = _gvn.type(array)->is_aryptr();

  if (!array_type->is_not_flat()) {
    // Cannot statically determine if array is a flat array, emit runtime check
    assert(UseArrayFlattening && is_reference_type(bt) && element_ptr->can_be_inline_type() &&
           (!element_ptr->is_inlinetypeptr() || element_ptr->inline_klass()->flat_in_array()), "array can't be flat");
    IdealKit ideal(this);
    IdealVariable res(ideal);
    ideal.declarations_done();
    ideal.if_then(flat_array_test(array, /* flat = */ false)); {
      // Non-flat array
      sync_kit(ideal);
      if (!array_type->is_flat()) {
        assert(array_type->is_flat() || control()->in(0)->as_If()->is_flat_array_check(&_gvn), "Should be found");
        const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
        DecoratorSet decorator_set = IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD;
        if (needs_range_check(array_type->size(), array_index)) {
          // We've emitted a RangeCheck but now insert an additional check between the range check and the actual load.
          // We cannot pin the load to two separate nodes. Instead, we pin it conservatively here such that it cannot
          // possibly float above the range check at any point.
          decorator_set |= C2_UNKNOWN_CONTROL_LOAD;
        }
        Node* ld = access_load_at(array, adr, adr_type, element_ptr, bt, decorator_set);
        if (element_ptr->is_inlinetypeptr()) {
          ld = InlineTypeNode::make_from_oop(this, ld, element_ptr->inline_klass());
        }
        ideal.set(res, ld);
      }
      ideal.sync_kit(this);
    } ideal.else_(); {
      // Flat array
      sync_kit(ideal);
      if (!array_type->is_not_flat()) {
        if (element_ptr->is_inlinetypeptr()) {
          // Element type is known, cast and load from flat array layout.
          ciInlineKlass* vk = element_ptr->inline_klass();
          bool is_null_free = array_type->is_null_free() || !vk->has_nullable_atomic_layout();
          bool is_not_null_free = array_type->is_not_null_free() || (!vk->has_atomic_layout() && !vk->has_non_atomic_layout());
          if (is_null_free) {
            // TODO 8350865 Impossible type
            is_not_null_free = false;
          }
          bool is_naturally_atomic = (is_null_free && vk->nof_declared_nonstatic_fields() <= 1);
          bool may_need_atomicity = !is_naturally_atomic && ((!is_not_null_free && vk->has_atomic_layout()) || (!is_null_free && vk->has_nullable_atomic_layout()));

          // Re-execute flat array load if buffering triggers deoptimization
          PreserveReexecuteState preexecs(this);
          jvms()->set_should_reexecute(true);
          inc_sp(3);

          adr = flat_array_element_address(array, array_index, vk, is_null_free, is_not_null_free, may_need_atomicity);
          int nm_offset = is_null_free ? -1 : vk->null_marker_offset_in_payload();
          Node* vt = InlineTypeNode::make_from_flat(this, vk, array, adr, array_index, nullptr, 0, may_need_atomicity, nm_offset);
          ideal.set(res, vt);
        } else {
          // Element type is unknown, and thus we cannot statically determine the exact flat array layout. Emit a
          // runtime call to correctly load the inline type element from the flat array.
          Node* inline_type = load_from_unknown_flat_array(array, array_index, element_ptr);
          bool is_null_free = array_type->is_null_free() || !UseNullableValueFlattening;
          if (is_null_free) {
            inline_type = cast_not_null(inline_type);
          }
          ideal.set(res, inline_type);
        }
      }
      ideal.sync_kit(this);
    } ideal.end_if();
    sync_kit(ideal);
    Node* ld = _gvn.transform(ideal.value(res));
    ld = record_profile_for_speculation_at_array_load(ld);
    push_node(bt, ld);
    return;
  }

  if (elemtype == TypeInt::BOOL) {
    bt = T_BOOLEAN;
  }
  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);
  Node* ld = access_load_at(array, adr, adr_type, elemtype, bt,
                            IN_HEAP | IS_ARRAY | C2_CONTROL_DEPENDENT_LOAD);
  ld = record_profile_for_speculation_at_array_load(ld);
  // Loading an inline type from a non-flat array
  if (element_ptr != nullptr && element_ptr->is_inlinetypeptr()) {
    assert(!array_type->is_null_free() || !element_ptr->maybe_null(), "inline type array elements should never be null");
    ld = InlineTypeNode::make_from_oop(this, ld, element_ptr->inline_klass());
  }
  push_node(bt, ld);
}
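
// Roughly, the maybe-flat path above expands to the following shape:
//
//   if (!is_flat_array(array)) {
//     res = load from the regular object array layout;
//   } else if (element type is a known inline klass) {
//     res = load directly from the flat (possibly atomic) element layout;
//   } else {
//     res = runtime call to load_unknown_inline (layout only known at runtime);
//   }
//
// with both branches merged through the IdealKit variable 'res'.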

Node* Parse::load_from_unknown_flat_array(Node* array, Node* array_index, const TypeOopPtr* element_ptr) {
  // Below membars keep this access to an unknown flat array correctly
  // ordered with other unknown and known flat array accesses.
  insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));

  Node* call = nullptr;
  {
    // Re-execute flat array load if runtime call triggers deoptimization
    PreserveReexecuteState preexecs(this);
    jvms()->set_bci(_bci);
    jvms()->set_should_reexecute(true);
    inc_sp(2);
    kill_dead_locals();
    call = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                             OptoRuntime::load_unknown_inline_Type(),
                             OptoRuntime::load_unknown_inline_Java(),
                             nullptr, TypeRawPtr::BOTTOM,
                             array, array_index);
  }
  make_slow_call_ex(call, env()->Throwable_klass(), false);
  Node* buffer = _gvn.transform(new ProjNode(call, TypeFunc::Parms));

  insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));

  // Keep track of the information that the inline type is in flat arrays
  const Type* unknown_value = element_ptr->is_instptr()->cast_to_flat_in_array();
  return _gvn.transform(new CheckCastPPNode(control(), buffer, unknown_value));
}

//--------------------------------array_store----------------------------------
void Parse::array_store(BasicType bt) {
  const Type* elemtype = Type::TOP;
  Node* adr = array_addressing(bt, type2size[bt], elemtype);
  if (stopped())  return;     // guaranteed null or range check
  Node* stored_value_casted = nullptr;
  if (bt == T_OBJECT) {
    stored_value_casted = array_store_check(adr, elemtype);
    if (stopped()) {
      return;
    }
  }
  Node* const stored_value = pop_node(bt); // Value to store
  Node* const array_index = pop();         // Index in the array
  Node* array = pop();                     // The array itself

  const TypeAryPtr* array_type = _gvn.type(array)->is_aryptr();
  const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(bt);

  if (elemtype == TypeInt::BOOL) {
    bt = T_BOOLEAN;
  } else if (bt == T_OBJECT) {
    elemtype = elemtype->make_oopptr();
    const Type* stored_value_casted_type = _gvn.type(stored_value_casted);
    // Based on the value to be stored, try to determine if the array is not null-free and/or not flat.
    // This is only legal for non-null stores because the array_store_check always passes for null, even
    // if the array is null-free. Null stores are handled in GraphKit::inline_array_null_guard().
    bool not_inline = !stored_value_casted_type->maybe_null() && !stored_value_casted_type->is_oopptr()->can_be_inline_type();
    bool not_null_free = not_inline;
    bool not_flat = not_inline || ( stored_value_casted_type->is_inlinetypeptr() &&
                                   !stored_value_casted_type->inline_klass()->flat_in_array());
    if (!array_type->is_not_null_free() && not_null_free) {
      // Storing a non-inline type, mark array as not null-free.
      array_type = array_type->cast_to_not_null_free();
      Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, array_type));
      replace_in_map(array, cast);
      array = cast;
    }
    if (!array_type->is_not_flat() && not_flat) {
      // Storing to a non-flat array, mark array as not flat.
      array_type = array_type->cast_to_not_flat();
      Node* cast = _gvn.transform(new CheckCastPPNode(control(), array, array_type));
      replace_in_map(array, cast);
      array = cast;
    }

    if (!array_type->is_flat() && array_type->is_null_free()) {
      // Store to non-flat null-free inline type array (elements can never be null)
      assert(!stored_value_casted_type->maybe_null(), "should be guaranteed by array store check");
      if (elemtype->is_inlinetypeptr() && elemtype->inline_klass()->is_empty()) {
        // Ignore empty inline stores, array is already initialized.
        return;
      }
    } else if (!array_type->is_not_flat()) {
      // Array might be a flat array, emit runtime checks (for nullptr, a simple inline_array_null_guard is sufficient).
      assert(UseArrayFlattening && !not_flat && elemtype->is_oopptr()->can_be_inline_type() &&
             (!array_type->klass_is_exact() || array_type->is_flat()), "array can't be a flat array");
      // TODO 8350865 Depending on the available layouts, we can avoid this check in below flat/not-flat branches. Also the safe_for_replace arg is now always true.
      array = inline_array_null_guard(array, stored_value_casted, 3, true);
      IdealKit ideal(this);
      ideal.if_then(flat_array_test(array, /* flat = */ false)); {
        // Non-flat array
        if (!array_type->is_flat()) {
          sync_kit(ideal);
          assert(array_type->is_flat() || ideal.ctrl()->in(0)->as_If()->is_flat_array_check(&_gvn), "Should be found");
          inc_sp(3);
          access_store_at(array, adr, adr_type, stored_value_casted, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY, false);
          dec_sp(3);
          ideal.sync_kit(this);
        }
      } ideal.else_(); {
        // Flat array
        sync_kit(ideal);
        if (!array_type->is_not_flat()) {
          // Try to determine the inline klass type of the stored value
          ciInlineKlass* vk = nullptr;
          if (stored_value_casted_type->is_inlinetypeptr()) {
            vk = stored_value_casted_type->inline_klass();
          } else if (elemtype->is_inlinetypeptr()) {
            vk = elemtype->inline_klass();
          }

          if (vk != nullptr) {
            // Element type is known, cast and store to flat array layout.
            bool is_null_free = array_type->is_null_free() || !vk->has_nullable_atomic_layout();
            bool is_not_null_free = array_type->is_not_null_free() || (!vk->has_atomic_layout() && !vk->has_non_atomic_layout());
            if (is_null_free) {
              // TODO 8350865 Impossible type
              is_not_null_free = false;
            }
            bool is_naturally_atomic = (is_null_free && vk->nof_declared_nonstatic_fields() <= 1);
            bool may_need_atomicity = !is_naturally_atomic && ((!is_not_null_free && vk->has_atomic_layout()) || (!is_null_free && vk->has_nullable_atomic_layout()));

            // Re-execute flat array store if buffering triggers deoptimization
            PreserveReexecuteState preexecs(this);
            jvms()->set_should_reexecute(true);
            inc_sp(3);

            if (!stored_value_casted->is_InlineType()) {
              assert(_gvn.type(stored_value_casted) == TypePtr::NULL_PTR, "Unexpected value");
              stored_value_casted = InlineTypeNode::make_null(_gvn, vk);
            }
            adr = flat_array_element_address(array, array_index, vk, is_null_free, is_not_null_free, may_need_atomicity);
            int nm_offset = is_null_free ? -1 : vk->null_marker_offset_in_payload();
            stored_value_casted->as_InlineType()->store_flat(this, array, adr, array_index, vk, 0, may_need_atomicity, nm_offset, MO_UNORDERED | IN_HEAP | IS_ARRAY);
          } else {
            // Element type is unknown, emit a runtime call since the flat array layout is not statically known.
            store_to_unknown_flat_array(array, array_index, stored_value_casted);
          }
        }
        ideal.sync_kit(this);
      }
      ideal.end_if();
      sync_kit(ideal);
      return;
    } else if (!array_type->is_not_null_free()) {
      // Array is not flat but may be null free
      assert(elemtype->is_oopptr()->can_be_inline_type(), "array can't be null-free");
      array = inline_array_null_guard(array, stored_value_casted, 3, true);
    }
  }
  inc_sp(3);
  access_store_at(array, adr, adr_type, stored_value, elemtype, bt, MO_UNORDERED | IN_HEAP | IS_ARRAY);
  dec_sp(3);
}

// Emit a runtime call to store to a flat array whose element type is either unknown (i.e. we do not know the flat
// array layout) or not exact (could have different flat array layouts at runtime).
void Parse::store_to_unknown_flat_array(Node* array, Node* const idx, Node* non_null_stored_value) {
  // Below membars keep this access to an unknown flat array correctly
  // ordered with other unknown and known flat array accesses.
  insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));

  Node* call = nullptr;
  {
    // Re-execute flat array store if runtime call triggers deoptimization
    PreserveReexecuteState preexecs(this);
    jvms()->set_bci(_bci);
    jvms()->set_should_reexecute(true);
    inc_sp(3);
    kill_dead_locals();
    call = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                      OptoRuntime::store_unknown_inline_Type(),
                      OptoRuntime::store_unknown_inline_Java(),
                      nullptr, TypeRawPtr::BOTTOM,
                      non_null_stored_value, array, idx);
  }
  make_slow_call_ex(call, env()->Throwable_klass(), false);

  insert_mem_bar_volatile(Op_MemBarCPUOrder, C->get_alias_index(TypeAryPtr::INLINES));
}

//------------------------------array_addressing-------------------------------
// Pull array and index from the stack.  Compute pointer-to-element.
Node* Parse::array_addressing(BasicType type, int vals, const Type*& elemtype) {
  Node *idx   = peek(0+vals);   // Get from stack without popping
  Node *ary   = peek(1+vals);   // in case of exception

  // Null check the array base, with correct stack contents
  ary = null_check(ary, T_ARRAY);
  // Compile-time detect of null-exception?
  if (stopped())  return top();

  const TypeAryPtr* arytype  = _gvn.type(ary)->is_aryptr();
  const TypeInt*    sizetype = arytype->size();
  elemtype = arytype->elem();

  if (UseUniqueSubclasses) {
    const Type* el = elemtype->make_ptr();
    if (el && el->isa_instptr()) {
      const TypeInstPtr* toop = el->is_instptr();
      if (toop->instance_klass()->unique_concrete_subklass()) {
        // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
        const Type* subklass = Type::get_const_type(toop->instance_klass());
        elemtype = subklass->join_speculative(el);
      }
    }
  }

  if (!arytype->is_loaded()) {
    // Only fails for some -Xcomp runs
    // The class is unloaded.  We have to run this bytecode in the interpreter.
    ciKlass* klass = arytype->unloaded_klass();

    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  klass, "!loaded array");
    return top();
  }

  ary = create_speculative_inline_type_array_checks(ary, arytype, elemtype);

  if (needs_range_check(sizetype, idx)) {
    create_range_check(idx, ary, sizetype);
  } else if (C->log() != nullptr) {
    C->log()->elem("observe that='!need_range_check'");
  }

  // Check for always knowing you are throwing a range-check exception
  if (stopped())  return top();

  // Make array address computation control dependent to prevent it
  // from floating above the range check during loop optimizations.
  Node* ptr = array_element_address(ary, idx, type, sizetype, control());
  assert(ptr != top(), "top should go hand-in-hand with stopped");

  return ptr;
}
// Check if we need a range check for an array access. This is the case if the index is either negative or if it could
// be greater than or equal to the smallest possible array size (i.e. out-of-bounds).
bool Parse::needs_range_check(const TypeInt* size_type, const Node* index) const {
  const TypeInt* index_type = _gvn.type(index)->is_int();
  return index_type->_hi >= size_type->_lo || index_type->_lo < 0;
}
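
// For example, an index whose type is [0, 3] into an array whose length type is
// [4, max_jint] needs no range check: index_type->_hi (3) is below size_type->_lo (4)
// and index_type->_lo (0) is non-negative, so both conditions above are false.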

void Parse::create_range_check(Node* idx, Node* ary, const TypeInt* sizetype) {
  Node* tst;
  if (sizetype->_hi <= 0) {
    // The greatest array bound is negative, so we can conclude that we're
    // compiling unreachable code, but the unsigned compare trick used below
    // only works with non-negative lengths.  Instead, hack "tst" to be zero so
    // the uncommon_trap path will always be taken.
    tst = _gvn.intcon(0);
  } else {
    // Range is constant in array-oop, so we can use the original state of mem
    Node* len = load_array_length(ary);

    // Test length vs index (standard trick using unsigned compare)
    Node* chk = _gvn.transform(new CmpUNode(idx, len) );
    BoolTest::mask btest = BoolTest::lt;
    tst = _gvn.transform(new BoolNode(chk, btest) );
  }
  RangeCheckNode* rc = new RangeCheckNode(control(), tst, PROB_MAX, COUNT_UNKNOWN);
  _gvn.set_type(rc, rc->Value(&_gvn));
  if (!tst->is_Con()) {
    record_for_igvn(rc);
  }
  set_control(_gvn.transform(new IfTrueNode(rc)));
  // Branch to failure if out of bounds
  {
    PreserveJVMState pjvms(this);
    set_control(_gvn.transform(new IfFalseNode(rc)));
    if (C->allow_range_check_smearing()) {
      // Do not use builtin_throw, since range checks are sometimes
      // made more stringent by an optimistic transformation.
      // This creates "tentative" range checks at this point,
      // which are not guaranteed to throw exceptions.
      // See IfNode::Ideal, is_range_check, adjust_check.
      uncommon_trap(Deoptimization::Reason_range_check,
                    Deoptimization::Action_make_not_entrant,
                    nullptr, "range_check");
    } else {
      // If we have already recompiled with the range-check-widening
      // heroic optimization turned off, then we must really be throwing
      // range check exceptions.
      builtin_throw(Deoptimization::Reason_range_check);
    }
  }
}
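
// The unsigned compare above folds both bounds checks into one test: viewed as
// unsigned, (juint)idx < (juint)len is false whenever idx is negative (it wraps
// to a value >= 2^31, larger than any array length) and whenever idx >= len.
// For example, idx = -1 becomes 0xFFFFFFFF, which is never below a valid length.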

// For inline type arrays, we can use the profiling information for array accesses to speculate on the type, flatness,
// and null-freeness. We can either prepare the speculative type for later uses or emit explicit speculative checks with
// traps now. In the latter case, the speculative type guarantees can avoid additional runtime checks later (e.g.
// non-null-free implies non-flat which allows us to remove flatness checks). This makes the graph simpler.
Node* Parse::create_speculative_inline_type_array_checks(Node* array, const TypeAryPtr* array_type,
                                                         const Type*& element_type) {
  if (!array_type->is_flat() && !array_type->is_not_flat()) {
    // For arrays that might be flat, speculate that the array has the exact type reported in the profile data such that
    // we can rely on a fixed memory layout (i.e. either a flat layout or not).
    array = cast_to_speculative_array_type(array, array_type, element_type);
  } else if (UseTypeSpeculation && UseArrayLoadStoreProfile) {
    // Array is known to be either flat or not flat. If possible, update the speculative type by using the profile data
    // at this bci.
    array = cast_to_profiled_array_type(array);
  }

  // Even though the type does not tell us whether we have an inline type array or not, we can still check the profile
  // data to determine whether we have a non-null-free or non-flat array. Speculating on a non-null-free array doesn't
  // help aaload but could be profitable for a subsequent aastore.
  if (!array_type->is_null_free() && !array_type->is_not_null_free()) {
    array = speculate_non_null_free_array(array, array_type);
  }
  if (!array_type->is_flat() && !array_type->is_not_flat()) {
    array = speculate_non_flat_array(array, array_type);
  }
  return array;
}

// Speculate that the array has the exact type reported in the profile data. We emit a trap when this turns out to be
// wrong. On the fast path, we add a CheckCastPP to use the exact type.
Node* Parse::cast_to_speculative_array_type(Node* const array, const TypeAryPtr*& array_type, const Type*& element_type) {
  Deoptimization::DeoptReason reason = Deoptimization::Reason_speculate_class_check;
  ciKlass* speculative_array_type = array_type->speculative_type();
  if (too_many_traps_or_recompiles(reason) || speculative_array_type == nullptr) {
    // No speculative type, check profile data at this bci
    speculative_array_type = nullptr;
    reason = Deoptimization::Reason_class_check;
    if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(reason)) {
      ciKlass* profiled_element_type = nullptr;
      ProfilePtrKind element_ptr = ProfileMaybeNull;
      bool flat_array = true;
      bool null_free_array = true;
      method()->array_access_profiled_type(bci(), speculative_array_type, profiled_element_type, element_ptr, flat_array,
                                           null_free_array);
    }
  }
  if (speculative_array_type != nullptr) {
    // Speculate that this array has the exact type reported by profile data
    Node* casted_array = nullptr;
    DEBUG_ONLY(Node* old_control = control();)
    Node* slow_ctl = type_check_receiver(array, speculative_array_type, 1.0, &casted_array);
    if (stopped()) {
      // The check always fails and therefore profile information is incorrect. Don't use it.
      assert(old_control == slow_ctl, "type check should have been removed");
      set_control(slow_ctl);
    } else if (!slow_ctl->is_top()) {
      { PreserveJVMState pjvms(this);
        set_control(slow_ctl);
        uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
      }
      replace_in_map(array, casted_array);
      array_type = _gvn.type(casted_array)->is_aryptr();
      element_type = array_type->elem();
      return casted_array;
    }
  }
  return array;
}

// Create a CheckCastPP when the speculative type can improve the current type.
Node* Parse::cast_to_profiled_array_type(Node* const array) {
  ciKlass* array_type = nullptr;
  ciKlass* element_type = nullptr;
  ProfilePtrKind element_ptr = ProfileMaybeNull;
  bool flat_array = true;
  bool null_free_array = true;
  method()->array_access_profiled_type(bci(), array_type, element_type, element_ptr, flat_array, null_free_array);
  if (array_type != nullptr) {
    return record_profile_for_speculation(array, array_type, ProfileMaybeNull);
  }
  return array;
}

// Speculate that the array is non-null-free. We emit a trap when this turns out to be
// wrong. On the fast path, we add a CheckCastPP to use the non-null-free type.
Node* Parse::speculate_non_null_free_array(Node* const array, const TypeAryPtr*& array_type) {
  bool null_free_array = true;
  Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
  if (array_type->speculative() != nullptr &&
      array_type->speculative()->is_aryptr()->is_not_null_free() &&
      !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
    null_free_array = false;
    reason = Deoptimization::Reason_speculate_class_check;
  } else if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
    ciKlass* profiled_array_type = nullptr;
    ciKlass* profiled_element_type = nullptr;
    ProfilePtrKind element_ptr = ProfileMaybeNull;
    bool flat_array = true;
    method()->array_access_profiled_type(bci(), profiled_array_type, profiled_element_type, element_ptr, flat_array,
                                         null_free_array);
    reason = Deoptimization::Reason_class_check;
  }
  if (!null_free_array) {
    { // Deoptimize if null-free array
      BuildCutout unless(this, null_free_array_test(array, /* null_free = */ false), PROB_MAX);
      uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
    }
    assert(!stopped(), "null-free array should have been caught earlier");
    Node* casted_array = _gvn.transform(new CheckCastPPNode(control(), array, array_type->cast_to_not_null_free()));
    replace_in_map(array, casted_array);
    array_type = _gvn.type(casted_array)->is_aryptr();
    return casted_array;
  }
  return array;
}

// Speculate that the array is non-flat. We emit a trap when this turns out to be wrong.
// On the fast path, we add a CheckCastPP to use the non-flat type.
Node* Parse::speculate_non_flat_array(Node* const array, const TypeAryPtr* const array_type) {
  bool flat_array = true;
  Deoptimization::DeoptReason reason = Deoptimization::Reason_none;
  if (array_type->speculative() != nullptr &&
      array_type->speculative()->is_aryptr()->is_not_flat() &&
      !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
    flat_array = false;
    reason = Deoptimization::Reason_speculate_class_check;
  } else if (UseArrayLoadStoreProfile && !too_many_traps_or_recompiles(reason)) {
    ciKlass* profiled_array_type = nullptr;
    ciKlass* profiled_element_type = nullptr;
    ProfilePtrKind element_ptr = ProfileMaybeNull;
    bool null_free_array = true;
    method()->array_access_profiled_type(bci(), profiled_array_type, profiled_element_type, element_ptr, flat_array,
                                         null_free_array);
    reason = Deoptimization::Reason_class_check;
  }
  if (!flat_array) {
    { // Deoptimize if flat array
      BuildCutout unless(this, flat_array_test(array, /* flat = */ false), PROB_MAX);
      uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
    }
    assert(!stopped(), "flat array should have been caught earlier");
    Node* casted_array = _gvn.transform(new CheckCastPPNode(control(), array, array_type->cast_to_not_flat()));
    replace_in_map(array, casted_array);
    return casted_array;
  }
  return array;
}

// returns IfNode
IfNode* Parse::jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask, float prob, float cnt) {
  Node   *cmp = _gvn.transform(new CmpINode(a, b)); // two cases: shiftcount > 32 and shiftcount <= 32
  Node   *tst = _gvn.transform(new BoolNode(cmp, mask));
  IfNode *iff = create_and_map_if(control(), tst, prob, cnt);
  return iff;
}


// sentinel value for the target bci to mark never taken branches
// (according to profiling)
static const int never_reached = INT_MAX;

//------------------------------helper for tableswitch-------------------------
void Parse::jump_if_true_fork(IfNode *iff, int dest_bci_if_true, bool unc) {
  // True branch, use existing map info
  { PreserveJVMState pjvms(this);
    Node *iftrue  = _gvn.transform( new IfTrueNode (iff) );
    set_control( iftrue );
    if (unc) {
      repush_if_args();
      uncommon_trap(Deoptimization::Reason_unstable_if,
                    Deoptimization::Action_reinterpret,
                    nullptr,
                    "taken always");
    } else {
      assert(dest_bci_if_true != never_reached, "inconsistent dest");
      merge_new_path(dest_bci_if_true);
    }
  }

  // False branch
  Node *iffalse = _gvn.transform( new IfFalseNode(iff) );
  set_control( iffalse );
}

void Parse::jump_if_false_fork(IfNode *iff, int dest_bci_if_true, bool unc) {
  // True branch, use existing map info
  { PreserveJVMState pjvms(this);
    Node *iffalse  = _gvn.transform( new IfFalseNode (iff) );
    set_control( iffalse );
    if (unc) {
      repush_if_args();
      uncommon_trap(Deoptimization::Reason_unstable_if,
                    Deoptimization::Action_reinterpret,
                    nullptr,
                    "taken never");
    } else {
      assert(dest_bci_if_true != never_reached, "inconsistent dest");
      merge_new_path(dest_bci_if_true);
    }
  }

  // False branch
  Node *iftrue = _gvn.transform( new IfTrueNode(iff) );
  set_control( iftrue );
}

void Parse::jump_if_always_fork(int dest_bci, bool unc) {
  // False branch, use existing map and control()
  if (unc) {
    repush_if_args();
    uncommon_trap(Deoptimization::Reason_unstable_if,
                  Deoptimization::Action_reinterpret,
                  nullptr,
                  "taken never");
  } else {
    assert(dest_bci != never_reached, "inconsistent dest");
    merge_new_path(dest_bci);
  }
}

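// Comparator for the qsort call in do_lookupswitch below: it compares only the
// leading jint of each record, so the (match, dest, cnt) triples built there
// are sorted by match value while dest and cnt ride along unchanged.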
extern "C" {
  static int jint_cmp(const void *i, const void *j) {
    int a = *(jint *)i;
    int b = *(jint *)j;
    return a > b ? 1 : a < b ? -1 : 0;
  }
}


class SwitchRange : public StackObj {
  // a range of integers coupled with a bci destination
  jint _lo;                     // inclusive lower limit
  jint _hi;                     // inclusive upper limit
  int _dest;
  float _cnt;                   // how many times this range was hit according to profiling

public:
  jint lo() const              { return _lo;   }
  jint hi() const              { return _hi;   }
  int  dest() const            { return _dest; }
  bool is_singleton() const    { return _lo == _hi; }
  float cnt() const            { return _cnt; }

  void setRange(jint lo, jint hi, int dest, float cnt) {
    assert(lo <= hi, "must be a non-empty range");
    _lo = lo; _hi = hi; _dest = dest; _cnt = cnt;
    assert(_cnt >= 0, "");
  }
  bool adjoinRange(jint lo, jint hi, int dest, float cnt, bool trim_ranges) {
    assert(lo <= hi, "must be a non-empty range");
    if (lo == _hi+1) {
      // see merge_ranges() comment below
      if (trim_ranges) {
        if (cnt == 0) {
          if (_cnt != 0) {
            return false;
          }
          if (dest != _dest) {
            _dest = never_reached;
          }
        } else {
          if (_cnt == 0) {
            return false;
          }
          if (dest != _dest) {
            return false;
          }
        }
      } else {
        if (dest != _dest) {
          return false;
        }
      }
      _hi = hi;
      _cnt += cnt;
      return true;
    }
    return false;
  }

  void set (jint value, int dest, float cnt) {
    setRange(value, value, dest, cnt);
  }
  bool adjoin(jint value, int dest, float cnt, bool trim_ranges) {
    return adjoinRange(value, value, dest, cnt, trim_ranges);
  }
  bool adjoin(SwitchRange& other) {
    return adjoinRange(other._lo, other._hi, other._dest, other._cnt, false);
  }

  void print() {
    if (is_singleton())
      tty->print(" {%d}=>%d (cnt=%f)", lo(), dest(), cnt());
    else if (lo() == min_jint)
      tty->print(" {..%d}=>%d (cnt=%f)", hi(), dest(), cnt());
    else if (hi() == max_jint)
      tty->print(" {%d..}=>%d (cnt=%f)", lo(), dest(), cnt());
    else
      tty->print(" {%d..%d}=>%d (cnt=%f)", lo(), hi(), dest(), cnt());
  }
};
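
// Note on adjoinRange() with trim_ranges: two never-taken ranges (cnt == 0) may
// merge even when their destinations differ (the merged destination becomes
// never_reached), whereas a taken and a never-taken range never merge. Without
// trim_ranges, adjoining ranges merge only if they share a destination.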

// We try to minimize the number of ranges and the size of the taken
// ones using profiling data. When ranges are created,
// SwitchRange::adjoinRange() only allows 2 adjoining ranges to merge
// if both were never hit or both were hit to build longer unreached
// ranges. Here, we now merge adjoining ranges with the same
// destination and finally set destination of unreached ranges to the
// special value never_reached because it can help minimize the number
// of tests that are necessary.
//
// For instance:
// [0, 1] to target1 sometimes taken
// [1, 2] to target1 never taken
// [2, 3] to target2 never taken
// would lead to:
// [0, 1] to target1 sometimes taken
// [1, 3] never taken
//
// (first 2 ranges to target1 are not merged)
static void merge_ranges(SwitchRange* ranges, int& rp) {
  if (rp == 0) {
    return;
  }
  int shift = 0;
  for (int j = 0; j < rp; j++) {
    SwitchRange& r1 = ranges[j-shift];
    SwitchRange& r2 = ranges[j+1];
    if (r1.adjoin(r2)) {
      shift++;
    } else if (shift > 0) {
      ranges[j+1-shift] = r2;
    }
  }
  rp -= shift;
  for (int j = 0; j <= rp; j++) {
    SwitchRange& r = ranges[j];
    if (r.cnt() == 0 && r.dest() != never_reached) {
      r.setRange(r.lo(), r.hi(), never_reached, r.cnt());
    }
  }
}

//-------------------------------do_tableswitch--------------------------------
void Parse::do_tableswitch() {
  // Get information about tableswitch
  int default_dest = iter().get_dest_table(0);
  jint lo_index    = iter().get_int_table(1);
  jint hi_index    = iter().get_int_table(2);
  int len          = hi_index - lo_index + 1;

  if (len < 1) {
    // If this is a backward branch, add safepoint
    maybe_add_safepoint(default_dest);
    pop(); // the effect of the instruction execution on the operand stack
    merge(default_dest);
    return;
  }

  ciMethodData* methodData = method()->method_data();
  ciMultiBranchData* profile = nullptr;
  if (methodData->is_mature() && UseSwitchProfiling) {
    ciProfileData* data = methodData->bci_to_data(bci());
    if (data != nullptr && data->is_MultiBranchData()) {
      profile = (ciMultiBranchData*)data;
    }
  }
  bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);

  // generate decision tree, using trichotomy when possible
  int rnum = len+2;
  bool makes_backward_branch = (default_dest <= bci());
  SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
  int rp = -1;
  if (lo_index != min_jint) {
    float cnt = 1.0F;
    if (profile != nullptr) {
      cnt = (float)profile->default_count() / (hi_index != max_jint ? 2.0F : 1.0F);
    }
    ranges[++rp].setRange(min_jint, lo_index-1, default_dest, cnt);
  }
  for (int j = 0; j < len; j++) {
    jint match_int = lo_index+j;
    int  dest      = iter().get_dest_table(j+3);
    makes_backward_branch |= (dest <= bci());
    float cnt = 1.0F;
    if (profile != nullptr) {
      cnt = (float)profile->count_at(j);
    }
    if (rp < 0 || !ranges[rp].adjoin(match_int, dest, cnt, trim_ranges)) {
      ranges[++rp].set(match_int, dest, cnt);
    }
  }
  jint highest = lo_index+(len-1);
  assert(ranges[rp].hi() == highest, "");
  if (highest != max_jint) {
    float cnt = 1.0F;
    if (profile != nullptr) {
      cnt = (float)profile->default_count() / (lo_index != min_jint ? 2.0F : 1.0F);
    }
    if (!ranges[rp].adjoinRange(highest+1, max_jint, default_dest, cnt, trim_ranges)) {
      ranges[++rp].setRange(highest+1, max_jint, default_dest, cnt);
    }
  }
  assert(rp < len+2, "not too many ranges");

  if (trim_ranges) {
    merge_ranges(ranges, rp);
  }
  // Safepoint in case backward branch observed
  if (makes_backward_branch) {
    add_safepoint();
  }

  Node* lookup = pop(); // lookup value
  jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
}


//------------------------------do_lookupswitch--------------------------------
void Parse::do_lookupswitch() {
  // Get information about lookupswitch
  int default_dest = iter().get_dest_table(0);
  jint len          = iter().get_int_table(1);

  if (len < 1) {    // If this is a backward branch, add safepoint
    maybe_add_safepoint(default_dest);
    pop(); // the effect of the instruction execution on the operand stack
    merge(default_dest);
    return;
  }

  ciMethodData* methodData = method()->method_data();
  ciMultiBranchData* profile = nullptr;
  if (methodData->is_mature() && UseSwitchProfiling) {
    ciProfileData* data = methodData->bci_to_data(bci());
    if (data != nullptr && data->is_MultiBranchData()) {
      profile = (ciMultiBranchData*)data;
    }
  }
  bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);

  // generate decision tree, using trichotomy when possible
  jint* table = NEW_RESOURCE_ARRAY(jint, len*3);
  {
    for (int j = 0; j < len; j++) {
      table[3*j+0] = iter().get_int_table(2+2*j);
      table[3*j+1] = iter().get_dest_table(2+2*j+1);
      // Handle overflow when converting from uint to jint
      table[3*j+2] = (profile == nullptr) ? 1 : (jint)MIN2<uint>((uint)max_jint, profile->count_at(j));
    }
    qsort(table, len, 3*sizeof(table[0]), jint_cmp);
  }

  float default_cnt = 1.0F;
  if (profile != nullptr) {
    juint defaults = max_juint - len;
    default_cnt = (float)profile->default_count()/(float)defaults;
  }

  int rnum = len*2+1;
  bool makes_backward_branch = (default_dest <= bci());
  SwitchRange* ranges = NEW_RESOURCE_ARRAY(SwitchRange, rnum);
  int rp = -1;
  for (int j = 0; j < len; j++) {
    jint match_int   = table[3*j+0];
    jint  dest        = table[3*j+1];
    jint  cnt         = table[3*j+2];
    jint  next_lo     = rp < 0 ? min_jint : ranges[rp].hi()+1;
    makes_backward_branch |= (dest <= bci());
    float c = default_cnt * ((float)match_int - (float)next_lo);
    if (match_int != next_lo && (rp < 0 || !ranges[rp].adjoinRange(next_lo, match_int-1, default_dest, c, trim_ranges))) {
      assert(default_dest != never_reached, "sentinel value for dead destinations");
      ranges[++rp].setRange(next_lo, match_int-1, default_dest, c);
    }
    if (rp < 0 || !ranges[rp].adjoin(match_int, dest, (float)cnt, trim_ranges)) {
      assert(dest != never_reached, "sentinel value for dead destinations");
      ranges[++rp].set(match_int, dest,  (float)cnt);
    }
  }
  jint highest = table[3*(len-1)];
  assert(ranges[rp].hi() == highest, "");
  if (highest != max_jint &&
      !ranges[rp].adjoinRange(highest+1, max_jint, default_dest, default_cnt * ((float)max_jint - (float)highest), trim_ranges)) {
    ranges[++rp].setRange(highest+1, max_jint, default_dest, default_cnt * ((float)max_jint - (float)highest));
  }
  assert(rp < rnum, "not too many ranges");

  if (trim_ranges) {
    merge_ranges(ranges, rp);
  }

  // Safepoint in case backward branch observed
  if (makes_backward_branch) {
    add_safepoint();
  }

  Node *lookup = pop(); // lookup value
  jump_switch_ranges(lookup, &ranges[0], &ranges[rp]);
}

static float if_prob(float taken_cnt, float total_cnt) {
  assert(taken_cnt <= total_cnt, "");
  if (total_cnt == 0) {
    return PROB_FAIR;
  }
  float p = taken_cnt / total_cnt;
  return clamp(p, PROB_MIN, PROB_MAX);
}

static float if_cnt(float cnt) {
  if (cnt == 0) {
    return COUNT_UNKNOWN;
  }
  return cnt;
}

static float sum_of_cnts(SwitchRange *lo, SwitchRange *hi) {
  float total_cnt = 0;
  for (SwitchRange* sr = lo; sr <= hi; sr++) {
    total_cnt += sr->cnt();
  }
  return total_cnt;
}

class SwitchRanges : public ResourceObj {
public:
  SwitchRange* _lo;
  SwitchRange* _hi;
  SwitchRange* _mid;
  float _cost;

  enum {
    Start,
    LeftDone,
    RightDone,
    Done
  } _state;

  SwitchRanges(SwitchRange *lo, SwitchRange *hi)
    : _lo(lo), _hi(hi), _mid(nullptr),
      _cost(0), _state(Start) {
  }

  SwitchRanges()
    : _lo(nullptr), _hi(nullptr), _mid(nullptr),
      _cost(0), _state(Start) {}
};

// Estimate cost of performing a binary search on lo..hi
static float compute_tree_cost(SwitchRange *lo, SwitchRange *hi, float total_cnt) {
  GrowableArray<SwitchRanges> tree;
  SwitchRanges root(lo, hi);
  tree.push(root);

  float cost = 0;
  do {
    SwitchRanges& r = *tree.adr_at(tree.length()-1);
    if (r._hi != r._lo) {
      if (r._mid == nullptr) {
        float r_cnt = sum_of_cnts(r._lo, r._hi);

        if (r_cnt == 0) {
          tree.pop();
          cost = 0;
          continue;
        }

        SwitchRange* mid = nullptr;
        mid = r._lo;
        for (float cnt = 0; ; ) {
          assert(mid <= r._hi, "out of bounds");
          cnt += mid->cnt();
          if (cnt > r_cnt / 2) {
            break;
          }
          mid++;
        }
        assert(mid <= r._hi, "out of bounds");
        r._mid = mid;
        r._cost = r_cnt / total_cnt;
      }
      r._cost += cost;
      if (r._state < SwitchRanges::LeftDone && r._mid > r._lo) {
        cost = 0;
        r._state = SwitchRanges::LeftDone;
        tree.push(SwitchRanges(r._lo, r._mid-1));
      } else if (r._state < SwitchRanges::RightDone) {
        cost = 0;
        r._state = SwitchRanges::RightDone;
        tree.push(SwitchRanges(r._mid == r._lo ? r._mid+1 : r._mid, r._hi));
      } else {
        tree.pop();
        cost = r._cost;
      }
    } else {
      tree.pop();
      cost = r._cost;
    }
  } while (tree.length() > 0);


  return cost;
}
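
// Rough intuition for the cost model above: every tree node contributes the
// probability of reaching it (r_cnt / total_cnt), and the split point is the
// first range whose cumulative count exceeds half of r_cnt. So a heavily
// skewed profile, e.g. range counts {8, 1, 1}, splits at the hot range and
// keeps the weighted cost near 1 test, while a uniform profile over n ranges
// costs about log2(n) tests.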

// It sometimes pays off to test most common ranges before the binary search
void Parse::linear_search_switch_ranges(Node* key_val, SwitchRange*& lo, SwitchRange*& hi) {
  uint nr = hi - lo + 1;
  float total_cnt = sum_of_cnts(lo, hi);

  float min = compute_tree_cost(lo, hi, total_cnt);
  float extra = 1;
  float sub = 0;

  SwitchRange* array1 = lo;
  SwitchRange* array2 = NEW_RESOURCE_ARRAY(SwitchRange, nr);

  SwitchRange* ranges = nullptr;

  while (nr >= 2) {
    assert(lo == array1 || lo == array2, "one of the 2 already allocated arrays");
    ranges = (lo == array1) ? array2 : array1;

    // Find highest frequency range
    SwitchRange* candidate = lo;
    for (SwitchRange* sr = lo+1; sr <= hi; sr++) {
      if (sr->cnt() > candidate->cnt()) {
        candidate = sr;
      }
    }
    SwitchRange most_freq = *candidate;
    if (most_freq.cnt() == 0) {
      break;
    }

    // Copy remaining ranges into another array
    int shift = 0;
    for (uint i = 0; i < nr; i++) {
      SwitchRange* sr = &lo[i];
      if (sr != candidate) {
        ranges[i-shift] = *sr;
      } else {
        shift++;
        if (i > 0 && i < nr-1) {
          SwitchRange prev = lo[i-1];
          prev.setRange(prev.lo(), sr->hi(), prev.dest(), prev.cnt());
          if (prev.adjoin(lo[i+1])) {
            shift++;
            i++;
          }
          ranges[i-shift] = prev;
        }
      }
    }
    nr -= shift;

    // Evaluate cost of testing the most common range and performing a
    // binary search on the other ranges
    float cost = extra + compute_tree_cost(&ranges[0], &ranges[nr-1], total_cnt);
    if (cost >= min) {
      break;
    }
    // swap arrays
    lo = &ranges[0];
    hi = &ranges[nr-1];

    // It pays off: emit the test for the most common range
    assert(most_freq.cnt() > 0, "must be taken");
    Node* val = _gvn.transform(new SubINode(key_val, _gvn.intcon(most_freq.lo())));
    Node* cmp = _gvn.transform(new CmpUNode(val, _gvn.intcon(java_subtract(most_freq.hi(), most_freq.lo()))));
    Node* tst = _gvn.transform(new BoolNode(cmp, BoolTest::le));
    IfNode* iff = create_and_map_if(control(), tst, if_prob(most_freq.cnt(), total_cnt), if_cnt(most_freq.cnt()));
    jump_if_true_fork(iff, most_freq.dest(), false);

    sub += most_freq.cnt() / total_cnt;
    extra += 1 - sub;
    min = cost;
  }
}
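
// The test emitted above uses the same unsigned-compare trick as
// create_range_check(): (juint)(key_val - lo) <= (juint)(hi - lo) holds exactly
// when lo <= key_val <= hi, so a single SubI/CmpU/BoolTest::le sequence tests
// membership in the most frequent range.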

//----------------------------create_jump_tables-------------------------------
bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi) {
  // Are jumptables enabled
  if (!UseJumpTables)  return false;

  // Are jumptables supported
  if (!Matcher::has_match_rule(Op_Jump))  return false;

  bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);

  // Decide if a guard is needed to lop off big ranges at either (or
  // both) end(s) of the input set. We'll call this the default target
  // even though we can't be sure that it is the true "default".

  bool needs_guard = false;
  int default_dest;
  int64_t total_outlier_size = 0;
  int64_t hi_size = ((int64_t)hi->hi()) - ((int64_t)hi->lo()) + 1;
  int64_t lo_size = ((int64_t)lo->hi()) - ((int64_t)lo->lo()) + 1;

  if (lo->dest() == hi->dest()) {
    total_outlier_size = hi_size + lo_size;
    default_dest = lo->dest();
  } else if (lo_size > hi_size) {
    total_outlier_size = lo_size;
    default_dest = lo->dest();
  } else {
    total_outlier_size = hi_size;
    default_dest = hi->dest();
  }

  float total = sum_of_cnts(lo, hi);
  float cost = compute_tree_cost(lo, hi, total);

  // If a guard test will eliminate very sparse end ranges, then
  // it is worth the cost of an extra jump.
  float trimmed_cnt = 0;
  if (total_outlier_size > (MaxJumpTableSparseness * 4)) {
    needs_guard = true;
    if (default_dest == lo->dest()) {
      trimmed_cnt += lo->cnt();
      lo++;
    }
    if (default_dest == hi->dest()) {
      trimmed_cnt += hi->cnt();
      hi--;
    }
  }

  // Find the total number of cases and ranges
  int64_t num_cases = ((int64_t)hi->hi()) - ((int64_t)lo->lo()) + 1;
  int num_range = hi - lo + 1;

  // Don't create table if: too large, too small, or too sparse.
  if (num_cases > MaxJumpTableSize)
    return false;
  if (UseSwitchProfiling) {
    // MinJumpTableSize is set so with a well balanced binary tree,
    // when the number of ranges is MinJumpTableSize, it's cheaper to
    // go through a JumpNode than a tree of IfNodes. Average cost of a
    // tree of IfNodes with MinJumpTableSize is
    // log2f(MinJumpTableSize) comparisons. So if the cost computed
    // from profile data is less than log2f(MinJumpTableSize) then
    // going with the binary search is cheaper.
    if (cost < log2f(MinJumpTableSize)) {
      return false;
    }
  } else {
    if (num_cases < MinJumpTableSize)
      return false;
  }
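
  // For illustration (hypothetical flag value): with, say, MinJumpTableSize == 16,
  // a well balanced IfNode tree over 16 ranges costs log2f(16) = 4 comparisons,
  // so a profiled cost below 4 favors the binary search while a larger cost
  // favors the jump table.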
1215   if (num_cases > (MaxJumpTableSparseness * num_range))
1216     return false;
1217 
1218   // Normalize table lookups to zero
1219   int lowval = lo->lo();
1220   key_val = _gvn.transform( new SubINode(key_val, _gvn.intcon(lowval)) );
1221 
1222   // Generate a guard to protect against input keyvals that aren't
1223   // in the switch domain.
1224   if (needs_guard) {
1225     Node*   size = _gvn.intcon(num_cases);
1226     Node*   cmp = _gvn.transform(new CmpUNode(key_val, size));
1227     Node*   tst = _gvn.transform(new BoolNode(cmp, BoolTest::ge));
1228     IfNode* iff = create_and_map_if(control(), tst, if_prob(trimmed_cnt, total), if_cnt(trimmed_cnt));
1229     jump_if_true_fork(iff, default_dest, trim_ranges && trimmed_cnt == 0);
1230 
1231     total -= trimmed_cnt;
1232   }
1233 
1234   // Create an ideal node JumpTable that has projections
1235   // of all possible ranges for a switch statement
1236   // The key_val input must be converted to a pointer offset and scaled.
1237   // Compare Parse::array_addressing above.
1238 
1239   // Clean the 32-bit int into a real 64-bit offset.
1240   // Otherwise, the jint value 0 might turn into an offset of 0x0800000000.
1241   // Make I2L conversion control dependent to prevent it from
1242   // floating above the range check during loop optimizations.
1243   // Do not use a narrow int type here to prevent the data path from dying
1244   // while the control path is not removed. This can happen if the type of key_val
1245   // is later known to be out of bounds of [0, num_cases] and therefore a narrow cast
1246   // would be replaced by TOP while C2 is not able to fold the corresponding range checks.
1247   // Set _carry_dependency for the cast to avoid being removed by IGVN.
1248 #ifdef _LP64
1249   key_val = C->constrained_convI2L(&_gvn, key_val, TypeInt::INT, control(), true /* carry_dependency */);
1250 #endif
1251 
1252   // Shift the value by wordsize so we have an index into the table, rather
1253   // than a switch value
1254   Node *shiftWord = _gvn.MakeConX(wordSize);
1255   key_val = _gvn.transform( new MulXNode( key_val, shiftWord));
1256 
1257   // Create the JumpNode
1258   Arena* arena = C->comp_arena();
1259   float* probs = (float*)arena->Amalloc(sizeof(float)*num_cases);
1260   int i = 0;
1261   if (total == 0) {
1262     for (SwitchRange* r = lo; r <= hi; r++) {
1263       for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
1264         probs[i] = 1.0F / num_cases;
1265       }
1266     }
1267   } else {
1268     for (SwitchRange* r = lo; r <= hi; r++) {
1269       float prob = r->cnt()/total;
1270       for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
1271         probs[i] = prob / (r->hi() - r->lo() + 1);
1272       }
1273     }
1274   }
1275 
1276   ciMethodData* methodData = method()->method_data();
1277   ciMultiBranchData* profile = nullptr;
1278   if (methodData->is_mature()) {
1279     ciProfileData* data = methodData->bci_to_data(bci());
1280     if (data != nullptr && data->is_MultiBranchData()) {
1281       profile = (ciMultiBranchData*)data;
1282     }
1283   }
1284 
1285   Node* jtn = _gvn.transform(new JumpNode(control(), key_val, num_cases, probs, profile == nullptr ? COUNT_UNKNOWN : total));
1286 
1287   // These are the switch destinations hanging off the jumpnode
1288   i = 0;
1289   for (SwitchRange* r = lo; r <= hi; r++) {
1290     for (int64_t j = r->lo(); j <= r->hi(); j++, i++) {
1291       Node* input = _gvn.transform(new JumpProjNode(jtn, i, r->dest(), (int)(j - lowval)));
1292       {
1293         PreserveJVMState pjvms(this);
1294         set_control(input);
1295         jump_if_always_fork(r->dest(), trim_ranges && r->cnt() == 0);
1296       }
1297     }
1298   }
1299   assert(i == num_cases, "miscount of cases");
1300   stop_and_kill_map();  // no more uses for this JVMS
1301   return true;
1302 }
1303 
1304 //----------------------------jump_switch_ranges-------------------------------
1305 void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi, int switch_depth) {
1306   Block* switch_block = block();
1307   bool trim_ranges = !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);
1308 
1309   if (switch_depth == 0) {
1310     // Do special processing for the top-level call.
1311     assert(lo->lo() == min_jint, "initial range must exhaust Type::INT");
1312     assert(hi->hi() == max_jint, "initial range must exhaust Type::INT");
1313 
1314     // Decrement pred-numbers for the unique set of nodes.
1315 #ifdef ASSERT
1316     if (!trim_ranges) {
1317       // Ensure that the block's successors are a (duplicate-free) set.
1318       int successors_counted = 0;  // block occurrences in [lo..hi]
1319       int unique_successors = switch_block->num_successors();
1320       for (int i = 0; i < unique_successors; i++) {
1321         Block* target = switch_block->successor_at(i);
1322 
1323         // Check that the set of successors is the same in both places.
1324         int successors_found = 0;
1325         for (SwitchRange* p = lo; p <= hi; p++) {
1326           if (p->dest() == target->start())  successors_found++;
1327         }
1328         assert(successors_found > 0, "successor must be known");
1329         successors_counted += successors_found;
1330       }
1331       assert(successors_counted == (hi-lo)+1, "no unexpected successors");
1332     }
1333 #endif
1334 
1335     // Maybe prune the inputs, based on the type of key_val.
1336     jint min_val = min_jint;
1337     jint max_val = max_jint;
1338     const TypeInt* ti = key_val->bottom_type()->isa_int();
1339     if (ti != nullptr) {
1340       min_val = ti->_lo;
1341       max_val = ti->_hi;
1342       assert(min_val <= max_val, "invalid int type");
1343     }
1344     while (lo->hi() < min_val) {
1345       lo++;
1346     }
1347     if (lo->lo() < min_val)  {
1348       lo->setRange(min_val, lo->hi(), lo->dest(), lo->cnt());
1349     }
1350     while (hi->lo() > max_val) {
1351       hi--;
1352     }
1353     if (hi->hi() > max_val) {
1354       hi->setRange(hi->lo(), max_val, hi->dest(), hi->cnt());
1355     }
1356 
1357     linear_search_switch_ranges(key_val, lo, hi);
1358   }
1359 
1360 #ifndef PRODUCT
1361   if (switch_depth == 0) {
1362     _max_switch_depth = 0;
1363     _est_switch_depth = log2i_graceful((hi - lo + 1) - 1) + 1;
1364   }
1365 #endif
1366 
1367   assert(lo <= hi, "must be a non-empty set of ranges");
1368   if (lo == hi) {
1369     jump_if_always_fork(lo->dest(), trim_ranges && lo->cnt() == 0);
1370   } else {
1371     assert(lo->hi() == (lo+1)->lo()-1, "contiguous ranges");
1372     assert(hi->lo() == (hi-1)->hi()+1, "contiguous ranges");
1373 
1374     if (create_jump_tables(key_val, lo, hi)) return;
1375 
1376     SwitchRange* mid = nullptr;
1377     float total_cnt = sum_of_cnts(lo, hi);
1378 
1379     int nr = hi - lo + 1;
1380     if (UseSwitchProfiling) {
1381       // Don't keep the binary search tree balanced: pick the mid point
1382       // that splits the frequencies in half.
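           // For example (illustrative): with per-range counts {90, 5, 5},
           // the running count reaches total_cnt / 2 = 50 inside the first
           // range, so the hot range is split off first rather than the
           // structural middle.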
1383       float cnt = 0;
1384       for (SwitchRange* sr = lo; sr <= hi; sr++) {
1385         cnt += sr->cnt();
1386         if (cnt >= total_cnt / 2) {
1387           mid = sr;
1388           break;
1389         }
1390       }
1391     } else {
1392       mid = lo + nr/2;
1393 
1394       // if there is an easy choice, pivot at a singleton:
1395       if (nr > 3 && !mid->is_singleton() && (mid-1)->is_singleton())  mid--;
1396 
1397       assert(lo < mid && mid <= hi, "good pivot choice");
1398       assert(nr != 2 || mid == hi,   "should pick higher of 2");
1399       assert(nr != 3 || mid == hi-1, "should pick middle of 3");
1400     }
1401 
1402 
1403     Node *test_val = _gvn.intcon(mid == lo ? mid->hi() : mid->lo());
1404 
1405     if (mid->is_singleton()) {
1406       IfNode *iff_ne = jump_if_fork_int(key_val, test_val, BoolTest::ne, 1-if_prob(mid->cnt(), total_cnt), if_cnt(mid->cnt()));
1407       jump_if_false_fork(iff_ne, mid->dest(), trim_ranges && mid->cnt() == 0);
1408 
1409       // Special Case:  If there are exactly three ranges, and the high
1410       // and low range each go to the same place, omit the "gt" test,
1411       // since it will not discriminate anything.
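           // (Illustrative: for ranges {..x-1}->D1, {x}->D2, {x+1..}->D1, the
           // "!= x" outcome reaches D1 either way, so no lt/gt split is needed.)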
1412       bool eq_test_only = (hi == lo+2 && hi->dest() == lo->dest() && mid == hi-1) || mid == lo;
1413 
1414       // if there is a higher range, test for it and process it:
1415       if (mid < hi && !eq_test_only) {
1416         // two comparisons of same values--should enable 1 test for 2 branches
1417         // Use BoolTest::lt instead of BoolTest::gt
1418         float cnt = sum_of_cnts(lo, mid-1);
1419         IfNode *iff_lt  = jump_if_fork_int(key_val, test_val, BoolTest::lt, if_prob(cnt, total_cnt), if_cnt(cnt));
1420         Node   *iftrue  = _gvn.transform( new IfTrueNode(iff_lt) );
1421         Node   *iffalse = _gvn.transform( new IfFalseNode(iff_lt) );
1422         { PreserveJVMState pjvms(this);
1423           set_control(iffalse);
1424           jump_switch_ranges(key_val, mid+1, hi, switch_depth+1);
1425         }
1426         set_control(iftrue);
1427       }
1428 
1429     } else {
1430       // mid is a range, not a singleton, so treat mid..hi as a unit
1431       float cnt = sum_of_cnts(mid == lo ? mid+1 : mid, hi);
1432       IfNode *iff_ge = jump_if_fork_int(key_val, test_val, mid == lo ? BoolTest::gt : BoolTest::ge, if_prob(cnt, total_cnt), if_cnt(cnt));
1433 
1434       // if there is a higher range, test for it and process it:
1435       if (mid == hi) {
1436         jump_if_true_fork(iff_ge, mid->dest(), trim_ranges && cnt == 0);
1437       } else {
1438         Node *iftrue  = _gvn.transform( new IfTrueNode(iff_ge) );
1439         Node *iffalse = _gvn.transform( new IfFalseNode(iff_ge) );
1440         { PreserveJVMState pjvms(this);
1441           set_control(iftrue);
1442           jump_switch_ranges(key_val, mid == lo ? mid+1 : mid, hi, switch_depth+1);
1443         }
1444         set_control(iffalse);
1445       }
1446     }
1447 
1448     // in any case, process the lower range
1449     if (mid == lo) {
1450       if (mid->is_singleton()) {
1451         jump_switch_ranges(key_val, lo+1, hi, switch_depth+1);
1452       } else {
1453         jump_if_always_fork(lo->dest(), trim_ranges && lo->cnt() == 0);
1454       }
1455     } else {
1456       jump_switch_ranges(key_val, lo, mid-1, switch_depth+1);
1457     }
1458   }
1459 
1460   // Decrease pred_count for each successor after all is done.
1461   if (switch_depth == 0) {
1462     int unique_successors = switch_block->num_successors();
1463     for (int i = 0; i < unique_successors; i++) {
1464       Block* target = switch_block->successor_at(i);
1465       // Throw away the pre-allocated path for each unique successor.
1466       target->next_path_num();
1467     }
1468   }
1469 
1470 #ifndef PRODUCT
1471   _max_switch_depth = MAX2(switch_depth, _max_switch_depth);
1472   if (TraceOptoParse && Verbose && WizardMode && switch_depth == 0) {
1473     SwitchRange* r;
1474     int nsing = 0;
1475     for( r = lo; r <= hi; r++ ) {
1476       if( r->is_singleton() )  nsing++;
1477     }
1478     tty->print(">>> ");
1479     _method->print_short_name();
1480     tty->print_cr(" switch decision tree");
1481     tty->print_cr("    %d ranges (%d singletons), max_depth=%d, est_depth=%d",
1482                   (int) (hi-lo+1), nsing, _max_switch_depth, _est_switch_depth);
1483     if (_max_switch_depth > _est_switch_depth) {
1484       tty->print_cr("******** BAD SWITCH DEPTH ********");
1485     }
1486     tty->print("   ");
1487     for( r = lo; r <= hi; r++ ) {
1488       r->print();
1489     }
1490     tty->cr();
1491   }
1492 #endif
1493 }
1494 
1495 Node* Parse::floating_point_mod(Node* a, Node* b, BasicType type) {
1496   assert(type == BasicType::T_FLOAT || type == BasicType::T_DOUBLE, "only float and double are floating-point types");
1497   CallNode* mod = type == BasicType::T_DOUBLE ? static_cast<CallNode*>(new ModDNode(C, a, b)) : new ModFNode(C, a, b);
1498 
1499   Node* prev_mem = set_predefined_input_for_runtime_call(mod);
1500   mod = _gvn.transform(mod)->as_Call();
1501   set_predefined_output_for_runtime_call(mod, prev_mem, TypeRawPtr::BOTTOM);
1502   Node* result = _gvn.transform(new ProjNode(mod, TypeFunc::Parms + 0));
1503   record_for_igvn(mod);
1504   return result;
1505 }
1506 
1507 void Parse::l2f() {
1508   Node* f2 = pop();
1509   Node* f1 = pop();
1510   Node* c = make_runtime_call(RC_LEAF, OptoRuntime::l2f_Type(),
1511                               CAST_FROM_FN_PTR(address, SharedRuntime::l2f),
1512                               "l2f", nullptr, //no memory effects
1513                               f1, f2);
1514   Node* res = _gvn.transform(new ProjNode(c, TypeFunc::Parms + 0));
1515 
1516   push(res);
1517 }
1518 
1519 // Handle jsr and jsr_w bytecode
1520 void Parse::do_jsr() {
1521   assert(bc() == Bytecodes::_jsr || bc() == Bytecodes::_jsr_w, "wrong bytecode");
1522 
1523   // Store information about current state, tagged with new _jsr_bci
1524   int return_bci = iter().next_bci();
1525   int jsr_bci    = (bc() == Bytecodes::_jsr) ? iter().get_dest() : iter().get_far_dest();
1526 
1527   // The way we do things now, there is only one successor block
1528   // for the jsr, because the target code is cloned by ciTypeFlow.
1529   Block* target = successor_for_bci(jsr_bci);
1530 
1531   // What got pushed?
1532   const Type* ret_addr = target->peek();
1533   assert(ret_addr->singleton(), "must be a constant (cloned jsr body)");
1534 
1535   // Effect of jsr on the stack
1536   push(_gvn.makecon(ret_addr));
1537 
1538   // Flow to the jsr.
1539   merge(jsr_bci);
1540 }
1541 
1542 // Handle ret bytecode
1543 void Parse::do_ret() {
1544   // Find to whom we return.
1545   assert(block()->num_successors() == 1, "a ret can only go one place now");
1546   Block* target = block()->successor_at(0);
1547   assert(!target->is_ready(), "our arrival must be expected");
1548   int pnum = target->next_path_num();
1549   merge_common(target, pnum);
1550 }
1551 
1552 static bool has_injected_profile(BoolTest::mask btest, Node* test, int& taken, int& not_taken) {
1553   if (btest != BoolTest::eq && btest != BoolTest::ne) {
1554     // Only ::eq and ::ne are supported for profile injection.
1555     return false;
1556   }
1557   if (test->is_Cmp() &&
1558       test->in(1)->Opcode() == Op_ProfileBoolean) {
1559     ProfileBooleanNode* profile = (ProfileBooleanNode*)test->in(1);
1560     int false_cnt = profile->false_count();
1561     int  true_cnt = profile->true_count();
1562 
1563     // How the counts map to taken/not_taken depends on the actual test operation (::eq or ::ne).
1564     // No need to scale the counts because profile injection was designed
1565     // to feed exact counts into the VM.
1566     taken     = (btest == BoolTest::eq) ? false_cnt :  true_cnt;
1567     not_taken = (btest == BoolTest::eq) ?  true_cnt : false_cnt;
1568 
1569     profile->consume();
1570     return true;
1571   }
1572   return false;
1573 }
1574 
1575 // Give up if there are too few (or too many, in which case the sum would overflow) counts to be meaningful.
1576 // We also check that the individual counters are non-negative first, since a negative (saturated) counter could otherwise still yield a positive sum.
1577 // (check for saturation, integer overflow, and immature counts)
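     // For instance (illustrative values): counters_are_meaningful(INT_MAX, 100, 40)
     // is false because the 64-bit sum exceeds INT_MAX, while
     // counters_are_meaningful(30, 20, 40) is true: both are non-negative and 50 >= 40.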
1578 static bool counters_are_meaningful(int counter1, int counter2, int min) {
1579   // check for saturation, including "uint" values too big to fit in "int"
1580   if (counter1 < 0 || counter2 < 0) {
1581     return false;
1582   }
1583   // check for integer overflow of the sum
1584   int64_t sum = (int64_t)counter1 + (int64_t)counter2;
1585   STATIC_ASSERT(sizeof(counter1) < sizeof(sum));
1586   if (sum > INT_MAX) {
1587     return false;
1588   }
1589   // check if mature
1590   return (counter1 + counter2) >= min;
1591 }
1592 
1593 //--------------------------dynamic_branch_prediction--------------------------
1594 // Try to gather dynamic branch prediction behavior.  Returns a probability
1595 // of the branch being taken and sets the "cnt" field.  Returns -1.0
1596 // (PROB_UNKNOWN) if we need to use static prediction for some reason.
1597 float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test) {
1598   ResourceMark rm;
1599 
1600   cnt  = COUNT_UNKNOWN;
1601 
1602   int     taken = 0;
1603   int not_taken = 0;
1604 
1605   bool use_mdo = !has_injected_profile(btest, test, taken, not_taken);
1606 
1607   if (use_mdo) {
1608     // Use MethodData information if it is available
1609     // FIXME: free the ProfileData structure
1610     ciMethodData* methodData = method()->method_data();
1611     if (!methodData->is_mature())  return PROB_UNKNOWN;
1612     ciProfileData* data = methodData->bci_to_data(bci());
1613     if (data == nullptr) {
1614       return PROB_UNKNOWN;
1615     }
1616     if (!data->is_JumpData())  return PROB_UNKNOWN;
1617 
1618     // get taken and not taken values
1619     // NOTE: saturated UINT_MAX values become negative,
1620     // as do counts above INT_MAX.
1621     taken = data->as_JumpData()->taken();
1622     not_taken = 0;
1623     if (data->is_BranchData()) {
1624       not_taken = data->as_BranchData()->not_taken();
1625     }
1626 
1627     // scale the counts to be commensurate with invocation counts:
1628     // NOTE: overflow for positive values is clamped at INT_MAX
1629     taken = method()->scale_count(taken);
1630     not_taken = method()->scale_count(not_taken);
1631   }
1632   // At this point, saturation or overflow is indicated by INT_MAX
1633   // or a negative value.
1634 
1635   // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful.
1636   // We also check that individual counters are positive first, otherwise the sum can become positive.
1637   if (!counters_are_meaningful(taken, not_taken, 40)) {
1638     if (C->log() != nullptr) {
1639       C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
1640     }
1641     return PROB_UNKNOWN;
1642   }
1643 
1644   // Compute frequency that we arrive here
1645   float sum = taken + not_taken;
1646   // Adjust, if this block is a cloned private block but the
1647   // Jump counts are shared.  Take the private counts for
1648   // just this path instead of the shared counts.
1649   if( block()->count() > 0 )
1650     sum = block()->count();
1651   cnt = sum / FreqCountInvocations;
1652 
1653   // Pin probability to sane limits
1654   float prob;
1655   if( !taken )
1656     prob = (0+PROB_MIN) / 2;
1657   else if( !not_taken )
1658     prob = (1+PROB_MAX) / 2;
1659   else {                         // Compute probability of true path
1660     prob = (float)taken / (float)(taken + not_taken);
1661     if (prob > PROB_MAX)  prob = PROB_MAX;
1662     if (prob < PROB_MIN)   prob = PROB_MIN;
1663   }
1664 
1665   assert((cnt > 0.0f) && (prob > 0.0f),
1666          "Bad frequency assignment in if cnt=%g prob=%g taken=%d not_taken=%d", cnt, prob, taken, not_taken);
1667 
1668   if (C->log() != nullptr) {
1669     const char* prob_str = nullptr;
1670     if (prob >= PROB_MAX)  prob_str = (prob == PROB_MAX) ? "max" : "always";
1671     if (prob <= PROB_MIN)  prob_str = (prob == PROB_MIN) ? "min" : "never";
1672     char prob_str_buf[30];
1673     if (prob_str == nullptr) {
1674       jio_snprintf(prob_str_buf, sizeof(prob_str_buf), "%20.2f", prob);
1675       prob_str = prob_str_buf;
1676     }
1677     C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d' cnt='%f' prob='%s'",
1678                    iter().get_dest(), taken, not_taken, cnt, prob_str);
1679   }
1680   return prob;
1681 }
1682 
1683 //-----------------------------branch_prediction-------------------------------
1684 float Parse::branch_prediction(float& cnt,
1685                                BoolTest::mask btest,
1686                                int target_bci,
1687                                Node* test) {
1688   float prob = dynamic_branch_prediction(cnt, btest, test);
1689   // If prob is unknown, switch to static prediction
1690   if (prob != PROB_UNKNOWN)  return prob;
1691 
1692   prob = PROB_FAIR;                   // Set default value
1693   if (btest == BoolTest::eq)          // Exactly equal test?
1694     prob = PROB_STATIC_INFREQUENT;    // Assume it's relatively infrequent
1695   else if (btest == BoolTest::ne)
1696     prob = PROB_STATIC_FREQUENT;      // Assume it's relatively frequent
1697 
1698   // If this is a conditional test guarding a backwards branch,
1699   // assume it's a loop-back edge.  Make it a likely taken branch.
1700   if (target_bci < bci()) {
1701     if (is_osr_parse()) {    // Could be a hot OSR'd loop; force deopt
1702       // Since it's an OSR, we probably have profile data, but since
1703       // branch_prediction returned PROB_UNKNOWN, the counts are too small.
1704       // Let's make a special check here for completely zero counts.
1705       ciMethodData* methodData = method()->method_data();
1706       if (!methodData->is_empty()) {
1707         ciProfileData* data = methodData->bci_to_data(bci());
1708         // Only stop for truly zero counts, which mean an unknown part
1709         // of the OSR-ed method, and we want to deopt to gather more stats.
1710         // If you have ANY counts, then this loop is simply 'cold' relative
1711         // to the OSR loop.
1712         if (data == nullptr ||
1713             (data->as_BranchData()->taken() +  data->as_BranchData()->not_taken() == 0)) {
1714           // This is the only way to return PROB_UNKNOWN:
1715           return PROB_UNKNOWN;
1716         }
1717       }
1718     }
1719     prob = PROB_STATIC_FREQUENT;     // Likely to take backwards branch
1720   }
1721 
1722   assert(prob != PROB_UNKNOWN, "must have some guess at this point");
1723   return prob;
1724 }
1725 
1726 // The magic constants are chosen so as to match the output of
1727 // branch_prediction() when the profile reports a zero taken count.
1728 // It is important to distinguish zero counts unambiguously, because
1729 // some branches (e.g., _213_javac.Assembler.eliminate) validly produce
1730 // very small but nonzero probabilities, which if confused with zero
1731 // counts would keep the program recompiling indefinitely.
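     // For example, dynamic_branch_prediction() maps a zero taken count to
     // (0 + PROB_MIN) / 2, which is strictly below PROB_MIN and therefore
     // satisfies seems_never_taken(), while any branch with both counts
     // nonzero is clamped into [PROB_MIN, PROB_MAX] and does not.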
1732 bool Parse::seems_never_taken(float prob) const {
1733   return prob < PROB_MIN;
1734 }
1735 
1736 //-------------------------------repush_if_args--------------------------------
1737 // Push arguments of an "if" bytecode back onto the stack by adjusting _sp.
1738 inline int Parse::repush_if_args() {
1739   if (PrintOpto && WizardMode) {
1740     tty->print("defending against excessive implicit null exceptions on %s @%d in ",
1741                Bytecodes::name(iter().cur_bc()), iter().cur_bci());
1742     method()->print_name(); tty->cr();
1743   }
1744   int bc_depth = - Bytecodes::depth(iter().cur_bc());
1745   assert(bc_depth == 1 || bc_depth == 2, "only two kinds of branches");
1746   DEBUG_ONLY(sync_jvms());   // argument(n) requires a synced jvms
1747   assert(argument(0) != nullptr, "must exist");
1748   assert(bc_depth == 1 || argument(1) != nullptr, "two must exist");
1749   inc_sp(bc_depth);
1750   return bc_depth;
1751 }
1752 
1753 // Used by StressUnstableIfTraps
1754 static volatile int _trap_stress_counter = 0;
1755 
1756 void Parse::increment_trap_stress_counter(Node*& counter, Node*& incr_store) {
1757   Node* counter_addr = makecon(TypeRawPtr::make((address)&_trap_stress_counter));
1758   counter = make_load(control(), counter_addr, TypeInt::INT, T_INT, MemNode::unordered);
1759   counter = _gvn.transform(new AddINode(counter, intcon(1)));
1760   incr_store = store_to_memory(control(), counter_addr, counter, T_INT, MemNode::unordered);
1761 }
1762 
1763 //----------------------------------do_ifnull----------------------------------
1764 void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
1765   int target_bci = iter().get_dest();
1766 
1767   Node* counter = nullptr;
1768   Node* incr_store = nullptr;
1769   bool do_stress_trap = StressUnstableIfTraps && ((C->random() % 2) == 0);
1770   if (do_stress_trap) {
1771     increment_trap_stress_counter(counter, incr_store);
1772   }
1773 
1774   Block* branch_block = successor_for_bci(target_bci);
1775   Block* next_block   = successor_for_bci(iter().next_bci());
1776 
1777   float cnt;
1778   float prob = branch_prediction(cnt, btest, target_bci, c);
1779   if (prob == PROB_UNKNOWN) {
1780     // (An earlier version of do_ifnull omitted this trap for OSR methods.)
1781     if (PrintOpto && Verbose) {
1782       tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
1783     }
1784     repush_if_args(); // to gather stats on loop
1785     uncommon_trap(Deoptimization::Reason_unreached,
1786                   Deoptimization::Action_reinterpret,
1787                   nullptr, "cold");
1788     if (C->eliminate_boxing()) {
1789       // Mark the successor blocks as parsed
1790       branch_block->next_path_num();
1791       next_block->next_path_num();
1792     }
1793     return;
1794   }
1795 
1796   NOT_PRODUCT(explicit_null_checks_inserted++);
1797 
1798   // Generate real control flow
1799   Node   *tst = _gvn.transform( new BoolNode( c, btest ) );
1800 
1801   // Sanity check the probability value
1802   assert(prob > 0.0f,"Bad probability in Parser");
1803   // Need xform to put node in hash table
1804   IfNode *iff = create_and_xform_if( control(), tst, prob, cnt );
1805   assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1806   // True branch
1807   { PreserveJVMState pjvms(this);
1808     Node* iftrue  = _gvn.transform( new IfTrueNode (iff) );
1809     set_control(iftrue);
1810 
1811     if (stopped()) {            // Path is dead?
1812       NOT_PRODUCT(explicit_null_checks_elided++);
1813       if (C->eliminate_boxing()) {
1814         // Mark the successor block as parsed
1815         branch_block->next_path_num();
1816       }
1817     } else {                    // Path is live.
1818       adjust_map_after_if(btest, c, prob, branch_block);
1819       if (!stopped()) {
1820         merge(target_bci);
1821       }
1822     }
1823   }
1824 
1825   // False branch
1826   Node* iffalse = _gvn.transform( new IfFalseNode(iff) );
1827   set_control(iffalse);
1828 
1829   if (stopped()) {              // Path is dead?
1830     NOT_PRODUCT(explicit_null_checks_elided++);
1831     if (C->eliminate_boxing()) {
1832       // Mark the successor block as parsed
1833       next_block->next_path_num();
1834     }
1835   } else  {                     // Path is live.
1836     adjust_map_after_if(BoolTest(btest).negate(), c, 1.0-prob, next_block);
1837   }
1838 
1839   if (do_stress_trap) {
1840     stress_trap(iff, counter, incr_store);
1841   }
1842 }
1843 
1844 //------------------------------------do_if------------------------------------
1845 void Parse::do_if(BoolTest::mask btest, Node* c, bool can_trap, bool new_path, Node** ctrl_taken) {
1846   int target_bci = iter().get_dest();
1847 
1848   Block* branch_block = successor_for_bci(target_bci);
1849   Block* next_block   = successor_for_bci(iter().next_bci());
1850 
1851   float cnt;
1852   float prob = branch_prediction(cnt, btest, target_bci, c);
1853   float untaken_prob = 1.0 - prob;
1854 
1855   if (prob == PROB_UNKNOWN) {
1856     if (PrintOpto && Verbose) {
1857       tty->print_cr("Never-taken edge stops compilation at bci %d", bci());
1858     }
1859     repush_if_args(); // to gather stats on loop
1860     uncommon_trap(Deoptimization::Reason_unreached,
1861                   Deoptimization::Action_reinterpret,
1862                   nullptr, "cold");
1863     if (C->eliminate_boxing()) {
1864       // Mark the successor blocks as parsed
1865       branch_block->next_path_num();
1866       next_block->next_path_num();
1867     }
1868     return;
1869   }
1870 
1871   Node* counter = nullptr;
1872   Node* incr_store = nullptr;
1873   bool do_stress_trap = StressUnstableIfTraps && ((C->random() % 2) == 0);
1874   if (do_stress_trap) {
1875     increment_trap_stress_counter(counter, incr_store);
1876   }
1877 
1878   // Sanity check the probability value
1879   assert(0.0f < prob && prob < 1.0f,"Bad probability in Parser");
1880 
1881   bool taken_if_true = true;
1882   // Convert BoolTest to canonical form:
1883   if (!BoolTest(btest).is_canonical()) {
1884     btest         = BoolTest(btest).negate();
1885     taken_if_true = false;
1886     // prob is NOT updated here; it remains the probability of the taken
1887     // path (as opposed to the prob of the path guarded by an 'IfTrueNode').
1888   }
1889   assert(btest != BoolTest::eq, "!= is the only canonical exact test");
1890 
1891   Node* tst0 = new BoolNode(c, btest);
1892   Node* tst = _gvn.transform(tst0);
1893   BoolTest::mask taken_btest   = BoolTest::illegal;
1894   BoolTest::mask untaken_btest = BoolTest::illegal;
1895 
1896   if (tst->is_Bool()) {
1897     // Refresh c from the transformed bool node, since it may be
1898     // simpler than the original c.  Also re-canonicalize btest.
1899     // This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p null)).
1900     // That can arise from statements like: if (x instanceof C) ...
1901     if (tst != tst0) {
1902       // Canonicalize one more time since transform can change it.
1903       btest = tst->as_Bool()->_test._test;
1904       if (!BoolTest(btest).is_canonical()) {
1905         // Reverse edges one more time...
1906         tst   = _gvn.transform( tst->as_Bool()->negate(&_gvn) );
1907         btest = tst->as_Bool()->_test._test;
1908         assert(BoolTest(btest).is_canonical(), "sanity");
1909         taken_if_true = !taken_if_true;
1910       }
1911       c = tst->in(1);
1912     }
1913     BoolTest::mask neg_btest = BoolTest(btest).negate();
1914     taken_btest   = taken_if_true ?     btest : neg_btest;
1915     untaken_btest = taken_if_true ? neg_btest :     btest;
1916   }
1917 
1918   // Generate real control flow
1919   float true_prob = (taken_if_true ? prob : untaken_prob);
1920   IfNode* iff = create_and_map_if(control(), tst, true_prob, cnt);
1921   assert(iff->_prob > 0.0f,"Optimizer made bad probability in parser");
1922   Node* taken_branch   = new IfTrueNode(iff);
1923   Node* untaken_branch = new IfFalseNode(iff);
1924   if (!taken_if_true) {  // Finish conversion to canonical form
1925     Node* tmp      = taken_branch;
1926     taken_branch   = untaken_branch;
1927     untaken_branch = tmp;
1928   }
1929 
1930   // Branch is taken:
1931   { PreserveJVMState pjvms(this);
1932     taken_branch = _gvn.transform(taken_branch);
1933     set_control(taken_branch);
1934 
1935     if (stopped()) {
1936       if (C->eliminate_boxing() && !new_path) {
1937         // Mark the successor block as parsed (if we haven't created a new path)
1938         branch_block->next_path_num();
1939       }
1940     } else {
1941       adjust_map_after_if(taken_btest, c, prob, branch_block, can_trap);
1942       if (!stopped()) {
1943         if (new_path) {
1944           // Merge by using a new path
1945           merge_new_path(target_bci);
1946         } else if (ctrl_taken != nullptr) {
1947           // Don't merge but save taken branch to be wired by caller
1948           *ctrl_taken = control();
1949         } else {
1950           merge(target_bci);
1951         }
1952       }
1953     }
1954   }
1955 
1956   untaken_branch = _gvn.transform(untaken_branch);
1957   set_control(untaken_branch);
1958 
1959   // Branch not taken.
1960   if (stopped() && ctrl_taken == nullptr) {
1961     if (C->eliminate_boxing()) {
1962       // Mark the successor block as parsed (if caller does not re-wire control flow)
1963       next_block->next_path_num();
1964     }
1965   } else {
1966     adjust_map_after_if(untaken_btest, c, untaken_prob, next_block, can_trap);
1967   }
1968 
1969   if (do_stress_trap) {
1970     stress_trap(iff, counter, incr_store);
1971   }
1972 }
1973 
1974 
1975 static ProfilePtrKind speculative_ptr_kind(const TypeOopPtr* t) {
1976   if (t->speculative() == nullptr) {
1977     return ProfileUnknownNull;
1978   }
1979   if (t->speculative_always_null()) {
1980     return ProfileAlwaysNull;
1981   }
1982   if (t->speculative_maybe_null()) {
1983     return ProfileMaybeNull;
1984   }
1985   return ProfileNeverNull;
1986 }
1987 
1988 void Parse::acmp_always_null_input(Node* input, const TypeOopPtr* tinput, BoolTest::mask btest, Node* eq_region) {
1989   inc_sp(2);
1990   Node* cast = null_check_common(input, T_OBJECT, true, nullptr,
1991                                  !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check) &&
1992                                  speculative_ptr_kind(tinput) == ProfileAlwaysNull);
1993   dec_sp(2);
1994   if (btest == BoolTest::ne) {
1995     {
1996       PreserveJVMState pjvms(this);
1997       replace_in_map(input, cast);
1998       int target_bci = iter().get_dest();
1999       merge(target_bci);
2000     }
2001     record_for_igvn(eq_region);
2002     set_control(_gvn.transform(eq_region));
2003   } else {
2004     replace_in_map(input, cast);
2005   }
2006 }
2007 
2008 Node* Parse::acmp_null_check(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, Node*& null_ctl) {
2009   inc_sp(2);
2010   null_ctl = top();
2011   Node* cast = null_check_oop(input, &null_ctl,
2012                               input_ptr == ProfileNeverNull || (input_ptr == ProfileUnknownNull && !too_many_traps_or_recompiles(Deoptimization::Reason_null_check)),
2013                               false,
2014                               speculative_ptr_kind(tinput) == ProfileNeverNull &&
2015                               !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check));
2016   dec_sp(2);
2017   assert(!stopped(), "null input should have been caught earlier");
2018   return cast;
2019 }
2020 
2021 void Parse::acmp_known_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, ciKlass* input_type, BoolTest::mask btest, Node* eq_region) {
2022   Node* ne_region = new RegionNode(1);
2023   Node* null_ctl;
2024   Node* cast = acmp_null_check(input, tinput, input_ptr, null_ctl);
2025   ne_region->add_req(null_ctl);
2026 
2027   Node* slow_ctl = type_check_receiver(cast, input_type, 1.0, &cast);
2028   {
2029     PreserveJVMState pjvms(this);
2030     inc_sp(2);
2031     set_control(slow_ctl);
2032     Deoptimization::DeoptReason reason;
2033     if (tinput->speculative_type() != nullptr && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
2034       reason = Deoptimization::Reason_speculate_class_check;
2035     } else {
2036       reason = Deoptimization::Reason_class_check;
2037     }
2038     uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
2039   }
2040   ne_region->add_req(control());
2041 
2042   record_for_igvn(ne_region);
2043   set_control(_gvn.transform(ne_region));
2044   if (btest == BoolTest::ne) {
2045     {
2046       PreserveJVMState pjvms(this);
2047       if (null_ctl == top()) {
2048         replace_in_map(input, cast);
2049       }
2050       int target_bci = iter().get_dest();
2051       merge(target_bci);
2052     }
2053     record_for_igvn(eq_region);
2054     set_control(_gvn.transform(eq_region));
2055   } else {
2056     if (null_ctl == top()) {
2057       replace_in_map(input, cast);
2058     }
2059     set_control(_gvn.transform(ne_region));
2060   }
2061 }
2062 
2063 void Parse::acmp_unknown_non_inline_type_input(Node* input, const TypeOopPtr* tinput, ProfilePtrKind input_ptr, BoolTest::mask btest, Node* eq_region) {
2064   Node* ne_region = new RegionNode(1);
2065   Node* null_ctl;
2066   Node* cast = acmp_null_check(input, tinput, input_ptr, null_ctl);
2067   ne_region->add_req(null_ctl);
2068 
2069   {
2070     BuildCutout unless(this, inline_type_test(cast, /* is_inline = */ false), PROB_MAX);
2071     inc_sp(2);
2072     uncommon_trap_exact(Deoptimization::Reason_class_check, Deoptimization::Action_maybe_recompile);
2073   }
2074 
2075   ne_region->add_req(control());
2076 
2077   record_for_igvn(ne_region);
2078   set_control(_gvn.transform(ne_region));
2079   if (btest == BoolTest::ne) {
2080     {
2081       PreserveJVMState pjvms(this);
2082       if (null_ctl == top()) {
2083         replace_in_map(input, cast);
2084       }
2085       int target_bci = iter().get_dest();
2086       merge(target_bci);
2087     }
2088     record_for_igvn(eq_region);
2089     set_control(_gvn.transform(eq_region));
2090   } else {
2091     if (null_ctl == top()) {
2092       replace_in_map(input, cast);
2093     }
2094     set_control(_gvn.transform(ne_region));
2095   }
2096 }
2097 
2098 void Parse::do_acmp(BoolTest::mask btest, Node* left, Node* right) {
2099   ciKlass* left_type = nullptr;
2100   ciKlass* right_type = nullptr;
2101   ProfilePtrKind left_ptr = ProfileUnknownNull;
2102   ProfilePtrKind right_ptr = ProfileUnknownNull;
2103   bool left_inline_type = true;
2104   bool right_inline_type = true;
2105 
2106   // Leverage profiling at acmp
2107   if (UseACmpProfile) {
2108     method()->acmp_profiled_type(bci(), left_type, right_type, left_ptr, right_ptr, left_inline_type, right_inline_type);
2109     if (too_many_traps_or_recompiles(Deoptimization::Reason_class_check)) {
2110       left_type = nullptr;
2111       right_type = nullptr;
2112       left_inline_type = true;
2113       right_inline_type = true;
2114     }
2115     if (too_many_traps_or_recompiles(Deoptimization::Reason_null_check)) {
2116       left_ptr = ProfileUnknownNull;
2117       right_ptr = ProfileUnknownNull;
2118     }
2119   }
2120 
2121   if (UseTypeSpeculation) {
2122     record_profile_for_speculation(left, left_type, left_ptr);
2123     record_profile_for_speculation(right, right_type, right_ptr);
2124   }
2125 
2126   if (!EnableValhalla) {
2127     Node* cmp = CmpP(left, right);
2128     cmp = optimize_cmp_with_klass(cmp);
2129     do_if(btest, cmp);
2130     return;
2131   }
2132 
2133   // Check for equality before potentially allocating
2134   if (left == right) {
2135     do_if(btest, makecon(TypeInt::CC_EQ));
2136     return;
2137   }
2138 
2139   // Allocate inline type operands and re-execute on deoptimization
2140   if (left->is_InlineType()) {
2141     if (_gvn.type(right)->is_zero_type() ||
2142         (right->is_InlineType() && _gvn.type(right->as_InlineType()->get_is_init())->is_zero_type())) {
2143       // Null checking a scalarized but nullable inline type. Check the IsInit
2144       // input instead of the oop input to avoid keeping buffer allocations alive.
2145       Node* cmp = CmpI(left->as_InlineType()->get_is_init(), intcon(0));
2146       do_if(btest, cmp);
2147       return;
2148     } else {
2149       PreserveReexecuteState preexecs(this);
2150       inc_sp(2);
2151       jvms()->set_should_reexecute(true);
2152       left = left->as_InlineType()->buffer(this)->get_oop();
2153     }
2154   }
2155   if (right->is_InlineType()) {
2156     PreserveReexecuteState preexecs(this);
2157     inc_sp(2);
2158     jvms()->set_should_reexecute(true);
2159     right = right->as_InlineType()->buffer(this)->get_oop();
2160   }
2161 
2162   // First, do a normal pointer comparison
2163   const TypeOopPtr* tleft = _gvn.type(left)->isa_oopptr();
2164   const TypeOopPtr* tright = _gvn.type(right)->isa_oopptr();
2165   Node* cmp = CmpP(left, right);
2166   cmp = optimize_cmp_with_klass(cmp);
2167   if (tleft == nullptr || !tleft->can_be_inline_type() ||
2168       tright == nullptr || !tright->can_be_inline_type()) {
2169     // This is sufficient, if one of the operands can't be an inline type
2170     do_if(btest, cmp);
2171     return;
2172   }
2173 
2174   // Don't add traps to unstable if branches because additional checks are required to
2175   // decide if the operands are equal/substitutable, so we shouldn't prune
2176   // branches of one if based on the profiling of the acmp branches.
2177   // Also, OptimizeUnstableIf would set an incorrect re-execution state because it
2178   // assumes that there is a 1-1 mapping between the if and the acmp branches and that
2179   // hitting a trap means that we will take the corresponding acmp branch on re-execution.
2180   const bool can_trap = true;
2181 
2182   Node* eq_region = nullptr;
2183   if (btest == BoolTest::eq) {
2184     do_if(btest, cmp, !can_trap, true);
2185     if (stopped()) {
2186       // Pointers are equal, operands must be equal
2187       return;
2188     }
2189   } else {
2190     assert(btest == BoolTest::ne, "only eq or ne");
2191     Node* is_not_equal = nullptr;
2192     eq_region = new RegionNode(3);
2193     {
2194       PreserveJVMState pjvms(this);
2195       // Pointers are not equal, but more checks are needed to determine if the operands are (not) substitutable
2196       do_if(btest, cmp, !can_trap, false, &is_not_equal);
2197       if (!stopped()) {
2198         eq_region->init_req(1, control());
2199       }
2200     }
2201     if (is_not_equal == nullptr || is_not_equal->is_top()) {
2202       record_for_igvn(eq_region);
2203       set_control(_gvn.transform(eq_region));
2204       return;
2205     }
2206     set_control(is_not_equal);
2207   }
2208 
2209   // Prefer speculative types if available
2210   if (!too_many_traps_or_recompiles(Deoptimization::Reason_speculate_class_check)) {
2211     if (tleft->speculative_type() != nullptr) {
2212       left_type = tleft->speculative_type();
2213     }
2214     if (tright->speculative_type() != nullptr) {
2215       right_type = tright->speculative_type();
2216     }
2217   }
2218 
2219   if (speculative_ptr_kind(tleft) != ProfileMaybeNull && speculative_ptr_kind(tleft) != ProfileUnknownNull) {
2220     ProfilePtrKind speculative_left_ptr = speculative_ptr_kind(tleft);
2221     if (speculative_left_ptr == ProfileAlwaysNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_assert)) {
2222       left_ptr = speculative_left_ptr;
2223     } else if (speculative_left_ptr == ProfileNeverNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)) {
2224       left_ptr = speculative_left_ptr;
2225     }
2226   }
2227   if (speculative_ptr_kind(tright) != ProfileMaybeNull && speculative_ptr_kind(tright) != ProfileUnknownNull) {
2228     ProfilePtrKind speculative_right_ptr = speculative_ptr_kind(tright);
2229     if (speculative_right_ptr == ProfileAlwaysNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_assert)) {
2230       right_ptr = speculative_right_ptr;
2231     } else if (speculative_right_ptr == ProfileNeverNull && !too_many_traps_or_recompiles(Deoptimization::Reason_speculate_null_check)) {
2232       right_ptr = speculative_right_ptr;
2233     }
2234   }
2235 
2236   if (left_ptr == ProfileAlwaysNull) {
2237     // Comparison with null. Assert the input is indeed null and we're done.
2238     acmp_always_null_input(left, tleft, btest, eq_region);
2239     return;
2240   }
2241   if (right_ptr == ProfileAlwaysNull) {
2242     // Comparison with null. Assert the input is indeed null and we're done.
2243     acmp_always_null_input(right, tright, btest, eq_region);
2244     return;
2245   }
2246   if (left_type != nullptr && !left_type->is_inlinetype()) {
2247     // Comparison with an object of known type
2248     acmp_known_non_inline_type_input(left, tleft, left_ptr, left_type, btest, eq_region);
2249     return;
2250   }
2251   if (right_type != nullptr && !right_type->is_inlinetype()) {
2252     // Comparison with an object of known type
2253     acmp_known_non_inline_type_input(right, tright, right_ptr, right_type, btest, eq_region);
2254     return;
2255   }
2256   if (!left_inline_type) {
2257     // Comparison with an object known not to be an inline type
2258     acmp_unknown_non_inline_type_input(left, tleft, left_ptr, btest, eq_region);
2259     return;
2260   }
2261   if (!right_inline_type) {
2262     // Comparison with an object known not to be an inline type
2263     acmp_unknown_non_inline_type_input(right, tright, right_ptr, btest, eq_region);
2264     return;
2265   }
2266 
2267   // Pointers are not equal, check if the right operand is non-null
2268   Node* ne_region = new RegionNode(6);
2269   Node* null_ctl;
2270   Node* not_null_right = acmp_null_check(right, tright, right_ptr, null_ctl);
2271   ne_region->init_req(1, null_ctl);
2272 
2273   // The right operand is non-null, check if it is an inline type
2274   Node* is_value = inline_type_test(not_null_right);
2275   IfNode* is_value_iff = create_and_map_if(control(), is_value, PROB_FAIR, COUNT_UNKNOWN);
2276   Node* not_value = _gvn.transform(new IfFalseNode(is_value_iff));
2277   ne_region->init_req(2, not_value);
2278   set_control(_gvn.transform(new IfTrueNode(is_value_iff)));
2279 
2280   // The right operand is an inline type, check if the left operand is non-null
2281   Node* not_null_left = acmp_null_check(left, tleft, left_ptr, null_ctl);
2282   ne_region->init_req(3, null_ctl);
2283 
2284   // Check if both operands are of the same class.
2285   Node* kls_left = load_object_klass(not_null_left);
2286   Node* kls_right = load_object_klass(not_null_right);
2287   Node* kls_cmp = CmpP(kls_left, kls_right);
2288   Node* kls_bol = _gvn.transform(new BoolNode(kls_cmp, BoolTest::ne));
2289   IfNode* kls_iff = create_and_map_if(control(), kls_bol, PROB_FAIR, COUNT_UNKNOWN);
2290   Node* kls_ne = _gvn.transform(new IfTrueNode(kls_iff));
2291   set_control(_gvn.transform(new IfFalseNode(kls_iff)));
2292   ne_region->init_req(4, kls_ne);
2293 
2294   if (stopped()) {
2295     record_for_igvn(ne_region);
2296     set_control(_gvn.transform(ne_region));
2297     if (btest == BoolTest::ne) {
2298       {
2299         PreserveJVMState pjvms(this);
2300         int target_bci = iter().get_dest();
2301         merge(target_bci);
2302       }
2303       record_for_igvn(eq_region);
2304       set_control(_gvn.transform(eq_region));
2305     }
2306     return;
2307   }
2308 
2309   // Both operands are value types of the same class; we need to perform a
2310   // substitutability test. Delegate to ValueObjectMethods::isSubstitutable().
2311   Node* ne_io_phi = PhiNode::make(ne_region, i_o());
2312   Node* mem = reset_memory();
2313   Node* ne_mem_phi = PhiNode::make(ne_region, mem);
2314 
2315   Node* eq_io_phi = nullptr;
2316   Node* eq_mem_phi = nullptr;
2317   if (eq_region != nullptr) {
2318     eq_io_phi = PhiNode::make(eq_region, i_o());
2319     eq_mem_phi = PhiNode::make(eq_region, mem);
2320   }
2321 
2322   set_all_memory(mem);
2323 
2324   kill_dead_locals();
2325   ciMethod* subst_method = ciEnv::current()->ValueObjectMethods_klass()->find_method(ciSymbols::isSubstitutable_name(), ciSymbols::object_object_boolean_signature());
2326   CallStaticJavaNode *call = new CallStaticJavaNode(C, TypeFunc::make(subst_method), SharedRuntime::get_resolve_static_call_stub(), subst_method);
2327   call->set_override_symbolic_info(true);
2328   call->init_req(TypeFunc::Parms, not_null_left);
2329   call->init_req(TypeFunc::Parms+1, not_null_right);
2330   inc_sp(2);
2331   set_edges_for_java_call(call, false, false);
2332   Node* ret = set_results_for_java_call(call, false, true);
2333   dec_sp(2);
2334 
2335   // Test the return value of ValueObjectMethods::isSubstitutable()
2336   // This is the last check, do_if can emit traps now.
2337   Node* subst_cmp = _gvn.transform(new CmpINode(ret, intcon(1)));
2338   Node* ctl = C->top();
2339   if (btest == BoolTest::eq) {
2340     PreserveJVMState pjvms(this);
2341     do_if(btest, subst_cmp, can_trap);
2342     if (!stopped()) {
2343       ctl = control();
2344     }
2345   } else {
2346     assert(btest == BoolTest::ne, "only eq or ne");
2347     PreserveJVMState pjvms(this);
2348     do_if(btest, subst_cmp, can_trap, false, &ctl);
2349     if (!stopped()) {
2350       eq_region->init_req(2, control());
2351       eq_io_phi->init_req(2, i_o());
2352       eq_mem_phi->init_req(2, reset_memory());
2353     }
2354   }
2355   ne_region->init_req(5, ctl);
2356   ne_io_phi->init_req(5, i_o());
2357   ne_mem_phi->init_req(5, reset_memory());
2358 
2359   record_for_igvn(ne_region);
2360   set_control(_gvn.transform(ne_region));
2361   set_i_o(_gvn.transform(ne_io_phi));
2362   set_all_memory(_gvn.transform(ne_mem_phi));
2363 
2364   if (btest == BoolTest::ne) {
2365     {
2366       PreserveJVMState pjvms(this);
2367       int target_bci = iter().get_dest();
2368       merge(target_bci);
2369     }
2370 
2371     record_for_igvn(eq_region);
2372     set_control(_gvn.transform(eq_region));
2373     set_i_o(_gvn.transform(eq_io_phi));
2374     set_all_memory(_gvn.transform(eq_mem_phi));
2375   }
2376 }
2377 
2378 // Force unstable if traps to be taken randomly to trigger intermittent bugs such as incorrect debug information.
2379 // Add another if before the unstable if that checks a "random" condition at runtime (a simple shared counter) and
2380 // then either takes the trap or executes the original, unstable if.
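     // Sketch of the inserted check (illustrative): with freq_log in [1, 31],
     //   (counter & right_n_bits(freq_log)) == 0  --> take the trap
     //   otherwise                                --> execute the original if
     // so the trap fires roughly once every 2^freq_log executions.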
2381 void Parse::stress_trap(IfNode* orig_iff, Node* counter, Node* incr_store) {
2382   // Search for an unstable if trap
2383   CallStaticJavaNode* trap = nullptr;
2384   assert(orig_iff->Opcode() == Op_If && orig_iff->outcnt() == 2, "malformed if");
2385   ProjNode* trap_proj = orig_iff->uncommon_trap_proj(trap, Deoptimization::Reason_unstable_if);
2386   if (trap == nullptr || !trap->jvms()->should_reexecute()) {
2387     // No suitable trap found. Remove unused counter load and increment.
2388     C->gvn_replace_by(incr_store, incr_store->in(MemNode::Memory));
2389     return;
2390   }
2391 
2392   // Remove trap from optimization list since we add another path to the trap.
2393   bool success = C->remove_unstable_if_trap(trap, true);
2394   assert(success, "Trap already modified");
2395 
2396   // Add a check before the original if that will trap with a certain frequency and execute the original if otherwise
2397   int freq_log = (C->random() % 31) + 1; // Random logarithmic frequency in [1, 31]
2398   Node* mask = intcon(right_n_bits(freq_log));
2399   counter = _gvn.transform(new AndINode(counter, mask));
2400   Node* cmp = _gvn.transform(new CmpINode(counter, intcon(0)));
2401   Node* bol = _gvn.transform(new BoolNode(cmp, BoolTest::mask::eq));
2402   IfNode* iff = _gvn.transform(new IfNode(orig_iff->in(0), bol, orig_iff->_prob, orig_iff->_fcnt))->as_If();
2403   Node* if_true = _gvn.transform(new IfTrueNode(iff));
2404   Node* if_false = _gvn.transform(new IfFalseNode(iff));
2405   assert(!if_true->is_top() && !if_false->is_top(), "trap always / never taken");
2406 
2407   // Trap
2408   assert(trap_proj->outcnt() == 1, "some other nodes are dependent on the trap projection");
2409 
2410   Node* trap_region = new RegionNode(3);
2411   trap_region->set_req(1, trap_proj);
2412   trap_region->set_req(2, if_true);
2413   trap->set_req(0, _gvn.transform(trap_region));
2414 
2415   // Don't trap, execute original if
2416   orig_iff->set_req(0, if_false);
2417 }
2418 
2419 bool Parse::path_is_suitable_for_uncommon_trap(float prob) const {
2420   // Randomly skip emitting an uncommon trap
2421   if (StressUnstableIfTraps && ((C->random() % 2) == 0)) {
2422     return false;
2423   }
2424   // Don't want to speculate on uncommon traps when running with -Xcomp
2425   if (!UseInterpreter) {
2426     return false;
2427   }
2428   return seems_never_taken(prob) &&
2429          !C->too_many_traps(method(), bci(), Deoptimization::Reason_unstable_if);
2430 }
2431 
2432 void Parse::maybe_add_predicate_after_if(Block* path) {
2433   if (path->is_SEL_head() && path->preds_parsed() == 0) {
2434     // Add predicates at bci of if dominating the loop so traps can be
2435     // recorded on the if's profile data
2436     int bc_depth = repush_if_args();
2437     add_parse_predicates();
2438     dec_sp(bc_depth);
2439     path->set_has_predicates();
2440   }
2441 }
2442 
2443 
2444 //----------------------------adjust_map_after_if------------------------------
2445 // Adjust the JVM state to reflect the result of taking this path.
2446 // Basically, it means inspecting the CmpNode controlling this
2447 // branch, seeing how it constrains a tested value, and then
2448 // deciding if it's worth our while to encode this constraint
2449 // as graph nodes in the current abstract interpretation map.
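     // For example (illustrative): on the taken path of "if (x == 7)" the map can
     // replace x by the constant 7, and on the taken path of "if (p != null)" it
     // can replace p by a not-null cast of itself.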
2450 void Parse::adjust_map_after_if(BoolTest::mask btest, Node* c, float prob, Block* path, bool can_trap) {
2451   if (!c->is_Cmp()) {
2452     maybe_add_predicate_after_if(path);
2453     return;
2454   }
2455 
2456   if (stopped() || btest == BoolTest::illegal) {
2457     return;                             // nothing to do
2458   }
2459 
2460   bool is_fallthrough = (path == successor_for_bci(iter().next_bci()));
2461 
2462   if (can_trap && path_is_suitable_for_uncommon_trap(prob)) {
2463     repush_if_args();
2464     Node* call = uncommon_trap(Deoptimization::Reason_unstable_if,
2465                   Deoptimization::Action_reinterpret,
2466                   nullptr,
2467                   (is_fallthrough ? "taken always" : "taken never"));
2468 
2469     if (call != nullptr) {
2470       C->record_unstable_if_trap(new UnstableIfTrap(call->as_CallStaticJava(), path));
2471     }
2472     return;
2473   }
2474 
2475   Node* val = c->in(1);
2476   Node* con = c->in(2);
2477   const Type* tcon = _gvn.type(con);
2478   const Type* tval = _gvn.type(val);
2479   bool have_con = tcon->singleton();
2480   if (tval->singleton()) {
2481     if (!have_con) {
2482       // Swap, so constant is in con.
2483       con  = val;
2484       tcon = tval;
2485       val  = c->in(2);
2486       tval = _gvn.type(val);
2487       btest = BoolTest(btest).commute();
2488       have_con = true;
2489     } else {
2490       // Do we have two constants?  Then leave well enough alone.
2491       have_con = false;
2492     }
2493   }
2494   if (!have_con) {                        // remaining adjustments need a con
2495     maybe_add_predicate_after_if(path);
2496     return;
2497   }
2498 
2499   sharpen_type_after_if(btest, con, tcon, val, tval);
2500   maybe_add_predicate_after_if(path);
2501 }
2502 
2503 
2504 static Node* extract_obj_from_klass_load(PhaseGVN* gvn, Node* n) {
2505   Node* ldk;
2506   if (n->is_DecodeNKlass()) {
2507     if (n->in(1)->Opcode() != Op_LoadNKlass) {
2508       return nullptr;
2509     } else {
2510       ldk = n->in(1);
2511     }
2512   } else if (n->Opcode() != Op_LoadKlass) {
2513     return nullptr;
2514   } else {
2515     ldk = n;
2516   }
2517   assert(ldk != nullptr && ldk->is_Load(), "should have found a LoadKlass or LoadNKlass node");
2518 
2519   Node* adr = ldk->in(MemNode::Address);
2520   intptr_t off = 0;
2521   Node* obj = AddPNode::Ideal_base_and_offset(adr, gvn, off);
2522   if (obj == nullptr || off != oopDesc::klass_offset_in_bytes()) // loading oopDesc::_klass?
2523     return nullptr;
2524   const TypePtr* tp = gvn->type(obj)->is_ptr();
2525   if (tp == nullptr || !(tp->isa_instptr() || tp->isa_aryptr())) // is obj a Java object ptr?
2526     return nullptr;
2527 
2528   return obj;
2529 }
2530 
2531 void Parse::sharpen_type_after_if(BoolTest::mask btest,
2532                                   Node* con, const Type* tcon,
2533                                   Node* val, const Type* tval) {
2534   // Look for opportunities to sharpen the type of a node
2535   // whose klass is compared with a constant klass.
2536   if (btest == BoolTest::eq && tcon->isa_klassptr()) {
2537     Node* obj = extract_obj_from_klass_load(&_gvn, val);
2538     const TypeOopPtr* con_type = tcon->isa_klassptr()->as_instance_type();
2539     if (obj != nullptr && (con_type->isa_instptr() || con_type->isa_aryptr())) {
2540        // Found:
2541        //   Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
2542        // or the narrowOop equivalent.
2543        const Type* obj_type = _gvn.type(obj);
2544        const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr();
2545        if (tboth != nullptr && tboth->klass_is_exact() && tboth != obj_type &&
2546            tboth->higher_equal(obj_type)) {
2547           // obj has to be of the exact type Foo if the CmpP succeeds.
2548           int obj_in_map = map()->find_edge(obj);
2549           JVMState* jvms = this->jvms();
2550           if (obj_in_map >= 0 &&
2551               (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
2552             TypeNode* ccast = new CheckCastPPNode(control(), obj, tboth);
2553             const Type* tcc = ccast->as_Type()->type();
2554             assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
2555             // Delay transform() call to allow recovery of pre-cast value
2556             // at the control merge.
2557             _gvn.set_type_bottom(ccast);
2558             record_for_igvn(ccast);
2559             if (tboth->is_inlinetypeptr()) {
2560               ccast = InlineTypeNode::make_from_oop(this, ccast, tboth->exact_klass(true)->as_inline_klass());
2561             }
2562             // Here's the payoff.
2563             replace_in_map(obj, ccast);
2564           }
2565        }
2566     }
2567   }
2568 
2569   int val_in_map = map()->find_edge(val);
2570   if (val_in_map < 0)  return;          // replace_in_map would be useless
2571   {
2572     JVMState* jvms = this->jvms();
2573     if (!(jvms->is_loc(val_in_map) ||
2574           jvms->is_stk(val_in_map)))
2575       return;                           // again, it would be useless
2576   }
2577 
2578   // Check for a comparison to a constant, and "know" that the compared
2579   // value is constrained on this path.
2580   assert(tcon->singleton(), "");
2581   ConstraintCastNode* ccast = nullptr;
2582   Node* cast = nullptr;
2583 
2584   switch (btest) {
2585   case BoolTest::eq:                    // Constant test?
2586     {
2587       const Type* tboth = tcon->join_speculative(tval);
2588       if (tboth == tval)  break;        // Nothing to gain.
2589       if (tcon->isa_int()) {
2590         ccast = new CastIINode(control(), val, tboth);
2591       } else if (tcon == TypePtr::NULL_PTR) {
2592         // Cast to null, but keep the pointer identity temporarily live.
2593         ccast = new CastPPNode(control(), val, tboth);
2594       } else {
2595         const TypeF* tf = tcon->isa_float_constant();
2596         const TypeD* td = tcon->isa_double_constant();
2597         // Exclude tests vs float/double 0 as these could be
2598         // either +0 or -0.  Just because you are equal to +0
2599         // doesn't mean you ARE +0!
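             // (E.g., 0.0f == -0.0f is true under IEEE 754, yet the two differ:
             // 1.0f / 0.0f is +Inf while 1.0f / -0.0f is -Inf.)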
2600         // Note, following code also replaces Long and Oop values.
2601         if ((!tf || tf->_f != 0.0) &&
2602             (!td || td->_d != 0.0))
2603           cast = con;                   // Replace non-constant val by con.
2604       }
2605     }
2606     break;
2607 
2608   case BoolTest::ne:
2609     if (tcon == TypePtr::NULL_PTR) {
2610       cast = cast_not_null(val, false);
2611     }
2612     break;
2613 
2614   default:
2615     // (At this point we could record int range types with CastII.)
2616     break;
2617   }
2618 
2619   if (ccast != nullptr) {
2620     const Type* tcc = ccast->as_Type()->type();
2621     assert(tcc != tval && tcc->higher_equal(tval), "must improve");
2622     // Delay transform() call to allow recovery of pre-cast value
2623     // at the control merge.
2624     _gvn.set_type_bottom(ccast);
2625     record_for_igvn(ccast);
2626     cast = ccast;
2627   }
2628 
2629   if (cast != nullptr) {                   // Here's the payoff.
2630     replace_in_map(val, cast);
2631   }
2632 }
2633 
2634 /**
2635  * Use the speculative type to optimize a CmpP node: if the comparison is
2636  * against the low-level klass, cast the object to its speculative
2637  * type, if any. The CmpP should then go away.
2638  *
2639  * @param c  expected CmpP node
2640  * @return   result of CmpP on object casted to speculative type
2641  *
2642  */
2643 Node* Parse::optimize_cmp_with_klass(Node* c) {
2644   // If this has been transformed by the _gvn into a comparison with the
2645   // low-level klass, then we may be able to use speculation.
2646   if (c->Opcode() == Op_CmpP &&
2647       (c->in(1)->Opcode() == Op_LoadKlass || c->in(1)->Opcode() == Op_DecodeNKlass) &&
2648       c->in(2)->is_Con()) {
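         // Expected shape: CmpP(LoadKlass(AddP(obj, offset)), ConP(klass)),
         // with a DecodeNKlass over a narrow klass load when compressed class
         // pointers are in use.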
2649     Node* load_klass = nullptr;
2650     Node* decode = nullptr;
2651     if (c->in(1)->Opcode() == Op_DecodeNKlass) {
2652       decode = c->in(1);
2653       load_klass = c->in(1)->in(1);
2654     } else {
2655       load_klass = c->in(1);
2656     }
2657     if (load_klass->in(2)->is_AddP()) {
2658       Node* addp = load_klass->in(2);
2659       Node* obj = addp->in(AddPNode::Address);
2660       const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr();
2661       if (obj_type->speculative_type_not_null() != nullptr) {
2662         ciKlass* k = obj_type->speculative_type();
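             // Re-adjust sp around the cast: an uncommon trap emitted inside
             // maybe_cast_profiled_obj must record a JVM state that still has
             // the two operands of the current bytecode on the stack.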
2663         inc_sp(2);
2664         obj = maybe_cast_profiled_obj(obj, k);
2665         dec_sp(2);
2666         if (obj->is_InlineType()) {
2667           assert(obj->as_InlineType()->is_allocated(&_gvn), "must be allocated");
2668           obj = obj->as_InlineType()->get_oop();
2669         }
2670         // Make the CmpP use the cast obj
2671         addp = basic_plus_adr(obj, addp->in(AddPNode::Offset));
2672         load_klass = load_klass->clone();
2673         load_klass->set_req(2, addp);
2674         load_klass = _gvn.transform(load_klass);
2675         if (decode != nullptr) {
2676           decode = decode->clone();
2677           decode->set_req(1, load_klass);
2678           load_klass = _gvn.transform(decode);
2679         }
2680         c = c->clone();
2681         c->set_req(1, load_klass);
2682         c = _gvn.transform(c);
2683       }
2684     }
2685   }
2686   return c;
2687 }
2688 
2689 //------------------------------do_one_bytecode--------------------------------
2690 // Parse this bytecode, and alter the Parser's JVM->Node mapping
2691 void Parse::do_one_bytecode() {
2692   Node *a, *b, *c, *d;          // Handy temps
2693   BoolTest::mask btest;
2694   int i;
2695 
2696   assert(!has_exceptions(), "bytecode entry state must be clear of throws");
2697 
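       // Bail out of parsing if this compilation is running out of nodes.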
2698   if (C->check_node_count(NodeLimitFudgeFactor * 5,
2699                           "out of nodes parsing method")) {
2700     return;
2701   }
2702 
2703 #ifdef ASSERT
2704   // for setting breakpoints
2705   if (TraceOptoParse) {
2706     tty->print(" @");
2707     dump_bci(bci());
2708     tty->print(" %s", Bytecodes::name(bc()));
2709     tty->cr();
2710   }
2711 #endif
2712 
2713   switch (bc()) {
2714   case Bytecodes::_nop:
2715     // do nothing
2716     break;
2717   case Bytecodes::_lconst_0:
2718     push_pair(longcon(0));
2719     break;
2720 
2721   case Bytecodes::_lconst_1:
2722     push_pair(longcon(1));
2723     break;
2724 
2725   case Bytecodes::_fconst_0:
2726     push(zerocon(T_FLOAT));
2727     break;
2728 
2729   case Bytecodes::_fconst_1:
2730     push(makecon(TypeF::ONE));
2731     break;
2732 
2733   case Bytecodes::_fconst_2:
2734     push(makecon(TypeF::make(2.0f)));
2735     break;
2736 
2737   case Bytecodes::_dconst_0:
2738     push_pair(zerocon(T_DOUBLE));
2739     break;
2740 
2741   case Bytecodes::_dconst_1:
2742     push_pair(makecon(TypeD::ONE));
2743     break;
2744 
2745   case Bytecodes::_iconst_m1:push(intcon(-1)); break;
2746   case Bytecodes::_iconst_0: push(intcon( 0)); break;
2747   case Bytecodes::_iconst_1: push(intcon( 1)); break;
2748   case Bytecodes::_iconst_2: push(intcon( 2)); break;
2749   case Bytecodes::_iconst_3: push(intcon( 3)); break;
2750   case Bytecodes::_iconst_4: push(intcon( 4)); break;
2751   case Bytecodes::_iconst_5: push(intcon( 5)); break;
2752   case Bytecodes::_bipush:   push(intcon(iter().get_constant_u1())); break;
2753   case Bytecodes::_sipush:   push(intcon(iter().get_constant_u2())); break;
2754   case Bytecodes::_aconst_null: push(null());  break;
2755 
2756   case Bytecodes::_ldc:
2757   case Bytecodes::_ldc_w:
2758   case Bytecodes::_ldc2_w: {
2759     // ciTypeFlow should trap if the ldc is in error state or if the constant is not loaded
2760     assert(!iter().is_in_error(), "ldc is in error state");
2761     ciConstant constant = iter().get_constant();
2762     assert(constant.is_loaded(), "constant is not loaded");
2763     const Type* con_type = Type::make_from_constant(constant);
2764     if (con_type != nullptr) {
2765       push_node(con_type->basic_type(), makecon(con_type));
2766     }
2767     break;
2768   }
2769 
2770   case Bytecodes::_aload_0:
2771     push( local(0) );
2772     break;
2773   case Bytecodes::_aload_1:
2774     push( local(1) );
2775     break;
2776   case Bytecodes::_aload_2:
2777     push( local(2) );
2778     break;
2779   case Bytecodes::_aload_3:
2780     push( local(3) );
2781     break;
2782   case Bytecodes::_aload:
2783     push( local(iter().get_index()) );
2784     break;
2785 
2786   case Bytecodes::_fload_0:
2787   case Bytecodes::_iload_0:
2788     push( local(0) );
2789     break;
2790   case Bytecodes::_fload_1:
2791   case Bytecodes::_iload_1:
2792     push( local(1) );
2793     break;
2794   case Bytecodes::_fload_2:
2795   case Bytecodes::_iload_2:
2796     push( local(2) );
2797     break;
2798   case Bytecodes::_fload_3:
2799   case Bytecodes::_iload_3:
2800     push( local(3) );
2801     break;
2802   case Bytecodes::_fload:
2803   case Bytecodes::_iload:
2804     push( local(iter().get_index()) );
2805     break;
2806   case Bytecodes::_lload_0:
2807     push_pair_local( 0 );
2808     break;
2809   case Bytecodes::_lload_1:
2810     push_pair_local( 1 );
2811     break;
2812   case Bytecodes::_lload_2:
2813     push_pair_local( 2 );
2814     break;
2815   case Bytecodes::_lload_3:
2816     push_pair_local( 3 );
2817     break;
2818   case Bytecodes::_lload:
2819     push_pair_local( iter().get_index() );
2820     break;
2821 
2822   case Bytecodes::_dload_0:
2823     push_pair_local(0);
2824     break;
2825   case Bytecodes::_dload_1:
2826     push_pair_local(1);
2827     break;
2828   case Bytecodes::_dload_2:
2829     push_pair_local(2);
2830     break;
2831   case Bytecodes::_dload_3:
2832     push_pair_local(3);
2833     break;
2834   case Bytecodes::_dload:
2835     push_pair_local(iter().get_index());
2836     break;
2837   case Bytecodes::_fstore_0:
2838   case Bytecodes::_istore_0:
2839   case Bytecodes::_astore_0:
2840     set_local( 0, pop() );
2841     break;
2842   case Bytecodes::_fstore_1:
2843   case Bytecodes::_istore_1:
2844   case Bytecodes::_astore_1:
2845     set_local( 1, pop() );
2846     break;
2847   case Bytecodes::_fstore_2:
2848   case Bytecodes::_istore_2:
2849   case Bytecodes::_astore_2:
2850     set_local( 2, pop() );
2851     break;
2852   case Bytecodes::_fstore_3:
2853   case Bytecodes::_istore_3:
2854   case Bytecodes::_astore_3:
2855     set_local( 3, pop() );
2856     break;
2857   case Bytecodes::_fstore:
2858   case Bytecodes::_istore:
2859   case Bytecodes::_astore:
2860     set_local( iter().get_index(), pop() );
2861     break;
2862   // long stores
2863   case Bytecodes::_lstore_0:
2864     set_pair_local( 0, pop_pair() );
2865     break;
2866   case Bytecodes::_lstore_1:
2867     set_pair_local( 1, pop_pair() );
2868     break;
2869   case Bytecodes::_lstore_2:
2870     set_pair_local( 2, pop_pair() );
2871     break;
2872   case Bytecodes::_lstore_3:
2873     set_pair_local( 3, pop_pair() );
2874     break;
2875   case Bytecodes::_lstore:
2876     set_pair_local( iter().get_index(), pop_pair() );
2877     break;
2878 
2879   // double stores
2880   case Bytecodes::_dstore_0:
2881     set_pair_local( 0, pop_pair() );
2882     break;
2883   case Bytecodes::_dstore_1:
2884     set_pair_local( 1, pop_pair() );
2885     break;
2886   case Bytecodes::_dstore_2:
2887     set_pair_local( 2, pop_pair() );
2888     break;
2889   case Bytecodes::_dstore_3:
2890     set_pair_local( 3, pop_pair() );
2891     break;
2892   case Bytecodes::_dstore:
2893     set_pair_local( iter().get_index(), pop_pair() );
2894     break;
2895 
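       // Raw stack manipulation: the operand stack is modeled symbolically,
       // so these bytecodes only shuffle Node pointers and adjust the stack
       // pointer; no code is emitted for them.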
2896   case Bytecodes::_pop:  dec_sp(1);   break;
2897   case Bytecodes::_pop2: dec_sp(2);   break;
2898   case Bytecodes::_swap:
2899     a = pop();
2900     b = pop();
2901     push(a);
2902     push(b);
2903     break;
2904   case Bytecodes::_dup:
2905     a = pop();
2906     push(a);
2907     push(a);
2908     break;
2909   case Bytecodes::_dup_x1:
2910     a = pop();
2911     b = pop();
2912     push( a );
2913     push( b );
2914     push( a );
2915     break;
2916   case Bytecodes::_dup_x2:
2917     a = pop();
2918     b = pop();
2919     c = pop();
2920     push( a );
2921     push( c );
2922     push( b );
2923     push( a );
2924     break;
2925   case Bytecodes::_dup2:
2926     a = pop();
2927     b = pop();
2928     push( b );
2929     push( a );
2930     push( b );
2931     push( a );
2932     break;
2933 
2934   case Bytecodes::_dup2_x1:
2935     // before: .. c, b, a
2936     // after:  .. b, a, c, b, a
2937     // not tested
2938     a = pop();
2939     b = pop();
2940     c = pop();
2941     push( b );
2942     push( a );
2943     push( c );
2944     push( b );
2945     push( a );
2946     break;
2947   case Bytecodes::_dup2_x2:
2948     // before: .. d, c, b, a
2949     // after:  .. b, a, d, c, b, a
2950     // not tested
2951     a = pop();
2952     b = pop();
2953     c = pop();
2954     d = pop();
2955     push( b );
2956     push( a );
2957     push( d );
2958     push( c );
2959     push( b );
2960     push( a );
2961     break;
2962 
2963   case Bytecodes::_arraylength: {
2964     // Must do the null check with the value still on the expression stack
2965     Node *ary = null_check(peek(), T_ARRAY);
2966     // Was a null exception detected at compile time?
2967     if (stopped())  return;
2968     a = pop();
2969     push(load_array_length(a));
2970     break;
2971   }
2972 
2973   case Bytecodes::_baload:  array_load(T_BYTE);    break;
2974   case Bytecodes::_caload:  array_load(T_CHAR);    break;
2975   case Bytecodes::_iaload:  array_load(T_INT);     break;
2976   case Bytecodes::_saload:  array_load(T_SHORT);   break;
2977   case Bytecodes::_faload:  array_load(T_FLOAT);   break;
2978   case Bytecodes::_aaload:  array_load(T_OBJECT);  break;
2979   case Bytecodes::_laload:  array_load(T_LONG);    break;
2980   case Bytecodes::_daload:  array_load(T_DOUBLE);  break;
2981   case Bytecodes::_bastore: array_store(T_BYTE);   break;
2982   case Bytecodes::_castore: array_store(T_CHAR);   break;
2983   case Bytecodes::_iastore: array_store(T_INT);    break;
2984   case Bytecodes::_sastore: array_store(T_SHORT);  break;
2985   case Bytecodes::_fastore: array_store(T_FLOAT);  break;
2986   case Bytecodes::_aastore: array_store(T_OBJECT); break;
2987   case Bytecodes::_lastore: array_store(T_LONG);   break;
2988   case Bytecodes::_dastore: array_store(T_DOUBLE); break;
2989 
2990   case Bytecodes::_getfield:
2991     do_getfield();
2992     break;
2993 
2994   case Bytecodes::_getstatic:
2995     do_getstatic();
2996     break;
2997 
2998   case Bytecodes::_putfield:
2999     do_putfield();
3000     break;
3001 
3002   case Bytecodes::_putstatic:
3003     do_putstatic();
3004     break;
3005 
3006   case Bytecodes::_irem:
3007     // Must keep both values on the expression stack during the zero check
3008     zero_check_int(peek());
3009     // Was division by zero detected at compile time?
3010     if (stopped())  return;
3011     b = pop();
3012     a = pop();
3013     push(_gvn.transform(new ModINode(control(), a, b)));
3014     break;
3015   case Bytecodes::_idiv:
3016     // Must keep both values on the expression stack during the zero check
3017     zero_check_int(peek());
3018     // Was division by zero detected at compile time?
3019     if (stopped())  return;
3020     b = pop();
3021     a = pop();
3022     push( _gvn.transform( new DivINode(control(),a,b) ) );
3023     break;
3024   case Bytecodes::_imul:
3025     b = pop(); a = pop();
3026     push( _gvn.transform( new MulINode(a,b) ) );
3027     break;
3028   case Bytecodes::_iadd:
3029     b = pop(); a = pop();
3030     push( _gvn.transform( new AddINode(a,b) ) );
3031     break;
3032   case Bytecodes::_ineg:
3033     a = pop();
3034     push( _gvn.transform( new SubINode(_gvn.intcon(0),a)) );
3035     break;
3036   case Bytecodes::_isub:
3037     b = pop(); a = pop();
3038     push( _gvn.transform( new SubINode(a,b) ) );
3039     break;
3040   case Bytecodes::_iand:
3041     b = pop(); a = pop();
3042     push( _gvn.transform( new AndINode(a,b) ) );
3043     break;
3044   case Bytecodes::_ior:
3045     b = pop(); a = pop();
3046     push( _gvn.transform( new OrINode(a,b) ) );
3047     break;
3048   case Bytecodes::_ixor:
3049     b = pop(); a = pop();
3050     push( _gvn.transform( new XorINode(a,b) ) );
3051     break;
3052   case Bytecodes::_ishl:
3053     b = pop(); a = pop();
3054     push( _gvn.transform( new LShiftINode(a,b) ) );
3055     break;
3056   case Bytecodes::_ishr:
3057     b = pop(); a = pop();
3058     push( _gvn.transform( new RShiftINode(a,b) ) );
3059     break;
3060   case Bytecodes::_iushr:
3061     b = pop(); a = pop();
3062     push( _gvn.transform( new URShiftINode(a,b) ) );
3063     break;
3064 
3065   case Bytecodes::_fneg:
3066     a = pop();
3067     b = _gvn.transform(new NegFNode (a));
3068     push(b);
3069     break;
3070 
3071   case Bytecodes::_fsub:
3072     b = pop();
3073     a = pop();
3074     c = _gvn.transform( new SubFNode(a,b) );
3075     push(c);
3076     break;
3077 
3078   case Bytecodes::_fadd:
3079     b = pop();
3080     a = pop();
3081     c = _gvn.transform( new AddFNode(a,b) );
3082     push(c);
3083     break;
3084 
3085   case Bytecodes::_fmul:
3086     b = pop();
3087     a = pop();
3088     c = _gvn.transform( new MulFNode(a,b) );
3089     push(c);
3090     break;
3091 
3092   case Bytecodes::_fdiv:
3093     b = pop();
3094     a = pop();
3095     c = _gvn.transform( new DivFNode(nullptr,a,b) );
3096     push(c);
3097     break;
3098 
3099   case Bytecodes::_frem:
3100     // Generate a ModF node.
3101     b = pop();
3102     a = pop();
3103     push(floating_point_mod(a, b, BasicType::T_FLOAT));
3104     break;
3105 
3106   case Bytecodes::_fcmpl:
3107     b = pop();
3108     a = pop();
3109     c = _gvn.transform( new CmpF3Node( a, b));
3110     push(c);
3111     break;
3112   case Bytecodes::_fcmpg:
3113     b = pop();
3114     a = pop();
3115 
3116     // Same as fcmpl, but we need to flip the unordered case.  Swap the inputs,
3117     // which negates the result sign except for unordered.  Flip the unordered
3118     // as well by using CmpF3, which implements unordered-lesser instead of
3119     // unordered-greater semantics.  Finally, negate the result bits.  The result
3120     // is the same as using a CmpF3Greater, except we did it with CmpF3 alone.
3121     c = _gvn.transform( new CmpF3Node( b, a));
3122     c = _gvn.transform( new SubINode(_gvn.intcon(0),c) );
3123     push(c);
3124     break;
3125 
3126   case Bytecodes::_f2i:
3127     a = pop();
3128     push(_gvn.transform(new ConvF2INode(a)));
3129     break;
3130 
3131   case Bytecodes::_d2i:
3132     a = pop_pair();
3133     b = _gvn.transform(new ConvD2INode(a));
3134     push( b );
3135     break;
3136 
3137   case Bytecodes::_f2d:
3138     a = pop();
3139     b = _gvn.transform( new ConvF2DNode(a));
3140     push_pair( b );
3141     break;
3142 
3143   case Bytecodes::_d2f:
3144     a = pop_pair();
3145     b = _gvn.transform( new ConvD2FNode(a));
3146     push( b );
3147     break;
3148 
3149   case Bytecodes::_l2f:
3150     if (Matcher::convL2FSupported()) {
3151       a = pop_pair();
3152       b = _gvn.transform( new ConvL2FNode(a));
3153       push(b);
3154     } else {
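           // The conversion cannot be implemented inline on this platform;
           // l2f() emits a runtime call instead.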
3155       l2f();
3156     }
3157     break;
3158 
3159   case Bytecodes::_l2d:
3160     a = pop_pair();
3161     b = _gvn.transform( new ConvL2DNode(a));
3162     push_pair(b);
3163     break;
3164 
3165   case Bytecodes::_f2l:
3166     a = pop();
3167     b = _gvn.transform( new ConvF2LNode(a));
3168     push_pair(b);
3169     break;
3170 
3171   case Bytecodes::_d2l:
3172     a = pop_pair();
3173     b = _gvn.transform( new ConvD2LNode(a));
3174     push_pair(b);
3175     break;
3176 
3177   case Bytecodes::_dsub:
3178     b = pop_pair();
3179     a = pop_pair();
3180     c = _gvn.transform( new SubDNode(a,b) );
3181     push_pair(c);
3182     break;
3183 
3184   case Bytecodes::_dadd:
3185     b = pop_pair();
3186     a = pop_pair();
3187     c = _gvn.transform( new AddDNode(a,b) );
3188     push_pair(c);
3189     break;
3190 
3191   case Bytecodes::_dmul:
3192     b = pop_pair();
3193     a = pop_pair();
3194     c = _gvn.transform( new MulDNode(a,b) );
3195     push_pair(c);
3196     break;
3197 
3198   case Bytecodes::_ddiv:
3199     b = pop_pair();
3200     a = pop_pair();
3201     c = _gvn.transform( new DivDNode(nullptr,a,b) );
3202     push_pair(c);
3203     break;
3204 
3205   case Bytecodes::_dneg:
3206     a = pop_pair();
3207     b = _gvn.transform(new NegDNode (a));
3208     push_pair(b);
3209     break;
3210 
3211   case Bytecodes::_drem:
3212     // Generate a ModD node.
3213     b = pop_pair();
3214     a = pop_pair();
3215     push_pair(floating_point_mod(a, b, BasicType::T_DOUBLE));
3216     break;
3217 
3218   case Bytecodes::_dcmpl:
3219     b = pop_pair();
3220     a = pop_pair();
3221     c = _gvn.transform( new CmpD3Node( a, b));
3222     push(c);
3223     break;
3224 
3225   case Bytecodes::_dcmpg:
3226     b = pop_pair();
3227     a = pop_pair();
3228     // Same as dcmpl but need to flip the unordered case.
3229     // Commute the inputs, which negates the result sign except for unordered.
3230     // Flip the unordered as well by using CmpD3 which implements
3231     // unordered-lesser instead of unordered-greater semantics.
3232     // Finally, negate the result bits.  Result is same as using a
3233     // CmpD3Greater except we did it with CmpD3 alone.
3234     c = _gvn.transform( new CmpD3Node( b, a));
3235     c = _gvn.transform( new SubINode(_gvn.intcon(0),c) );
3236     push(c);
3237     break;
3238 
3239 
3240     // Note: for longs, the lo word is on TOS and the hi word at TOS - 1
3241   case Bytecodes::_land:
3242     b = pop_pair();
3243     a = pop_pair();
3244     c = _gvn.transform( new AndLNode(a,b) );
3245     push_pair(c);
3246     break;
3247   case Bytecodes::_lor:
3248     b = pop_pair();
3249     a = pop_pair();
3250     c = _gvn.transform( new OrLNode(a,b) );
3251     push_pair(c);
3252     break;
3253   case Bytecodes::_lxor:
3254     b = pop_pair();
3255     a = pop_pair();
3256     c = _gvn.transform( new XorLNode(a,b) );
3257     push_pair(c);
3258     break;
3259 
3260   case Bytecodes::_lshl:
3261     b = pop();                  // the shift count
3262     a = pop_pair();             // value to be shifted
3263     c = _gvn.transform( new LShiftLNode(a,b) );
3264     push_pair(c);
3265     break;
3266   case Bytecodes::_lshr:
3267     b = pop();                  // the shift count
3268     a = pop_pair();             // value to be shifted
3269     c = _gvn.transform( new RShiftLNode(a,b) );
3270     push_pair(c);
3271     break;
3272   case Bytecodes::_lushr:
3273     b = pop();                  // the shift count
3274     a = pop_pair();             // value to be shifted
3275     c = _gvn.transform( new URShiftLNode(a,b) );
3276     push_pair(c);
3277     break;
3278   case Bytecodes::_lmul:
3279     b = pop_pair();
3280     a = pop_pair();
3281     c = _gvn.transform( new MulLNode(a,b) );
3282     push_pair(c);
3283     break;
3284 
3285   case Bytecodes::_lrem:
3286     // Must keep both values on the expression stack during the zero check.
         // (C2 keeps top() in the unused high-word slot of a long, hence the assert.)
3287     assert(peek(0) == top(), "long word order");
3288     zero_check_long(peek(1));
3289     // Was division by zero detected at compile time?
3290     if (stopped())  return;
3291     b = pop_pair();
3292     a = pop_pair();
3293     c = _gvn.transform( new ModLNode(control(),a,b) );
3294     push_pair(c);
3295     break;
3296 
3297   case Bytecodes::_ldiv:
3298     // Must keep both values on the expression stack during the zero check
3299     assert(peek(0) == top(), "long word order");
3300     zero_check_long(peek(1));
3301     // Was division by zero detected at compile time?
3302     if (stopped())  return;
3303     b = pop_pair();
3304     a = pop_pair();
3305     c = _gvn.transform( new DivLNode(control(),a,b) );
3306     push_pair(c);
3307     break;
3308 
3309   case Bytecodes::_ladd:
3310     b = pop_pair();
3311     a = pop_pair();
3312     c = _gvn.transform( new AddLNode(a,b) );
3313     push_pair(c);
3314     break;
3315   case Bytecodes::_lsub:
3316     b = pop_pair();
3317     a = pop_pair();
3318     c = _gvn.transform( new SubLNode(a,b) );
3319     push_pair(c);
3320     break;
3321   case Bytecodes::_lcmp:
3322     // Safepoints are now inserted _before_ branches.  The long-compare
3323     // bytecode painfully produces a 3-way value (-1,0,+1) which requires a
3324     // slew of control flow.  These are usually followed by a CmpI vs zero and
3325     // a branch; this pattern then optimizes to the obvious long-compare and
3326     // branch.  However, if the branch is backwards, a Safepoint is inserted.
3327     // The inserted Safepoint captures the JVM state at the pre-branch point,
3328     // i.e. it captures the 3-way value.  Thus, if a long-compare is used to
3329     // control a loop, the debug info will force computation of the 3-way
3330     // value even though the generated code uses a long-compare and branch.
3331     // We try to rectify the situation by inserting a SafePoint here and
3332     // having it dominate and kill the safepoint added at a following
3333     // backwards branch.  At this point the JVM state merely holds the 2
3334     // longs but not the 3-way value.
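         // (javac compiles a long comparison such as "x < y" into exactly this
         // lcmp-followed-by-ifXX pattern.)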
3335     switch (iter().next_bc()) {
3336       case Bytecodes::_ifgt:
3337       case Bytecodes::_iflt:
3338       case Bytecodes::_ifge:
3339       case Bytecodes::_ifle:
3340       case Bytecodes::_ifne:
3341       case Bytecodes::_ifeq:
3342         // If this is a backwards branch in the bytecodes, add Safepoint
3343         maybe_add_safepoint(iter().next_get_dest());
3344       default:
3345         break;
3346     }
3347     b = pop_pair();
3348     a = pop_pair();
3349     c = _gvn.transform( new CmpL3Node( a, b ));
3350     push(c);
3351     break;
3352 
3353   case Bytecodes::_lneg:
3354     a = pop_pair();
3355     b = _gvn.transform( new SubLNode(longcon(0),a));
3356     push_pair(b);
3357     break;
3358   case Bytecodes::_l2i:
3359     a = pop_pair();
3360     push( _gvn.transform( new ConvL2INode(a)));
3361     break;
3362   case Bytecodes::_i2l:
3363     a = pop();
3364     b = _gvn.transform( new ConvI2LNode(a));
3365     push_pair(b);
3366     break;
3367   case Bytecodes::_i2b:
3368     // Sign extend
3369     a = pop();
3370     a = Compile::narrow_value(T_BYTE, a, nullptr, &_gvn, true);
3371     push(a);
3372     break;
3373   case Bytecodes::_i2s:
3374     a = pop();
3375     a = Compile::narrow_value(T_SHORT, a, nullptr, &_gvn, true);
3376     push(a);
3377     break;
3378   case Bytecodes::_i2c:
3379     a = pop();
3380     a = Compile::narrow_value(T_CHAR, a, nullptr, &_gvn, true);
3381     push(a);
3382     break;
3383 
3384   case Bytecodes::_i2f:
3385     a = pop();
3386     b = _gvn.transform( new ConvI2FNode(a) ) ;
3387     push(b);
3388     break;
3389 
3390   case Bytecodes::_i2d:
3391     a = pop();
3392     b = _gvn.transform( new ConvI2DNode(a));
3393     push_pair(b);
3394     break;
3395 
3396   case Bytecodes::_iinc:        // Increment local
3397     i = iter().get_index();     // Get local index
3398     set_local( i, _gvn.transform( new AddINode( _gvn.intcon(iter().get_iinc_con()), local(i) ) ) );
3399     break;
3400 
3401   // Exit points of synchronized methods must have an unlock node
3402   case Bytecodes::_return:
3403     return_current(nullptr);
3404     break;
3405 
3406   case Bytecodes::_ireturn:
3407   case Bytecodes::_areturn:
3408   case Bytecodes::_freturn:
3409     return_current(pop());
3410     break;
3411   case Bytecodes::_lreturn:
3412     return_current(pop_pair());
3413     break;
3414   case Bytecodes::_dreturn:
3415     return_current(pop_pair());
3416     break;
3417 
3418   case Bytecodes::_athrow:
3419     // Throwing a null exception oop results in a NullPointerException
3420     null_check(peek());
3421     if (stopped())  return;
3422     // Hook the thrown exception directly to subsequent handlers.
3423     if (BailoutToInterpreterForThrows) {
3424       // Keep method interpreted from now on.
3425       uncommon_trap(Deoptimization::Reason_unhandled,
3426                     Deoptimization::Action_make_not_compilable);
3427       return;
3428     }
3429     if (env()->jvmti_can_post_on_exceptions()) {
3430       // check if we must post exception events, take uncommon trap if so (with must_throw = false)
3431       uncommon_trap_if_should_post_on_exceptions(Deoptimization::Reason_unhandled, false);
3432     }
3433     // We reach here if either can_post_on_exceptions or should_post_on_exceptions is false
3434     add_exception_state(make_exception_state(peek()));
3435     break;
3436 
3437   case Bytecodes::_goto:   // fall through
3438   case Bytecodes::_goto_w: {
3439     int target_bci = (bc() == Bytecodes::_goto) ? iter().get_dest() : iter().get_far_dest();
3440 
3441     // If this is a backwards branch in the bytecodes, add Safepoint
3442     maybe_add_safepoint(target_bci);
3443 
3444     // Merge the current control into the target basic block
3445     merge(target_bci);
3446 
3447     // See if we can get some profile data and hand it off to the next block
3448     Block *target_block = block()->successor_for_bci(target_bci);
3449     if (target_block->pred_count() != 1)  break;
3450     ciMethodData* methodData = method()->method_data();
3451     if (!methodData->is_mature())  break;
3452     ciProfileData* data = methodData->bci_to_data(bci());
3453     assert(data != nullptr && data->is_JumpData(), "need JumpData for taken branch");
3454     int taken = ((ciJumpData*)data)->taken();
3455     taken = method()->scale_count(taken);
3456     target_block->set_count(taken);
3457     break;
3458   }
3459 
3460   case Bytecodes::_ifnull:    btest = BoolTest::eq; goto handle_if_null;
3461   case Bytecodes::_ifnonnull: btest = BoolTest::ne; goto handle_if_null;
3462   handle_if_null:
3463     // If this is a backwards branch in the bytecodes, add Safepoint
3464     maybe_add_safepoint(iter().get_dest());
3465     a = null();
3466     b = pop();
3467     if (b->is_InlineType()) {
3468       // Null checking a scalarized but nullable inline type. Check the IsInit
3469       // input instead of the oop input to avoid keeping buffer allocations alive
3470       c = _gvn.transform(new CmpINode(b->as_InlineType()->get_is_init(), zerocon(T_INT)));
3471     } else {
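           // Use speculative profile data, if available, to strengthen the
           // null check: either prove the value non-null (trapping if it is
           // null after all) or assert that it is always null.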
3472       if (!_gvn.type(b)->speculative_maybe_null() &&
3473           !too_many_traps(Deoptimization::Reason_speculate_null_check)) {
3474         inc_sp(1);
3475         Node* null_ctl = top();
3476         b = null_check_oop(b, &null_ctl, true, true, true);
3477         assert(null_ctl->is_top(), "no null control here");
3478         dec_sp(1);
3479       } else if (_gvn.type(b)->speculative_always_null() &&
3480                  !too_many_traps(Deoptimization::Reason_speculate_null_assert)) {
3481         inc_sp(1);
3482         b = null_assert(b);
3483         dec_sp(1);
3484       }
3485       c = _gvn.transform( new CmpPNode(b, a) );
3486     }
3487     do_ifnull(btest, c);
3488     break;
3489 
3490   case Bytecodes::_if_acmpeq: btest = BoolTest::eq; goto handle_if_acmp;
3491   case Bytecodes::_if_acmpne: btest = BoolTest::ne; goto handle_if_acmp;
3492   handle_if_acmp:
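         // do_acmp also handles value objects, for which acmp implements the
         // substitutability test rather than a plain pointer comparison.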
3493     // If this is a backwards branch in the bytecodes, add Safepoint
3494     maybe_add_safepoint(iter().get_dest());
3495     a = pop();
3496     b = pop();
3497     do_acmp(btest, b, a);
3498     break;
3499 
3500   case Bytecodes::_ifeq: btest = BoolTest::eq; goto handle_ifxx;
3501   case Bytecodes::_ifne: btest = BoolTest::ne; goto handle_ifxx;
3502   case Bytecodes::_iflt: btest = BoolTest::lt; goto handle_ifxx;
3503   case Bytecodes::_ifle: btest = BoolTest::le; goto handle_ifxx;
3504   case Bytecodes::_ifgt: btest = BoolTest::gt; goto handle_ifxx;
3505   case Bytecodes::_ifge: btest = BoolTest::ge; goto handle_ifxx;
3506   handle_ifxx:
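         // All six ifXX bytecodes compare the popped operand against zero.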
3507     // If this is a backwards branch in the bytecodes, add Safepoint
3508     maybe_add_safepoint(iter().get_dest());
3509     a = _gvn.intcon(0);
3510     b = pop();
3511     c = _gvn.transform( new CmpINode(b, a) );
3512     do_if(btest, c);
3513     break;
3514 
3515   case Bytecodes::_if_icmpeq: btest = BoolTest::eq; goto handle_if_icmp;
3516   case Bytecodes::_if_icmpne: btest = BoolTest::ne; goto handle_if_icmp;
3517   case Bytecodes::_if_icmplt: btest = BoolTest::lt; goto handle_if_icmp;
3518   case Bytecodes::_if_icmple: btest = BoolTest::le; goto handle_if_icmp;
3519   case Bytecodes::_if_icmpgt: btest = BoolTest::gt; goto handle_if_icmp;
3520   case Bytecodes::_if_icmpge: btest = BoolTest::ge; goto handle_if_icmp;
3521   handle_if_icmp:
3522     // If this is a backwards branch in the bytecodes, add Safepoint
3523     maybe_add_safepoint(iter().get_dest());
3524     a = pop();
3525     b = pop();
3526     c = _gvn.transform( new CmpINode( b, a ) );
3527     do_if(btest, c);
3528     break;
3529 
3530   case Bytecodes::_tableswitch:
3531     do_tableswitch();
3532     break;
3533 
3534   case Bytecodes::_lookupswitch:
3535     do_lookupswitch();
3536     break;
3537 
3538   case Bytecodes::_invokestatic:
3539   case Bytecodes::_invokedynamic:
3540   case Bytecodes::_invokespecial:
3541   case Bytecodes::_invokevirtual:
3542   case Bytecodes::_invokeinterface:
3543     do_call();
3544     break;
3545   case Bytecodes::_checkcast:
3546     do_checkcast();
3547     break;
3548   case Bytecodes::_instanceof:
3549     do_instanceof();
3550     break;
3551   case Bytecodes::_anewarray:
3552     do_newarray();
3553     break;
3554   case Bytecodes::_newarray:
3555     do_newarray((BasicType)iter().get_index());
3556     break;
3557   case Bytecodes::_multianewarray:
3558     do_multianewarray();
3559     break;
3560   case Bytecodes::_new:
3561     do_new();
3562     break;
3563 
3564   case Bytecodes::_jsr:
3565   case Bytecodes::_jsr_w:
3566     do_jsr();
3567     break;
3568 
3569   case Bytecodes::_ret:
3570     do_ret();
3571     break;
3572 
3573 
3574   case Bytecodes::_monitorenter:
3575     do_monitor_enter();
3576     break;
3577 
3578   case Bytecodes::_monitorexit:
3579     do_monitor_exit();
3580     break;
3581 
3582   case Bytecodes::_breakpoint:
3583     // A breakpoint was set concurrently with this compile
3584     // %%% use an uncommon trap?
3585     C->record_failure("breakpoint in method");
3586     return;
3587 
3588   default:
3589 #ifndef PRODUCT
3590     map()->dump(99);
3591 #endif
3592     tty->print("\nUnhandled bytecode %s\n", Bytecodes::name(bc()));
3593     ShouldNotReachHere();
3594   }
3595 
3596 #ifndef PRODUCT
3597   if (failing()) { return; }
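       // At IGV print level 6 and above, dump the graph after every bytecode.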
3598   constexpr int perBytecode = 6;
3599   if (C->should_print_igv(perBytecode)) {
3600     IdealGraphPrinter* printer = C->igv_printer();
3601     char buffer[256];
3602     jio_snprintf(buffer, sizeof(buffer), "Bytecode %d: %s", bci(), Bytecodes::name(bc()));
3603     bool old = printer->traverse_outs();
3604     printer->set_traverse_outs(true);
3605     printer->print_graph(buffer);
3606     printer->set_traverse_outs(old);
3607   }
3608 #endif
3609 }