/*
 * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/ciMethodData.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/compilerOracle.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "oops/methodData.inline.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/signature.hpp"
#include "utilities/align.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/copy.hpp"

// ==================================================================
// DataLayout
//
// Overlay for generic profiling data.
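//
// Layout sketch (see methodData.hpp for the authoritative definition): a
// DataLayout is a one-word header holding the tag, bci and flags, followed
// by a tag-dependent number of intptr_t cells:
//
//   [ header (tag, bci, flags) ]
//   [ cell 0 ]
//   [ ... ]
//   [ cell n-1 ]
//
// initialize() below writes the header with a single store so that readers
// racing with a reinitialization never observe a half-written header.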

// Some types of data layouts need a length field.
bool DataLayout::needs_array_len(u1 tag) {
  return (tag == multi_branch_data_tag) || (tag == arg_info_data_tag) || (tag == parameters_type_data_tag);
}

// Perform generic initialization of the data.  More specific
// initialization occurs in overrides of ProfileData::post_initialize.
void DataLayout::initialize(u1 tag, u2 bci, int cell_count) {
  DataLayout temp;
  temp._header._bits = (intptr_t)0;
  temp._header._struct._tag = tag;
  temp._header._struct._bci = bci;
  // Write the header using a single intptr_t write.  This ensures that if the layout is
  // reinitialized readers will never see the transient state where the header is 0.
  _header = temp._header;

  for (int i = 0; i < cell_count; i++) {
    set_cell_at(i, (intptr_t)0);
  }
  if (needs_array_len(tag)) {
    set_cell_at(ArrayData::array_len_off_set, cell_count - 1); // -1 for header.
  }
  if (tag == call_type_data_tag) {
    CallTypeData::initialize(this, cell_count);
  } else if (tag == virtual_call_type_data_tag) {
    VirtualCallTypeData::initialize(this, cell_count);
  }
}

void DataLayout::clean_weak_klass_links(bool always_clean) {
  ResourceMark m;
  data_in()->clean_weak_klass_links(always_clean);
}


// ==================================================================
// ProfileData
//
// A ProfileData object is created to refer to a section of profiling
// data in a structured way.

// Constructor for invalid ProfileData.
ProfileData::ProfileData() {
  _data = nullptr;
}

char* ProfileData::print_data_on_helper(const MethodData* md) const {
  DataLayout* dp  = md->extra_data_base();
  DataLayout* end = md->args_data_limit();
  stringStream ss;
  for (;; dp = MethodData::next_extra(dp)) {
    assert(dp < end, "moved past end of extra data");
    switch(dp->tag()) {
    case DataLayout::speculative_trap_data_tag:
      if (dp->bci() == bci()) {
        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
        int trap = data->trap_state();
        char buf[100];
        ss.print("trap/");
        data->method()->print_short_name(&ss);
        ss.print("(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
      }
      break;
    case DataLayout::bit_data_tag:
      break;
    case DataLayout::no_tag:
    case DataLayout::arg_info_data_tag:
      return ss.as_string();
      break;
    default:
      fatal("unexpected tag %d", dp->tag());
    }
  }
  return nullptr;
}

void ProfileData::print_data_on(outputStream* st, const MethodData* md) const {
  print_data_on(st, print_data_on_helper(md));
}

void ProfileData::print_shared(outputStream* st, const char* name, const char* extra) const {
  st->print("bci: %d ", bci());
  st->fill_to(tab_width_one + 1);
  st->print("%s", name);
  tab(st);
  int trap = trap_state();
  if (trap != 0) {
    char buf[100];
    st->print("trap(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
  }
  if (extra != nullptr) {
    st->print("%s", extra);
  }
  int flags = data()->flags();
  if (flags != 0) {
    st->print("flags(%d) %p/%d", flags, data(), in_bytes(DataLayout::flags_offset()));
  }
}

void ProfileData::tab(outputStream* st, bool first) const {
  st->fill_to(first ? tab_width_one : tab_width_two);
}

// ==================================================================
// BitData
//
// A BitData corresponds to a one-bit flag.  This is used to indicate
// whether a checkcast bytecode has seen a null value.


void BitData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "BitData", extra);
  st->cr();
}

// ==================================================================
// CounterData
//
// A CounterData corresponds to a simple counter.

void CounterData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "CounterData", extra);
  st->print_cr("count(%u)", count());
}

// ==================================================================
// JumpData
//
// A JumpData is used to access profiling information for a direct
// branch.  It is a counter, used for counting the number of branches,
// plus a data displacement, used for realigning the data pointer to
// the corresponding target bci.
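//
// The displacement is stored as a difference of data indices within the
// MDO: post_initialize() below records bci_to_di(target) - dp_to_di(dp()),
// so the profile data pointer can be realigned to the target bci when the
// branch is taken.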

void JumpData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(stream->bci() == bci(), "wrong pos");
  int target;
  Bytecodes::Code c = stream->code();
  if (c == Bytecodes::_goto_w || c == Bytecodes::_jsr_w) {
    target = stream->dest_w();
  } else {
    target = stream->dest();
  }
  int my_di = mdo->dp_to_di(dp());
  int target_di = mdo->bci_to_di(target);
  int offset = target_di - my_di;
  set_displacement(offset);
}

void JumpData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "JumpData", extra);
  st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
}

int TypeStackSlotEntries::compute_cell_count(Symbol* signature, bool include_receiver, int max) {
  // Parameter profiling includes the receiver
  int args_count = include_receiver ? 1 : 0;
  ResourceMark rm;
  ReferenceArgumentCount rac(signature);
  args_count += rac.count();
  args_count = MIN2(args_count, max);
  return args_count * per_arg_cell_count;
}

int TypeEntriesAtCall::compute_cell_count(BytecodeStream* stream) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  assert(TypeStackSlotEntries::per_arg_count() > SingleTypeEntry::static_cell_count(), "code to test for arguments/results broken");
  const methodHandle m = stream->method();
  int bci = stream->bci();
  Bytecode_invoke inv(m, bci);
  int args_cell = 0;
  if (MethodData::profile_arguments_for_invoke(m, bci)) {
    args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), false, TypeProfileArgsLimit);
  }
  int ret_cell = 0;
  if (MethodData::profile_return_for_invoke(m, bci) && is_reference_type(inv.result_type())) {
    ret_cell = SingleTypeEntry::static_cell_count();
  }
  int header_cell = 0;
  if (args_cell + ret_cell > 0) {
    header_cell = header_cell_count();
  }

  return header_cell + args_cell + ret_cell;
}

class ArgumentOffsetComputer : public SignatureIterator {
private:
  int _max;
  int _offset;
  GrowableArray<int> _offsets;

  friend class SignatureIterator;  // so do_parameters_on can call do_type
  void do_type(BasicType type) {
    if (is_reference_type(type) && _offsets.length() < _max) {
      _offsets.push(_offset);
    }
    _offset += parameter_type_word_count(type);
  }

 public:
  ArgumentOffsetComputer(Symbol* signature, int max)
    : SignatureIterator(signature),
      _max(max), _offset(0),
      _offsets(max) {
    do_parameters_on(this);  // non-virtual template execution
  }

  int off_at(int i) const { return _offsets.at(i); }
};
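
// Illustrative example (not exercised here): for the signature
// "(ILjava/lang/String;J[I)V" with a large max, do_type() walks the
// parameters at word offsets 0 (int), 1 (String), 2 (long, two words)
// and 4 (int[]), recording only the reference slots, so off_at(0) == 1
// and off_at(1) == 4.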

void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver, bool include_receiver) {
  ResourceMark rm;
  int start = 0;
  // Parameter profiling includes the receiver
  if (include_receiver && has_receiver) {
    set_stack_slot(0, 0);
    set_type(0, type_none());
    start += 1;
  }
  ArgumentOffsetComputer aos(signature, _number_of_entries-start);
  for (int i = start; i < _number_of_entries; i++) {
    set_stack_slot(i, aos.off_at(i-start) + (has_receiver ? 1 : 0));
    set_type(i, type_none());
  }
}

void CallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  Bytecode_invoke inv(stream->method(), stream->bci());

  if (has_arguments()) {
#ifdef ASSERT
    ResourceMark rm;
    ReferenceArgumentCount rac(inv.signature());
    int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
    assert(count > 0, "room for args type but none found?");
    check_number_of_arguments(count);
#endif
    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {
    assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
    _ret.post_initialize();
  }
}

void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  Bytecode_invoke inv(stream->method(), stream->bci());

  if (has_arguments()) {
#ifdef ASSERT
    ResourceMark rm;
    ReferenceArgumentCount rac(inv.signature());
    int count = MIN2(rac.count(), (int)TypeProfileArgsLimit);
    assert(count > 0, "room for args type but none found?");
    check_number_of_arguments(count);
#endif
    _args.post_initialize(inv.signature(), inv.has_receiver(), false);
  }

  if (has_return()) {
    assert(is_reference_type(inv.result_type()), "room for a ret type but doesn't return obj?");
    _ret.post_initialize();
  }
}

void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) {
  for (int i = 0; i < _number_of_entries; i++) {
    intptr_t p = type(i);
    Klass* k = (Klass*)klass_part(p);
    if (k != nullptr && (always_clean || !k->is_loader_alive())) {
      set_type(i, with_status((Klass*)nullptr, p));
    }
  }
}

void SingleTypeEntry::clean_weak_klass_links(bool always_clean) {
  intptr_t p = type();
  Klass* k = (Klass*)klass_part(p);
  if (k != nullptr && (always_clean || !k->is_loader_alive())) {
    set_type(with_status((Klass*)nullptr, p));
  }
}

bool TypeEntriesAtCall::return_profiling_enabled() {
  return MethodData::profile_return();
}

bool TypeEntriesAtCall::arguments_profiling_enabled() {
  return MethodData::profile_arguments();
}

void TypeEntries::print_klass(outputStream* st, intptr_t k) {
  if (is_type_none(k)) {
    st->print("none");
  } else if (is_type_unknown(k)) {
    st->print("unknown");
  } else {
    valid_klass(k)->print_value_on(st);
  }
  if (was_null_seen(k)) {
    st->print(" (null seen)");
  }
}

void TypeStackSlotEntries::print_data_on(outputStream* st) const {
  for (int i = 0; i < _number_of_entries; i++) {
    _pd->tab(st);
    st->print("%d: stack(%u) ", i, stack_slot(i));
    print_klass(st, type(i));
    st->cr();
  }
}

void SingleTypeEntry::print_data_on(outputStream* st) const {
  _pd->tab(st);
  print_klass(st, type());
  st->cr();
}

void CallTypeData::print_data_on(outputStream* st, const char* extra) const {
  CounterData::print_data_on(st, extra);
  if (has_arguments()) {
    tab(st, true);
    st->print("argument types");
    _args.print_data_on(st);
  }
  if (has_return()) {
    tab(st, true);
    st->print("return type");
    _ret.print_data_on(st);
  }
}

void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) const {
  VirtualCallData::print_data_on(st, extra);
  if (has_arguments()) {
    tab(st, true);
    st->print("argument types");
    _args.print_data_on(st);
  }
  if (has_return()) {
    tab(st, true);
    st->print("return type");
    _ret.print_data_on(st);
  }
}

// ==================================================================
// ReceiverTypeData
//
// A ReceiverTypeData is used to access profiling information about a
// dynamic type check.  It consists of a counter which counts the total times
// that the check is reached, and a series of (Klass*, count) pairs
// which are used to store a type profile for the receiver of the check.

void ReceiverTypeData::clean_weak_klass_links(bool always_clean) {
  for (uint row = 0; row < row_limit(); row++) {
    Klass* p = receiver(row);
    if (p != nullptr && (always_clean || !p->is_loader_alive())) {
      clear_row(row);
    }
  }
}

void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
  uint row;
  int entries = 0;
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != nullptr)  entries++;
  }
  st->print_cr("count(%u) entries(%u)", count(), entries);
  int total = count();
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != nullptr) {
      total += receiver_count(row);
    }
  }
  for (row = 0; row < row_limit(); row++) {
    if (receiver(row) != nullptr) {
      tab(st);
      receiver(row)->print_value_on(st);
      st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total);
    }
  }
}
void ReceiverTypeData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ReceiverTypeData", extra);
  print_receiver_data_on(st);
}

void VirtualCallData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "VirtualCallData", extra);
  print_receiver_data_on(st);
}

// ==================================================================
// RetData
//
// A RetData is used to access profiling information for a ret bytecode.
// It is composed of a count of the number of times that the ret has
// been executed, followed by a series of triples of the form
// (bci, count, di) which count the number of times that some bci was the
// target of the ret and cache a corresponding displacement.

void RetData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  for (uint row = 0; row < row_limit(); row++) {
    set_bci_displacement(row, -1);
    set_bci(row, no_bci);
  }
  // release so other threads see a consistent state.  bci is used as
  // a valid flag for bci_displacement.
  OrderAccess::release();
}

// This routine needs to atomically update the RetData structure, so the
// caller needs to hold the RetData_lock before it gets here.  Since taking
// the lock can block (and allow GC) and since RetData is a ProfileData is a
// wrapper around a derived oop, taking the lock in _this_ method will
// basically cause the 'this' pointer's _data field to contain junk after the
// lock.  We require the caller to take the lock before making the ProfileData
// structure.  Currently the only caller is InterpreterRuntime::update_mdp_for_ret
address RetData::fixup_ret(int return_bci, MethodData* h_mdo) {
  // First find the mdp which corresponds to the return bci.
  address mdp = h_mdo->bci_to_dp(return_bci);

  // Now check to see if any of the cache slots are open.
  for (uint row = 0; row < row_limit(); row++) {
    if (bci(row) == no_bci) {
      set_bci_displacement(row, checked_cast<int>(mdp - dp()));
      set_bci_count(row, DataLayout::counter_increment);
      // Barrier to ensure displacement is written before the bci; allows
      // the interpreter to read displacement without fear of race condition.
      release_set_bci(row, return_bci);
      break;
    }
  }
  return mdp;
}

void RetData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "RetData", extra);
  uint row;
  int entries = 0;
  for (row = 0; row < row_limit(); row++) {
    if (bci(row) != no_bci)  entries++;
  }
  st->print_cr("count(%u) entries(%u)", count(), entries);
  for (row = 0; row < row_limit(); row++) {
    if (bci(row) != no_bci) {
      tab(st);
      st->print_cr("bci(%d: count(%u) displacement(%d))",
                   bci(row), bci_count(row), bci_displacement(row));
    }
  }
}

// ==================================================================
// BranchData
//
// A BranchData is used to access profiling data for a two-way branch.
// It consists of taken and not_taken counts as well as a data displacement
// for the taken case.

void BranchData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  assert(stream->bci() == bci(), "wrong pos");
  int target = stream->dest();
  int my_di = mdo->dp_to_di(dp());
  int target_di = mdo->bci_to_di(target);
  int offset = target_di - my_di;
  set_displacement(offset);
}

void BranchData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "BranchData", extra);
  if (data()->flags()) {
    st->cr();
    tab(st);
  }
  st->print_cr("taken(%u) displacement(%d)",
               taken(), displacement());
  tab(st);
  st->print_cr("not taken(%u)", not_taken());
}

// ==================================================================
// MultiBranchData
//
// A MultiBranchData is used to access profiling information for
// a multi-way branch (*switch bytecodes).  It consists of a series
// of (count, displacement) pairs, which count the number of times each
// case was taken and specify the data displacement for each branch target.
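//
// For example, a tableswitch with three explicit cases needs one cell for
// the array length plus per_case_cell_count cells for each of the three
// cases and one more group for the default target, which matches what
// compute_cell_count() below returns.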

int MultiBranchData::compute_cell_count(BytecodeStream* stream) {
  int cell_count = 0;
  if (stream->code() == Bytecodes::_tableswitch) {
    Bytecode_tableswitch sw(stream->method()(), stream->bcp());
    cell_count = 1 + per_case_cell_count * (1 + sw.length()); // 1 for default
  } else {
    Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
    cell_count = 1 + per_case_cell_count * (sw.number_of_pairs() + 1); // 1 for default
  }
  return cell_count;
}

void MultiBranchData::post_initialize(BytecodeStream* stream,
                                      MethodData* mdo) {
  assert(stream->bci() == bci(), "wrong pos");
  int target;
  int my_di;
  int target_di;
  int offset;
  if (stream->code() == Bytecodes::_tableswitch) {
    Bytecode_tableswitch sw(stream->method()(), stream->bcp());
    int len = sw.length();
    assert(array_len() == per_case_cell_count * (len + 1), "wrong len");
    for (int count = 0; count < len; count++) {
      target = sw.dest_offset_at(count) + bci();
      my_di = mdo->dp_to_di(dp());
      target_di = mdo->bci_to_di(target);
      offset = target_di - my_di;
      set_displacement_at(count, offset);
    }
    target = sw.default_offset() + bci();
    my_di = mdo->dp_to_di(dp());
    target_di = mdo->bci_to_di(target);
    offset = target_di - my_di;
    set_default_displacement(offset);

  } else {
    Bytecode_lookupswitch sw(stream->method()(), stream->bcp());
    int npairs = sw.number_of_pairs();
    assert(array_len() == per_case_cell_count * (npairs + 1), "wrong len");
    for (int count = 0; count < npairs; count++) {
      LookupswitchPair pair = sw.pair_at(count);
      target = pair.offset() + bci();
      my_di = mdo->dp_to_di(dp());
      target_di = mdo->bci_to_di(target);
      offset = target_di - my_di;
      set_displacement_at(count, offset);
    }
    target = sw.default_offset() + bci();
    my_di = mdo->dp_to_di(dp());
    target_di = mdo->bci_to_di(target);
    offset = target_di - my_di;
    set_default_displacement(offset);
  }
}

void MultiBranchData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "MultiBranchData", extra);
  st->print_cr("default_count(%u) displacement(%d)",
               default_count(), default_displacement());
  int cases = number_of_cases();
  for (int i = 0; i < cases; i++) {
    tab(st);
    st->print_cr("count(%u) displacement(%d)",
                 count_at(i), displacement_at(i));
  }
}

void ArgInfoData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ArgInfoData", extra);
  int nargs = number_of_args();
  for (int i = 0; i < nargs; i++) {
    st->print("  0x%x", arg_modified(i));
  }
  st->cr();
}

int ParametersTypeData::compute_cell_count(Method* m) {
  if (!MethodData::profile_parameters_for_method(methodHandle(Thread::current(), m))) {
    return 0;
  }
  int max = TypeProfileParmsLimit == -1 ? INT_MAX : TypeProfileParmsLimit;
  int obj_args = TypeStackSlotEntries::compute_cell_count(m->signature(), !m->is_static(), max);
  if (obj_args > 0) {
    return obj_args + 1; // 1 cell for array len
  }
  return 0;
}

void ParametersTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  _parameters.post_initialize(mdo->method()->signature(), !mdo->method()->is_static(), true);
}

bool ParametersTypeData::profiling_enabled() {
  return MethodData::profile_parameters();
}

void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ParametersTypeData", extra);
  tab(st);
  _parameters.print_data_on(st);
  st->cr();
}

void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "SpeculativeTrapData", extra);
  tab(st);
  method()->print_short_name(st);
  st->cr();
}

void ArrayStoreData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ArrayStore", extra);
  st->cr();
  tab(st, true);
  st->print("array");
  _array.print_data_on(st);
  tab(st, true);
  st->print("element");
  if (null_seen()) {
    st->print(" (null seen)");
  }
  tab(st);
  print_receiver_data_on(st);
}

void ArrayLoadData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "ArrayLoad", extra);
  st->cr();
  tab(st, true);
  st->print("array");
  _array.print_data_on(st);
  tab(st, true);
  st->print("element");
  _element.print_data_on(st);
}

void ACmpData::print_data_on(outputStream* st, const char* extra) const {
  BranchData::print_data_on(st, extra);
  tab(st, true);
  st->print("left");
  _left.print_data_on(st);
  tab(st, true);
  st->print("right");
  _right.print_data_on(st);
}

// ==================================================================
// MethodData*
//
// A MethodData* holds information which has been collected about
// a method.

MethodData* MethodData::allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS) {
  assert(!THREAD->owns_locks(), "Should not own any locks");
  int size = MethodData::compute_allocation_size_in_words(method);

  return new (loader_data, size, MetaspaceObj::MethodDataType, THREAD)
    MethodData(method);
}

int MethodData::bytecode_cell_count(Bytecodes::Code code) {
  switch (code) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
    if (TypeProfileCasts) {
      return ReceiverTypeData::static_cell_count();
    } else {
      return BitData::static_cell_count();
    }
  case Bytecodes::_aaload:
    return ArrayLoadData::static_cell_count();
  case Bytecodes::_aastore:
    return ArrayStoreData::static_cell_count();
  case Bytecodes::_invokespecial:
  case Bytecodes::_invokestatic:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return CounterData::static_cell_count();
    }
  case Bytecodes::_goto:
  case Bytecodes::_goto_w:
  case Bytecodes::_jsr:
  case Bytecodes::_jsr_w:
    return JumpData::static_cell_count();
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return VirtualCallData::static_cell_count();
    }
  case Bytecodes::_invokedynamic:
    if (MethodData::profile_arguments() || MethodData::profile_return()) {
      return variable_cell_count;
    } else {
      return CounterData::static_cell_count();
    }
  case Bytecodes::_ret:
    return RetData::static_cell_count();
  case Bytecodes::_ifeq:
  case Bytecodes::_ifne:
  case Bytecodes::_iflt:
  case Bytecodes::_ifge:
  case Bytecodes::_ifgt:
  case Bytecodes::_ifle:
  case Bytecodes::_if_icmpeq:
  case Bytecodes::_if_icmpne:
  case Bytecodes::_if_icmplt:
  case Bytecodes::_if_icmpge:
  case Bytecodes::_if_icmpgt:
  case Bytecodes::_if_icmple:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
    return BranchData::static_cell_count();
  case Bytecodes::_if_acmpne:
  case Bytecodes::_if_acmpeq:
    return ACmpData::static_cell_count();
  case Bytecodes::_lookupswitch:
  case Bytecodes::_tableswitch:
    return variable_cell_count;
  default:
    return no_profile_data;
  }
}

// Compute the size of the profiling information corresponding to
// the current bytecode.
int MethodData::compute_data_size(BytecodeStream* stream) {
  int cell_count = bytecode_cell_count(stream->code());
  if (cell_count == no_profile_data) {
    return 0;
  }
  if (cell_count == variable_cell_count) {
    switch (stream->code()) {
    case Bytecodes::_lookupswitch:
    case Bytecodes::_tableswitch:
      cell_count = MultiBranchData::compute_cell_count(stream);
      break;
    case Bytecodes::_invokespecial:
    case Bytecodes::_invokestatic:
    case Bytecodes::_invokedynamic:
      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
          profile_return_for_invoke(stream->method(), stream->bci())) {
        cell_count = CallTypeData::compute_cell_count(stream);
      } else {
        cell_count = CounterData::static_cell_count();
      }
      break;
    case Bytecodes::_invokevirtual:
    case Bytecodes::_invokeinterface: {
      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
          profile_return_for_invoke(stream->method(), stream->bci())) {
        cell_count = VirtualCallTypeData::compute_cell_count(stream);
      } else {
        cell_count = VirtualCallData::static_cell_count();
      }
      break;
    }
    default:
      fatal("unexpected bytecode for var length profile data");
    }
  }
  // Note:  cell_count might be zero, meaning that there is just
  //        a DataLayout header, with no extra cells.
  assert(cell_count >= 0, "sanity");
  return DataLayout::compute_size_in_bytes(cell_count);
}

bool MethodData::is_speculative_trap_bytecode(Bytecodes::Code code) {
  // Bytecodes for which we may use speculation
  switch (code) {
  case Bytecodes::_checkcast:
  case Bytecodes::_instanceof:
  case Bytecodes::_aaload:
  case Bytecodes::_aastore:
  case Bytecodes::_invokevirtual:
  case Bytecodes::_invokeinterface:
  case Bytecodes::_if_acmpeq:
  case Bytecodes::_if_acmpne:
  case Bytecodes::_ifnull:
  case Bytecodes::_ifnonnull:
  case Bytecodes::_invokestatic:
#ifdef COMPILER2
    if (CompilerConfig::is_c2_enabled()) {
      return UseTypeSpeculation;
    }
#endif
  default:
    return false;
  }
  return false;
}

#if INCLUDE_JVMCI

void* FailedSpeculation::operator new(size_t size, size_t fs_size) throw() {
  return CHeapObj<mtCompiler>::operator new(fs_size, std::nothrow);
}

FailedSpeculation::FailedSpeculation(address speculation, int speculation_len) : _data_len(speculation_len), _next(nullptr) {
  memcpy(data(), speculation, speculation_len);
}

// A heuristic check to detect nmethods that outlive a failed speculations list.
static void guarantee_failed_speculations_alive(nmethod* nm, FailedSpeculation** failed_speculations_address) {
  jlong head = (jlong)(address) *failed_speculations_address;
  if ((head & 0x1) == 0x1) {
    stringStream st;
    if (nm != nullptr) {
      st.print("%d", nm->compile_id());
      Method* method = nm->method();
      st.print_raw("{");
      if (method != nullptr) {
        method->print_name(&st);
      } else {
        const char* jvmci_name = nm->jvmci_name();
        if (jvmci_name != nullptr) {
          st.print_raw(jvmci_name);
        }
      }
      st.print_raw("}");
    } else {
      st.print("<unknown>");
    }
    fatal("Adding to failed speculations list that appears to have been freed. Source: %s", st.as_string());
  }
}

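// Appends a speculation to the lock-free, singly-linked list rooted at
// *failed_speculations_address. A new node is linked in with a CAS on the
// first null next-pointer; if an identical speculation is already on the
// list, or the allocation fails, nothing is added and false is returned.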
bool FailedSpeculation::add_failed_speculation(nmethod* nm, FailedSpeculation** failed_speculations_address, address speculation, int speculation_len) {
  assert(failed_speculations_address != nullptr, "must be");
  size_t fs_size = sizeof(FailedSpeculation) + speculation_len;

  guarantee_failed_speculations_alive(nm, failed_speculations_address);

  FailedSpeculation** cursor = failed_speculations_address;
  FailedSpeculation* fs = nullptr;
  do {
    if (*cursor == nullptr) {
      if (fs == nullptr) {
        // lazily allocate FailedSpeculation
        fs = new (fs_size) FailedSpeculation(speculation, speculation_len);
        if (fs == nullptr) {
          // no memory -> ignore failed speculation
          return false;
        }
        guarantee(is_aligned(fs, sizeof(FailedSpeculation*)), "FailedSpeculation objects must be pointer aligned");
      }
      FailedSpeculation* old_fs = Atomic::cmpxchg(cursor, (FailedSpeculation*) nullptr, fs);
      if (old_fs == nullptr) {
        // Successfully appended fs to end of the list
        return true;
      }
    }
    guarantee(*cursor != nullptr, "cursor must point to non-null FailedSpeculation");
    // check if the current entry matches this thread's failed speculation
    if ((*cursor)->data_len() == speculation_len && memcmp(speculation, (*cursor)->data(), speculation_len) == 0) {
      if (fs != nullptr) {
        delete fs;
      }
      return false;
    }
    cursor = (*cursor)->next_adr();
  } while (true);
}

void FailedSpeculation::free_failed_speculations(FailedSpeculation** failed_speculations_address) {
  assert(failed_speculations_address != nullptr, "must be");
  FailedSpeculation* fs = *failed_speculations_address;
  while (fs != nullptr) {
    FailedSpeculation* next = fs->next();
    delete fs;
    fs = next;
  }

  // Write an unaligned value to failed_speculations_address to denote
  // that it is no longer a valid pointer. This allows for the check
  // in add_failed_speculation against adding to a freed failed
  // speculations list.
  long* head = (long*) failed_speculations_address;
  (*head) = (*head) | 0x1;
}
#endif // INCLUDE_JVMCI

int MethodData::compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps) {
#if INCLUDE_JVMCI
  if (ProfileTraps) {
    // Assume that up to 30% of the possibly trapping BCIs with no MDP will need to allocate one.
    int extra_data_count = MIN2(empty_bc_count, MAX2(4, (empty_bc_count * 30) / 100));
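    // For example, empty_bc_count == 100 gives MIN2(100, MAX2(4, 30)) == 30
    // extra slots, while empty_bc_count == 10 gives MIN2(10, MAX2(4, 3)) == 4.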

    // Make sure we have a minimum number of extra data slots to
    // allocate SpeculativeTrapData entries. We would want to have one
    // entry per compilation that inlines this method and for which
    // some type speculation assumption fails. So the room we need for
    // the SpeculativeTrapData entries doesn't directly depend on the
    // size of the method. Because it's hard to estimate, we reserve
    // space for an arbitrary number of entries.
    int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
      (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());

    return MAX2(extra_data_count, spec_data_count);
  } else {
    return 0;
  }
#else // INCLUDE_JVMCI
  if (ProfileTraps) {
    // Assume that up to 3% of BCIs with no MDP will need to allocate one.
    int extra_data_count = (uint)(empty_bc_count * 3) / 128 + 1;
    // If the method is large, let the extra BCIs grow numerous (to ~1%).
    int one_percent_of_data
      = (uint)data_size / (DataLayout::header_size_in_bytes()*128);
    if (extra_data_count < one_percent_of_data)
      extra_data_count = one_percent_of_data;
    if (extra_data_count > empty_bc_count)
      extra_data_count = empty_bc_count;  // no need for more

    // Make sure we have a minimum number of extra data slots to
    // allocate SpeculativeTrapData entries. We would want to have one
    // entry per compilation that inlines this method and for which
    // some type speculation assumption fails. So the room we need for
    // the SpeculativeTrapData entries doesn't directly depend on the
    // size of the method. Because it's hard to estimate, we reserve
    // space for an arbitrary number of entries.
    int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
      (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());

    return MAX2(extra_data_count, spec_data_count);
  } else {
    return 0;
  }
#endif // INCLUDE_JVMCI
}

// Compute the size of the MethodData* necessary to store
// profiling information about a given method.  Size is in bytes.
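// The body of a MethodData is laid out, in order, as: the per-bytecode
// profile data, the extra data (traps and speculative traps), an
// ArgInfoData record for modified arguments, an optional parameters type
// area, and optional per-exception-handler BitData records. The sizes
// added up here must match the layout performed in MethodData::initialize().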
 987 int MethodData::compute_allocation_size_in_bytes(const methodHandle& method) {
 988   int data_size = 0;
 989   BytecodeStream stream(method);
 990   Bytecodes::Code c;
 991   int empty_bc_count = 0;  // number of bytecodes lacking data
 992   bool needs_speculative_traps = false;
 993   while ((c = stream.next()) >= 0) {
 994     int size_in_bytes = compute_data_size(&stream);
 995     data_size += size_in_bytes;
 996     if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c)))  empty_bc_count += 1;
 997     needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
 998   }
 999   int object_size = in_bytes(data_offset()) + data_size;
1000 
1001   // Add some extra DataLayout cells (at least one) to track stray traps.
1002   int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
1003   object_size += extra_data_count * DataLayout::compute_size_in_bytes(0);
1004 
1005   // Add a cell to record information about modified arguments.
1006   int arg_size = method->size_of_parameters();
1007   object_size += DataLayout::compute_size_in_bytes(arg_size+1);
1008 
1009   // Reserve room for an area of the MDO dedicated to profiling of
1010   // parameters
1011   int args_cell = ParametersTypeData::compute_cell_count(method());
1012   if (args_cell > 0) {
1013     object_size += DataLayout::compute_size_in_bytes(args_cell);
1014   }
1015 
1016   if (ProfileExceptionHandlers && method()->has_exception_handler()) {
1017     int num_exception_handlers = method()->exception_table_length();
1018     object_size += num_exception_handlers * single_exception_handler_data_size();
1019   }
1020 
1021   return object_size;
1022 }
1023 
1024 // Compute the size of the MethodData* necessary to store
1025 // profiling information about a given method.  Size is in words
1026 int MethodData::compute_allocation_size_in_words(const methodHandle& method) {
1027   int byte_size = compute_allocation_size_in_bytes(method);
1028   int word_size = align_up(byte_size, BytesPerWord) / BytesPerWord;
1029   return align_metadata_size(word_size);
1030 }
1031 
1032 // Initialize an individual data segment.  Returns the size of
1033 // the segment in bytes.
1034 int MethodData::initialize_data(BytecodeStream* stream,
1035                                        int data_index) {
1036   int cell_count = -1;
1037   u1 tag = DataLayout::no_tag;
1038   DataLayout* data_layout = data_layout_at(data_index);
1039   Bytecodes::Code c = stream->code();
1040   switch (c) {
1041   case Bytecodes::_checkcast:
1042   case Bytecodes::_instanceof:
1043     if (TypeProfileCasts) {
1044       cell_count = ReceiverTypeData::static_cell_count();
1045       tag = DataLayout::receiver_type_data_tag;
1046     } else {
1047       cell_count = BitData::static_cell_count();
1048       tag = DataLayout::bit_data_tag;
1049     }
1050     break;
1051   case Bytecodes::_aaload:
1052     cell_count = ArrayLoadData::static_cell_count();
1053     tag = DataLayout::array_load_data_tag;
1054     break;
1055   case Bytecodes::_aastore:
1056     cell_count = ArrayStoreData::static_cell_count();
1057     tag = DataLayout::array_store_data_tag;
1058     break;
1059   case Bytecodes::_invokespecial:
1060   case Bytecodes::_invokestatic: {
1061     int counter_data_cell_count = CounterData::static_cell_count();
1062     if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
1063         profile_return_for_invoke(stream->method(), stream->bci())) {
1064       cell_count = CallTypeData::compute_cell_count(stream);
1065     } else {
1066       cell_count = counter_data_cell_count;
1067     }
1068     if (cell_count > counter_data_cell_count) {
1069       tag = DataLayout::call_type_data_tag;
1070     } else {
1071       tag = DataLayout::counter_data_tag;
1072     }
1073     break;
1074   }
1075   case Bytecodes::_goto:
1076   case Bytecodes::_goto_w:
1077   case Bytecodes::_jsr:
1078   case Bytecodes::_jsr_w:
1079     cell_count = JumpData::static_cell_count();
1080     tag = DataLayout::jump_data_tag;
1081     break;
1082   case Bytecodes::_invokevirtual:
1083   case Bytecodes::_invokeinterface: {
1084     int virtual_call_data_cell_count = VirtualCallData::static_cell_count();
1085     if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
1086         profile_return_for_invoke(stream->method(), stream->bci())) {
1087       cell_count = VirtualCallTypeData::compute_cell_count(stream);
1088     } else {
1089       cell_count = virtual_call_data_cell_count;
1090     }
1091     if (cell_count > virtual_call_data_cell_count) {
1092       tag = DataLayout::virtual_call_type_data_tag;
1093     } else {
1094       tag = DataLayout::virtual_call_data_tag;
1095     }
1096     break;
1097   }
1098   case Bytecodes::_invokedynamic: {
1099     // %%% should make a type profile for any invokedynamic that takes a ref argument
1100     int counter_data_cell_count = CounterData::static_cell_count();
1101     if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
1102         profile_return_for_invoke(stream->method(), stream->bci())) {
1103       cell_count = CallTypeData::compute_cell_count(stream);
1104     } else {
1105       cell_count = counter_data_cell_count;
1106     }
1107     if (cell_count > counter_data_cell_count) {
1108       tag = DataLayout::call_type_data_tag;
1109     } else {
1110       tag = DataLayout::counter_data_tag;
1111     }
1112     break;
1113   }
1114   case Bytecodes::_ret:
1115     cell_count = RetData::static_cell_count();
1116     tag = DataLayout::ret_data_tag;
1117     break;
1118   case Bytecodes::_ifeq:
1119   case Bytecodes::_ifne:
1120   case Bytecodes::_iflt:
1121   case Bytecodes::_ifge:
1122   case Bytecodes::_ifgt:
1123   case Bytecodes::_ifle:
1124   case Bytecodes::_if_icmpeq:
1125   case Bytecodes::_if_icmpne:
1126   case Bytecodes::_if_icmplt:
1127   case Bytecodes::_if_icmpge:
1128   case Bytecodes::_if_icmpgt:
1129   case Bytecodes::_if_icmple:
1130   case Bytecodes::_ifnull:
1131   case Bytecodes::_ifnonnull:
1132     cell_count = BranchData::static_cell_count();
1133     tag = DataLayout::branch_data_tag;
1134     break;
1135   case Bytecodes::_if_acmpeq:
1136   case Bytecodes::_if_acmpne:
1137     cell_count = ACmpData::static_cell_count();
1138     tag = DataLayout::acmp_data_tag;
1139     break;
1140   case Bytecodes::_lookupswitch:
1141   case Bytecodes::_tableswitch:
1142     cell_count = MultiBranchData::compute_cell_count(stream);
1143     tag = DataLayout::multi_branch_data_tag;
1144     break;
1145   default:
1146     break;
1147   }
1148   assert(tag == DataLayout::multi_branch_data_tag ||
1149          ((MethodData::profile_arguments() || MethodData::profile_return()) &&
1150           (tag == DataLayout::call_type_data_tag ||
1151            tag == DataLayout::counter_data_tag ||
1152            tag == DataLayout::virtual_call_type_data_tag ||
1153            tag == DataLayout::virtual_call_data_tag)) ||
1154          cell_count == bytecode_cell_count(c), "cell counts must agree");
1155   if (cell_count >= 0) {
1156     assert(tag != DataLayout::no_tag, "bad tag");
1157     assert(bytecode_has_profile(c), "agree w/ BHP");
1158     data_layout->initialize(tag, checked_cast<u2>(stream->bci()), cell_count);
1159     return DataLayout::compute_size_in_bytes(cell_count);
1160   } else {
1161     assert(!bytecode_has_profile(c), "agree w/ !BHP");
1162     return 0;
1163   }
1164 }
1165 
1166 // Get the data at an arbitrary (sort of) data index.
1167 ProfileData* MethodData::data_at(int data_index) const {
1168   if (out_of_bounds(data_index)) {
1169     return nullptr;
1170   }
1171   DataLayout* data_layout = data_layout_at(data_index);
1172   return data_layout->data_in();
1173 }
1174 
1175 int DataLayout::cell_count() {
1176   switch (tag()) {
1177   case DataLayout::no_tag:
1178   default:
1179     ShouldNotReachHere();
1180     return 0;
1181   case DataLayout::bit_data_tag:
1182     return BitData::static_cell_count();
1183   case DataLayout::counter_data_tag:
1184     return CounterData::static_cell_count();
1185   case DataLayout::jump_data_tag:
1186     return JumpData::static_cell_count();
1187   case DataLayout::receiver_type_data_tag:
1188     return ReceiverTypeData::static_cell_count();
1189   case DataLayout::virtual_call_data_tag:
1190     return VirtualCallData::static_cell_count();
1191   case DataLayout::ret_data_tag:
1192     return RetData::static_cell_count();
1193   case DataLayout::branch_data_tag:
1194     return BranchData::static_cell_count();
1195   case DataLayout::multi_branch_data_tag:
1196     return ((new MultiBranchData(this))->cell_count());
1197   case DataLayout::arg_info_data_tag:
1198     return ((new ArgInfoData(this))->cell_count());
1199   case DataLayout::call_type_data_tag:
1200     return ((new CallTypeData(this))->cell_count());
1201   case DataLayout::virtual_call_type_data_tag:
1202     return ((new VirtualCallTypeData(this))->cell_count());
1203   case DataLayout::parameters_type_data_tag:
1204     return ((new ParametersTypeData(this))->cell_count());
1205   case DataLayout::speculative_trap_data_tag:
1206     return SpeculativeTrapData::static_cell_count();
1207   case DataLayout::array_store_data_tag:
1208     return ((new ArrayStoreData(this))->cell_count());
1209   case DataLayout::array_load_data_tag:
1210     return ((new ArrayLoadData(this))->cell_count());
1211   case DataLayout::acmp_data_tag:
1212     return ((new ACmpData(this))->cell_count());
1213   }
1214 }
1215 ProfileData* DataLayout::data_in() {
1216   switch (tag()) {
1217   case DataLayout::no_tag:
1218   default:
1219     ShouldNotReachHere();
1220     return nullptr;
1221   case DataLayout::bit_data_tag:
1222     return new BitData(this);
1223   case DataLayout::counter_data_tag:
1224     return new CounterData(this);
1225   case DataLayout::jump_data_tag:
1226     return new JumpData(this);
1227   case DataLayout::receiver_type_data_tag:
1228     return new ReceiverTypeData(this);
1229   case DataLayout::virtual_call_data_tag:
1230     return new VirtualCallData(this);
1231   case DataLayout::ret_data_tag:
1232     return new RetData(this);
1233   case DataLayout::branch_data_tag:
1234     return new BranchData(this);
1235   case DataLayout::multi_branch_data_tag:
1236     return new MultiBranchData(this);
1237   case DataLayout::arg_info_data_tag:
1238     return new ArgInfoData(this);
1239   case DataLayout::call_type_data_tag:
1240     return new CallTypeData(this);
1241   case DataLayout::virtual_call_type_data_tag:
1242     return new VirtualCallTypeData(this);
1243   case DataLayout::parameters_type_data_tag:
1244     return new ParametersTypeData(this);
1245   case DataLayout::speculative_trap_data_tag:
1246     return new SpeculativeTrapData(this);
1247   case DataLayout::array_store_data_tag:
1248     return new ArrayStoreData(this);
1249   case DataLayout::array_load_data_tag:
1250     return new ArrayLoadData(this);
1251   case DataLayout::acmp_data_tag:
1252     return new ACmpData(this);
1253   }
1254 }
1255 
1256 // Iteration over data.
1257 ProfileData* MethodData::next_data(ProfileData* current) const {
1258   int current_index = dp_to_di(current->dp());
1259   int next_index = current_index + current->size_in_bytes();
1260   ProfileData* next = data_at(next_index);
1261   return next;
1262 }
1263 
1264 DataLayout* MethodData::next_data_layout(DataLayout* current) const {
1265   int current_index = dp_to_di((address)current);
1266   int next_index = current_index + current->size_in_bytes();
1267   if (out_of_bounds(next_index)) {
1268     return nullptr;
1269   }
1270   DataLayout* next = data_layout_at(next_index);
1271   return next;
1272 }
1273 
1274 // Give each of the data entries a chance to perform specific
1275 // data initialization.
1276 void MethodData::post_initialize(BytecodeStream* stream) {
1277   ResourceMark rm;
1278   ProfileData* data;
1279   for (data = first_data(); is_valid(data); data = next_data(data)) {
1280     stream->set_start(data->bci());
1281     stream->next();
1282     data->post_initialize(stream, this);
1283   }
1284   if (_parameters_type_data_di != no_parameters) {
1285     parameters_type_data()->post_initialize(nullptr, this);
1286   }
1287 }
1288 
1289 // Initialize the MethodData* corresponding to a given method.
1290 MethodData::MethodData(const methodHandle& method)
1291   : _method(method()),
1292     // Holds Compile_lock
1293     _extra_data_lock(Mutex::nosafepoint, "MDOExtraData_lock"),
1294     _compiler_counters(),
1295     _parameters_type_data_di(parameters_uninitialized) {
1296   initialize();
1297 }
1298 
1299 // Reinitialize the storage of an existing MDO at a safepoint.  Doing it this way will ensure it's
1300 // not being accessed while the contents are being rewritten.
1301 class VM_ReinitializeMDO: public VM_Operation {
1302  private:
1303   MethodData* _mdo;
1304  public:
1305   VM_ReinitializeMDO(MethodData* mdo): _mdo(mdo) {}
1306   VMOp_Type type() const                         { return VMOp_ReinitializeMDO; }
1307   void doit() {
1308     // The extra data is being zero'd, we'd like to acquire the extra_data_lock but it can't be held
1309     // over a safepoint.  This means that we don't actually need to acquire the lock.
1310     _mdo->initialize();
1311   }
1312   bool allow_nested_vm_operations() const        { return true; }
1313 };
1314 
1315 void MethodData::reinitialize() {
1316   VM_ReinitializeMDO op(this);
1317   VMThread::execute(&op);
1318 }
1319 
1320 
1321 void MethodData::initialize() {
1322   Thread* thread = Thread::current();
1323   NoSafepointVerifier no_safepoint;  // init function atomic wrt GC
1324   ResourceMark rm(thread);
1325 
1326   init();
1327   set_creation_mileage(mileage_of(method()));
1328 
1329   // Go through the bytecodes and allocate and initialize the
1330   // corresponding data cells.
1331   int data_size = 0;
1332   int empty_bc_count = 0;  // number of bytecodes lacking data
1333   _data[0] = 0;  // apparently not set below.
1334   BytecodeStream stream(methodHandle(thread, method()));
1335   Bytecodes::Code c;
1336   bool needs_speculative_traps = false;
1337   while ((c = stream.next()) >= 0) {
1338     int size_in_bytes = initialize_data(&stream, data_size);
1339     data_size += size_in_bytes;
1340     if (size_in_bytes == 0 JVMCI_ONLY(&& Bytecodes::can_trap(c)))  empty_bc_count += 1;
1341     needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
1342   }
1343   _data_size = data_size;
1344   int object_size = in_bytes(data_offset()) + data_size;
1345 
1346   // Add some extra DataLayout cells (at least one) to track stray traps.
1347   int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
1348   int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0);
1349 
1350   // Let's zero the space for the extra data
1351   if (extra_size > 0) {
1352     Copy::zero_to_bytes(((address)_data) + data_size, extra_size);
1353   }
1354 
1355   // Add a cell to record information about modified arguments.
1356   // Set up _args_modified array after traps cells so that
1357   // the code for traps cells works.
1358   DataLayout *dp = data_layout_at(data_size + extra_size);
1359 
1360   int arg_size = method()->size_of_parameters();
1361   dp->initialize(DataLayout::arg_info_data_tag, 0, arg_size+1);
1362 
1363   int arg_data_size = DataLayout::compute_size_in_bytes(arg_size+1);
1364   object_size += extra_size + arg_data_size;
1365 
1366   int parms_cell = ParametersTypeData::compute_cell_count(method());
1367   // If we are profiling parameters, we reserved an area near the end
1368   // of the MDO after the slots for bytecodes (because there's no bci
1369   // for method entry so they don't fit with the framework for the
1370   // profiling of bytecodes). We store the offset within the MDO of
1371   // this area (or -1 if no parameter is profiled)
1372   int parm_data_size = 0;
1373   if (parms_cell > 0) {
1374     parm_data_size = DataLayout::compute_size_in_bytes(parms_cell);
1375     object_size += parm_data_size;
1376     _parameters_type_data_di = data_size + extra_size + arg_data_size;
1377     DataLayout *dp = data_layout_at(data_size + extra_size + arg_data_size);
1378     dp->initialize(DataLayout::parameters_type_data_tag, 0, parms_cell);
1379   } else {
1380     _parameters_type_data_di = no_parameters;
1381   }
1382 
1383   _exception_handler_data_di = data_size + extra_size + arg_data_size + parm_data_size;
1384   if (ProfileExceptionHandlers && method()->has_exception_handler()) {
1385     int num_exception_handlers = method()->exception_table_length();
1386     object_size += num_exception_handlers * single_exception_handler_data_size();
1387     ExceptionTableElement* exception_handlers = method()->exception_table_start();
1388     for (int i = 0; i < num_exception_handlers; i++) {
1389       DataLayout *dp = exception_handler_data_at(i);
1390       dp->initialize(DataLayout::bit_data_tag, exception_handlers[i].handler_pc, single_exception_handler_data_cell_count());
1391     }
1392   }
1393 
1394   // Set an initial hint. Don't use set_hint_di() because
1395   // first_di() may be out of bounds if data_size is 0.
1396   // In that situation, _hint_di is never used, but at
1397   // least well-defined.
1398   _hint_di = first_di();
1399 
1400   post_initialize(&stream);
1401 
1402   assert(object_size == compute_allocation_size_in_bytes(methodHandle(thread, _method)), "MethodData: computed size != initialized size");
1403   set_size(object_size);
1404 }
1405 
1406 void MethodData::init() {
1407   _compiler_counters = CompilerCounters(); // reset compiler counters
1408   _invocation_counter.init();
1409   _backedge_counter.init();
1410   _invocation_counter_start = 0;
1411   _backedge_counter_start = 0;
1412 
1413   // Set per-method invoke- and backedge mask.
1414   double scale = 1.0;
1415   methodHandle mh(Thread::current(), _method);
1416   CompilerOracle::has_option_value(mh, CompileCommandEnum::CompileThresholdScaling, scale);
1417   _invoke_mask = (int)right_n_bits(CompilerConfig::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
1418   _backedge_mask = (int)right_n_bits(CompilerConfig::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
1419 
1420   _tenure_traps = 0;
1421   _num_loops = 0;
1422   _num_blocks = 0;
1423   _would_profile = unknown;
1424 
1425 #if INCLUDE_JVMCI
1426   _jvmci_ir_size = 0;
1427   _failed_speculations = nullptr;
1428 #endif
1429 
1430   // Initialize escape flags.
1431   clear_escape_info();
1432 }
1433 
1434 // Get a measure of how much mileage the method has on it.
1435 int MethodData::mileage_of(Method* method) {
1436   return MAX2(method->invocation_count(), method->backedge_count());
1437 }
1438 
1439 bool MethodData::is_mature() const {
1440   return CompilationPolicy::is_mature(_method);
1441 }
1442 
1443 // Translate a bci to its corresponding data pointer (dp), updating the hint along the way.
1444 address MethodData::bci_to_dp(int bci) {
1445   ResourceMark rm;
1446   DataLayout* data = data_layout_before(bci);
1447   DataLayout* prev = nullptr;
1448   for ( ; is_valid(data); data = next_data_layout(data)) {
1449     if (data->bci() >= bci) {
1450       if (data->bci() == bci)  set_hint_di(dp_to_di((address)data));
1451       else if (prev != nullptr)   set_hint_di(dp_to_di((address)prev));
1452       return (address)data;
1453     }
1454     prev = data;
1455   }
1456   return (address)limit_data_position();
1457 }
1458 
1459 // Translate a bci to its corresponding data, or null.
1460 ProfileData* MethodData::bci_to_data(int bci) {
1461   check_extra_data_locked();
1462 
1463   DataLayout* data = data_layout_before(bci);
1464   for ( ; is_valid(data); data = next_data_layout(data)) {
1465     if (data->bci() == bci) {
1466       set_hint_di(dp_to_di((address)data));
1467       return data->data_in();
1468     } else if (data->bci() > bci) {
1469       break;
1470     }
1471   }
1472   return bci_to_extra_data(bci, nullptr, false);
1473 }
1474 
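     // Linear search of the exception handler entries for the one whose
     // bci matches the handler entry point; returns null if there is none.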
1475 DataLayout* MethodData::exception_handler_bci_to_data_helper(int bci) {
1476   assert(ProfileExceptionHandlers, "not profiling");
1477   for (int i = 0; i < num_exception_handler_data(); i++) {
1478     DataLayout* exception_handler_data = exception_handler_data_at(i);
1479     if (exception_handler_data->bci() == bci) {
1480       return exception_handler_data;
1481     }
1482   }
1483   return nullptr;
1484 }
1485 
1486 BitData* MethodData::exception_handler_bci_to_data_or_null(int bci) {
1487   DataLayout* data = exception_handler_bci_to_data_helper(bci);
1488   return data != nullptr ? new BitData(data) : nullptr;
1489 }
1490 
1491 BitData MethodData::exception_handler_bci_to_data(int bci) {
1492   DataLayout* data = exception_handler_bci_to_data_helper(bci);
1493   assert(data != nullptr, "invalid bci");
1494   return BitData(data);
1495 }
1496 
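     // Step to the next entry in the extra data section. Entry size depends
     // on the tag: no_tag and bit_data_tag entries are BitData-sized, while
     // speculative_trap_data_tag entries are SpeculativeTrapData-sized.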
1497 DataLayout* MethodData::next_extra(DataLayout* dp) {
1498   int nb_cells = 0;
1499   switch(dp->tag()) {
1500   case DataLayout::bit_data_tag:
1501   case DataLayout::no_tag:
1502     nb_cells = BitData::static_cell_count();
1503     break;
1504   case DataLayout::speculative_trap_data_tag:
1505     nb_cells = SpeculativeTrapData::static_cell_count();
1506     break;
1507   default:
1508     fatal("unexpected tag %d", dp->tag());
1509   }
1510   return (DataLayout*)((address)dp + DataLayout::compute_size_in_bytes(nb_cells));
1511 }
1512 
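     // Scan the extra data section for an entry matching bci (and, for
     // speculative traps, the given method m). Returns the matching data or
     // null; in the latter case dp is left at the first free (no_tag) slot
     // or at the limit of the section.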
1513 ProfileData* MethodData::bci_to_extra_data_find(int bci, Method* m, DataLayout*& dp) {
1514   check_extra_data_locked();
1515 
1516   DataLayout* end = args_data_limit();
1517 
1518   for (;; dp = next_extra(dp)) {
1519     assert(dp < end, "moved past end of extra data");
1520     // No need for "Atomic::load_acquire" ops,
1521     // since the data structure is monotonic.
1522     switch(dp->tag()) {
1523     case DataLayout::no_tag:
1524       return nullptr;
1525     case DataLayout::arg_info_data_tag:
1526       dp = end;
1527       return nullptr; // ArgInfoData is at the end of extra data section.
1528     case DataLayout::bit_data_tag:
1529       if (m == nullptr && dp->bci() == bci) {
1530         return new BitData(dp);
1531       }
1532       break;
1533     case DataLayout::speculative_trap_data_tag:
1534       if (m != nullptr) {
1535         SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1536         if (dp->bci() == bci) {
1537           assert(data->method() != nullptr, "method must be set");
1538           if (data->method() == m) {
1539             return data;
1540           }
1541         }
1542       }
1543       break;
1544     default:
1545       fatal("unexpected tag %d", dp->tag());
1546     }
1547   }
1548   return nullptr;
1549 }
1550 
1551 
1552 // Translate a bci to its corresponding extra data, or null.
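     // If no matching entry exists and create_if_missing is set, a new one
     // is allocated at the first free slot: a BitData entry when m is null,
     // otherwise a SpeculativeTrapData entry recording m.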
1553 ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_missing) {
1554   check_extra_data_locked();
1555 
1556   // This code assumes that a SpeculativeTrapData entry is 2 cells (the size of two BitData entries).
1557   assert(2*DataLayout::compute_size_in_bytes(BitData::static_cell_count()) ==
1558          DataLayout::compute_size_in_bytes(SpeculativeTrapData::static_cell_count()),
1559          "code needs to be adjusted");
1560 
1561   // Do not create one of these if method has been redefined.
1562   if (m != nullptr && m->is_old()) {
1563     return nullptr;
1564   }
1565 
1566   DataLayout* dp  = extra_data_base();
1567   DataLayout* end = args_data_limit();
1568 
1569   // Check whether a matching entry already exists.
1570   ProfileData* result = bci_to_extra_data_find(bci, m, dp);
1571   if (result != nullptr || dp >= end) {
1572     return result;
1573   }
1574 
1575   if (create_if_missing) {
1576     // Not found -> Allocate
1577     assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != nullptr), "should be free");
1578     assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info");
1579     u1 tag = m == nullptr ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag;
1580     // SpeculativeTrapData is 2 slots. Make sure we have room.
1581     if (m != nullptr && next_extra(dp)->tag() != DataLayout::no_tag) {
1582       return nullptr;
1583     }
1584     DataLayout temp;
1585     temp.initialize(tag, checked_cast<u2>(bci), 0);
1586 
1587     dp->set_header(temp.header());
1588     assert(dp->tag() == tag, "sane");
1589     assert(dp->bci() == bci, "no concurrent allocation");
1590     if (tag == DataLayout::bit_data_tag) {
1591       return new BitData(dp);
1592     } else {
1593       SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1594       data->set_method(m);
1595       return data;
1596     }
1597   }
1598   return nullptr;
1599 }
1600 
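     // Return the ArgInfoData entry, which sits at the end of the extra
     // data section, or null if there is none.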
1601 ArgInfoData* MethodData::arg_info() {
1602   DataLayout* dp    = extra_data_base();
1603   DataLayout* end   = args_data_limit();
1604   for (; dp < end; dp = next_extra(dp)) {
1605     if (dp->tag() == DataLayout::arg_info_data_tag)
1606       return new ArgInfoData(dp);
1607   }
1608   return nullptr;
1609 }
1610 
1611 // Printing
1612 
1613 void MethodData::print_on(outputStream* st) const {
1614   assert(is_methodData(), "should be method data");
1615   st->print("method data for ");
1616   method()->print_value_on(st);
1617   st->cr();
1618   print_data_on(st);
1619 }
1620 
1621 void MethodData::print_value_on(outputStream* st) const {
1622   assert(is_methodData(), "should be method data");
1623   st->print("method data for ");
1624   method()->print_value_on(st);
1625 }
1626 
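     // Print the parameter type data (if any), then the per-bci profile
     // data, and finally the extra data section.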
1627 void MethodData::print_data_on(outputStream* st) const {
1628   ConditionalMutexLocker ml(extra_data_lock(), !extra_data_lock()->owned_by_self(),
1629                             Mutex::_no_safepoint_check_flag);
1630   ResourceMark rm;
1631   ProfileData* data = first_data();
1632   if (_parameters_type_data_di != no_parameters) {
1633     parameters_type_data()->print_data_on(st);
1634   }
1635   for ( ; is_valid(data); data = next_data(data)) {
1636     st->print("%d", dp_to_di(data->dp()));
1637     st->fill_to(6);
1638     data->print_data_on(st, this);
1639   }
1640 
1641   st->print_cr("--- Extra data:");
1642   DataLayout* dp    = extra_data_base();
1643   DataLayout* end   = args_data_limit();
1644   for (;; dp = next_extra(dp)) {
1645     assert(dp < end, "moved past end of extra data");
1646     // No need for "Atomic::load_acquire" ops,
1647     // since the data structure is monotonic.
1648     switch(dp->tag()) {
1649     case DataLayout::no_tag:
1650       continue;
1651     case DataLayout::bit_data_tag:
1652       data = new BitData(dp);
1653       break;
1654     case DataLayout::speculative_trap_data_tag:
1655       data = new SpeculativeTrapData(dp);
1656       break;
1657     case DataLayout::arg_info_data_tag:
1658       data = new ArgInfoData(dp);
1659       dp = end; // ArgInfoData is at the end of extra data section.
1660       break;
1661     default:
1662       fatal("unexpected tag %d", dp->tag());
1663     }
1664     st->print("%d", dp_to_di(data->dp()));
1665     st->fill_to(6);
1666     data->print_data_on(st);
1667     if (dp >= end) return;
1668   }
1669 }
1670 
1671 // Verification
1672 
1673 void MethodData::verify_on(outputStream* st) {
1674   guarantee(is_methodData(), "object must be method data");
1675   // guarantee(m->is_perm(), "should be in permspace");
1676   this->verify_data_on(st);
1677 }
1678 
1679 void MethodData::verify_data_on(outputStream* st) {
1680   NEEDS_CLEANUP;
1681   // not yet implemented.
1682 }
1683 
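     // A call site is treated as a JSR 292 call site if the enclosing
     // method is a compiled lambda form or if the invoke at bci is
     // invokedynamic or invokehandle.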
1684 bool MethodData::profile_jsr292(const methodHandle& m, int bci) {
1685   if (m->is_compiled_lambda_form()) {
1686     return true;
1687   }
1688 
1689   Bytecode_invoke inv(m, bci);
1690   return inv.is_invokedynamic() || inv.is_invokehandle();
1691 }
1692 
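     // Argument profiles are also collected for Unsafe/ScopedMemoryAccess
     // get*/put* invokevirtual call sites, even in jsr292-only mode.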
1693 bool MethodData::profile_unsafe(const methodHandle& m, int bci) {
1694   Bytecode_invoke inv(m, bci);
1695   if (inv.is_invokevirtual()) {
1696     Symbol* klass = inv.klass();
1697     if (klass == vmSymbols::jdk_internal_misc_Unsafe() ||
1698         klass == vmSymbols::sun_misc_Unsafe() ||
1699         klass == vmSymbols::jdk_internal_misc_ScopedMemoryAccess()) {
1700       Symbol* name = inv.name();
1701       if (name->starts_with("get") || name->starts_with("put")) {
1702         return true;
1703       }
1704     }
1705   }
1706   return false;
1707 }
1708 
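     // TypeProfileLevel is decoded digit by digit: the ones digit controls
     // argument profiling, the tens digit return value profiling, and the
     // hundreds digit parameter profiling. For each digit, 0 turns that
     // profiling off, 1 (type_profile_jsr292) restricts it to JSR 292 call
     // sites, and 2 (type_profile_all) enables it for all methods. For
     // example, TypeProfileLevel == 111 enables jsr292-only profiling of
     // parameters, return values and arguments.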
1709 int MethodData::profile_arguments_flag() {
1710   return TypeProfileLevel % 10;
1711 }
1712 
1713 bool MethodData::profile_arguments() {
1714   return profile_arguments_flag() > no_type_profile && profile_arguments_flag() <= type_profile_all && TypeProfileArgsLimit > 0;
1715 }
1716 
1717 bool MethodData::profile_arguments_jsr292_only() {
1718   return profile_arguments_flag() == type_profile_jsr292;
1719 }
1720 
1721 bool MethodData::profile_all_arguments() {
1722   return profile_arguments_flag() == type_profile_all;
1723 }
1724 
1725 bool MethodData::profile_arguments_for_invoke(const methodHandle& m, int bci) {
1726   if (!profile_arguments()) {
1727     return false;
1728   }
1729 
1730   if (profile_all_arguments()) {
1731     return true;
1732   }
1733 
1734   if (profile_unsafe(m, bci)) {
1735     return true;
1736   }
1737 
1738   assert(profile_arguments_jsr292_only(), "inconsistent");
1739   return profile_jsr292(m, bci);
1740 }
1741 
1742 int MethodData::profile_return_flag() {
1743   return (TypeProfileLevel % 100) / 10;
1744 }
1745 
1746 bool MethodData::profile_return() {
1747   return profile_return_flag() > no_type_profile && profile_return_flag() <= type_profile_all;
1748 }
1749 
1750 bool MethodData::profile_return_jsr292_only() {
1751   return profile_return_flag() == type_profile_jsr292;
1752 }
1753 
1754 bool MethodData::profile_all_return() {
1755   return profile_return_flag() == type_profile_all;
1756 }
1757 
1758 bool MethodData::profile_return_for_invoke(const methodHandle& m, int bci) {
1759   if (!profile_return()) {
1760     return false;
1761   }
1762 
1763   if (profile_all_return()) {
1764     return true;
1765   }
1766 
1767   assert(profile_return_jsr292_only(), "inconsistent");
1768   return profile_jsr292(m, bci);
1769 }
1770 
1771 int MethodData::profile_parameters_flag() {
1772   return TypeProfileLevel / 100;
1773 }
1774 
1775 bool MethodData::profile_parameters() {
1776   return profile_parameters_flag() > no_type_profile && profile_parameters_flag() <= type_profile_all;
1777 }
1778 
1779 bool MethodData::profile_parameters_jsr292_only() {
1780   return profile_parameters_flag() == type_profile_jsr292;
1781 }
1782 
1783 bool MethodData::profile_all_parameters() {
1784   return profile_parameters_flag() == type_profile_all;
1785 }
1786 
1787 bool MethodData::profile_parameters_for_method(const methodHandle& m) {
1788   if (!profile_parameters()) {
1789     return false;
1790   }
1791 
1792   if (profile_all_parameters()) {
1793     return true;
1794   }
1795 
1796   assert(profile_parameters_jsr292_only(), "inconsistent");
1797   return m->is_compiled_lambda_form();
1798 }
1799 
1800 void MethodData::metaspace_pointers_do(MetaspaceClosure* it) {
1801   log_trace(cds)("Iter(MethodData): %p", this);
1802   it->push(&_method);
1803 }
1804 
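     // Either shift the entry at dp left by "shift" cells to close the gap
     // left by dead SpeculativeTrapData entries, or, when "reset" is set,
     // zero the "shift" cells immediately preceding dp.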
1805 void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
1806   check_extra_data_locked();
1807 
1808   if (shift == 0) {
1809     return;
1810   }
1811   if (!reset) {
1812     // Move all cells of trap entry at dp left by "shift" cells
1813     intptr_t* start = (intptr_t*)dp;
1814     intptr_t* end = (intptr_t*)next_extra(dp);
1815     for (intptr_t* ptr = start; ptr < end; ptr++) {
1816       *(ptr-shift) = *ptr;
1817     }
1818   } else {
1819     // Reset "shift" cells stopping at dp
1820     intptr_t* start = ((intptr_t*)dp) - shift;
1821     intptr_t* end = (intptr_t*)dp;
1822     for (intptr_t* ptr = start; ptr < end; ptr++) {
1823       *ptr = 0;
1824     }
1825   }
1826 }
1827 
1828 // Check for entries that reference an unloaded method
1829 class CleanExtraDataKlassClosure : public CleanExtraDataClosure {
1830   bool _always_clean;
1831 public:
1832   CleanExtraDataKlassClosure(bool always_clean) : _always_clean(always_clean) {}
1833   bool is_live(Method* m) {
1834     return !(_always_clean) && m->method_holder()->is_loader_alive();
1835   }
1836 };
1837 
1838 // Check for entries that reference a redefined method
1839 class CleanExtraDataMethodClosure : public CleanExtraDataClosure {
1840 public:
1841   CleanExtraDataMethodClosure() {}
1842   bool is_live(Method* m) { return !m->is_old(); }
1843 };
1844 
1845 
1846 // Remove SpeculativeTrapData entries that reference an unloaded or
1847 // redefined method
1848 void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
1849   check_extra_data_locked();
1850 
1851   DataLayout* dp  = extra_data_base();
1852   DataLayout* end = args_data_limit();
1853 
1854   int shift = 0;
1855   for (; dp < end; dp = next_extra(dp)) {
1856     switch(dp->tag()) {
1857     case DataLayout::speculative_trap_data_tag: {
1858       SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1859       Method* m = data->method();
1860       assert(m != nullptr, "should have a method");
1861       if (!cl->is_live(m)) {
1862         // "shift" accumulates the number of cells for dead
1863         // SpeculativeTrapData entries that have been seen so
1864         // far. Following entries must be shifted left by that many
1865         // cells to remove the dead SpeculativeTrapData entries.
1866         shift += (int)((intptr_t*)next_extra(dp) - (intptr_t*)dp);
1867       } else {
1868         // Shift this entry left if it follows dead
1869         // SpeculativeTrapData entries
1870         clean_extra_data_helper(dp, shift);
1871       }
1872       break;
1873     }
1874     case DataLayout::bit_data_tag:
1875       // Shift this entry left if it follows dead SpeculativeTrapData
1876       // entries
1877       clean_extra_data_helper(dp, shift);
1878       continue;
1879     case DataLayout::no_tag:
1880     case DataLayout::arg_info_data_tag:
1881       // We are at end of the live trap entries. The previous "shift"
1882       // cells contain entries that are either dead or were shifted
1883       // left. They need to be reset to no_tag
1884       clean_extra_data_helper(dp, shift, true);
1885       return;
1886     default:
1887       fatal("unexpected tag %d", dp->tag());
1888     }
1889   }
1890 }
1891 
1892 // Verify there's no unloaded or redefined method referenced by a
1893 // SpeculativeTrapData entry
1894 void MethodData::verify_extra_data_clean(CleanExtraDataClosure* cl) {
1895   check_extra_data_locked();
1896 
1897 #ifdef ASSERT
1898   DataLayout* dp  = extra_data_base();
1899   DataLayout* end = args_data_limit();
1900 
1901   for (; dp < end; dp = next_extra(dp)) {
1902     switch(dp->tag()) {
1903     case DataLayout::speculative_trap_data_tag: {
1904       SpeculativeTrapData* data = new SpeculativeTrapData(dp);
1905       Method* m = data->method();
1906       assert(m != nullptr && cl->is_live(m), "Method should exist");
1907       break;
1908     }
1909     case DataLayout::bit_data_tag:
1910       continue;
1911     case DataLayout::no_tag:
1912     case DataLayout::arg_info_data_tag:
1913       return;
1914     default:
1915       fatal("unexpected tag %d", dp->tag());
1916     }
1917   }
1918 #endif
1919 }
1920 
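     // Clean weak klass links in the regular and parameter profile data,
     // then remove SpeculativeTrapData entries whose method has been
     // unloaded (all of them if always_clean is set).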
1921 void MethodData::clean_method_data(bool always_clean) {
1922   ResourceMark rm;
1923   for (ProfileData* data = first_data();
1924        is_valid(data);
1925        data = next_data(data)) {
1926     data->clean_weak_klass_links(always_clean);
1927   }
1928   ParametersTypeData* parameters = parameters_type_data();
1929   if (parameters != nullptr) {
1930     parameters->clean_weak_klass_links(always_clean);
1931   }
1932 
1933   CleanExtraDataKlassClosure cl(always_clean);
1934 
1935   // Lock to modify extra data, and prevent Safepoint from breaking the lock
1936   MutexLocker ml(extra_data_lock(), Mutex::_no_safepoint_check_flag);
1937 
1938   clean_extra_data(&cl);
1939   verify_extra_data_clean(&cl);
1940 }
1941 
1942 // This is called during redefinition to clean all "old" redefined
1943 // methods out of MethodData for all methods.
1944 void MethodData::clean_weak_method_links() {
1945   ResourceMark rm;
1946   CleanExtraDataMethodClosure cl;
1947 
1948   // Lock to modify extra data, and prevent Safepoint from breaking the lock
1949   MutexLocker ml(extra_data_lock(), Mutex::_no_safepoint_check_flag);
1950 
1951   clean_extra_data(&cl);
1952   verify_extra_data_clean(&cl);
1953 }
1954 
1955 void MethodData::deallocate_contents(ClassLoaderData* loader_data) {
1956   release_C_heap_structures();
1957 }
1958 
1959 void MethodData::release_C_heap_structures() {
1960 #if INCLUDE_JVMCI
1961   FailedSpeculation::free_failed_speculations(get_failed_speculations_address());
1962 #endif
1963 }
1964 
1965 #ifdef ASSERT
1966 void MethodData::check_extra_data_locked() const {
1967   // Cast const away, just to be able to verify the lock.
1968   // Usually we only want non-const accesses on the lock,
1969   // so this is an exception.
1970   MethodData* self = (MethodData*)this;
1971   assert(self->extra_data_lock()->owned_by_self(), "must have lock");
1972   assert(!Thread::current()->is_Java_thread() ||
1973          JavaThread::current()->is_in_no_safepoint_scope(),
1974          "JavaThread must have NoSafepointVerifier inside lock scope");
1975 }
1976 #endif