/*
 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/aotCodeCache.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/relocInfo.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/allocation.inline.hpp"
#include "memory/heap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaFrameAnchor.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframe.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

#include <type_traits>

// Virtual methods are not allowed in code blobs to simplify caching compiled code.
// Check all "leaf" subclasses of the CodeBlob class.

static_assert(!std::is_polymorphic<nmethod>::value, "no virtual methods are allowed in nmethod");
static_assert(!std::is_polymorphic<AdapterBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<VtableBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<MethodHandlesAdapterBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<RuntimeStub>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<DeoptimizationBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<SafepointBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<UpcallStub>::value, "no virtual methods are allowed in code blobs");
#ifdef COMPILER2
static_assert(!std::is_polymorphic<ExceptionBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<UncommonTrapBlob>::value, "no virtual methods are allowed in code blobs");
#endif
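// An illustrative sketch (not part of the build) of how the proxy vtables
// below substitute for real virtual dispatch: each concrete blob kind owns a
// static Vptr instance, and CodeBlob routes "virtual" calls through a table
// indexed by _kind, e.g.
//
//   void CodeBlob::print_on(outputStream* st) const {
//     vptr()->print_on(this, st);   // manual dispatch keyed on _kind
//   }
//
// This keeps the blob classes free of compiler-generated vtables (see the
// static_asserts above), which simplifies caching compiled code.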
// Add proxy vtables.
// We only need a few for now; they are used only for printing.
const nmethod::Vptr                  nmethod::_vpntr;
const BufferBlob::Vptr               BufferBlob::_vpntr;
const RuntimeStub::Vptr              RuntimeStub::_vpntr;
const SingletonBlob::Vptr            SingletonBlob::_vpntr;
const DeoptimizationBlob::Vptr       DeoptimizationBlob::_vpntr;
const UpcallStub::Vptr               UpcallStub::_vpntr;

const CodeBlob::Vptr* CodeBlob::vptr() const {
  constexpr const CodeBlob::Vptr* array[(size_t)CodeBlobKind::Number_Of_Kinds] = {
      nullptr/* None */,
      &nmethod::_vpntr,
      &BufferBlob::_vpntr,
      &AdapterBlob::_vpntr,
      &VtableBlob::_vpntr,
      &MethodHandlesAdapterBlob::_vpntr,
      &RuntimeStub::_vpntr,
      &DeoptimizationBlob::_vpntr,
      &SafepointBlob::_vpntr,
#ifdef COMPILER2
      &ExceptionBlob::_vpntr,
      &UncommonTrapBlob::_vpntr,
#endif
      &UpcallStub::_vpntr
  };

  return array[(size_t)_kind];
}

unsigned int CodeBlob::align_code_offset(int offset) {
  // align the size to CodeEntryAlignment
  int header_size = (int)CodeHeap::header_size();
  return align_up(offset + header_size, CodeEntryAlignment) - header_size;
}

// This must be consistent with the CodeBlob constructor's layout actions.
unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
  // align the size to CodeEntryAlignment
  unsigned int size = align_code_offset(header_size);
  size += align_up(cb->total_content_size(), oopSize);
  size += align_up(cb->total_oop_size(), oopSize);
  return size;
}

CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, uint16_t header_size,
                   int16_t frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments,
                   int mutable_data_size) :
  _oop_maps(nullptr), // will be set by set_oop_maps() call
  _name(name),
  _mutable_data(header_begin() + size), // default value is blob_end()
  _size(size),
  _relocation_size(align_up(cb->total_relocation_size(), oopSize)),
  _content_offset(CodeBlob::align_code_offset(header_size)),
  _code_offset(_content_offset + cb->total_offset_of(cb->insts())),
  _data_offset(_content_offset + align_up(cb->total_content_size(), oopSize)),
  _frame_size(frame_size),
  _mutable_data_size(mutable_data_size),
  S390_ONLY(_ctable_offset(0) COMMA)
  _header_size(header_size),
  _frame_complete_offset(frame_complete_offset),
  _kind(kind),
  _caller_must_gc_arguments(caller_must_gc_arguments)
{
  assert(is_aligned(_size, oopSize), "unaligned size");
  assert(is_aligned(header_size, oopSize), "unaligned size");
  assert(is_aligned(_relocation_size, oopSize), "unaligned size");
  assert(_data_offset <= _size, "codeBlob is too small: %d > %d", _data_offset, _size);
  assert(is_nmethod() || (cb->total_oop_size() + cb->total_metadata_size() == 0), "only nmethods may have oops or metadata");
  assert(code_end() == content_end(), "must be the same - see code_end()");
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1

  if (_mutable_data_size > 0) {
    _mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
    if (_mutable_data == nullptr) {
      vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
    }
  } else {
    // We need a unique and valid non-null address
    assert(_mutable_data == blob_end(), "sanity");
  }

  set_oop_maps(oop_maps);
}
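// A rough layout sketch derived from the offset computations above (widths
// are illustrative, not to scale):
//
//   header_begin()   content_begin()/code_begin()      data_begin()   blob_end()
//        |<- header ->|<------- instructions ------->|<- oops, etc. ->|
//
// Relocation info and other mutable data are not part of the blob itself:
// they live in a separate C-heap allocation of _mutable_data_size bytes
// pointed to by _mutable_data (or _mutable_data equals blob_end() when the
// blob has none).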
// Simple CodeBlob used for simple BufferBlob.
CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size) :
  _oop_maps(nullptr),
  _name(name),
  _mutable_data(header_begin() + size), // default value is blob_end()
  _size(size),
  _relocation_size(0),
  _content_offset(CodeBlob::align_code_offset(header_size)),
  _code_offset(_content_offset),
  _data_offset(size),
  _frame_size(0),
  _mutable_data_size(0),
  S390_ONLY(_ctable_offset(0) COMMA)
  _header_size(header_size),
  _frame_complete_offset(CodeOffsets::frame_never_safe),
  _kind(kind),
  _caller_must_gc_arguments(false)
{
  assert(is_aligned(size, oopSize), "unaligned size");
  assert(is_aligned(header_size, oopSize), "unaligned size");
  assert(_mutable_data == blob_end(), "sanity");
}

void CodeBlob::purge() {
  assert(_mutable_data != nullptr, "should never be null");
  if (_mutable_data != blob_end()) {
    os::free(_mutable_data);
    _mutable_data = blob_end(); // a valid, non-null address
  }
  if (_oop_maps != nullptr && !AOTCodeCache::is_address_in_aot_cache((address)_oop_maps)) {
    delete _oop_maps;
    _oop_maps = nullptr;
  }
  NOT_PRODUCT(_asm_remarks.clear());
  NOT_PRODUCT(_dbg_strings.clear());
}

void CodeBlob::set_oop_maps(OopMapSet* p) {
  // Danger Will Robinson! This method allocates a big
  // chunk of memory, it's your job to free it.
  if (p != nullptr) {
    _oop_maps = ImmutableOopMapSet::build_from(p);
  } else {
    _oop_maps = nullptr;
  }
}

const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) const {
  assert(_oop_maps != nullptr, "no oop maps recorded for this blob");
  return _oop_maps->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
}

void CodeBlob::print_code_on(outputStream* st) {
  ResourceMark m;
  Disassembler::decode(this, st);
}

void CodeBlob::prepare_for_archiving() {
  set_name(nullptr);
  _oop_maps = nullptr;
  _mutable_data = nullptr;
#ifndef PRODUCT
  asm_remarks().clear();
  dbg_strings().clear();
#endif /* PRODUCT */
}

//-----------------------------------------------------------------------------------------
// Creates a RuntimeBlob from a CodeBuffer and copies code and relocation info.

RuntimeBlob::RuntimeBlob(
  const char* name,
  CodeBlobKind kind,
  CodeBuffer* cb,
  int size,
  uint16_t header_size,
  int16_t frame_complete,
  int frame_size,
  OopMapSet* oop_maps,
  bool caller_must_gc_arguments)
  : CodeBlob(name, kind, cb, size, header_size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments,
             align_up(cb->total_relocation_size(), oopSize))
{
  cb->copy_code_and_locs_to(this);
}

void RuntimeBlob::free(RuntimeBlob* blob) {
  assert(blob != nullptr, "caller must check for nullptr");
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  blob->purge();
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::free(blob);
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}
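// Illustrative lifecycle sketch (the stub name and sizes below are made up):
//
//   CodeBuffer cb("illustrative_blob", code_size, locs_size);
//   // ... emit code into cb ...
//   RuntimeStub* stub = RuntimeStub::new_runtime_stub(...);  // copies cb into the code cache
//   // ... later ...
//   RuntimeBlob::free(stub);  // purge() first, then CodeCache::free() under
//                             // CodeCache_lock; usage stats are tracked after
//                             // the lock is released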
void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const char* name2) {
  // Do not hold the CodeCache lock during name formatting.
  assert(!CodeCache_lock->owned_by_self(), "release CodeCache_lock before registering the stub");

  if (stub != nullptr && (PrintStubCode ||
                          Forte::is_enabled() ||
                          JvmtiExport::should_post_dynamic_code_generated())) {
    char stub_id[256];
    assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "stub_id buffer overflow");
    jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
    if (PrintStubCode) {
      ttyLocker ttyl;
      tty->print_cr("- - - [BEGIN] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->print_cr("Decoding %s " PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (%d bytes)",
                    stub_id, p2i(stub), p2i(stub->code_begin()), p2i(stub->code_end()), stub->code_size());
      Disassembler::decode(stub->code_begin(), stub->code_end(), tty
                           NOT_PRODUCT(COMMA &stub->asm_remarks()));
      if ((stub->oop_maps() != nullptr) && AbstractDisassembler::show_structs()) {
        tty->print_cr("- - - [OOP MAPS]- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
        stub->oop_maps()->print();
      }
      tty->print_cr("- - - [END] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->cr();
    }
    if (Forte::is_enabled()) {
      Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());
    }

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      const char* stub_name = name2;
      if (name2[0] == '\0') stub_name = name1;
      JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
    }
  }

  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

//----------------------------------------------------------------------------------------------------
// Implementation of BufferBlob

BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, int size)
: RuntimeBlob(name, kind, size, sizeof(BufferBlob))
{}

BufferBlob* BufferBlob::create(const char* name, uint buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = nullptr;
  unsigned int size = sizeof(BufferBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != nullptr, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, size);
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}


BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size)
  : RuntimeBlob(name, kind, cb, size, sizeof(BufferBlob), CodeOffsets::frame_never_safe, 0, nullptr)
{}

// Used by gtest
BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferBlob));
  assert(name != nullptr, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, cb, size);
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}
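// Usage sketch (illustrative; "my_stub" is a made-up name): callers either
// reserve an empty buffer of a given size, or copy finished code from a
// CodeBuffer via the second create() overload above:
//
//   BufferBlob* bb = BufferBlob::create("my_stub", 1024);  // empty buffer
//   if (bb == nullptr) { /* code cache exhausted; caller must cope */ }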
void* BufferBlob::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

void BufferBlob::free(BufferBlob* blob) {
  RuntimeBlob::free(blob);
}


//----------------------------------------------------------------------------------------------------
// Implementation of AdapterBlob

AdapterBlob::AdapterBlob(int size, CodeBuffer* cb) :
  BufferBlob("I2C/C2I adapters", CodeBlobKind::Adapter, cb, size) {
  CodeCache::commit(this);
}

AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  CodeCache::gc_on_allocation();

  AdapterBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(AdapterBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) AdapterBlob(size, cb);
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of VtableBlob

void* VtableBlob::operator new(size_t s, unsigned size) throw() {
  // Handling of allocation failure stops compilation and prints a bunch of
  // stuff, which requires unlocking the CodeCache_lock, so that the Compile_lock
  // can be locked, and then re-locking the CodeCache_lock. That is not safe in
  // this context as we hold the CompiledICLocker. So we just don't handle code
  // cache exhaustion here; we leave that for a later allocation that does not
  // hold the CompiledICLocker.
  return CodeCache::allocate(size, CodeBlobType::NonNMethod, false /* handle_alloc_failure */);
}

VtableBlob::VtableBlob(const char* name, int size) :
  BufferBlob(name, CodeBlobKind::Vtable, size) {
}
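// Illustrative caller pattern (hypothetical call site): create() below may
// return nullptr either because the code cache is full (operator new above
// does not handle allocation failure) or because CodeCache_lock could not be
// taken without risking deadlock. Callers are expected to treat nullptr as
// "bail out and retry the IC transition later":
//
//   VtableBlob* vb = VtableBlob::create(name, buffer_size);
//   if (vb == nullptr) {
//     return false;  // transition to the clean state; retry on a later call
//   }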
VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "called with the wrong state");

  VtableBlob* blob = nullptr;
  unsigned int size = sizeof(VtableBlob);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != nullptr, "must provide a name");
  {
    if (!CodeCache_lock->try_lock()) {
      // If we can't take the CodeCache_lock, then this is a bad time to perform the ongoing
      // IC transition to megamorphic, for which this stub will be needed. It is better to
      // bail out of the transition and wait for a more opportune moment. Not only is the
      // megamorphic transition not worth blocking on the lock for, blocking might also
      // result in a deadlock when concurrent class unloading is performed. At this point
      // in time, the CompiledICLocker is taken, so we are not allowed to blockingly wait
      // for the CodeCache_lock, as these two locks are otherwise consistently taken in the
      // opposite order. Bailing out results in an IC transition to the clean state instead,
      // which will cause subsequent calls to retry the transition eventually.
      return nullptr;
    }
    blob = new (size) VtableBlob(name, size);
    CodeCache_lock->unlock();
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of MethodHandlesAdapterBlob

MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  MethodHandlesAdapterBlob* blob = nullptr;
  unsigned int size = sizeof(MethodHandlesAdapterBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) MethodHandlesAdapterBlob(size);
    if (blob == nullptr) {
      vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for method handle adapter blob");
    }
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of RuntimeStub

RuntimeStub::RuntimeStub(
  const char* name,
  CodeBuffer* cb,
  int size,
  int16_t frame_complete,
  int frame_size,
  OopMapSet* oop_maps,
  bool caller_must_gc_arguments
)
: RuntimeBlob(name, CodeBlobKind::RuntimeStub, cb, size, sizeof(RuntimeStub),
              frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{
}

RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
                                           CodeBuffer* cb,
                                           int16_t frame_complete,
                                           int frame_size,
                                           OopMapSet* oop_maps,
                                           bool caller_must_gc_arguments,
                                           bool alloc_fail_is_fatal)
{
  RuntimeStub* stub = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(RuntimeStub));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
    if (stub == nullptr) {
      if (!alloc_fail_is_fatal) {
        return nullptr;
      }
      fatal("Initial size of CodeCache is too small");
    }
  }

  trace_new_stub(stub, "RuntimeStub - ", stub_name);

  return stub;
}


void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

// operator new shared by all singletons:
void* SingletonBlob::operator new(size_t s, unsigned size, bool alloc_fail_is_fatal) throw() {
  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
  if (alloc_fail_is_fatal && !p) fatal("Initial size of CodeCache is too small");
  return p;
}
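// Note on the two failure modes above: RuntimeStub::new_runtime_stub() checks
// the allocation itself and calls fatal() unless alloc_fail_is_fatal is false,
// while SingletonBlob::operator new folds the same check into the allocator.
// The COMPILER2 singletons below opt out of the fatal path, e.g.
// (illustrative):
//
//   blob = new (size, false) UncommonTrapBlob(cb, size, oop_maps, frame_size);
//   if (blob == nullptr) { /* caller decides how to recover */ }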
//----------------------------------------------------------------------------------------------------
// Implementation of DeoptimizationBlob

DeoptimizationBlob::DeoptimizationBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int unpack_offset,
  int unpack_with_exception_offset,
  int unpack_with_reexecution_offset,
  int frame_size
)
: SingletonBlob("DeoptimizationBlob", CodeBlobKind::Deoptimization, cb,
                size, sizeof(DeoptimizationBlob), frame_size, oop_maps)
{
  _unpack_offset           = unpack_offset;
  _unpack_with_exception   = unpack_with_exception_offset;
  _unpack_with_reexecution = unpack_with_reexecution_offset;
#ifdef COMPILER1
  _unpack_with_exception_in_tls = -1;
#endif
}


DeoptimizationBlob* DeoptimizationBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int unpack_offset,
  int unpack_with_exception_offset,
  int unpack_with_reexecution_offset,
  int frame_size)
{
  DeoptimizationBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(DeoptimizationBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) DeoptimizationBlob(cb,
                                         size,
                                         oop_maps,
                                         unpack_offset,
                                         unpack_with_exception_offset,
                                         unpack_with_reexecution_offset,
                                         frame_size);
  }

  trace_new_stub(blob, "DeoptimizationBlob");

  return blob;
}
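// Sketch of how the recorded entry points are consumed (illustrative; assumes
// the usual accessors declared in codeBlob.hpp, e.g. unpack(), which return
// code_begin() plus the corresponding offset):
//
//   DeoptimizationBlob* deopt = SharedRuntime::deopt_blob();
//   address entry = deopt->unpack();  // where deoptimized frames re-enter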
#ifdef COMPILER2

//----------------------------------------------------------------------------------------------------
// Implementation of UncommonTrapBlob

UncommonTrapBlob::UncommonTrapBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("UncommonTrapBlob", CodeBlobKind::UncommonTrap, cb,
                size, sizeof(UncommonTrapBlob), frame_size, oop_maps)
{}


UncommonTrapBlob* UncommonTrapBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  UncommonTrapBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(UncommonTrapBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size, false) UncommonTrapBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "UncommonTrapBlob");

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of ExceptionBlob

ExceptionBlob::ExceptionBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("ExceptionBlob", CodeBlobKind::Exception, cb,
                size, sizeof(ExceptionBlob), frame_size, oop_maps)
{}


ExceptionBlob* ExceptionBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  ExceptionBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(ExceptionBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size, false) ExceptionBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "ExceptionBlob");

  return blob;
}

#endif // COMPILER2

//----------------------------------------------------------------------------------------------------
// Implementation of SafepointBlob

SafepointBlob::SafepointBlob(
  CodeBuffer* cb,
  int size,
  OopMapSet* oop_maps,
  int frame_size
)
: SingletonBlob("SafepointBlob", CodeBlobKind::Safepoint, cb,
                size, sizeof(SafepointBlob), frame_size, oop_maps)
{}


SafepointBlob* SafepointBlob::create(
  CodeBuffer* cb,
  OopMapSet* oop_maps,
  int frame_size)
{
  SafepointBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(SafepointBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "SafepointBlob");

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of UpcallStub

UpcallStub::UpcallStub(const char* name, CodeBuffer* cb, int size, jobject receiver, ByteSize frame_data_offset) :
  RuntimeBlob(name, CodeBlobKind::Upcall, cb, size, sizeof(UpcallStub),
              CodeOffsets::frame_never_safe, 0 /* no frame size */,
              /* oop maps = */ nullptr, /* caller must gc arguments = */ false),
  _receiver(receiver),
  _frame_data_offset(frame_data_offset)
{
  CodeCache::commit(this);
}

void* UpcallStub::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

UpcallStub* UpcallStub::create(const char* name, CodeBuffer* cb, jobject receiver, ByteSize frame_data_offset) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  UpcallStub* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(UpcallStub));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) UpcallStub(name, cb, size, receiver, frame_data_offset);
  }
  if (blob == nullptr) {
    return nullptr;  // caller must handle this
  }

  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  trace_new_stub(blob, "UpcallStub - ", name);

  return blob;
}

void UpcallStub::oops_do(OopClosure* f, const frame& frame) {
  frame_data_for_frame(frame)->old_handles->oops_do(f);
}

JavaFrameAnchor* UpcallStub::jfa_for_frame(const frame& frame) const {
  return &frame_data_for_frame(frame)->jfa;
}

void UpcallStub::free(UpcallStub* blob) {
  assert(blob != nullptr, "caller must check for nullptr");
  JNIHandles::destroy_global(blob->receiver());
  RuntimeBlob::free(blob);
}

//----------------------------------------------------------------------------------------------------
// Verification and printing

void CodeBlob::verify() {
  if (is_nmethod()) {
    as_nmethod()->verify();
  }
}

void CodeBlob::print_on(outputStream* st) const {
  vptr()->print_on(this, st);
}

void CodeBlob::print() const { print_on(tty); }

void CodeBlob::print_value_on(outputStream* st) const {
  vptr()->print_value_on(this, st);
}

void CodeBlob::print_on_impl(outputStream* st) const {
  st->print_cr("[CodeBlob (" INTPTR_FORMAT ")]", p2i(this));
  st->print_cr("Framesize: %d", _frame_size);
}

void CodeBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("[CodeBlob]");
}
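// Usage sketch (illustrative): these entry points back the debugging
// commands, e.g.
//
//   cb->print_value_on(tty);  // one-line summary, dispatched via the Vptr table
//   cb->print_code_on(tty);   // full disassembly via Disassembler::decode
//
// dump_for_addr() below is the workhorse behind findpc in debug.cpp.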
void CodeBlob::print_block_comment(outputStream* stream, address block_begin) const {
#if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
  if (is_nmethod()) {
    as_nmethod()->print_nmethod_labels(stream, block_begin);
  }
#endif

#ifndef PRODUCT
  ptrdiff_t offset = block_begin - code_begin();
  assert(offset >= 0, "Expecting non-negative offset!");
  _asm_remarks.print(uint(offset), stream);
#endif
}

void CodeBlob::dump_for_addr(address addr, outputStream* st, bool verbose) const {
  if (is_buffer_blob() || is_adapter_blob() || is_vtable_blob() || is_method_handles_adapter_blob()) {
    // the interpreter is generated into a buffer blob
    InterpreterCodelet* i = Interpreter::codelet_containing(addr);
    if (i != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an Interpreter codelet", p2i(addr), (int)(addr - i->code_begin()));
      i->print_on(st);
      return;
    }
    if (Interpreter::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into interpreter code"
                   " (not bytecode specific)", p2i(addr));
      return;
    }
    if (AdapterHandlerLibrary::contains(this)) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an AdapterHandler", p2i(addr), (int)(addr - code_begin()));
      AdapterHandlerLibrary::print_handler_on(st, this);
    }
    // the stubroutines are generated into a buffer blob
    StubCodeDesc* d = StubCodeDesc::desc_for(addr);
    if (d != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at begin+%d in a stub", p2i(addr), (int)(addr - d->begin()));
      d->print_on(st);
      st->cr();
      return;
    }
    if (StubRoutines::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) stub routine", p2i(addr));
      return;
    }
    VtableStub* v = VtableStubs::stub_containing(addr);
    if (v != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", p2i(addr), (int)(addr - v->entry_point()));
      v->print_on(st);
      st->cr();
      return;
    }
  }
  if (is_nmethod()) {
    nmethod* nm = (nmethod*)this;
    ResourceMark rm;
    st->print(INTPTR_FORMAT " is at entry_point+%d in (nmethod*)" INTPTR_FORMAT,
              p2i(addr), (int)(addr - nm->entry_point()), p2i(nm));
    if (verbose) {
      st->print(" for ");
      nm->method()->print_value_on(st);
    }
    st->cr();
    if (verbose && st == tty) {
      // verbose is only ever true when called from findpc in debug.cpp
      nm->print_nmethod(true);
    } else {
      nm->print_on(st);
    }
    return;
  }
  st->print_cr(INTPTR_FORMAT " is at code_begin+%d in ", p2i(addr), (int)(addr - code_begin()));
  print_on(st);
}

void BufferBlob::print_on_impl(outputStream* st) const {
  RuntimeBlob::print_on_impl(st);
  print_value_on_impl(st);
}

void BufferBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("BufferBlob (" INTPTR_FORMAT ") used for %s", p2i(this), name());
}

void RuntimeStub::print_on_impl(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on_impl(st);
  st->print("Runtime Stub (" INTPTR_FORMAT "): ", p2i(this));
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void RuntimeStub::print_value_on_impl(outputStream* st) const {
  st->print("RuntimeStub (" INTPTR_FORMAT "): ", p2i(this));
  st->print("%s", name());
}

void SingletonBlob::print_on_impl(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on_impl(st);
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void SingletonBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("%s", name());
}
st->print_cr("Deoptimization (frame not available)"); 850 } 851 852 void UpcallStub::print_on_impl(outputStream* st) const { 853 RuntimeBlob::print_on_impl(st); 854 print_value_on_impl(st); 855 st->print_cr("Frame data offset: %d", (int) _frame_data_offset); 856 oop recv = JNIHandles::resolve(_receiver); 857 st->print("Receiver MH="); 858 recv->print_on(st); 859 Disassembler::decode((RuntimeBlob*)this, st); 860 } 861 862 void UpcallStub::print_value_on_impl(outputStream* st) const { 863 st->print_cr("UpcallStub (" INTPTR_FORMAT ") used for %s", p2i(this), name()); 864 }