/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/cdsAccess.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/codeHeapState.hpp"
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/dependencyContext.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "jfr/jfrEvents.hpp"
#include "jvm_io.h"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.hpp"
#include "memory/memoryReserver.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/method.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/vmThread.hpp"
#include "sanitizers/leak.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#include "utilities/vmError.hpp"
#include "utilities/xmlstream.hpp"
#ifdef COMPILER1
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#include "opto/compile.hpp"
#include "opto/node.hpp"
#endif

// Helper class for printing in CodeCache
class CodeBlob_sizes {
 private:
  int count;
  int total_size;
  int header_size;
  int code_size;
  int stub_size;
  int relocation_size;
  int scopes_oop_size;
  int scopes_metadata_size;
  int scopes_data_size;
  int scopes_pcs_size;

 public:
  CodeBlob_sizes() {
    count = 0;
    total_size = 0;
    header_size = 0;
    code_size = 0;
    stub_size = 0;
    relocation_size = 0;
    scopes_oop_size = 0;
    scopes_metadata_size = 0;
    scopes_data_size = 0;
    scopes_pcs_size = 0;
  }

  int total() const { return total_size; }
  bool is_empty() const { return count == 0; }

  void print(const char* title) const {
    if (is_empty()) {
      tty->print_cr(" #%d %s = %dK",
                    count,
                    title,
                    total() / (int)K);
    } else {
      tty->print_cr(" #%d %s = %dK (hdr %dK %d%%, loc %dK %d%%, code %dK %d%%, stub %dK %d%%, [oops %dK %d%%, metadata %dK %d%%, data %dK %d%%, pcs %dK %d%%])",
                    count,
                    title,
                    total() / (int)K,
                    header_size / (int)K,
                    header_size * 100 / total_size,
                    relocation_size / (int)K,
                    relocation_size * 100 / total_size,
                    code_size / (int)K,
                    code_size * 100 / total_size,
                    stub_size / (int)K,
                    stub_size * 100 / total_size,
                    scopes_oop_size / (int)K,
                    scopes_oop_size * 100 / total_size,
                    scopes_metadata_size / (int)K,
                    scopes_metadata_size * 100 / total_size,
                    scopes_data_size / (int)K,
                    scopes_data_size * 100 / total_size,
                    scopes_pcs_size / (int)K,
                    scopes_pcs_size * 100 / total_size);
    }
  }

  void add(CodeBlob* cb) {
    count++;
    total_size += cb->size();
    header_size += cb->header_size();
    relocation_size += cb->relocation_size();
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod_or_null();
      code_size += nm->insts_size();
      stub_size += nm->stub_size();

      scopes_oop_size += nm->oops_size();
      scopes_metadata_size += nm->metadata_size();
      scopes_data_size += nm->scopes_data_size();
      scopes_pcs_size += nm->scopes_pcs_size();
    } else {
      code_size += cb->code_size();
    }
  }
};

// Iterate over all CodeHeaps
#define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap)
#define FOR_ALL_ALLOCABLE_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _allocable_heaps->begin(); heap != _allocable_heaps->end(); ++heap)

// Iterate over all CodeBlobs (cb) on the given CodeHeap
#define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != nullptr; cb = next_blob(heap, cb))

address CodeCache::_low_bound = nullptr;
address CodeCache::_high_bound = nullptr;
volatile int CodeCache::_number_of_nmethods_with_dependencies = 0;
ExceptionCache* volatile CodeCache::_exception_cache_purge_list = nullptr;

static ReservedSpace _cds_code_space;

// Initialize arrays of CodeHeap subsets
GrowableArray<CodeHeap*>* CodeCache::_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);
GrowableArray<CodeHeap*>* CodeCache::_allocable_heaps = new(mtCode) GrowableArray<CodeHeap*> (static_cast<int>(CodeBlobType::All), mtCode);

static void check_min_size(const char* codeheap, size_t size, size_t required_size) {
  if (size < required_size) {
    log_debug(codecache)("Code heap (%s) size %zuK below required minimal size %zuK",
                         codeheap, size/K, required_size/K);
    err_msg title("Not enough space in %s to run VM", codeheap);
    err_msg message("%zuK < %zuK", size/K, required_size/K);
    vm_exit_during_initialization(title, message);
  }
}

struct CodeHeapInfo {
  size_t size;
  bool set;
  bool enabled;
};

static void set_size_of_unset_code_heap(CodeHeapInfo* heap, size_t available_size, size_t used_size, size_t min_size) {
  assert(!heap->set, "sanity");
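  // Give the unset heap all space not claimed by the other segments, but never less than min_size.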
  heap->size = (available_size > (used_size + min_size)) ? (available_size - used_size) : min_size;
}

void CodeCache::initialize_heaps() {
  CodeHeapInfo non_nmethod = {NonNMethodCodeHeapSize, FLAG_IS_CMDLINE(NonNMethodCodeHeapSize), true};
  CodeHeapInfo profiled = {ProfiledCodeHeapSize, FLAG_IS_CMDLINE(ProfiledCodeHeapSize), true};
  CodeHeapInfo non_profiled = {NonProfiledCodeHeapSize, FLAG_IS_CMDLINE(NonProfiledCodeHeapSize), true};

  const bool cache_size_set = FLAG_IS_CMDLINE(ReservedCodeCacheSize);
  const size_t ps = page_size(false, 8);
  const size_t min_size = MAX2(os::vm_allocation_granularity(), ps);
  const size_t min_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3); // Make sure we have enough space for VM internal code
  size_t cache_size = align_up(ReservedCodeCacheSize, min_size);

  // Prerequisites
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    // For compatibility reasons, disabled tiered compilation overrides
    // segment size even if it is set explicitly.
    non_profiled.size += profiled.size;
    // Profiled code heap is not available, forcibly set size to 0
    profiled.size = 0;
    profiled.set = true;
    profiled.enabled = false;
  }

  assert(heap_available(CodeBlobType::MethodNonProfiled), "MethodNonProfiled heap is always available for segmented code heap");

  size_t compiler_buffer_size = 0;
  COMPILER1_PRESENT(compiler_buffer_size += CompilationPolicy::c1_count() * Compiler::code_buffer_size());
  COMPILER2_PRESENT(compiler_buffer_size += CompilationPolicy::c2_count() * C2Compiler::initial_code_buffer_size());
  COMPILER2_PRESENT(compiler_buffer_size += (CompilationPolicy::c2_count() + CompilationPolicy::c3_count()) * C2Compiler::initial_code_buffer_size());

  if (!non_nmethod.set) {
    non_nmethod.size += compiler_buffer_size;
    // Further down, just before FLAG_SET_ERGO(), all segment sizes are
    // aligned down to the next lower multiple of min_size. For large page
    // sizes, this may result in (non_nmethod.size == 0) which is not acceptable.
    // Therefore, force non_nmethod.size to at least min_size.
    non_nmethod.size = MAX2(non_nmethod.size, min_size);
  }

  if (!profiled.set && !non_profiled.set) {
    non_profiled.size = profiled.size = (cache_size > non_nmethod.size + 2 * min_size) ?
                                        (cache_size - non_nmethod.size) / 2 : min_size;
  }

  if (profiled.set && !non_profiled.set) {
    set_size_of_unset_code_heap(&non_profiled, cache_size, non_nmethod.size + profiled.size, min_size);
  }

  if (!profiled.set && non_profiled.set) {
    set_size_of_unset_code_heap(&profiled, cache_size, non_nmethod.size + non_profiled.size, min_size);
  }

  // Compatibility.
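  // The non-nmethod heap must be able to hold VM internal code plus the per-compiler scratch buffers.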
  size_t non_nmethod_min_size = min_cache_size + compiler_buffer_size;
  if (!non_nmethod.set && profiled.set && non_profiled.set) {
    set_size_of_unset_code_heap(&non_nmethod, cache_size, profiled.size + non_profiled.size, non_nmethod_min_size);
  }

  size_t total = non_nmethod.size + profiled.size + non_profiled.size;
  if (total != cache_size && !cache_size_set) {
    log_info(codecache)("ReservedCodeCache size %zuK changed to total segments size NonNMethod "
                        "%zuK NonProfiled %zuK Profiled %zuK = %zuK",
                        cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K, total/K);
    // Adjust ReservedCodeCacheSize as necessary because it was not set explicitly
    cache_size = total;
  }

  log_debug(codecache)("Initializing code heaps ReservedCodeCache %zuK NonNMethod %zuK"
                       " NonProfiled %zuK Profiled %zuK",
                       cache_size/K, non_nmethod.size/K, non_profiled.size/K, profiled.size/K);

  // Validation
  // Check minimal required sizes
  check_min_size("non-nmethod code heap", non_nmethod.size, non_nmethod_min_size);
  if (profiled.enabled) {
    check_min_size("profiled code heap", profiled.size, min_size);
  }
  if (non_profiled.enabled) { // non_profiled.enabled is always ON for segmented code heap, leave it checked for clarity
    check_min_size("non-profiled code heap", non_profiled.size, min_size);
  }
  if (cache_size_set) {
    check_min_size("reserved code cache", cache_size, min_cache_size);
  }

  // ReservedCodeCacheSize was set explicitly, so report an error and abort if it doesn't match the segment sizes
  if (total != cache_size && cache_size_set) {
    err_msg message("NonNMethodCodeHeapSize (%zuK)", non_nmethod.size/K);
    if (profiled.enabled) {
      message.append(" + ProfiledCodeHeapSize (%zuK)", profiled.size/K);
    }
    if (non_profiled.enabled) {
      message.append(" + NonProfiledCodeHeapSize (%zuK)", non_profiled.size/K);
    }
    message.append(" = %zuK", total/K);
    message.append((total > cache_size) ? " is greater than " : " is less than ");
    message.append("ReservedCodeCacheSize (%zuK).", cache_size/K);

    vm_exit_during_initialization("Invalid code heap sizes", message);
  }

  // Compatibility. Print warning if using large pages but not able to use the size given
  if (UseLargePages) {
    const size_t lg_ps = page_size(false, 1);
    if (ps < lg_ps) {
      log_warning(codecache)("Code cache size too small for " PROPERFMT " pages. "
                             "Reverting to smaller page size (" PROPERFMT ").",
                             PROPERFMTARGS(lg_ps), PROPERFMTARGS(ps));
    }
  }

  // Note: if large page support is enabled, min_size is at least the large
  // page size. This ensures that the code cache is covered by large pages.
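  // Fold the alignment remainders of the other segments into the non-profiled heap,
  // then align all segment sizes down to min_size.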
  non_profiled.size += non_nmethod.size & alignment_mask(min_size);
  non_profiled.size += profiled.size & alignment_mask(min_size);
  non_nmethod.size = align_down(non_nmethod.size, min_size);
  profiled.size = align_down(profiled.size, min_size);
  non_profiled.size = align_down(non_profiled.size, min_size);

  FLAG_SET_ERGO(NonNMethodCodeHeapSize, non_nmethod.size);
  FLAG_SET_ERGO(ProfiledCodeHeapSize, profiled.size);
  FLAG_SET_ERGO(NonProfiledCodeHeapSize, non_profiled.size);
  FLAG_SET_ERGO(ReservedCodeCacheSize, cache_size);

  const size_t cds_code_size = align_up(CDSAccess::get_cached_code_size(), min_size);
  cache_size += cds_code_size;

  ReservedSpace rs = reserve_heap_memory(cache_size, ps);

  // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory.
  LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size());

  size_t offset = 0;
  if (cds_code_size > 0) {
    // FIXME: use CodeHeapInfo for this hack ...
    _cds_code_space = rs.partition(offset, cds_code_size);
    offset += cds_code_size;
  }

  if (profiled.enabled) {
    ReservedSpace profiled_space = rs.partition(offset, profiled.size);
    offset += profiled.size;
    // Tier 2 and tier 3 (profiled) methods
    add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  }

  ReservedSpace non_method_space = rs.partition(offset, non_nmethod.size);
  offset += non_nmethod.size;
  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);

  if (non_profiled.enabled) {
    ReservedSpace non_profiled_space = rs.partition(offset, non_profiled.size);
    // Tier 1 and tier 4 (non-profiled) methods and native methods
    add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
  }
}

void* CodeCache::map_cached_code() {
  if (_cds_code_space.size() > 0 && CDSAccess::map_cached_code(_cds_code_space)) {
    return _cds_code_space.base();
  } else {
    return nullptr;
  }
}

size_t CodeCache::page_size(bool aligned, size_t min_pages) {
  return aligned ?
os::page_size_for_region_aligned(ReservedCodeCacheSize, min_pages) : 368 os::page_size_for_region_unaligned(ReservedCodeCacheSize, min_pages); 369 } 370 371 ReservedSpace CodeCache::reserve_heap_memory(size_t size, size_t rs_ps) { 372 // Align and reserve space for code cache 373 const size_t rs_align = MAX2(rs_ps, os::vm_allocation_granularity()); 374 const size_t rs_size = align_up(size, rs_align); 375 376 ReservedSpace rs = CodeMemoryReserver::reserve(rs_size, rs_align, rs_ps); 377 if (!rs.is_reserved()) { 378 vm_exit_during_initialization(err_msg("Could not reserve enough space for code cache (%zuK)", 379 rs_size/K)); 380 } 381 382 // Initialize bounds 383 _low_bound = (address)rs.base(); 384 _high_bound = _low_bound + rs.size(); 385 return rs; 386 } 387 388 // Heaps available for allocation 389 bool CodeCache::heap_available(CodeBlobType code_blob_type) { 390 if (!SegmentedCodeCache) { 391 // No segmentation: use a single code heap 392 return (code_blob_type == CodeBlobType::All); 393 } else if (CompilerConfig::is_interpreter_only()) { 394 // Interpreter only: we don't need any method code heaps 395 return (code_blob_type == CodeBlobType::NonNMethod); 396 } else if (CompilerConfig::is_c1_profiling()) { 397 // Tiered compilation: use all code heaps 398 return (code_blob_type < CodeBlobType::All); 399 } else { 400 // No TieredCompilation: we only need the non-nmethod and non-profiled code heap 401 return (code_blob_type == CodeBlobType::NonNMethod) || 402 (code_blob_type == CodeBlobType::MethodNonProfiled); 403 } 404 } 405 406 const char* CodeCache::get_code_heap_flag_name(CodeBlobType code_blob_type) { 407 switch(code_blob_type) { 408 case CodeBlobType::NonNMethod: 409 return "NonNMethodCodeHeapSize"; 410 break; 411 case CodeBlobType::MethodNonProfiled: 412 return "NonProfiledCodeHeapSize"; 413 break; 414 case CodeBlobType::MethodProfiled: 415 return "ProfiledCodeHeapSize"; 416 break; 417 default: 418 ShouldNotReachHere(); 419 return nullptr; 420 } 421 } 422 423 int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) { 424 if (lhs->code_blob_type() == rhs->code_blob_type()) { 425 return (lhs > rhs) ? 1 : ((lhs < rhs) ? 
-1 : 0); 426 } else { 427 return static_cast<int>(lhs->code_blob_type()) - static_cast<int>(rhs->code_blob_type()); 428 } 429 } 430 431 void CodeCache::add_heap(CodeHeap* heap) { 432 assert(!Universe::is_fully_initialized(), "late heap addition?"); 433 434 _heaps->insert_sorted<code_heap_compare>(heap); 435 436 CodeBlobType type = heap->code_blob_type(); 437 if (code_blob_type_accepts_nmethod(type)) { 438 _nmethod_heaps->insert_sorted<code_heap_compare>(heap); 439 } 440 if (code_blob_type_accepts_allocable(type)) { 441 _allocable_heaps->insert_sorted<code_heap_compare>(heap); 442 } 443 } 444 445 void CodeCache::add_heap(ReservedSpace rs, const char* name, CodeBlobType code_blob_type) { 446 // Check if heap is needed 447 if (!heap_available(code_blob_type)) { 448 return; 449 } 450 451 // Create CodeHeap 452 CodeHeap* heap = new CodeHeap(name, code_blob_type); 453 add_heap(heap); 454 455 // Reserve Space 456 size_t size_initial = MIN2((size_t)InitialCodeCacheSize, rs.size()); 457 size_initial = align_up(size_initial, rs.page_size()); 458 if (!heap->reserve(rs, size_initial, CodeCacheSegmentSize)) { 459 vm_exit_during_initialization(err_msg("Could not reserve enough space in %s (%zuK)", 460 heap->name(), size_initial/K)); 461 } 462 463 // Register the CodeHeap 464 MemoryService::add_code_heap_memory_pool(heap, name); 465 } 466 467 CodeHeap* CodeCache::get_code_heap_containing(void* start) { 468 FOR_ALL_HEAPS(heap) { 469 if ((*heap)->contains(start)) { 470 return *heap; 471 } 472 } 473 return nullptr; 474 } 475 476 CodeHeap* CodeCache::get_code_heap(const void* cb) { 477 assert(cb != nullptr, "CodeBlob is null"); 478 FOR_ALL_HEAPS(heap) { 479 if ((*heap)->contains(cb)) { 480 return *heap; 481 } 482 } 483 ShouldNotReachHere(); 484 return nullptr; 485 } 486 487 CodeHeap* CodeCache::get_code_heap(CodeBlobType code_blob_type) { 488 FOR_ALL_HEAPS(heap) { 489 if ((*heap)->accepts(code_blob_type)) { 490 return *heap; 491 } 492 } 493 return nullptr; 494 } 495 496 CodeBlob* CodeCache::first_blob(CodeHeap* heap) { 497 assert_locked_or_safepoint(CodeCache_lock); 498 assert(heap != nullptr, "heap is null"); 499 return (CodeBlob*)heap->first(); 500 } 501 502 CodeBlob* CodeCache::first_blob(CodeBlobType code_blob_type) { 503 if (heap_available(code_blob_type)) { 504 return first_blob(get_code_heap(code_blob_type)); 505 } else { 506 return nullptr; 507 } 508 } 509 510 CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) { 511 assert_locked_or_safepoint(CodeCache_lock); 512 assert(heap != nullptr, "heap is null"); 513 return (CodeBlob*)heap->next(cb); 514 } 515 516 /** 517 * Do not seize the CodeCache lock here--if the caller has not 518 * already done so, we are going to lose bigtime, since the code 519 * cache will contain a garbage CodeBlob until the caller can 520 * run the constructor for the CodeBlob subclass he is busy 521 * instantiating. 
522 */ 523 CodeBlob* CodeCache::allocate(uint size, CodeBlobType code_blob_type, bool handle_alloc_failure, CodeBlobType orig_code_blob_type) { 524 assert_locked_or_safepoint(CodeCache_lock); 525 assert(size > 0, "Code cache allocation request must be > 0"); 526 if (size == 0) { 527 return nullptr; 528 } 529 CodeBlob* cb = nullptr; 530 531 // Get CodeHeap for the given CodeBlobType 532 CodeHeap* heap = get_code_heap(code_blob_type); 533 assert(heap != nullptr, "heap is null"); 534 535 while (true) { 536 cb = (CodeBlob*)heap->allocate(size); 537 if (cb != nullptr) break; 538 if (!heap->expand_by(CodeCacheExpansionSize)) { 539 // Save original type for error reporting 540 if (orig_code_blob_type == CodeBlobType::All) { 541 orig_code_blob_type = code_blob_type; 542 } 543 // Expansion failed 544 if (SegmentedCodeCache) { 545 // Fallback solution: Try to store code in another code heap. 546 // NonNMethod -> MethodNonProfiled -> MethodProfiled (-> MethodNonProfiled) 547 CodeBlobType type = code_blob_type; 548 switch (type) { 549 case CodeBlobType::NonNMethod: 550 type = CodeBlobType::MethodNonProfiled; 551 break; 552 case CodeBlobType::MethodNonProfiled: 553 type = CodeBlobType::MethodProfiled; 554 break; 555 case CodeBlobType::MethodProfiled: 556 // Avoid loop if we already tried that code heap 557 if (type == orig_code_blob_type) { 558 type = CodeBlobType::MethodNonProfiled; 559 } 560 break; 561 default: 562 break; 563 } 564 if (type != code_blob_type && type != orig_code_blob_type && heap_available(type)) { 565 if (PrintCodeCacheExtension) { 566 tty->print_cr("Extension of %s failed. Trying to allocate in %s.", 567 heap->name(), get_code_heap(type)->name()); 568 } 569 return allocate(size, type, handle_alloc_failure, orig_code_blob_type); 570 } 571 } 572 if (handle_alloc_failure) { 573 MutexUnlocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 574 CompileBroker::handle_full_code_cache(orig_code_blob_type); 575 } 576 return nullptr; 577 } else { 578 OrderAccess::release(); // ensure heap expansion is visible to an asynchronous observer (e.g. CodeHeapPool::get_memory_usage()) 579 } 580 if (PrintCodeCacheExtension) { 581 ResourceMark rm; 582 if (_nmethod_heaps->length() >= 1) { 583 tty->print("%s", heap->name()); 584 } else { 585 tty->print("CodeCache"); 586 } 587 tty->print_cr(" extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%zd bytes)", 588 (intptr_t)heap->low_boundary(), (intptr_t)heap->high(), 589 (address)heap->high() - (address)heap->low_boundary()); 590 } 591 } 592 print_trace("allocation", cb, size); 593 return cb; 594 } 595 596 void CodeCache::free(CodeBlob* cb) { 597 assert_locked_or_safepoint(CodeCache_lock); 598 CodeHeap* heap = get_code_heap(cb); 599 print_trace("free", cb); 600 if (cb->is_nmethod()) { 601 heap->set_nmethod_count(heap->nmethod_count() - 1); 602 if (((nmethod *)cb)->has_dependencies()) { 603 Atomic::dec(&_number_of_nmethods_with_dependencies); 604 } 605 } 606 if (cb->is_adapter_blob()) { 607 heap->set_adapter_count(heap->adapter_count() - 1); 608 } 609 610 cb->~CodeBlob(); 611 // Get heap for given CodeBlob and deallocate 612 heap->deallocate(cb); 613 614 assert(heap->blob_count() >= 0, "sanity check"); 615 } 616 617 void CodeCache::free_unused_tail(CodeBlob* cb, size_t used) { 618 assert_locked_or_safepoint(CodeCache_lock); 619 guarantee(cb->is_buffer_blob() && strncmp("Interpreter", cb->name(), 11) == 0, "Only possible for interpreter!"); 620 print_trace("free_unused_tail", cb); 621 622 // We also have to account for the extra space (i.e. 
header) used by the CodeBlob 623 // which provides the memory (see BufferBlob::create() in codeBlob.cpp). 624 used += CodeBlob::align_code_offset(cb->header_size()); 625 626 // Get heap for given CodeBlob and deallocate its unused tail 627 get_code_heap(cb)->deallocate_tail(cb, used); 628 // Adjust the sizes of the CodeBlob 629 cb->adjust_size(used); 630 } 631 632 void CodeCache::commit(CodeBlob* cb) { 633 // this is called by nmethod::nmethod, which must already own CodeCache_lock 634 assert_locked_or_safepoint(CodeCache_lock); 635 CodeHeap* heap = get_code_heap(cb); 636 if (cb->is_nmethod()) { 637 heap->set_nmethod_count(heap->nmethod_count() + 1); 638 if (((nmethod *)cb)->has_dependencies()) { 639 Atomic::inc(&_number_of_nmethods_with_dependencies); 640 } 641 } 642 if (cb->is_adapter_blob()) { 643 heap->set_adapter_count(heap->adapter_count() + 1); 644 } 645 } 646 647 bool CodeCache::contains(void *p) { 648 // S390 uses contains() in current_frame(), which is used before 649 // code cache initialization if NativeMemoryTracking=detail is set. 650 S390_ONLY(if (_heaps == nullptr) return false;) 651 // It should be ok to call contains without holding a lock. 652 FOR_ALL_HEAPS(heap) { 653 if ((*heap)->contains(p)) { 654 return true; 655 } 656 } 657 return false; 658 } 659 660 bool CodeCache::contains(nmethod *nm) { 661 return contains((void *)nm); 662 } 663 664 // This method is safe to call without holding the CodeCache_lock. It only depends on the _segmap to contain 665 // valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled. 666 CodeBlob* CodeCache::find_blob(void* start) { 667 // NMT can walk the stack before code cache is created 668 if (_heaps != nullptr) { 669 CodeHeap* heap = get_code_heap_containing(start); 670 if (heap != nullptr) { 671 return heap->find_blob(start); 672 } 673 } 674 return nullptr; 675 } 676 677 nmethod* CodeCache::find_nmethod(void* start) { 678 CodeBlob* cb = find_blob(start); 679 assert(cb == nullptr || cb->is_nmethod(), "did not find an nmethod"); 680 return (nmethod*)cb; 681 } 682 683 void CodeCache::blobs_do(void f(CodeBlob* nm)) { 684 assert_locked_or_safepoint(CodeCache_lock); 685 FOR_ALL_HEAPS(heap) { 686 FOR_ALL_BLOBS(cb, *heap) { 687 f(cb); 688 } 689 } 690 } 691 692 void CodeCache::nmethods_do(void f(nmethod* nm)) { 693 assert_locked_or_safepoint(CodeCache_lock); 694 NMethodIterator iter(NMethodIterator::all); 695 while(iter.next()) { 696 f(iter.method()); 697 } 698 } 699 700 void CodeCache::nmethods_do(NMethodClosure* cl) { 701 assert_locked_or_safepoint(CodeCache_lock); 702 NMethodIterator iter(NMethodIterator::all); 703 while(iter.next()) { 704 cl->do_nmethod(iter.method()); 705 } 706 } 707 708 void CodeCache::metadata_do(MetadataClosure* f) { 709 assert_locked_or_safepoint(CodeCache_lock); 710 NMethodIterator iter(NMethodIterator::all); 711 while(iter.next()) { 712 iter.method()->metadata_do(f); 713 } 714 } 715 716 // Calculate the number of GCs after which an nmethod is expected to have been 717 // used in order to not be classed as cold. 
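// The result is stored in _cold_gc_count and is recomputed at the end of each GC marking cycle.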
718 void CodeCache::update_cold_gc_count() { 719 if (!MethodFlushing || !UseCodeCacheFlushing || NmethodSweepActivity == 0) { 720 // No aging 721 return; 722 } 723 724 size_t last_used = _last_unloading_used; 725 double last_time = _last_unloading_time; 726 727 double time = os::elapsedTime(); 728 729 size_t free = unallocated_capacity(); 730 size_t max = max_capacity(); 731 size_t used = max - free; 732 double gc_interval = time - last_time; 733 734 _unloading_threshold_gc_requested = false; 735 _last_unloading_time = time; 736 _last_unloading_used = used; 737 738 if (last_time == 0.0) { 739 // The first GC doesn't have enough information to make good 740 // decisions, so just keep everything afloat 741 log_info(codecache)("Unknown code cache pressure; don't age code"); 742 return; 743 } 744 745 if (gc_interval <= 0.0 || last_used >= used) { 746 // Dodge corner cases where there is no pressure or negative pressure 747 // on the code cache. Just don't unload when this happens. 748 _cold_gc_count = INT_MAX; 749 log_info(codecache)("No code cache pressure; don't age code"); 750 return; 751 } 752 753 double allocation_rate = (used - last_used) / gc_interval; 754 755 _unloading_allocation_rates.add(allocation_rate); 756 _unloading_gc_intervals.add(gc_interval); 757 758 size_t aggressive_sweeping_free_threshold = StartAggressiveSweepingAt / 100.0 * max; 759 if (free < aggressive_sweeping_free_threshold) { 760 // We are already in the red zone; be very aggressive to avoid disaster 761 // But not more aggressive than 2. This ensures that an nmethod must 762 // have been unused at least between two GCs to be considered cold still. 763 _cold_gc_count = 2; 764 log_info(codecache)("Code cache critically low; use aggressive aging"); 765 return; 766 } 767 768 // The code cache has an expected time for cold nmethods to "time out" 769 // when they have not been used. The time for nmethods to time out 770 // depends on how long we expect we can keep allocating code until 771 // aggressive sweeping starts, based on sampled allocation rates. 772 double average_gc_interval = _unloading_gc_intervals.avg(); 773 double average_allocation_rate = _unloading_allocation_rates.avg(); 774 double time_to_aggressive = ((double)(free - aggressive_sweeping_free_threshold)) / average_allocation_rate; 775 double cold_timeout = time_to_aggressive / NmethodSweepActivity; 776 777 // Convert time to GC cycles, and crop at INT_MAX. The reason for 778 // that is that the _cold_gc_count will be added to an epoch number 779 // and that addition must not overflow, or we can crash the VM. 780 // But not more aggressive than 2. This ensures that an nmethod must 781 // have been unused at least between two GCs to be considered cold still. 
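  // Example: a 50 s cold timeout with a 10 s average GC interval yields a _cold_gc_count of 5.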
782 _cold_gc_count = MAX2(MIN2((uint64_t)(cold_timeout / average_gc_interval), (uint64_t)INT_MAX), (uint64_t)2); 783 784 double used_ratio = double(used) / double(max); 785 double last_used_ratio = double(last_used) / double(max); 786 log_info(codecache)("Allocation rate: %.3f KB/s, time to aggressive unloading: %.3f s, cold timeout: %.3f s, cold gc count: " UINT64_FORMAT 787 ", used: %.3f MB (%.3f%%), last used: %.3f MB (%.3f%%), gc interval: %.3f s", 788 average_allocation_rate / K, time_to_aggressive, cold_timeout, _cold_gc_count, 789 double(used) / M, used_ratio * 100.0, double(last_used) / M, last_used_ratio * 100.0, average_gc_interval); 790 791 } 792 793 uint64_t CodeCache::cold_gc_count() { 794 return _cold_gc_count; 795 } 796 797 void CodeCache::gc_on_allocation() { 798 if (!is_init_completed()) { 799 // Let's not heuristically trigger GCs before the JVM is ready for GCs, no matter what 800 return; 801 } 802 803 size_t free = unallocated_capacity(); 804 size_t max = max_capacity(); 805 size_t used = max - free; 806 double free_ratio = double(free) / double(max); 807 if (free_ratio <= StartAggressiveSweepingAt / 100.0) { 808 // In case the GC is concurrent, we make sure only one thread requests the GC. 809 if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) { 810 log_info(codecache)("Triggering aggressive GC due to having only %.3f%% free memory", free_ratio * 100.0); 811 Universe::heap()->collect(GCCause::_codecache_GC_aggressive); 812 } 813 return; 814 } 815 816 size_t last_used = _last_unloading_used; 817 if (last_used >= used) { 818 // No increase since last GC; no need to sweep yet 819 return; 820 } 821 size_t allocated_since_last = used - last_used; 822 double allocated_since_last_ratio = double(allocated_since_last) / double(max); 823 double threshold = SweeperThreshold / 100.0; 824 double used_ratio = double(used) / double(max); 825 double last_used_ratio = double(last_used) / double(max); 826 if (used_ratio > threshold) { 827 // After threshold is reached, scale it by free_ratio so that more aggressive 828 // GC is triggered as we approach code cache exhaustion 829 threshold *= free_ratio; 830 } 831 // If code cache has been allocated without any GC at all, let's make sure 832 // it is eventually invoked to avoid trouble. 833 if (allocated_since_last_ratio > threshold) { 834 // In case the GC is concurrent, we make sure only one thread requests the GC. 835 if (Atomic::cmpxchg(&_unloading_threshold_gc_requested, false, true) == false) { 836 log_info(codecache)("Triggering threshold (%.3f%%) GC due to allocating %.3f%% since last unloading (%.3f%% used -> %.3f%% used)", 837 threshold * 100.0, allocated_since_last_ratio * 100.0, last_used_ratio * 100.0, used_ratio * 100.0); 838 Universe::heap()->collect(GCCause::_codecache_GC_threshold); 839 } 840 } 841 } 842 843 // We initialize the _gc_epoch to 2, because previous_completed_gc_marking_cycle 844 // subtracts the value by 2, and the type is unsigned. We don't want underflow. 845 // 846 // Odd values mean that marking is in progress, and even values mean that no 847 // marking is currently active. 848 uint64_t CodeCache::_gc_epoch = 2; 849 850 // How many GCs after an nmethod has not been used, do we consider it cold? 
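// Starting at INT_MAX effectively disables cold-code aging until update_cold_gc_count() computes a real value.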
851 uint64_t CodeCache::_cold_gc_count = INT_MAX; 852 853 double CodeCache::_last_unloading_time = 0.0; 854 size_t CodeCache::_last_unloading_used = 0; 855 volatile bool CodeCache::_unloading_threshold_gc_requested = false; 856 TruncatedSeq CodeCache::_unloading_gc_intervals(10 /* samples */); 857 TruncatedSeq CodeCache::_unloading_allocation_rates(10 /* samples */); 858 859 uint64_t CodeCache::gc_epoch() { 860 return _gc_epoch; 861 } 862 863 bool CodeCache::is_gc_marking_cycle_active() { 864 // Odd means that marking is active 865 return (_gc_epoch % 2) == 1; 866 } 867 868 uint64_t CodeCache::previous_completed_gc_marking_cycle() { 869 if (is_gc_marking_cycle_active()) { 870 return _gc_epoch - 2; 871 } else { 872 return _gc_epoch - 1; 873 } 874 } 875 876 void CodeCache::on_gc_marking_cycle_start() { 877 assert(!is_gc_marking_cycle_active(), "Previous marking cycle never ended"); 878 ++_gc_epoch; 879 } 880 881 // Once started the code cache marking cycle must only be finished after marking of 882 // the java heap is complete. Otherwise nmethods could appear to be not on stack even 883 // if they have frames in continuation StackChunks that were not yet visited. 884 void CodeCache::on_gc_marking_cycle_finish() { 885 assert(is_gc_marking_cycle_active(), "Marking cycle started before last one finished"); 886 ++_gc_epoch; 887 update_cold_gc_count(); 888 } 889 890 void CodeCache::arm_all_nmethods() { 891 BarrierSet::barrier_set()->barrier_set_nmethod()->arm_all_nmethods(); 892 } 893 894 // Mark nmethods for unloading if they contain otherwise unreachable oops. 895 void CodeCache::do_unloading(bool unloading_occurred) { 896 assert_locked_or_safepoint(CodeCache_lock); 897 NMethodIterator iter(NMethodIterator::all); 898 while(iter.next()) { 899 iter.method()->do_unloading(unloading_occurred); 900 } 901 } 902 903 void CodeCache::verify_clean_inline_caches() { 904 #ifdef ASSERT 905 NMethodIterator iter(NMethodIterator::not_unloading); 906 while(iter.next()) { 907 nmethod* nm = iter.method(); 908 nm->verify_clean_inline_caches(); 909 nm->verify(); 910 } 911 #endif 912 } 913 914 // Defer freeing of concurrently cleaned ExceptionCache entries until 915 // after a global handshake operation. 916 void CodeCache::release_exception_cache(ExceptionCache* entry) { 917 if (SafepointSynchronize::is_at_safepoint()) { 918 delete entry; 919 } else { 920 for (;;) { 921 ExceptionCache* purge_list_head = Atomic::load(&_exception_cache_purge_list); 922 entry->set_purge_list_next(purge_list_head); 923 if (Atomic::cmpxchg(&_exception_cache_purge_list, purge_list_head, entry) == purge_list_head) { 924 break; 925 } 926 } 927 } 928 } 929 930 // Delete exception caches that have been concurrently unlinked, 931 // followed by a global handshake operation. 932 void CodeCache::purge_exception_caches() { 933 ExceptionCache* curr = _exception_cache_purge_list; 934 while (curr != nullptr) { 935 ExceptionCache* next = curr->purge_list_next(); 936 delete curr; 937 curr = next; 938 } 939 _exception_cache_purge_list = nullptr; 940 } 941 942 // Restart compiler if possible and required.. 
943 void CodeCache::maybe_restart_compiler(size_t freed_memory) { 944 945 // Try to start the compiler again if we freed any memory 946 if (!CompileBroker::should_compile_new_jobs() && freed_memory != 0) { 947 CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation); 948 log_info(codecache)("Restarting compiler"); 949 EventJITRestart event; 950 event.set_freedMemory(freed_memory); 951 event.set_codeCacheMaxCapacity(CodeCache::max_capacity()); 952 event.commit(); 953 } 954 } 955 956 uint8_t CodeCache::_unloading_cycle = 1; 957 958 void CodeCache::increment_unloading_cycle() { 959 // 2-bit value (see IsUnloadingState in nmethod.cpp for details) 960 // 0 is reserved for new methods. 961 _unloading_cycle = (_unloading_cycle + 1) % 4; 962 if (_unloading_cycle == 0) { 963 _unloading_cycle = 1; 964 } 965 } 966 967 CodeCache::UnlinkingScope::UnlinkingScope(BoolObjectClosure* is_alive) 968 : _is_unloading_behaviour(is_alive) 969 { 970 _saved_behaviour = IsUnloadingBehaviour::current(); 971 IsUnloadingBehaviour::set_current(&_is_unloading_behaviour); 972 increment_unloading_cycle(); 973 DependencyContext::cleaning_start(); 974 } 975 976 CodeCache::UnlinkingScope::~UnlinkingScope() { 977 IsUnloadingBehaviour::set_current(_saved_behaviour); 978 DependencyContext::cleaning_end(); 979 } 980 981 void CodeCache::verify_oops() { 982 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 983 VerifyOopClosure voc; 984 NMethodIterator iter(NMethodIterator::not_unloading); 985 while(iter.next()) { 986 nmethod* nm = iter.method(); 987 nm->oops_do(&voc); 988 nm->verify_oop_relocations(); 989 } 990 } 991 992 int CodeCache::blob_count(CodeBlobType code_blob_type) { 993 CodeHeap* heap = get_code_heap(code_blob_type); 994 return (heap != nullptr) ? heap->blob_count() : 0; 995 } 996 997 int CodeCache::blob_count() { 998 int count = 0; 999 FOR_ALL_HEAPS(heap) { 1000 count += (*heap)->blob_count(); 1001 } 1002 return count; 1003 } 1004 1005 int CodeCache::nmethod_count(CodeBlobType code_blob_type) { 1006 CodeHeap* heap = get_code_heap(code_blob_type); 1007 return (heap != nullptr) ? heap->nmethod_count() : 0; 1008 } 1009 1010 int CodeCache::nmethod_count() { 1011 int count = 0; 1012 for (CodeHeap* heap : *_nmethod_heaps) { 1013 count += heap->nmethod_count(); 1014 } 1015 return count; 1016 } 1017 1018 int CodeCache::adapter_count(CodeBlobType code_blob_type) { 1019 CodeHeap* heap = get_code_heap(code_blob_type); 1020 return (heap != nullptr) ? heap->adapter_count() : 0; 1021 } 1022 1023 int CodeCache::adapter_count() { 1024 int count = 0; 1025 FOR_ALL_HEAPS(heap) { 1026 count += (*heap)->adapter_count(); 1027 } 1028 return count; 1029 } 1030 1031 address CodeCache::low_bound(CodeBlobType code_blob_type) { 1032 CodeHeap* heap = get_code_heap(code_blob_type); 1033 return (heap != nullptr) ? (address)heap->low_boundary() : nullptr; 1034 } 1035 1036 address CodeCache::high_bound(CodeBlobType code_blob_type) { 1037 CodeHeap* heap = get_code_heap(code_blob_type); 1038 return (heap != nullptr) ? (address)heap->high_boundary() : nullptr; 1039 } 1040 1041 size_t CodeCache::capacity() { 1042 size_t cap = 0; 1043 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1044 cap += (*heap)->capacity(); 1045 } 1046 return cap; 1047 } 1048 1049 size_t CodeCache::unallocated_capacity(CodeBlobType code_blob_type) { 1050 CodeHeap* heap = get_code_heap(code_blob_type); 1051 return (heap != nullptr) ? 
heap->unallocated_capacity() : 0; 1052 } 1053 1054 size_t CodeCache::unallocated_capacity() { 1055 size_t unallocated_cap = 0; 1056 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1057 unallocated_cap += (*heap)->unallocated_capacity(); 1058 } 1059 return unallocated_cap; 1060 } 1061 1062 size_t CodeCache::max_capacity() { 1063 size_t max_cap = 0; 1064 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1065 max_cap += (*heap)->max_capacity(); 1066 } 1067 return max_cap; 1068 } 1069 1070 bool CodeCache::is_non_nmethod(address addr) { 1071 CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod); 1072 return blob->contains(addr); 1073 } 1074 1075 size_t CodeCache::max_distance_to_non_nmethod() { 1076 if (!SegmentedCodeCache) { 1077 return ReservedCodeCacheSize; 1078 } else { 1079 CodeHeap* blob = get_code_heap(CodeBlobType::NonNMethod); 1080 // the max distance is minimized by placing the NonNMethod segment 1081 // in between MethodProfiled and MethodNonProfiled segments 1082 size_t dist1 = (size_t)blob->high() - (size_t)_low_bound; 1083 size_t dist2 = (size_t)_high_bound - (size_t)blob->low(); 1084 return dist1 > dist2 ? dist1 : dist2; 1085 } 1086 } 1087 1088 // Returns the reverse free ratio. E.g., if 25% (1/4) of the code cache 1089 // is free, reverse_free_ratio() returns 4. 1090 // Since code heap for each type of code blobs falls forward to the next 1091 // type of code heap, return the reverse free ratio for the entire 1092 // code cache. 1093 double CodeCache::reverse_free_ratio() { 1094 double unallocated = MAX2((double)unallocated_capacity(), 1.0); // Avoid division by 0; 1095 double max = (double)max_capacity(); 1096 double result = max / unallocated; 1097 assert (max >= unallocated, "Must be"); 1098 assert (result >= 1.0, "reverse_free_ratio must be at least 1. It is %f", result); 1099 return result; 1100 } 1101 1102 size_t CodeCache::bytes_allocated_in_freelists() { 1103 size_t allocated_bytes = 0; 1104 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1105 allocated_bytes += (*heap)->allocated_in_freelist(); 1106 } 1107 return allocated_bytes; 1108 } 1109 1110 int CodeCache::allocated_segments() { 1111 int number_of_segments = 0; 1112 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1113 number_of_segments += (*heap)->allocated_segments(); 1114 } 1115 return number_of_segments; 1116 } 1117 1118 size_t CodeCache::freelists_length() { 1119 size_t length = 0; 1120 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1121 length += (*heap)->freelist_length(); 1122 } 1123 return length; 1124 } 1125 1126 void icache_init(); 1127 1128 void CodeCache::initialize() { 1129 assert(CodeCacheSegmentSize >= (uintx)CodeEntryAlignment, "CodeCacheSegmentSize must be large enough to align entry points"); 1130 #ifdef COMPILER2 1131 assert(CodeCacheSegmentSize >= (uintx)OptoLoopAlignment, "CodeCacheSegmentSize must be large enough to align inner loops"); 1132 #endif 1133 assert(CodeCacheSegmentSize >= sizeof(jdouble), "CodeCacheSegmentSize must be large enough to align constants"); 1134 // This was originally just a check of the alignment, causing failure, instead, round 1135 // the code cache to the page size. In particular, Solaris is moving to a larger 1136 // default page size. 
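  // Round the expansion step up to a whole number of OS pages.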
1137 CodeCacheExpansionSize = align_up(CodeCacheExpansionSize, os::vm_page_size()); 1138 1139 if (SegmentedCodeCache) { 1140 // Use multiple code heaps 1141 initialize_heaps(); 1142 } else { 1143 // Use a single code heap 1144 FLAG_SET_ERGO(NonNMethodCodeHeapSize, (uintx)os::vm_page_size()); 1145 FLAG_SET_ERGO(ProfiledCodeHeapSize, 0); 1146 FLAG_SET_ERGO(NonProfiledCodeHeapSize, 0); 1147 1148 // If InitialCodeCacheSize is equal to ReservedCodeCacheSize, then it's more likely 1149 // users want to use the largest available page. 1150 const size_t min_pages = (InitialCodeCacheSize == ReservedCodeCacheSize) ? 1 : 8; 1151 ReservedSpace rs = reserve_heap_memory(ReservedCodeCacheSize, page_size(false, min_pages)); 1152 // Register CodeHeaps with LSan as we sometimes embed pointers to malloc memory. 1153 LSAN_REGISTER_ROOT_REGION(rs.base(), rs.size()); 1154 add_heap(rs, "CodeCache", CodeBlobType::All); 1155 } 1156 1157 // Initialize ICache flush mechanism 1158 // This service is needed for os::register_code_area 1159 icache_init(); 1160 1161 // Give OS a chance to register generated code area. 1162 // This is used on Windows 64 bit platforms to register 1163 // Structured Exception Handlers for our generated code. 1164 os::register_code_area((char*)low_bound(), (char*)high_bound()); 1165 } 1166 1167 void codeCache_init() { 1168 CodeCache::initialize(); 1169 } 1170 1171 //------------------------------------------------------------------------------------------------ 1172 1173 bool CodeCache::has_nmethods_with_dependencies() { 1174 return Atomic::load_acquire(&_number_of_nmethods_with_dependencies) != 0; 1175 } 1176 1177 void CodeCache::clear_inline_caches() { 1178 assert_locked_or_safepoint(CodeCache_lock); 1179 NMethodIterator iter(NMethodIterator::not_unloading); 1180 while(iter.next()) { 1181 iter.method()->clear_inline_caches(); 1182 } 1183 } 1184 1185 // Only used by whitebox API 1186 void CodeCache::cleanup_inline_caches_whitebox() { 1187 assert_locked_or_safepoint(CodeCache_lock); 1188 NMethodIterator iter(NMethodIterator::not_unloading); 1189 while(iter.next()) { 1190 iter.method()->cleanup_inline_caches_whitebox(); 1191 } 1192 } 1193 1194 // Keeps track of time spent for checking dependencies 1195 NOT_PRODUCT(static elapsedTimer dependentCheckTime;) 1196 1197 #ifndef PRODUCT 1198 // Check if any of live methods dependencies have been invalidated. 1199 // (this is expensive!) 1200 static void check_live_nmethods_dependencies(DepChange& changes) { 1201 // Checked dependencies are allocated into this ResourceMark 1202 ResourceMark rm; 1203 1204 // Turn off dependency tracing while actually testing dependencies. 1205 FlagSetting fs(Dependencies::_verify_in_progress, true); 1206 1207 typedef ResourceHashtable<DependencySignature, int, 11027, 1208 AnyObj::RESOURCE_AREA, mtInternal, 1209 &DependencySignature::hash, 1210 &DependencySignature::equals> DepTable; 1211 1212 DepTable* table = new DepTable(); 1213 1214 // Iterate over live nmethods and check dependencies of all nmethods that are not 1215 // marked for deoptimization. A particular dependency is only checked once. 1216 NMethodIterator iter(NMethodIterator::not_unloading); 1217 while(iter.next()) { 1218 nmethod* nm = iter.method(); 1219 // Only notify for live nmethods 1220 if (!nm->is_marked_for_deoptimization()) { 1221 for (Dependencies::DepStream deps(nm); deps.next(); ) { 1222 // Construct abstraction of a dependency. 
1223 DependencySignature* current_sig = new DependencySignature(deps); 1224 1225 // Determine if dependency is already checked. table->put(...) returns 1226 // 'true' if the dependency is added (i.e., was not in the hashtable). 1227 if (table->put(*current_sig, 1)) { 1228 Klass* witness = deps.check_dependency(); 1229 if (witness != nullptr) { 1230 // Dependency checking failed. Print out information about the failed 1231 // dependency and finally fail with an assert. We can fail here, since 1232 // dependency checking is never done in a product build. 1233 deps.print_dependency(tty, witness, true); 1234 changes.print(); 1235 nm->print(); 1236 nm->print_dependencies_on(tty); 1237 assert(false, "Should have been marked for deoptimization"); 1238 } 1239 } 1240 } 1241 } 1242 } 1243 } 1244 #endif 1245 1246 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, KlassDepChange& changes) { 1247 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1248 1249 // search the hierarchy looking for nmethods which are affected by the loading of this class 1250 1251 // then search the interfaces this class implements looking for nmethods 1252 // which might be dependent of the fact that an interface only had one 1253 // implementor. 1254 // nmethod::check_all_dependencies works only correctly, if no safepoint 1255 // can happen 1256 NoSafepointVerifier nsv; 1257 for (DepChange::ContextStream str(changes, nsv); str.next(); ) { 1258 InstanceKlass* d = str.klass(); 1259 { 1260 LogStreamHandle(Trace, dependencies) log; 1261 if (log.is_enabled()) { 1262 log.print("Processing context "); 1263 d->name()->print_value_on(&log); 1264 } 1265 } 1266 d->mark_dependent_nmethods(deopt_scope, changes); 1267 } 1268 1269 #ifndef PRODUCT 1270 if (VerifyDependencies) { 1271 // Object pointers are used as unique identifiers for dependency arguments. This 1272 // is only possible if no safepoint, i.e., GC occurs during the verification code. 1273 dependentCheckTime.start(); 1274 check_live_nmethods_dependencies(changes); 1275 dependentCheckTime.stop(); 1276 } 1277 #endif 1278 } 1279 1280 #if INCLUDE_JVMTI 1281 // RedefineClasses support for saving nmethods that are dependent on "old" methods. 1282 // We don't really expect this table to grow very large. If it does, it can become a hashtable. 1283 static GrowableArray<nmethod*>* old_nmethod_table = nullptr; 1284 1285 static void add_to_old_table(nmethod* c) { 1286 if (old_nmethod_table == nullptr) { 1287 old_nmethod_table = new (mtCode) GrowableArray<nmethod*>(100, mtCode); 1288 } 1289 old_nmethod_table->push(c); 1290 } 1291 1292 static void reset_old_method_table() { 1293 if (old_nmethod_table != nullptr) { 1294 delete old_nmethod_table; 1295 old_nmethod_table = nullptr; 1296 } 1297 } 1298 1299 // Remove this method when flushed. 1300 void CodeCache::unregister_old_nmethod(nmethod* c) { 1301 assert_lock_strong(CodeCache_lock); 1302 if (old_nmethod_table != nullptr) { 1303 int index = old_nmethod_table->find(c); 1304 if (index != -1) { 1305 old_nmethod_table->delete_at(index); 1306 } 1307 } 1308 } 1309 1310 void CodeCache::old_nmethods_do(MetadataClosure* f) { 1311 // Walk old method table and mark those on stack. 1312 int length = 0; 1313 if (old_nmethod_table != nullptr) { 1314 length = old_nmethod_table->length(); 1315 for (int i = 0; i < length; i++) { 1316 // Walk all methods saved on the last pass. Concurrent class unloading may 1317 // also be looking at this method's metadata, so don't delete it yet if 1318 // it is marked as unloaded. 
1319 old_nmethod_table->at(i)->metadata_do(f); 1320 } 1321 } 1322 log_debug(redefine, class, nmethod)("Walked %d nmethods for mark_on_stack", length); 1323 } 1324 1325 // Walk compiled methods and mark dependent methods for deoptimization. 1326 void CodeCache::mark_dependents_for_evol_deoptimization(DeoptimizationScope* deopt_scope) { 1327 assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!"); 1328 // Each redefinition creates a new set of nmethods that have references to "old" Methods 1329 // So delete old method table and create a new one. 1330 reset_old_method_table(); 1331 1332 NMethodIterator iter(NMethodIterator::all); 1333 while(iter.next()) { 1334 nmethod* nm = iter.method(); 1335 // Walk all alive nmethods to check for old Methods. 1336 // This includes methods whose inline caches point to old methods, so 1337 // inline cache clearing is unnecessary. 1338 if (nm->has_evol_metadata()) { 1339 deopt_scope->mark(nm); 1340 add_to_old_table(nm); 1341 } 1342 } 1343 } 1344 1345 void CodeCache::mark_all_nmethods_for_evol_deoptimization(DeoptimizationScope* deopt_scope) { 1346 assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!"); 1347 NMethodIterator iter(NMethodIterator::all); 1348 while(iter.next()) { 1349 nmethod* nm = iter.method(); 1350 if (!nm->method()->is_method_handle_intrinsic()) { 1351 if (nm->can_be_deoptimized()) { 1352 deopt_scope->mark(nm); 1353 } 1354 if (nm->has_evol_metadata()) { 1355 add_to_old_table(nm); 1356 } 1357 } 1358 } 1359 } 1360 1361 #endif // INCLUDE_JVMTI 1362 1363 // Mark methods for deopt (if safe or possible). 1364 void CodeCache::mark_all_nmethods_for_deoptimization(DeoptimizationScope* deopt_scope) { 1365 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1366 NMethodIterator iter(NMethodIterator::not_unloading); 1367 while(iter.next()) { 1368 nmethod* nm = iter.method(); 1369 if (!nm->is_native_method()) { 1370 deopt_scope->mark(nm); 1371 } 1372 } 1373 } 1374 1375 void CodeCache::mark_for_deoptimization(DeoptimizationScope* deopt_scope, Method* dependee) { 1376 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1377 1378 NMethodIterator iter(NMethodIterator::not_unloading); 1379 while(iter.next()) { 1380 nmethod* nm = iter.method(); 1381 if (nm->is_dependent_on_method(dependee)) { 1382 deopt_scope->mark(nm); 1383 } 1384 } 1385 } 1386 1387 void CodeCache::make_marked_nmethods_deoptimized() { 1388 RelaxedNMethodIterator iter(RelaxedNMethodIterator::not_unloading); 1389 while(iter.next()) { 1390 nmethod* nm = iter.method(); 1391 if (nm->is_marked_for_deoptimization() && !nm->has_been_deoptimized() && nm->can_be_deoptimized()) { 1392 nm->make_not_entrant("marked for deoptimization"); 1393 nm->make_deoptimized(); 1394 } 1395 } 1396 } 1397 1398 // Marks compiled methods dependent on dependee. 1399 void CodeCache::mark_dependents_on(DeoptimizationScope* deopt_scope, InstanceKlass* dependee) { 1400 assert_lock_strong(Compile_lock); 1401 1402 if (!has_nmethods_with_dependencies()) { 1403 return; 1404 } 1405 1406 if (dependee->is_linked()) { 1407 // Class initialization state change. 1408 KlassInitDepChange changes(dependee); 1409 mark_for_deoptimization(deopt_scope, changes); 1410 } else { 1411 // New class is loaded. 
1412 NewKlassDepChange changes(dependee); 1413 mark_for_deoptimization(deopt_scope, changes); 1414 } 1415 } 1416 1417 // Marks compiled methods dependent on dependee 1418 void CodeCache::mark_dependents_on_method_for_breakpoint(const methodHandle& m_h) { 1419 assert(SafepointSynchronize::is_at_safepoint(), "invariant"); 1420 1421 DeoptimizationScope deopt_scope; 1422 // Compute the dependent nmethods 1423 mark_for_deoptimization(&deopt_scope, m_h()); 1424 deopt_scope.deoptimize_marked(); 1425 } 1426 1427 void CodeCache::verify() { 1428 assert_locked_or_safepoint(CodeCache_lock); 1429 FOR_ALL_HEAPS(heap) { 1430 (*heap)->verify(); 1431 FOR_ALL_BLOBS(cb, *heap) { 1432 cb->verify(); 1433 } 1434 } 1435 } 1436 1437 // A CodeHeap is full. Print out warning and report event. 1438 PRAGMA_DIAG_PUSH 1439 PRAGMA_FORMAT_NONLITERAL_IGNORED 1440 void CodeCache::report_codemem_full(CodeBlobType code_blob_type, bool print) { 1441 // Get nmethod heap for the given CodeBlobType and build CodeCacheFull event 1442 CodeHeap* heap = get_code_heap(code_blob_type); 1443 assert(heap != nullptr, "heap is null"); 1444 1445 int full_count = heap->report_full(); 1446 1447 if ((full_count == 1) || print) { 1448 // Not yet reported for this heap, report 1449 if (SegmentedCodeCache) { 1450 ResourceMark rm; 1451 stringStream msg1_stream, msg2_stream; 1452 msg1_stream.print("%s is full. Compiler has been disabled.", 1453 get_code_heap_name(code_blob_type)); 1454 msg2_stream.print("Try increasing the code heap size using -XX:%s=", 1455 get_code_heap_flag_name(code_blob_type)); 1456 const char *msg1 = msg1_stream.as_string(); 1457 const char *msg2 = msg2_stream.as_string(); 1458 1459 log_warning(codecache)("%s", msg1); 1460 log_warning(codecache)("%s", msg2); 1461 warning("%s", msg1); 1462 warning("%s", msg2); 1463 } else { 1464 const char *msg1 = "CodeCache is full. Compiler has been disabled."; 1465 const char *msg2 = "Try increasing the code cache size using -XX:ReservedCodeCacheSize="; 1466 1467 log_warning(codecache)("%s", msg1); 1468 log_warning(codecache)("%s", msg2); 1469 warning("%s", msg1); 1470 warning("%s", msg2); 1471 } 1472 stringStream s; 1473 // Dump code cache into a buffer before locking the tty. 1474 { 1475 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 1476 print_summary(&s); 1477 } 1478 { 1479 ttyLocker ttyl; 1480 tty->print("%s", s.freeze()); 1481 } 1482 1483 if (full_count == 1) { 1484 if (PrintCodeHeapAnalytics) { 1485 CompileBroker::print_heapinfo(tty, "all", 4096); // details, may be a lot! 
1486 } 1487 } 1488 } 1489 1490 EventCodeCacheFull event; 1491 if (event.should_commit()) { 1492 event.set_codeBlobType((u1)code_blob_type); 1493 event.set_startAddress((u8)heap->low_boundary()); 1494 event.set_commitedTopAddress((u8)heap->high()); 1495 event.set_reservedTopAddress((u8)heap->high_boundary()); 1496 event.set_entryCount(heap->blob_count()); 1497 event.set_methodCount(heap->nmethod_count()); 1498 event.set_adaptorCount(heap->adapter_count()); 1499 event.set_unallocatedCapacity(heap->unallocated_capacity()); 1500 event.set_fullCount(heap->full_count()); 1501 event.set_codeCacheMaxCapacity(CodeCache::max_capacity()); 1502 event.commit(); 1503 } 1504 } 1505 PRAGMA_DIAG_POP 1506 1507 void CodeCache::print_memory_overhead() { 1508 size_t wasted_bytes = 0; 1509 FOR_ALL_ALLOCABLE_HEAPS(heap) { 1510 CodeHeap* curr_heap = *heap; 1511 for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != nullptr; cb = (CodeBlob*)curr_heap->next(cb)) { 1512 HeapBlock* heap_block = ((HeapBlock*)cb) - 1; 1513 wasted_bytes += heap_block->length() * CodeCacheSegmentSize - cb->size(); 1514 } 1515 } 1516 // Print bytes that are allocated in the freelist 1517 ttyLocker ttl; 1518 tty->print_cr("Number of elements in freelist: %zd", freelists_length()); 1519 tty->print_cr("Allocated in freelist: %zdkB", bytes_allocated_in_freelists()/K); 1520 tty->print_cr("Unused bytes in CodeBlobs: %zdkB", (wasted_bytes/K)); 1521 tty->print_cr("Segment map size: %zdkB", allocated_segments()/K); // 1 byte per segment 1522 } 1523 1524 static void print_helper1(outputStream* st, const char* prefix, int total, int not_entrant, int used) { 1525 if (total > 0) { 1526 double ratio = (100.0 * used) / total; 1527 st->print("%s %3d nmethods: %3d not_entrant, %d used (%2.1f%%)", prefix, total, not_entrant, used, ratio); 1528 } 1529 } 1530 1531 void CodeCache::print_nmethod_statistics_on(outputStream* st) { 1532 int stats [2][6][3][2] = {0}; 1533 int stats_used[2][6][3][2] = {0}; 1534 1535 int total_osr = 0; 1536 int total_entrant = 0; 1537 int total_non_entrant = 0; 1538 int total_other = 0; 1539 int total_used = 0; 1540 1541 NMethodIterator iter(NMethodIterator::all); 1542 while (iter.next()) { 1543 nmethod* nm = iter.method(); 1544 if (nm->is_in_use()) { 1545 ++total_entrant; 1546 } else if (nm->is_not_entrant()) { 1547 ++total_non_entrant; 1548 } else { 1549 ++total_other; 1550 } 1551 if (nm->is_osr_method()) { 1552 ++total_osr; 1553 } 1554 if (nm->used()) { 1555 ++total_used; 1556 } 1557 assert(!nm->preloaded() || nm->comp_level() == CompLevel_full_optimization, ""); 1558 1559 int idx1 = nm->is_aot() ? 1 : 0; 1560 int idx2 = nm->comp_level() + (nm->preloaded() ? 1 : 0); 1561 int idx3 = (nm->is_in_use() ? 0 : 1562 (nm->is_not_entrant() ? 1 : 1563 2)); 1564 int idx4 = (nm->is_osr_method() ? 
    stats[idx1][idx2][idx3][idx4] += 1;
    if (nm->used()) {
      stats_used[idx1][idx2][idx3][idx4] += 1;
    }
  }

  st->print("Total: %d methods (%d entrant / %d not_entrant; osr: %d ",
            total_entrant + total_non_entrant + total_other,
            total_entrant, total_non_entrant, total_osr);
  if (total_other > 0) {
    st->print("; %d other", total_other);
  }
  st->print_cr(")");

  for (int i = CompLevel_simple; i <= CompLevel_full_optimization; i++) {
    int total_normal = stats[0][i][0][0] + stats[0][i][1][0] + stats[0][i][2][0];
    int total_osr    = stats[0][i][0][1] + stats[0][i][1][1] + stats[0][i][2][1];
    if (total_normal + total_osr > 0) {
      st->print(" Tier%d:", i);
      print_helper1(st, "",       total_normal, stats[0][i][1][0], stats_used[0][i][0][0] + stats_used[0][i][1][0]);
      print_helper1(st, "; osr:", total_osr,    stats[0][i][1][1], stats_used[0][i][0][1] + stats_used[0][i][1][1]);
      st->cr();
    }
  }
  st->cr();
  for (int i = CompLevel_simple; i <= CompLevel_full_optimization + 1; i++) {
    int total_normal = stats[1][i][0][0] + stats[1][i][1][0] + stats[1][i][2][0];
    int total_osr    = stats[1][i][0][1] + stats[1][i][1][1] + stats[1][i][2][1];
    assert(total_osr == 0, "sanity");
    if (total_normal + total_osr > 0) {
      st->print(" AOT Code T%d:", i);
      print_helper1(st, "",       total_normal, stats[1][i][1][0], stats_used[1][i][0][0] + stats_used[1][i][1][0]);
      print_helper1(st, "; osr:", total_osr,    stats[1][i][1][1], stats_used[1][i][0][1] + stats_used[1][i][1][1]);
      st->cr();
    }
  }
}

//------------------------------------------------------------------------------------------------
// Non-product version

#ifndef PRODUCT

void CodeCache::print_trace(const char* event, CodeBlob* cb, uint size) {
  if (PrintCodeCache2) { // Need to add a new flag
    ResourceMark rm;
    if (size == 0) {
      int s = cb->size();
      assert(s >= 0, "CodeBlob size is negative: %d", s);
      size = (uint) s;
    }
    tty->print_cr("CodeCache %s: addr: " INTPTR_FORMAT ", size: 0x%x", event, p2i(cb), size);
  }
}

void CodeCache::print_internals() {
  int nmethodCount = 0;
  int runtimeStubCount = 0;
  int upcallStubCount = 0;
  int adapterCount = 0;
  int mhAdapterCount = 0;
  int vtableBlobCount = 0;
  int deoptimizationStubCount = 0;
  int uncommonTrapStubCount = 0;
  int exceptionStubCount = 0;
  int safepointStubCount = 0;
  int bufferBlobCount = 0;
  int total = 0;
  int nmethodNotEntrant = 0;
  int nmethodJava = 0;
  int nmethodNative = 0;
  int max_nm_size = 0;
  ResourceMark rm;

  int i = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    int heap_total = 0;
    tty->print_cr("-- %s --", (*heap)->name());
    FOR_ALL_BLOBS(cb, *heap) {
      total++;
      heap_total++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;

        tty->print("%4d: ", heap_total);
        CompileTask::print(tty, nm, (nm->is_not_entrant() ? "non-entrant" : ""), true, true);

        nmethodCount++;

        if(nm->is_not_entrant()) { nmethodNotEntrant++; }
        if(nm->method() != nullptr && nm->is_native_method()) { nmethodNative++; }

        if(nm->method() != nullptr && nm->is_java_method()) {
          nmethodJava++;
          max_nm_size = MAX2(max_nm_size, nm->size());
        }
      } else if (cb->is_runtime_stub()) {
        runtimeStubCount++;
      } else if (cb->is_upcall_stub()) {
        upcallStubCount++;
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStubCount++;
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStubCount++;
      } else if (cb->is_exception_stub()) {
        exceptionStubCount++;
      } else if (cb->is_safepoint_stub()) {
        safepointStubCount++;
      } else if (cb->is_adapter_blob()) {
        adapterCount++;
      } else if (cb->is_method_handles_adapter_blob()) {
        mhAdapterCount++;
      } else if (cb->is_vtable_blob()) {
        vtableBlobCount++;
      } else if (cb->is_buffer_blob()) {
        bufferBlobCount++;
      }
    }
  }

  int bucketSize = 512;
  int bucketLimit = max_nm_size / bucketSize + 1;
  int *buckets = NEW_C_HEAP_ARRAY(int, bucketLimit, mtCode);
  memset(buckets, 0, sizeof(int) * bucketLimit);

  NMethodIterator iter(NMethodIterator::all);
  while(iter.next()) {
    nmethod* nm = iter.method();
    if(nm->method() != nullptr && nm->is_java_method()) {
      buckets[nm->size() / bucketSize]++;
    }
  }

  tty->print_cr("Code Cache Entries (total of %d)",total);
  tty->print_cr("-------------------------------------------------");
  tty->print_cr("nmethods: %d",nmethodCount);
  tty->print_cr("\tnot_entrant: %d",nmethodNotEntrant);
  tty->print_cr("\tjava: %d",nmethodJava);
  tty->print_cr("\tnative: %d",nmethodNative);
  tty->print_cr("runtime_stubs: %d",runtimeStubCount);
  tty->print_cr("upcall_stubs: %d",upcallStubCount);
  tty->print_cr("adapters: %d",adapterCount);
  tty->print_cr("MH adapters: %d",mhAdapterCount);
  tty->print_cr("VTables: %d",vtableBlobCount);
  tty->print_cr("buffer blobs: %d",bufferBlobCount);
  tty->print_cr("deoptimization_stubs: %d",deoptimizationStubCount);
  tty->print_cr("uncommon_traps: %d",uncommonTrapStubCount);
  tty->print_cr("exception_stubs: %d",exceptionStubCount);
  tty->print_cr("safepoint_stubs: %d",safepointStubCount);
  tty->print_cr("\nnmethod size distribution");
  tty->print_cr("-------------------------------------------------");

  for(int i=0; i<bucketLimit; i++) {
    if(buckets[i] != 0) {
      tty->print("%d - %d bytes",i*bucketSize,(i+1)*bucketSize);
      tty->fill_to(40);
      tty->print_cr("%d",buckets[i]);
    }
  }

  FREE_C_HEAP_ARRAY(int, buckets);
  print_memory_overhead();
}

#endif // !PRODUCT

void CodeCache::print() {
  print_summary(tty);

#ifndef PRODUCT
  if (!Verbose) return;

  CodeBlob_sizes live[CompLevel_full_optimization + 1];
  CodeBlob_sizes runtimeStub;
  CodeBlob_sizes upcallStub;
  CodeBlob_sizes uncommonTrapStub;
  CodeBlob_sizes deoptimizationStub;
  CodeBlob_sizes exceptionStub;
  CodeBlob_sizes safepointStub;
  CodeBlob_sizes adapter;
  CodeBlob_sizes mhAdapter;
  CodeBlob_sizes vtableBlob;
  CodeBlob_sizes bufferBlob;
  CodeBlob_sizes other;

  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    FOR_ALL_BLOBS(cb, *heap) {
      if (cb->is_nmethod()) {
        const int level = cb->as_nmethod()->comp_level();
        assert(0 <= level && level <= CompLevel_full_optimization, "Invalid compilation level");
        live[level].add(cb);
      } else if (cb->is_runtime_stub()) {
        runtimeStub.add(cb);
      } else if (cb->is_upcall_stub()) {
        upcallStub.add(cb);
      } else if (cb->is_deoptimization_stub()) {
        deoptimizationStub.add(cb);
      } else if (cb->is_uncommon_trap_stub()) {
        uncommonTrapStub.add(cb);
      } else if (cb->is_exception_stub()) {
        exceptionStub.add(cb);
      } else if (cb->is_safepoint_stub()) {
        safepointStub.add(cb);
      } else if (cb->is_adapter_blob()) {
        adapter.add(cb);
      } else if (cb->is_method_handles_adapter_blob()) {
        mhAdapter.add(cb);
      } else if (cb->is_vtable_blob()) {
        vtableBlob.add(cb);
      } else if (cb->is_buffer_blob()) {
        bufferBlob.add(cb);
      } else {
        other.add(cb);
      }
    }
  }

  tty->print_cr("nmethod dependency checking time %fs", dependentCheckTime.seconds());

  tty->print_cr("nmethod blobs per compilation level:");
  for (int i = 0; i <= CompLevel_full_optimization; i++) {
    const char *level_name;
    switch (i) {
      case CompLevel_none:              level_name = "none";              break;
      case CompLevel_simple:            level_name = "simple";            break;
      case CompLevel_limited_profile:   level_name = "limited profile";   break;
      case CompLevel_full_profile:      level_name = "full profile";      break;
      case CompLevel_full_optimization: level_name = "full optimization"; break;
      default: assert(false, "invalid compilation level");
    }
    tty->print_cr("%s:", level_name);
    live[i].print("live");
  }

  struct {
    const char* name;
    const CodeBlob_sizes* sizes;
  } non_nmethod_blobs[] = {
    { "runtime",        &runtimeStub },
    { "upcall",         &upcallStub },
    { "uncommon trap",  &uncommonTrapStub },
    { "deoptimization", &deoptimizationStub },
    { "exception",      &exceptionStub },
    { "safepoint",      &safepointStub },
    { "adapter",        &adapter },
    { "mh_adapter",     &mhAdapter },
    { "vtable",         &vtableBlob },
    { "buffer blob",    &bufferBlob },
    { "other",          &other },
  };
  tty->print_cr("Non-nmethod blobs:");
  for (auto& blob: non_nmethod_blobs) {
    blob.sizes->print(blob.name);
  }

  if (WizardMode) {
    // print the oop_map usage
    int code_size = 0;
    int number_of_blobs = 0;
    int number_of_oop_maps = 0;
    int map_size = 0;
    FOR_ALL_ALLOCABLE_HEAPS(heap) {
      FOR_ALL_BLOBS(cb, *heap) {
        number_of_blobs++;
        code_size += cb->code_size();
        ImmutableOopMapSet* set = cb->oop_maps();
        if (set != nullptr) {
          number_of_oop_maps += set->count();
          map_size           += set->nr_of_bytes();
        }
      }
    }
    tty->print_cr("OopMaps");
    tty->print_cr(" #blobs = %d", number_of_blobs);
    tty->print_cr(" code size = %d", code_size);
    tty->print_cr(" #oop_maps = %d", number_of_oop_maps);
    tty->print_cr(" map size = %d", map_size);
  }

#endif // !PRODUCT
}

void CodeCache::print_nmethods_on(outputStream* st) {
  ResourceMark rm;
  int i = 0;
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    st->print_cr("-- %s --", (*heap)->name());
    FOR_ALL_BLOBS(cb, *heap) {
      i++;
      if (cb->is_nmethod()) {
        nmethod* nm = (nmethod*)cb;
        st->print("%4d: ", i);
        CompileTask::print(st, nm, nullptr, true, false);

        const char non_entrant_char = (nm->is_not_entrant() ? 'N' : ' ');
        st->print_cr(" %c", non_entrant_char);
      }
    }
  }
}

void CodeCache::print_summary(outputStream* st, bool detailed) {
  int full_count = 0;
  julong total_used = 0;
  julong total_max_used = 0;
  julong total_free = 0;
  julong total_size = 0;
  FOR_ALL_HEAPS(heap_iterator) {
    CodeHeap* heap = (*heap_iterator);
    size_t total = (heap->high_boundary() - heap->low_boundary());
    if (_heaps->length() >= 1) {
      st->print("%s:", heap->name());
    } else {
      st->print("CodeCache:");
    }
    size_t size = total/K;
    size_t used = (total - heap->unallocated_capacity())/K;
    size_t max_used = heap->max_allocated_capacity()/K;
    size_t free = heap->unallocated_capacity()/K;
    total_size += size;
    total_used += used;
    total_max_used += max_used;
    total_free += free;
    st->print_cr(" size=%zuKb used=%zu"
                 "Kb max_used=%zuKb free=%zuKb",
                 size, used, max_used, free);

    if (detailed) {
      st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
                   p2i(heap->low_boundary()),
                   p2i(heap->high()),
                   p2i(heap->high_boundary()));

      full_count += get_codemem_full_count(heap->code_blob_type());
    }
  }

  if (detailed) {
    if (SegmentedCodeCache) {
      st->print("CodeCache:");
      st->print_cr(" size=" JULONG_FORMAT "Kb, used=" JULONG_FORMAT
                   "Kb, max_used=" JULONG_FORMAT "Kb, free=" JULONG_FORMAT "Kb",
                   total_size, total_used, total_max_used, total_free);
    }
    st->print_cr(" total_blobs=" UINT32_FORMAT ", nmethods=" UINT32_FORMAT
                 ", adapters=" UINT32_FORMAT ", full_count=" UINT32_FORMAT,
                 blob_count(), nmethod_count(), adapter_count(), full_count);
    st->print_cr("Compilation: %s, stopped_count=%d, restarted_count=%d",
                 CompileBroker::should_compile_new_jobs() ?
                 "enabled" : Arguments::mode() == Arguments::_int ?
                 "disabled (interpreter mode)" :
                 "disabled (not enough contiguous free space left)",
                 CompileBroker::get_total_compiler_stopped_count(),
                 CompileBroker::get_total_compiler_restarted_count());
  }
}

void CodeCache::print_codelist(outputStream* st) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);

  NMethodIterator iter(NMethodIterator::not_unloading);
  while (iter.next()) {
    nmethod* nm = iter.method();
    ResourceMark rm;
    char* method_name = nm->method()->name_and_sig_as_C_string();
    const char* jvmci_name = nullptr;
#if INCLUDE_JVMCI
    jvmci_name = nm->jvmci_name();
#endif
    st->print_cr("%d %d %d %s%s%s [" INTPTR_FORMAT ", " INTPTR_FORMAT " - " INTPTR_FORMAT "]",
                 nm->compile_id(), nm->comp_level(), nm->get_state(),
                 method_name, jvmci_name ? " jvmci_name=" : "", jvmci_name ? jvmci_name : "",
                 (intptr_t)nm->header_begin(), (intptr_t)nm->code_begin(), (intptr_t)nm->code_end());
  }
}

void CodeCache::print_layout(outputStream* st) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  ResourceMark rm;
  print_summary(st, true);
}

void CodeCache::log_state(outputStream* st) {
  st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
            " adapters='" UINT32_FORMAT "' free_code_cache='%zu'",
            blob_count(), nmethod_count(), adapter_count(),
            unallocated_capacity());
}

#ifdef LINUX
void CodeCache::write_perf_map(const char* filename, outputStream* st) {
  MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  char fname[JVM_MAXPATHLEN];
  if (filename == nullptr) {
    // Invocation outside of jcmd requires pid substitution.
    if (!Arguments::copy_expand_pid(DEFAULT_PERFMAP_FILENAME,
                                    strlen(DEFAULT_PERFMAP_FILENAME),
                                    fname, JVM_MAXPATHLEN)) {
      st->print_cr("Warning: Not writing perf map as pid substitution failed.");
      return;
    }
    filename = fname;
  }
  fileStream fs(filename, "w");
  if (!fs.is_open()) {
    st->print_cr("Warning: Failed to create %s for perf map", filename);
    return;
  }

  AllCodeBlobsIterator iter(AllCodeBlobsIterator::not_unloading);
  while (iter.next()) {
    CodeBlob *cb = iter.method();
    ResourceMark rm;
    const char* method_name = nullptr;
    const char* jvmci_name = nullptr;
    if (cb->is_nmethod()) {
      nmethod* nm = cb->as_nmethod();
      method_name = nm->method()->external_name();
#if INCLUDE_JVMCI
      jvmci_name = nm->jvmci_name();
#endif
    } else {
      method_name = cb->name();
    }
    fs.print_cr(INTPTR_FORMAT " " INTPTR_FORMAT " %s%s%s",
                (intptr_t)cb->code_begin(), (intptr_t)cb->code_size(),
                method_name, jvmci_name ? " jvmci_name=" : "", jvmci_name ? jvmci_name : "");
  }
}
#endif // LINUX

//---< BEGIN >--- CodeHeap State Analytics.

void CodeCache::aggregate(outputStream *out, size_t granularity) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::aggregate(out, (*heap), granularity);
  }
}

void CodeCache::discard(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::discard(out, (*heap));
  }
}

void CodeCache::print_usedSpace(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_usedSpace(out, (*heap));
  }
}

void CodeCache::print_freeSpace(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_freeSpace(out, (*heap));
  }
}

void CodeCache::print_count(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_count(out, (*heap));
  }
}

void CodeCache::print_space(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_space(out, (*heap));
  }
}

void CodeCache::print_age(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_age(out, (*heap));
  }
}

void CodeCache::print_names(outputStream *out) {
  FOR_ALL_ALLOCABLE_HEAPS(heap) {
    CodeHeapState::print_names(out, (*heap));
  }
}
//---< END >--- CodeHeap State Analytics.