/*
 * Copyright (c) 1998, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/relocInfo.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
#include "memory/allocation.inline.hpp"
#include "memory/heap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaFrameAnchor.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframe.hpp"
#include "services/memoryService.hpp"
#include "utilities/align.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

#include <type_traits>
// Virtual methods are not allowed in code blobs to simplify caching compiled code.
// Check all "leaf" subclasses of the CodeBlob class.

static_assert(!std::is_polymorphic<nmethod>::value,            "no virtual methods are allowed in nmethod");
static_assert(!std::is_polymorphic<AdapterBlob>::value,        "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<VtableBlob>::value,         "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<MethodHandlesAdapterBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<RuntimeStub>::value,        "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<BufferedInlineTypeBlob>::value,   "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<DeoptimizationBlob>::value, "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<SafepointBlob>::value,      "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<UpcallStub>::value,         "no virtual methods are allowed in code blobs");
#ifdef COMPILER2
static_assert(!std::is_polymorphic<ExceptionBlob>::value,      "no virtual methods are allowed in code blobs");
static_assert(!std::is_polymorphic<UncommonTrapBlob>::value,   "no virtual methods are allowed in code blobs");
#endif
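// Illustrative example only: a hypothetical declaration such as
//   class RuntimeStub : public RuntimeBlob {
//     virtual void print_extra() const;  // hypothetical virtual method
//   };
// would make the class polymorphic (the compiler would add a vtable pointer
// to every instance) and fire the corresponding static_assert above.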

// Add proxy vtables.
// We only need a few for now; they are used only for printing.
const nmethod::Vptr                  nmethod::_vpntr;
const BufferBlob::Vptr               BufferBlob::_vpntr;
const RuntimeStub::Vptr              RuntimeStub::_vpntr;
const SingletonBlob::Vptr            SingletonBlob::_vpntr;
const DeoptimizationBlob::Vptr       DeoptimizationBlob::_vpntr;
const UpcallStub::Vptr               UpcallStub::_vpntr;

const CodeBlob::Vptr* CodeBlob::vptr() const {
  constexpr const CodeBlob::Vptr* array[(size_t)CodeBlobKind::Number_Of_Kinds] = {
      nullptr/* None */,
      &nmethod::_vpntr,
      &BufferBlob::_vpntr,
      &AdapterBlob::_vpntr,
      &VtableBlob::_vpntr,
      &MethodHandlesAdapterBlob::_vpntr,
      &BufferedInlineTypeBlob::_vpntr,
      &RuntimeStub::_vpntr,
      &DeoptimizationBlob::_vpntr,
      &SafepointBlob::_vpntr,
#ifdef COMPILER2
      &ExceptionBlob::_vpntr,
      &UncommonTrapBlob::_vpntr,
#endif
      &UpcallStub::_vpntr
  };

  return array[(size_t)_kind];
}
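// This kind-indexed table stands in for a C++ vtable: the printing entry
// points dispatch through it instead of making a virtual call. For example,
//   blob->print_on(st);
// forwards to vptr()->print_on(this, st) (see CodeBlob::print_on() near the
// end of this file), selecting e.g. RuntimeStub::_vpntr for a runtime stub.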

unsigned int CodeBlob::align_code_offset(int offset) {
  // align the size to CodeEntryAlignment
  int header_size = (int)CodeHeap::header_size();
  return align_up(offset + header_size, CodeEntryAlignment) - header_size;
}
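// Worked example (illustrative; both values are platform dependent): with
// CodeHeap::header_size() == 32 and CodeEntryAlignment == 64, an offset of
// 40 yields align_up(40 + 32, 64) - 32 == 128 - 32 == 96, so that the code
// placed at the returned offset starts on a CodeEntryAlignment boundary
// once the CodeHeap block header is accounted for.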

// This must be consistent with the CodeBlob constructor's layout actions.
unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
  // align the size to CodeEntryAlignment
  unsigned int size = align_code_offset(header_size);
  size += align_up(cb->total_content_size(), oopSize);
  size += align_up(cb->total_oop_size(), oopSize);
  return size;
}
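// Typical use (sketch): compute the full blob footprint, header plus
// contents, before allocating in the code cache, e.g.
//   unsigned int size = CodeBlob::allocation_size(cb, sizeof(RuntimeStub));
//   RuntimeStub* stub = new (size) RuntimeStub(...);
// as done by the create()/new_runtime_stub() factories below.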

CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, uint16_t header_size,
                   int16_t frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments,
                   int mutable_data_size) :
  _oop_maps(nullptr), // will be set by set_oop_maps() call
  _name(name),
  _mutable_data(header_begin() + size), // default value is blob_end()
  _size(size),
  _relocation_size(align_up(cb->total_relocation_size(), oopSize)),
  _content_offset(CodeBlob::align_code_offset(header_size)),
  _code_offset(_content_offset + cb->total_offset_of(cb->insts())),
  _data_offset(_content_offset + align_up(cb->total_content_size(), oopSize)),
  _frame_size(frame_size),
  _mutable_data_size(mutable_data_size),
  S390_ONLY(_ctable_offset(0) COMMA)
  _header_size(header_size),
  _frame_complete_offset(frame_complete_offset),
  _kind(kind),
  _caller_must_gc_arguments(caller_must_gc_arguments)
{
  assert(is_aligned(_size,            oopSize), "unaligned size");
  assert(is_aligned(header_size,      oopSize), "unaligned size");
  assert(is_aligned(_relocation_size, oopSize), "unaligned size");
  assert(_data_offset <= _size, "codeBlob is too small: %d > %d", _data_offset, _size);
  assert(is_nmethod() || (cb->total_oop_size() + cb->total_metadata_size() == 0), "only nmethods may have oops or metadata");
  assert(code_end() == content_end(), "must be the same - see code_end()");
#ifdef COMPILER1
  // probably wrong for tiered
  assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1

  if (_mutable_data_size > 0) {
    _mutable_data = (address)os::malloc(_mutable_data_size, mtCode);
    if (_mutable_data == nullptr) {
      vm_exit_out_of_memory(_mutable_data_size, OOM_MALLOC_ERROR, "codebuffer: no space for mutable data");
    }
  } else {
    // We need a unique, valid, non-null address
    assert(_mutable_data == blob_end(), "sanity");
  }

  set_oop_maps(oop_maps);
}
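// Resulting layout (sketch):
//   [ header | content: code ... | data ]
// where _content_offset is aligned via align_code_offset(), _code_offset
// locates the instructions within the content, and _data_offset marks the
// end of the content. Mutable data (relocations) is not part of this
// layout: when present it is malloc'ed separately and released in purge().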

// Simple CodeBlob constructor, used for simple BufferBlobs.
CodeBlob::CodeBlob(const char* name, CodeBlobKind kind, int size, uint16_t header_size) :
  _oop_maps(nullptr),
  _name(name),
  _mutable_data(header_begin() + size), // default value is blob_end()
  _size(size),
  _relocation_size(0),
  _content_offset(CodeBlob::align_code_offset(header_size)),
  _code_offset(_content_offset),
  _data_offset(size),
  _frame_size(0),
  S390_ONLY(_ctable_offset(0) COMMA)
  _header_size(header_size),
  _frame_complete_offset(CodeOffsets::frame_never_safe),
  _kind(kind),
  _caller_must_gc_arguments(false)
{
  assert(is_aligned(size,            oopSize), "unaligned size");
  assert(is_aligned(header_size,     oopSize), "unaligned size");
  assert(_mutable_data == blob_end(), "sanity");
}

void CodeBlob::purge() {
  assert(_mutable_data != nullptr, "should never be null");
  if (_mutable_data != blob_end()) {
    os::free(_mutable_data);
    _mutable_data = blob_end(); // Restore the valid non-null sentinel
  }
  if (_oop_maps != nullptr) {
    delete _oop_maps;
    _oop_maps = nullptr;
  }
  NOT_PRODUCT(_asm_remarks.clear());
  NOT_PRODUCT(_dbg_strings.clear());
}

void CodeBlob::set_oop_maps(OopMapSet* p) {
  // Danger Will Robinson! This method allocates a big
  // chunk of memory, and it's your job to free it.
  if (p != nullptr) {
    _oop_maps = ImmutableOopMapSet::build_from(p);
  } else {
    _oop_maps = nullptr;
  }
}

const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) const {
  assert(_oop_maps != nullptr, "blob has no oop maps");
  return _oop_maps->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
}

void CodeBlob::print_code_on(outputStream* st) {
  ResourceMark m;
  Disassembler::decode(this, st);
}

//-----------------------------------------------------------------------------------------
// Creates a RuntimeBlob from a CodeBuffer and copies code and relocation info.

RuntimeBlob::RuntimeBlob(
  const char* name,
  CodeBlobKind kind,
  CodeBuffer* cb,
  int         size,
  uint16_t    header_size,
  int16_t     frame_complete,
  int         frame_size,
  OopMapSet*  oop_maps,
  bool        caller_must_gc_arguments)
  : CodeBlob(name, kind, cb, size, header_size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments,
             align_up(cb->total_relocation_size(), oopSize))
{
  cb->copy_code_and_locs_to(this);
}
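// Note: the aligned relocation size is passed to CodeBlob as
// mutable_data_size above, so a RuntimeBlob's relocation info lives in the
// separately allocated mutable data area rather than inside the blob.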

void RuntimeBlob::free(RuntimeBlob* blob) {
  assert(blob != nullptr, "caller must check for nullptr");
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  blob->purge();
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::free(blob);
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}

void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const char* name2) {
  // Do not hold the CodeCache lock during name formatting.
  assert(!CodeCache_lock->owned_by_self(), "release CodeCache_lock before registering the stub");

  if (stub != nullptr && (PrintStubCode ||
                          Forte::is_enabled() ||
                          JvmtiExport::should_post_dynamic_code_generated())) {
    char stub_id[256];
    assert(strlen(name1) + strlen(name2) < sizeof(stub_id), "stub name too long");
    jio_snprintf(stub_id, sizeof(stub_id), "%s%s", name1, name2);
    if (PrintStubCode) {
      ttyLocker ttyl;
      tty->print_cr("- - - [BEGIN] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->print_cr("Decoding %s " PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (%d bytes)",
                    stub_id, p2i(stub), p2i(stub->code_begin()), p2i(stub->code_end()), stub->code_size());
      Disassembler::decode(stub->code_begin(), stub->code_end(), tty
                           NOT_PRODUCT(COMMA &stub->asm_remarks()));
      if ((stub->oop_maps() != nullptr) && AbstractDisassembler::show_structs()) {
        tty->print_cr("- - - [OOP MAPS]- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
        stub->oop_maps()->print();
      }
      tty->print_cr("- - - [END] - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -");
      tty->cr();
    }
    if (Forte::is_enabled()) {
      Forte::register_stub(stub_id, stub->code_begin(), stub->code_end());
    }

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      const char* stub_name = name2;
      if (name2[0] == '\0')  stub_name = name1;
      JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end());
    }
  }

  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();
}
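// Usage sketch: callers pass a prefix and a name that get concatenated into
// the stub id, e.g. trace_new_stub(stub, "RuntimeStub - ", stub_name)
// below prints/registers the stub as "RuntimeStub - <stub_name>".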

//----------------------------------------------------------------------------------------------------
// Implementation of BufferBlob

BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, int size)
: RuntimeBlob(name, kind, size, sizeof(BufferBlob))
{}

BufferBlob* BufferBlob::create(const char* name, uint buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = nullptr;
  unsigned int size = sizeof(BufferBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != nullptr, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, size);
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}


BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, int header_size)
  : RuntimeBlob(name, kind, cb, size, header_size, CodeOffsets::frame_never_safe, 0, nullptr)
{}

// Used by gtest
BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferBlob));
  assert(name != nullptr, "must provide a name");
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferBlob(name, CodeBlobKind::Buffer, cb, size, sizeof(BufferBlob));
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

void* BufferBlob::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

void BufferBlob::free(BufferBlob *blob) {
  RuntimeBlob::free(blob);
}

BufferBlob::BufferBlob(const char* name, CodeBlobKind kind, CodeBuffer* cb, int size, int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
  : RuntimeBlob(name, kind, cb, size, sizeof(BufferBlob), frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{}


//----------------------------------------------------------------------------------------------------
// Implementation of AdapterBlob

AdapterBlob::AdapterBlob(int size, CodeBuffer* cb, int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) :
  BufferBlob("I2C/C2I adapters", CodeBlobKind::Adapter, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) {
  CodeCache::commit(this);
}

AdapterBlob* AdapterBlob::create(CodeBuffer* cb, int frame_complete, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  CodeCache::gc_on_allocation();

  AdapterBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(AdapterBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) AdapterBlob(size, cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of VtableBlob

void* VtableBlob::operator new(size_t s, unsigned size) throw() {
  // Handling of allocation failure stops compilation and prints a bunch of
  // stuff, which requires unlocking the CodeCache_lock, so that the Compile_lock
  // can be locked, and then re-locking the CodeCache_lock. That is not safe in
  // this context as we hold the CompiledICLocker. So we just don't handle code
  // cache exhaustion here; we leave that for a later allocation that does not
  // hold the CompiledICLocker.
  return CodeCache::allocate(size, CodeBlobType::NonNMethod, false /* handle_alloc_failure */);
}

VtableBlob::VtableBlob(const char* name, int size) :
  BufferBlob(name, CodeBlobKind::Vtable, size) {
}

VtableBlob* VtableBlob::create(const char* name, int buffer_size) {
  assert(JavaThread::current()->thread_state() == _thread_in_vm, "called with the wrong state");

  VtableBlob* blob = nullptr;
  unsigned int size = sizeof(VtableBlob);
  // align the size to CodeEntryAlignment
  size = align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  assert(name != nullptr, "must provide a name");
  {
    if (!CodeCache_lock->try_lock()) {
      // If we can't take the CodeCache_lock, then this is a bad time to perform the ongoing
      // IC transition to megamorphic, for which this stub will be needed. It is better to
      // bail out of the transition and wait for a more opportune moment. Not only is it not
      // worth blocking on the lock for the megamorphic transition; blocking might also
      // result in a deadlock when concurrent class unloading is performed. At this point
      // in time, the CompiledICLocker is taken, so we are not allowed to blockingly wait
      // for the CodeCache_lock, as these two locks are otherwise consistently taken in the
      // opposite order. Bailing out results in an IC transition to the clean state instead,
      // which will cause subsequent calls to retry the transition eventually.
      return nullptr;
    }
    blob = new (size) VtableBlob(name, size);
    CodeCache_lock->unlock();
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of MethodHandlesAdapterBlob

MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  MethodHandlesAdapterBlob* blob = nullptr;
  unsigned int size = sizeof(MethodHandlesAdapterBlob);
  // align the size to CodeEntryAlignment
  size = CodeBlob::align_code_offset(size);
  size += align_up(buffer_size, oopSize);
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) MethodHandlesAdapterBlob(size);
    if (blob == nullptr) {
      vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "CodeCache: no room for method handle adapter blob");
    }
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of BufferedInlineTypeBlob
BufferedInlineTypeBlob::BufferedInlineTypeBlob(int size, CodeBuffer* cb, int pack_fields_off, int pack_fields_jobject_off, int unpack_fields_off) :
  BufferBlob("buffered inline type", CodeBlobKind::BufferedInlineType, cb, size, sizeof(BufferedInlineTypeBlob)),
  _pack_fields_off(pack_fields_off),
  _pack_fields_jobject_off(pack_fields_jobject_off),
  _unpack_fields_off(unpack_fields_off) {
  CodeCache::commit(this);
}

BufferedInlineTypeBlob* BufferedInlineTypeBlob::create(CodeBuffer* cb, int pack_fields_off, int pack_fields_jobject_off, int unpack_fields_off) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  BufferedInlineTypeBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferedInlineTypeBlob));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) BufferedInlineTypeBlob(size, cb, pack_fields_off, pack_fields_jobject_off, unpack_fields_off);
  }
  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of RuntimeStub

RuntimeStub::RuntimeStub(
  const char* name,
  CodeBuffer* cb,
  int         size,
  int16_t     frame_complete,
  int         frame_size,
  OopMapSet*  oop_maps,
  bool        caller_must_gc_arguments
)
: RuntimeBlob(name, CodeBlobKind::RuntimeStub, cb, size, sizeof(RuntimeStub),
              frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{
}

RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
                                           CodeBuffer* cb,
                                           int16_t frame_complete,
                                           int frame_size,
                                           OopMapSet* oop_maps,
                                           bool caller_must_gc_arguments,
                                           bool alloc_fail_is_fatal)
{
  RuntimeStub* stub = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(RuntimeStub));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
    if (stub == nullptr) {
      if (!alloc_fail_is_fatal) {
        return nullptr;
      }
      fatal("Initial size of CodeCache is too small");
    }
  }

  trace_new_stub(stub, "RuntimeStub - ", stub_name);

  return stub;
}
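// Call shape (illustrative only; the stub name and argument names below are
// hypothetical, real callers live in SharedRuntime and the stub generators):
//   RuntimeStub* stub =
//     RuntimeStub::new_runtime_stub("example_stub", &code_buffer,
//                                   frame_complete_offset, frame_size_in_words,
//                                   oop_maps, /* caller_must_gc_arguments */ false);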


void* RuntimeStub::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

// operator new shared by all singletons:
void* SingletonBlob::operator new(size_t s, unsigned size, bool alloc_fail_is_fatal) throw() {
  void* p = CodeCache::allocate(size, CodeBlobType::NonNMethod);
  if (alloc_fail_is_fatal && !p) fatal("Initial size of CodeCache is too small");
  return p;
}
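// Note: the COMPILER2 singletons below are created with
// `new (size, false) ...`, i.e. alloc_fail_is_fatal == false, so their
// creation returns nullptr on code cache exhaustion instead of calling
// fatal() as above.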


//----------------------------------------------------------------------------------------------------
// Implementation of DeoptimizationBlob

DeoptimizationBlob::DeoptimizationBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         unpack_offset,
  int         unpack_with_exception_offset,
  int         unpack_with_reexecution_offset,
  int         frame_size
)
: SingletonBlob("DeoptimizationBlob", CodeBlobKind::Deoptimization, cb,
                size, sizeof(DeoptimizationBlob), frame_size, oop_maps)
{
  _unpack_offset           = unpack_offset;
  _unpack_with_exception   = unpack_with_exception_offset;
  _unpack_with_reexecution = unpack_with_reexecution_offset;
#ifdef COMPILER1
  _unpack_with_exception_in_tls   = -1;
#endif
}


DeoptimizationBlob* DeoptimizationBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         unpack_offset,
  int         unpack_with_exception_offset,
  int         unpack_with_reexecution_offset,
  int         frame_size)
{
  DeoptimizationBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(DeoptimizationBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) DeoptimizationBlob(cb,
                                         size,
                                         oop_maps,
                                         unpack_offset,
                                         unpack_with_exception_offset,
                                         unpack_with_reexecution_offset,
                                         frame_size);
  }

  trace_new_stub(blob, "DeoptimizationBlob");

  return blob;
}

#ifdef COMPILER2

//----------------------------------------------------------------------------------------------------
// Implementation of UncommonTrapBlob

UncommonTrapBlob::UncommonTrapBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("UncommonTrapBlob", CodeBlobKind::UncommonTrap, cb,
                size, sizeof(UncommonTrapBlob), frame_size, oop_maps)
{}


UncommonTrapBlob* UncommonTrapBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  UncommonTrapBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(UncommonTrapBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size, false) UncommonTrapBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "UncommonTrapBlob");

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of ExceptionBlob

ExceptionBlob::ExceptionBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("ExceptionBlob", CodeBlobKind::Exception, cb,
                size, sizeof(ExceptionBlob), frame_size, oop_maps)
{}


ExceptionBlob* ExceptionBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  ExceptionBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(ExceptionBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size, false) ExceptionBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "ExceptionBlob");

  return blob;
}

#endif // COMPILER2

//----------------------------------------------------------------------------------------------------
// Implementation of SafepointBlob

SafepointBlob::SafepointBlob(
  CodeBuffer* cb,
  int         size,
  OopMapSet*  oop_maps,
  int         frame_size
)
: SingletonBlob("SafepointBlob", CodeBlobKind::Safepoint, cb,
                size, sizeof(SafepointBlob), frame_size, oop_maps)
{}


SafepointBlob* SafepointBlob::create(
  CodeBuffer* cb,
  OopMapSet*  oop_maps,
  int         frame_size)
{
  SafepointBlob* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(SafepointBlob));
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
  }

  trace_new_stub(blob, "SafepointBlob");

  return blob;
}

//----------------------------------------------------------------------------------------------------
// Implementation of UpcallStub

UpcallStub::UpcallStub(const char* name, CodeBuffer* cb, int size, jobject receiver, ByteSize frame_data_offset) :
  RuntimeBlob(name, CodeBlobKind::Upcall, cb, size, sizeof(UpcallStub),
              CodeOffsets::frame_never_safe, 0 /* no frame size */,
              /* oop maps = */ nullptr, /* caller must gc arguments = */ false),
  _receiver(receiver),
  _frame_data_offset(frame_data_offset)
{
  CodeCache::commit(this);
}

void* UpcallStub::operator new(size_t s, unsigned size) throw() {
  return CodeCache::allocate(size, CodeBlobType::NonNMethod);
}

UpcallStub* UpcallStub::create(const char* name, CodeBuffer* cb, jobject receiver, ByteSize frame_data_offset) {
  ThreadInVMfromUnknown __tiv;  // get to VM state in case we block on CodeCache_lock

  UpcallStub* blob = nullptr;
  unsigned int size = CodeBlob::allocation_size(cb, sizeof(UpcallStub));
  {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    blob = new (size) UpcallStub(name, cb, size, receiver, frame_data_offset);
  }
  if (blob == nullptr) {
    return nullptr; // caller must handle this
  }

  // Track memory usage statistics after releasing CodeCache_lock
  MemoryService::track_code_cache_memory_usage();

  trace_new_stub(blob, "UpcallStub - ", name);

  return blob;
}

void UpcallStub::oops_do(OopClosure* f, const frame& frame) {
  frame_data_for_frame(frame)->old_handles->oops_do(f);
}

JavaFrameAnchor* UpcallStub::jfa_for_frame(const frame& frame) const {
  return &frame_data_for_frame(frame)->jfa;
}

void UpcallStub::free(UpcallStub* blob) {
  assert(blob != nullptr, "caller must check for nullptr");
  JNIHandles::destroy_global(blob->receiver());
  RuntimeBlob::free(blob);
}

//----------------------------------------------------------------------------------------------------
// Verification and printing

void CodeBlob::verify() {
  if (is_nmethod()) {
    as_nmethod()->verify();
  }
}

void CodeBlob::print_on(outputStream* st) const {
  vptr()->print_on(this, st);
}

void CodeBlob::print() const { print_on(tty); }

void CodeBlob::print_value_on(outputStream* st) const {
  vptr()->print_value_on(this, st);
}

void CodeBlob::print_on_impl(outputStream* st) const {
  st->print_cr("[CodeBlob (" INTPTR_FORMAT ")]", p2i(this));
  st->print_cr("Framesize: %d", _frame_size);
}

void CodeBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("[CodeBlob]");
}

void CodeBlob::print_block_comment(outputStream* stream, address block_begin) const {
#if defined(SUPPORT_ASSEMBLY) || defined(SUPPORT_ABSTRACT_ASSEMBLY)
  if (is_nmethod()) {
    as_nmethod()->print_nmethod_labels(stream, block_begin);
  }
#endif

#ifndef PRODUCT
  ptrdiff_t offset = block_begin - code_begin();
  assert(offset >= 0, "Expecting non-negative offset!");
  _asm_remarks.print(uint(offset), stream);
#endif
}

void CodeBlob::dump_for_addr(address addr, outputStream* st, bool verbose) const {
  if (is_buffer_blob() || is_adapter_blob() || is_vtable_blob() || is_method_handles_adapter_blob()) {
    // the interpreter is generated into a buffer blob
    InterpreterCodelet* i = Interpreter::codelet_containing(addr);
    if (i != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an Interpreter codelet", p2i(addr), (int)(addr - i->code_begin()));
      i->print_on(st);
      return;
    }
    if (Interpreter::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing into interpreter code"
                   " (not bytecode specific)", p2i(addr));
      return;
    }
    if (AdapterHandlerLibrary::contains(this)) {
      st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an AdapterHandler", p2i(addr), (int)(addr - code_begin()));
      AdapterHandlerLibrary::print_handler_on(st, this);
    }
    // the stubroutines are generated into a buffer blob
    StubCodeDesc* d = StubCodeDesc::desc_for(addr);
    if (d != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at begin+%d in a stub", p2i(addr), (int)(addr - d->begin()));
      d->print_on(st);
      st->cr();
      return;
    }
    if (StubRoutines::contains(addr)) {
      st->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) stub routine", p2i(addr));
      return;
    }
    VtableStub* v = VtableStubs::stub_containing(addr);
    if (v != nullptr) {
      st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", p2i(addr), (int)(addr - v->entry_point()));
      v->print_on(st);
      st->cr();
      return;
    }
  }
  if (is_nmethod()) {
    nmethod* nm = (nmethod*)this;
    ResourceMark rm;
    st->print(INTPTR_FORMAT " is at entry_point+%d in (nmethod*)" INTPTR_FORMAT,
              p2i(addr), (int)(addr - nm->entry_point()), p2i(nm));
    if (verbose) {
      st->print(" for ");
      nm->method()->print_value_on(st);
    }
    st->cr();
    if (verbose && st == tty) {
      // verbose is only ever true when called from findpc in debug.cpp
      nm->print_nmethod(true);
    } else {
      nm->print_on(st);
    }
    return;
  }
  st->print_cr(INTPTR_FORMAT " is at code_begin+%d in ", p2i(addr), (int)(addr - code_begin()));
  print_on(st);
}

void BufferBlob::print_on_impl(outputStream* st) const {
  RuntimeBlob::print_on_impl(st);
  print_value_on_impl(st);
}

void BufferBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("BufferBlob (" INTPTR_FORMAT  ") used for %s", p2i(this), name());
}

void RuntimeStub::print_on_impl(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on_impl(st);
  st->print("Runtime Stub (" INTPTR_FORMAT "): ", p2i(this));
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void RuntimeStub::print_value_on_impl(outputStream* st) const {
  st->print("RuntimeStub (" INTPTR_FORMAT "): ", p2i(this)); st->print("%s", name());
}

void SingletonBlob::print_on_impl(outputStream* st) const {
  ttyLocker ttyl;
  RuntimeBlob::print_on_impl(st);
  st->print_cr("%s", name());
  Disassembler::decode((RuntimeBlob*)this, st);
}

void SingletonBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("%s", name());
}

void DeoptimizationBlob::print_value_on_impl(outputStream* st) const {
  st->print_cr("Deoptimization (frame not available)");
}

void UpcallStub::print_on_impl(outputStream* st) const {
  RuntimeBlob::print_on_impl(st);
  print_value_on_impl(st);
  st->print_cr("Frame data offset: %d", (int) _frame_data_offset);
  oop recv = JNIHandles::resolve(_receiver);
  st->print("Receiver MH=");
  recv->print_on(st);
  Disassembler::decode((RuntimeBlob*)this, st);
}

void UpcallStub::print_value_on_impl(outputStream* st) const {
  st->print_cr("UpcallStub (" INTPTR_FORMAT  ") used for %s", p2i(this), name());
}