/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/codeBuffer.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/klass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/vectorSupport.hpp"
#include "runtime/continuation.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

UnsafeMemoryAccess* UnsafeMemoryAccess::_table                  = nullptr;
int UnsafeMemoryAccess::_table_length                           = 0;
int UnsafeMemoryAccess::_table_max_length                       = 0;
address UnsafeMemoryAccess::_common_exit_stub_pc                = nullptr;

// Implementation of StubRoutines - for a description of how to
// declare new blobs, stubs and entries, see stubDefinitions.hpp.

// define arrays to hold stub and blob names

// use a template to generate the initializer for the blob names array

#define DEFINE_BLOB_NAME(blob_name)             \
  # blob_name,

const char* StubRoutines::_blob_names[StubGenBlobId::NUM_BLOBIDS] = {
  STUBGEN_BLOBS_DO(DEFINE_BLOB_NAME)
};

#undef DEFINE_BLOB_NAME
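
// For illustration only (not generated code): if STUBGEN_BLOBS_DO declares
// a blob named, say, initial, then DEFINE_BLOB_NAME(initial) expands to the
// string literal "initial", so _blob_names ends up holding one name per
// StubGenBlobId in declaration order.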

#define DEFINE_STUB_NAME(blob_name, stub_name)          \
  # stub_name ,                                         \

// use a template to generate the initializer for the stub names array
const char* StubRoutines::_stub_names[StubGenStubId::NUM_STUBIDS] = {
  STUBGEN_STUBS_DO(DEFINE_STUB_NAME)
};

#undef DEFINE_STUB_NAME

// Define fields used to store blobs

#define DEFINE_BLOB_FIELD(blob_name) \
  BufferBlob* StubRoutines:: STUBGEN_BLOB_FIELD_NAME(blob_name) = nullptr;

STUBGEN_BLOBS_DO(DEFINE_BLOB_FIELD)

#undef DEFINE_BLOB_FIELD

// Define fields used to store stub entries

#define DEFINE_ENTRY_FIELD(blob_name, stub_name, field_name, getter_name) \
  address StubRoutines:: STUB_FIELD_NAME(field_name) = nullptr;

#define DEFINE_ENTRY_FIELD_INIT(blob_name, stub_name, field_name, getter_name, init_function) \
  address StubRoutines:: STUB_FIELD_NAME(field_name) = CAST_FROM_FN_PTR(address, init_function);

#define DEFINE_ENTRY_FIELD_ARRAY(blob_name, stub_name, field_name, getter_name, count) \
  address StubRoutines:: STUB_FIELD_NAME(field_name)[count] = { nullptr };

STUBGEN_ENTRIES_DO(DEFINE_ENTRY_FIELD, DEFINE_ENTRY_FIELD_INIT, DEFINE_ENTRY_FIELD_ARRAY)

#undef DEFINE_ENTRY_FIELD_ARRAY
#undef DEFINE_ENTRY_FIELD_INIT
#undef DEFINE_ENTRY_FIELD
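
// For illustration only (not generated code): for a hypothetical entry whose
// field name is foo_entry, the three templates above expand roughly to
//
//   address StubRoutines::_foo_entry        = nullptr;
//   address StubRoutines::_foo_entry        = CAST_FROM_FN_PTR(address, some_init_function);
//   address StubRoutines::_foo_entry[count] = { nullptr };
//
// depending on whether the entry is plain, has a C++ fallback init function,
// or is an entry array. The exact field spelling comes from STUB_FIELD_NAME,
// so the names shown here are indicative only.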

jint    StubRoutines::_verify_oop_count                         = 0;


address StubRoutines::_string_indexof_array[4]   =    { nullptr };
address StubRoutines::_vector_f_math[VectorSupport::NUM_VEC_SIZES][VectorSupport::NUM_VECTOR_OP_MATH] = {{nullptr}, {nullptr}};
address StubRoutines::_vector_d_math[VectorSupport::NUM_VEC_SIZES][VectorSupport::NUM_VECTOR_OP_MATH] = {{nullptr}, {nullptr}};

const char* StubRoutines::get_blob_name(StubGenBlobId id) {
  assert(0 <= id && id < StubGenBlobId::NUM_BLOBIDS, "invalid blob id");
  return _blob_names[id];
}

const char* StubRoutines::get_stub_name(StubGenStubId id) {
  assert(0 <= id && id < StubGenStubId::NUM_STUBIDS, "invalid stub id");
  return _stub_names[id];
}

#ifdef ASSERT

// array holding start and end indices for stub ids associated with a
// given blob. Given a blob with id (StubGenBlobId) blob_id, for any
// stub with id (StubGenStubId) stub_id declared within the blob:
// _blob_limits[blob_id] <= stub_id < _blob_limits[blob_id+1]

static int _blob_limits[StubGenBlobId::NUM_BLOBIDS + 1];
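
// For illustration only: with two hypothetical blobs A and B declaring 3
// and 2 stubs respectively, BLOB_COUNT (below) leaves _blob_limits as
// { 0, 3, 5 }, so a stub with global id 4 must belong to blob B because
// 3 <= 4 < 5.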

// macro used to compute blob limits
#define BLOB_COUNT(blob_name)                                           \
  counter += StubGenStubId_ ## blob_name :: NUM_STUBIDS_ ## blob_name;  \
  _blob_limits[++index] = counter;                                      \

// macro that checks stubs are associated with the correct blobs
#define STUB_VERIFY(blob_name, stub_name)                               \
  localStubId = (int) (StubGenStubId_ ## blob_name :: blob_name ## _ ## stub_name ## _id); \
  globalStubId = (int) (StubGenStubId:: stub_name ## _id);              \
  blobId = (int) (StubGenBlobId:: blob_name ## _id);                    \
  assert((globalStubId >= _blob_limits[blobId] &&                       \
          globalStubId < _blob_limits[blobId+1]),                       \
         "stub " # stub_name " uses incorrect blob name " # blob_name); \
  assert(globalStubId == _blob_limits[blobId] + localStubId,            \
         "stub " # stub_name " id found at wrong offset!");             \

bool verifyStubIds() {
  // first compute the blob limits
  int counter = 0;
  int index = 0;
  // populate offsets table with cumulative total of local enum counts
  STUBGEN_BLOBS_DO(BLOB_COUNT);

  // ensure 1) global stub ids lie in the range of the associated blob
  // and 2) each blob's base + local stub id == global stub id
  int globalStubId, blobId, localStubId;
  STUBGEN_STUBS_DO(STUB_VERIFY);
  return true;
}

#undef BLOB_COUNT
#undef STUB_VERIFY

// ensure we verify the stub ids when this compilation unit's static initializers run
bool _verified_stub_ids = verifyStubIds();


// macro used by stub to blob translation

#define BLOB_CHECK_OFFSET(blob_name)                                \
  if (id < _blob_limits[((int)blobId) + 1]) { return blobId; }      \
  blobId = StubGenBlobId:: blob_name ## _id;                        \

// translate a global stub id to an associated blob id based on the
// computed blob limits

StubGenBlobId StubRoutines::stub_to_blob(StubGenStubId stubId) {
  int id = (int)stubId;
  assert(id > ((int)StubGenStubId::NO_STUBID) && id < ((int)StubGenStubId::NUM_STUBIDS), "stub id out of range!");
  // start with no blob to catch stub id == -1
  StubGenBlobId blobId = StubGenBlobId::NO_BLOBID;
  STUBGEN_BLOBS_DO(BLOB_CHECK_OFFSET);
  // if we reach here we should have the last blob id
  assert(blobId == StubGenBlobId::NUM_BLOBIDS - 1, "unexpected blob id");
  return blobId;
}
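
// For illustration only, continuing the hypothetical limits { 0, 3, 5 }
// from above: for stub_to_blob(4) neither guard fires (4 < 0 and 4 < 3 both
// fail), so blobId advances past A to B and the trailing assert confirms the
// scan stopped on the last declared blob, which is then returned.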

#endif // ASSERT

// Initialization
//
// Note: to break cycle with universe initialization, stubs are generated in two phases.
// The first one generates stubs needed during universe init (e.g., _handle_must_compile_first_entry).
// The second phase includes all other stubs (which may depend on universe being initialized.)

extern void StubGenerator_generate(CodeBuffer* code, StubGenBlobId blob_id); // only interface to generators

void UnsafeMemoryAccess::create_table(int max_size) {
  UnsafeMemoryAccess::_table = new UnsafeMemoryAccess[max_size];
  UnsafeMemoryAccess::_table_max_length = max_size;
}

bool UnsafeMemoryAccess::contains_pc(address pc) {
  for (int i = 0; i < UnsafeMemoryAccess::_table_length; i++) {
    UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[i];
    if (pc >= entry->start_pc() && pc < entry->end_pc()) {
      return true;
    }
  }
  return false;
}

address UnsafeMemoryAccess::page_error_continue_pc(address pc) {
  for (int i = 0; i < UnsafeMemoryAccess::_table_length; i++) {
    UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[i];
    if (pc >= entry->start_pc() && pc < entry->end_pc()) {
      return entry->error_exit_pc();
    }
  }
  return nullptr;
}
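
// Sketch of intended use (illustrative only; the real code lives in the
// platform fault/signal handlers): when a hardware fault is raised inside a
// generated unsafe-copy stub, the handler can check whether the faulting pc
// is covered by this table and, if so, resume at the recorded error exit
// rather than crashing:
//
//   // hypothetical handler fragment
//   if (UnsafeMemoryAccess::contains_pc(pc)) {
//     address next_pc = UnsafeMemoryAccess::page_error_continue_pc(pc);
//     // ... redirect the saved context to next_pc ...
//   }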


static BufferBlob* initialize_stubs(StubGenBlobId blob_id,
                                    int code_size, int max_aligned_stubs,
                                    const char* timer_msg,
                                    const char* buffer_name,
                                    const char* assert_msg) {
  ResourceMark rm;
  TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
  // Add extra space for large CodeEntryAlignment
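  // For example (illustrative numbers only): with CodeEntryAlignment == 64
  // and max_aligned_stubs == 10 this reserves an extra 640 bytes on top of
  // the declared code_size; CodeEntryAlignment is platform- and flag-dependent.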
  int size = code_size + CodeEntryAlignment * max_aligned_stubs;
  BufferBlob* stubs_code = BufferBlob::create(buffer_name, size);
  if (stubs_code == nullptr) {
    vm_exit_out_of_memory(code_size, OOM_MALLOC_ERROR, "CodeCache: no room for %s", buffer_name);
  }
  CodeBuffer buffer(stubs_code);
  StubGenerator_generate(&buffer, blob_id);
  // When new stubs are added we need to make sure there is some space left
  // to catch the situation when we should increase the size again.
  assert(code_size == 0 || buffer.insts_remaining() > 200, "increase %s", assert_msg);

  LogTarget(Info, stubs) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("%s\t [" INTPTR_FORMAT ", " INTPTR_FORMAT "] used: %d, free: %d",
                buffer_name, p2i(stubs_code->content_begin()), p2i(stubs_code->content_end()),
                buffer.total_content_size(), buffer.insts_remaining());
  }
  return stubs_code;
}

#define DEFINE_BLOB_INIT_METHOD(blob_name)                              \
  void StubRoutines::initialize_ ## blob_name ## _stubs() {             \
    if (STUBGEN_BLOB_FIELD_NAME(blob_name) == nullptr) {                \
      StubGenBlobId blob_id = StubGenBlobId:: STUB_ID_NAME(blob_name);  \
      int size = _ ## blob_name ## _code_size;                          \
      int max_aligned_size = 10;                                        \
      const char* timer_msg = "StubRoutines generation " # blob_name " stubs"; \
      const char* name = "StubRoutines (" # blob_name " stubs)";        \
      const char* assert_msg = "_" # blob_name "_code_size";            \
      STUBGEN_BLOB_FIELD_NAME(blob_name) =                              \
        initialize_stubs(blob_id, size, max_aligned_size, timer_msg,    \
                         name, assert_msg);                             \
    }                                                                   \
  }
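
// For illustration only (not generated code): for a hypothetical blob named
// foo, DEFINE_BLOB_INIT_METHOD(foo) expands to roughly
//
//   void StubRoutines::initialize_foo_stubs() {
//     if (<foo blob field> == nullptr) {
//       // compute blob id, code size and log/assert messages, then
//       <foo blob field> = initialize_stubs(...);
//     }
//   }
//
// i.e. each blob gets a lazily-run, idempotent initializer; the actual field
// name is produced by STUBGEN_BLOB_FIELD_NAME.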


STUBGEN_BLOBS_DO(DEFINE_BLOB_INIT_METHOD)

#undef DEFINE_BLOB_INIT_METHOD


#define DEFINE_BLOB_INIT_FUNCTION(blob_name)            \
void blob_name ## _stubs_init()  {                      \
  StubRoutines::initialize_ ## blob_name ## _stubs();   \
}

STUBGEN_BLOBS_DO(DEFINE_BLOB_INIT_FUNCTION)

#undef DEFINE_BLOB_INIT_FUNCTION

/*
 * We generate the underlying driver method, but this wrapper is needed
 * to perform special handling depending on where the compiler init
 * gets called from. It ought to be possible to remove this at some
 * point and have a determinate, ordered init.
 */

void compiler_stubs_init(bool in_compiler_thread) {
  if (in_compiler_thread && DelayCompilerStubsGeneration) {
    // Temporarily revert the state of stubs generation because
    // this is called after final_stubs_init() has finished,
    // during compiler runtime initialization.
    // This is fine because these stubs are only used by
    // compiled code, and the compiler is not running yet.
    StubCodeDesc::unfreeze();
    StubRoutines::initialize_compiler_stubs();
    StubCodeDesc::freeze();
  } else if (!in_compiler_thread && !DelayCompilerStubsGeneration) {
    StubRoutines::initialize_compiler_stubs();
  }
}
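
// For reference, the four cases the wrapper above distinguishes
// (illustrative summary only, no additional logic):
//
//   in_compiler_thread  DelayCompilerStubsGeneration  action
//   false               false                         generate now (during VM init)
//   false               true                          skip; deferred to the compiler thread
//   true                false                         skip; already generated during VM init
//   true                true                          generate now (unfreeze/refreeze StubCodeDesc)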


//
// Default versions of arraycopy functions
//

JRT_LEAF(void, StubRoutines::jbyte_copy(jbyte* src, jbyte* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jbyte_array_copy_ctr++;      // Slow-path byte array copy
#endif // !PRODUCT
  Copy::conjoint_jbytes_atomic(src, dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::jshort_copy(jshort* src, jshort* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jshort_array_copy_ctr++;     // Slow-path short/char array copy
#endif // !PRODUCT
  Copy::conjoint_jshorts_atomic(src, dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::jint_copy(jint* src, jint* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jint_array_copy_ctr++;       // Slow-path int/float array copy
#endif // !PRODUCT
  Copy::conjoint_jints_atomic(src, dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::jlong_copy(jlong* src, jlong* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jlong_array_copy_ctr++;      // Slow-path long/double array copy
#endif // !PRODUCT
  Copy::conjoint_jlongs_atomic(src, dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::oop_copy(oop* src, oop* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<>::oop_arraycopy_raw((HeapWord*)src, (HeapWord*)dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::oop_copy_uninit(oop* src, oop* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<IS_DEST_UNINITIALIZED>::oop_arraycopy_raw((HeapWord*)src, (HeapWord*)dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::arrayof_jbyte_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jbyte_array_copy_ctr++;      // Slow-path byte array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jbytes(src, dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::arrayof_jshort_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jshort_array_copy_ctr++;     // Slow-path short/char array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jshorts(src, dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::arrayof_jint_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jint_array_copy_ctr++;       // Slow-path int/float array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jints(src, dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::arrayof_jlong_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_jlong_array_copy_ctr++;      // Slow-path long/double array copy
#endif // !PRODUCT
  Copy::arrayof_conjoint_jlongs(src, dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::arrayof_oop_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<ARRAYCOPY_ARRAYOF>::oop_arraycopy_raw(src, dest, count);
JRT_END

JRT_LEAF(void, StubRoutines::arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
  SharedRuntime::_oop_array_copy_ctr++;        // Slow-path oop array copy
#endif // !PRODUCT
  assert(count != 0, "count should be non-zero");
  ArrayAccess<ARRAYCOPY_ARRAYOF | IS_DEST_UNINITIALIZED>::oop_arraycopy_raw(src, dest, count);
JRT_END

address StubRoutines::select_fill_function(BasicType t, bool aligned, const char* &name) {
#define RETURN_STUB(xxx_fill) { \
  name = #xxx_fill; \
  return StubRoutines::xxx_fill(); }

  switch (t) {
  case T_BYTE:
  case T_BOOLEAN:
    if (!aligned) RETURN_STUB(jbyte_fill);
    RETURN_STUB(arrayof_jbyte_fill);
  case T_CHAR:
  case T_SHORT:
    if (!aligned) RETURN_STUB(jshort_fill);
    RETURN_STUB(arrayof_jshort_fill);
  case T_INT:
  case T_FLOAT:
    if (!aligned) RETURN_STUB(jint_fill);
    RETURN_STUB(arrayof_jint_fill);
  case T_DOUBLE:
  case T_LONG:
  case T_ARRAY:
  case T_OBJECT:
  case T_NARROWOOP:
  case T_NARROWKLASS:
  case T_ADDRESS:
  case T_VOID:
    // Currently unsupported
    return nullptr;

  default:
    ShouldNotReachHere();
    return nullptr;
  }

#undef RETURN_STUB
}

// constants for computing the copy function
enum {
  COPYFUNC_UNALIGNED = 0,
  COPYFUNC_ALIGNED = 1,                 // src, dest aligned to HeapWordSize
  COPYFUNC_CONJOINT = 0,
  COPYFUNC_DISJOINT = 2                 // src != dest, or transfer can descend
};
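
// For example: an aligned, disjoint copy selects
// COPYFUNC_ALIGNED + COPYFUNC_DISJOINT == 1 + 2 == 3, which matches the
// (COPYFUNC_DISJOINT | COPYFUNC_ALIGNED) cases in the switches below.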

// Note:  The condition "disjoint" applies also for overlapping copies
// where a descending copy is permitted (i.e., dest_offset <= src_offset).
address
StubRoutines::select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name, bool dest_uninitialized) {
  int selector =
    (aligned  ? COPYFUNC_ALIGNED  : COPYFUNC_UNALIGNED) +
    (disjoint ? COPYFUNC_DISJOINT : COPYFUNC_CONJOINT);

#define RETURN_STUB(xxx_arraycopy) { \
  name = #xxx_arraycopy; \
  return StubRoutines::xxx_arraycopy(); }

#define RETURN_STUB_PARM(xxx_arraycopy, parm) { \
  name = parm ? #xxx_arraycopy "_uninit": #xxx_arraycopy; \
  return StubRoutines::xxx_arraycopy(parm); }

  switch (t) {
  case T_BYTE:
  case T_BOOLEAN:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jbyte_arraycopy);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jbyte_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jbyte_disjoint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jbyte_disjoint_arraycopy);
    }
  case T_CHAR:
  case T_SHORT:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jshort_arraycopy);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jshort_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jshort_disjoint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jshort_disjoint_arraycopy);
    }
  case T_INT:
  case T_FLOAT:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jint_arraycopy);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jint_disjoint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jint_disjoint_arraycopy);
    }
  case T_DOUBLE:
  case T_LONG:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jlong_arraycopy);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jlong_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB(jlong_disjoint_arraycopy);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB(arrayof_jlong_disjoint_arraycopy);
    }
  case T_ARRAY:
  case T_OBJECT:
    switch (selector) {
    case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB_PARM(oop_arraycopy, dest_uninitialized);
    case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED:    RETURN_STUB_PARM(arrayof_oop_arraycopy, dest_uninitialized);
    case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED:  RETURN_STUB_PARM(oop_disjoint_arraycopy, dest_uninitialized);
    case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED:    RETURN_STUB_PARM(arrayof_oop_disjoint_arraycopy, dest_uninitialized);
    }
  default:
    ShouldNotReachHere();
    return nullptr;
  }

#undef RETURN_STUB
#undef RETURN_STUB_PARM
}

UnsafeMemoryAccessMark::UnsafeMemoryAccessMark(StubCodeGenerator* cgen, bool add_entry, bool continue_at_scope_end, address error_exit_pc) {
  _cgen = cgen;
  _ucm_entry = nullptr;
  if (add_entry) {
    address err_exit_pc = nullptr;
    if (!continue_at_scope_end) {
      err_exit_pc = error_exit_pc != nullptr ? error_exit_pc : UnsafeMemoryAccess::common_exit_stub_pc();
    }
    assert(err_exit_pc != nullptr || continue_at_scope_end, "error exit not set");
    _ucm_entry = UnsafeMemoryAccess::add_to_table(_cgen->assembler()->pc(), nullptr, err_exit_pc);
  }
}

UnsafeMemoryAccessMark::~UnsafeMemoryAccessMark() {
  if (_ucm_entry != nullptr) {
    _ucm_entry->set_end_pc(_cgen->assembler()->pc());
    if (_ucm_entry->error_exit_pc() == nullptr) {
      _ucm_entry->set_error_exit_pc(_cgen->assembler()->pc());
    }
  }
}
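
// Sketch of typical use inside a stub generator (illustrative only; see the
// platform stubGenerator files for real occurrences): the mark brackets the
// instructions that may fault on an unsafe memory access, so the
// [start_pc, end_pc) range recorded above covers exactly that region.
//
//   {
//     // 'this' is the StubCodeGenerator, 'add_entry' a generator-side flag
//     UnsafeMemoryAccessMark umam(this, add_entry, /*continue_at_scope_end*/ true);
//     __ ...;   // emit the potentially faulting copy instructions
//   }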