/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/classLoaderData.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/barrierSetRuntime.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#include "memory/universe.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#ifdef COMPILER2
#include "code/vmreg.inline.hpp"
#include "gc/shared/c2/barrierSetC2.hpp"
#endif // COMPILER2

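// HotSpot assembler convention: '__' expands to 'masm->' so that the
// emitter code below reads like the assembly it generates.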
#define __ masm->

void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                  Register dst, Address src, Register tmp1, Register tmp2) {

  // LR is live.  It must be saved around calls.

  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool is_not_null = (decorators & IS_NOT_NULL) != 0;

  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (in_heap) {
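      // With compressed oops, heap references are stored as 32-bit narrow
      // oops and must be decoded into full 64-bit pointers after the load.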
      if (UseCompressedOops) {
        __ ldrw(dst, src);
        if (is_not_null) {
          __ decode_heap_oop_not_null(dst);
        } else {
          __ decode_heap_oop(dst);
        }
      } else {
        __ ldr(dst, src);
      }
    } else {
      assert(in_native, "why else?");
      __ ldr(dst, src);
    }
    break;
  }
  case T_BOOLEAN: __ load_unsigned_byte (dst, src); break;
  case T_BYTE:    __ load_signed_byte   (dst, src); break;
  case T_CHAR:    __ load_unsigned_short(dst, src); break;
  case T_SHORT:   __ load_signed_short  (dst, src); break;
  case T_INT:     __ ldrw               (dst, src); break;
  case T_LONG:    __ ldr                (dst, src); break;
  case T_ADDRESS: __ ldr                (dst, src); break;
  case T_FLOAT:   __ ldrs               (v0, src);  break;
  case T_DOUBLE:  __ ldrd               (v0, src);  break;
  default: Unimplemented();
  }
}

void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                   Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool is_not_null = (decorators & IS_NOT_NULL) != 0;

  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (in_heap) {
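      // val == noreg means "store null": write zr (the zero register)
      // directly, since null encodes as zero for both narrow and full-width
      // oops.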
      if (val == noreg) {
        assert(!is_not_null, "inconsistent access");
        if (UseCompressedOops) {
          __ strw(zr, dst);
        } else {
          __ str(zr, dst);
        }
      } else {
        if (UseCompressedOops) {
          assert(!dst.uses(val), "not enough registers");
          if (is_not_null) {
            __ encode_heap_oop_not_null(val);
          } else {
            __ encode_heap_oop(val);
          }
          __ strw(val, dst);
        } else {
          __ str(val, dst);
        }
      }
    } else {
      assert(in_native, "why else?");
      assert(val != noreg, "not supported");
      __ str(val, dst);
    }
    break;
  }
  case T_BOOLEAN:
    __ andw(val, val, 0x1);  // boolean is true if LSB is 1
    __ strb(val, dst);
    break;
  case T_BYTE:    __ strb(val, dst); break;
  case T_CHAR:    __ strh(val, dst); break;
  case T_SHORT:   __ strh(val, dst); break;
  case T_INT:     __ strw(val, dst); break;
  case T_LONG:    __ str (val, dst); break;
  case T_ADDRESS: __ str (val, dst); break;
  case T_FLOAT:   __ strs(v0,  dst); break;
  case T_DOUBLE:  __ strd(v0,  dst); break;
  default: Unimplemented();
  }
}

void BarrierSetAssembler::flat_field_copy(MacroAssembler* masm, DecoratorSet decorators,
                                     Register src, Register dst, Register inline_layout_info) {
  // The flat_field_copy implementation is fairly complex, and there are no
  // short-cuts to be had from hand-written assembly; what little there is
  // appears to have the same cost in C++. So just "call_VM_leaf" for now
  // rather than maintain hundreds of hand-rolled instructions...
  if (decorators & IS_DEST_UNINITIALIZED) {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetRuntime::value_copy_is_dest_uninitialized), src, dst, inline_layout_info);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSetRuntime::value_copy), src, dst, inline_layout_info);
  }
}

void BarrierSetAssembler::copy_load_at(MacroAssembler* masm,
                                       DecoratorSet decorators,
                                       BasicType type,
                                       size_t bytes,
                                       Register dst1,
                                       Register dst2,
                                       Address src,
                                       Register tmp) {
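  // Elements of 1/2/4/8 bytes use a single load into dst1; 16 bytes use a
  // paired ldp into dst1/dst2.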
  if (bytes == 1) {
    assert(dst2 == noreg, "invariant");
    __ ldrb(dst1, src);
  } else if (bytes == 2) {
    assert(dst2 == noreg, "invariant");
    __ ldrh(dst1, src);
  } else if (bytes == 4) {
    assert(dst2 == noreg, "invariant");
    __ ldrw(dst1, src);
  } else if (bytes == 8) {
    assert(dst2 == noreg, "invariant");
    __ ldr(dst1, src);
  } else if (bytes == 16) {
    assert(dst2 != noreg, "invariant");
    assert(dst2 != dst1, "invariant");
    __ ldp(dst1, dst2, src);
  } else {
    // Not the right size
    ShouldNotReachHere();
  }
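  // Checkcast arraycopy copies oops element by element; with compressed oops
  // the loaded value is a narrow oop, so decode it to a full oop for the
  // caller's type check.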
  if ((decorators & ARRAYCOPY_CHECKCAST) != 0 && UseCompressedOops) {
    __ decode_heap_oop(dst1);
  }
}

void BarrierSetAssembler::copy_store_at(MacroAssembler* masm,
                                        DecoratorSet decorators,
                                        BasicType type,
                                        size_t bytes,
                                        Address dst,
                                        Register src1,
                                        Register src2,
                                        Register tmp1,
                                        Register tmp2,
                                        Register tmp3) {
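  // Mirror of copy_load_at above: re-encode the oop for checkcast copies,
  // then store 1/2/4/8 bytes with a single store or 16 bytes with stp.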
  if ((decorators & ARRAYCOPY_CHECKCAST) != 0 && UseCompressedOops) {
    __ encode_heap_oop(src1);
  }
  if (bytes == 1) {
    assert(src2 == noreg, "invariant");
    __ strb(src1, dst);
  } else if (bytes == 2) {
    assert(src2 == noreg, "invariant");
    __ strh(src1, dst);
  } else if (bytes == 4) {
    assert(src2 == noreg, "invariant");
    __ strw(src1, dst);
  } else if (bytes == 8) {
    assert(src2 == noreg, "invariant");
    __ str(src1, dst);
  } else if (bytes == 16) {
    assert(src2 != noreg, "invariant");
    assert(src2 != src1, "invariant");
    __ stp(src1, src2, dst);
  } else {
    // Not the right size
    ShouldNotReachHere();
  }
}

void BarrierSetAssembler::copy_load_at(MacroAssembler* masm,
                                       DecoratorSet decorators,
                                       BasicType type,
                                       size_t bytes,
                                       FloatRegister dst1,
                                       FloatRegister dst2,
                                       Address src,
                                       Register tmp1,
                                       Register tmp2,
                                       FloatRegister vec_tmp) {
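  // SIMD bulk copy: only the 32-byte case is expected here, handled with a
  // pair of 128-bit q registers.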
  if (bytes == 32) {
    __ ldpq(dst1, dst2, src);
  } else {
    ShouldNotReachHere();
  }
}

void BarrierSetAssembler::copy_store_at(MacroAssembler* masm,
                                        DecoratorSet decorators,
                                        BasicType type,
                                        size_t bytes,
                                        Address dst,
                                        FloatRegister src1,
                                        FloatRegister src2,
                                        Register tmp1,
                                        Register tmp2,
                                        Register tmp3,
                                        FloatRegister vec_tmp1,
                                        FloatRegister vec_tmp2,
                                        FloatRegister vec_tmp3) {
  if (bytes == 32) {
    __ stpq(src1, src2, dst);
  } else {
    ShouldNotReachHere();
  }
}

void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                        Register obj, Register tmp, Label& slowpath) {
  // If the mask changes, we need to ensure that the inverse is still encodable as an immediate.
  STATIC_ASSERT(JNIHandles::tag_mask == 0b11);
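  // Clear the low tag bits to recover the handle's address, then load the
  // oop the handle refers to.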
  __ andr(obj, obj, ~JNIHandles::tag_mask);
  __ ldr(obj, Address(obj, 0));             // *obj
}

// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
void BarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register t1,
                                        Register t2,
                                        Label& slow_case) {
  assert_different_registers(obj, t2);
  assert_different_registers(obj, var_size_in_bytes);
  Register end = t2;
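  // Bump-pointer allocation in the thread-local allocation buffer:
  // obj = top; end = top + size; if end exceeds the TLAB end, take the
  // slow path, otherwise publish end as the new top.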

  // verify_tlab();

  __ ldr(obj, Address(rthread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    __ lea(end, Address(obj, con_size_in_bytes));
  } else {
    __ lea(end, Address(obj, var_size_in_bytes));
  }
  __ ldr(rscratch1, Address(rthread, JavaThread::tlab_end_offset()));
  __ cmp(end, rscratch1);
  __ br(Assembler::HI, slow_case);

  // update the tlab top pointer
  __ str(end, Address(rthread, JavaThread::tlab_top_offset()));

  // recover var_size_in_bytes if necessary
  if (var_size_in_bytes == end) {
    __ sub(var_size_in_bytes, var_size_in_bytes, obj);
  }
  // verify_tlab();
}

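// Global epoch used by the concurrent-patching variants of the nmethod entry
// barrier below to detect whether the required cross-modification fencing
// has already been performed, so the fences can be hidden in a slow path.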
static volatile uint32_t _patching_epoch = 0;

address BarrierSetAssembler::patching_epoch_addr() {
  return (address)&_patching_epoch;
}

void BarrierSetAssembler::increment_patching_epoch() {
  Atomic::inc(&_patching_epoch);
}

void BarrierSetAssembler::clear_patching_epoch() {
  _patching_epoch = 0;
}

void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm, Label* slow_path, Label* continuation, Label* guard) {
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();

  Label local_guard;
  Label skip_barrier;
  NMethodPatchingType patching_type = nmethod_patching_type();

  if (slow_path == nullptr) {
    guard = &local_guard;
  }

  // If the slow path is out of line in a stub, we flip the condition
  Assembler::Condition condition = slow_path == nullptr ? Assembler::EQ : Assembler::NE;
  Label& barrier_target = slow_path == nullptr ? skip_barrier : *slow_path;

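  // Load the nmethod guard value; with no out-of-line slow path it lives in
  // the locally embedded guard word below, otherwise at the caller-supplied
  // guard label.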
  __ ldrw(rscratch1, *guard);

  if (patching_type == NMethodPatchingType::stw_instruction_and_data_patch) {
    // With STW patching, no data or instructions are updated concurrently,
    // which means there isn't really any need for fencing around either
    // data or instruction modifications. The instruction patching is
    // handled with isb fences on the way back from the safepoint to Java.
    // So here we can do a plain conditional branch with no fencing.
    Address thread_disarmed_addr(rthread, in_bytes(bs_nm->thread_disarmed_guard_value_offset()));
    __ ldrw(rscratch2, thread_disarmed_addr);
    __ cmp(rscratch1, rscratch2);
  } else if (patching_type == NMethodPatchingType::conc_instruction_and_data_patch) {
    // If we patch code, we need both a code patching and a loadload
    // fence. It's not super cheap, so we use a global epoch mechanism
    // to hide them in a slow path.
    // The high-level idea of the global epoch mechanism is to detect
    // when any thread has performed the required fencing, after the
    // last nmethod was disarmed. This implies that the required
    // fencing has been performed for all preceding nmethod disarms
    // as well. Therefore, we do not need any further fencing.
    __ lea(rscratch2, ExternalAddress((address)&_patching_epoch));
    // Embed an artificial data dependency to order the guard load
    // before the epoch load.
    __ orr(rscratch2, rscratch2, rscratch1, Assembler::LSR, 32);
    // Read the global epoch value.
    __ ldrw(rscratch2, rscratch2);
    // Combine the guard value (low order) with the epoch value (high order).
    __ orr(rscratch1, rscratch1, rscratch2, Assembler::LSL, 32);
    // Compare the global values with the thread-local values.
    Address thread_disarmed_and_epoch_addr(rthread, in_bytes(bs_nm->thread_disarmed_guard_value_offset()));
    __ ldr(rscratch2, thread_disarmed_and_epoch_addr);
    __ cmp(rscratch1, rscratch2);
  } else {
    assert(patching_type == NMethodPatchingType::conc_data_patch, "must be");
    // Subsequent loads of oops must occur after the load of the guard value.
    // BarrierSetNMethod::disarm sets the guard with release semantics.
    __ membar(__ LoadLoad);
    Address thread_disarmed_addr(rthread, in_bytes(bs_nm->thread_disarmed_guard_value_offset()));
    __ ldrw(rscratch2, thread_disarmed_addr);
    __ cmpw(rscratch1, rscratch2);
  }
  __ br(condition, barrier_target);

  if (slow_path == nullptr) {
    __ lea(rscratch1, RuntimeAddress(StubRoutines::method_entry_barrier()));
    __ blr(rscratch1);
    __ b(skip_barrier);

    __ bind(local_guard);

    __ emit_int32(0);   // nmethod guard value. Skipped over in common case.
  } else {
    __ bind(*continuation);
  }

  __ bind(skip_barrier);
}

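// The c2i entry barrier keeps a c2i stub from dispatching into a method whose
// class is being unloaded concurrently: the method is treated as live if its
// holder's class loader data is strongly reachable, or if the CLD's weak
// holder handle still resolves to a live object.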
void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
  Label bad_call;
  __ cbz(rmethod, bad_call);

  // Pointer chase to the method holder to find out if the method is concurrently unloading.
  Label method_live;
  __ load_method_holder_cld(rscratch1, rmethod);

  // Is it a strong CLD?
  __ ldrw(rscratch2, Address(rscratch1, ClassLoaderData::keep_alive_ref_count_offset()));
  __ cbnz(rscratch2, method_live);

  // Is it a weak but alive CLD?
  __ push(RegSet::of(r10), sp);
  __ ldr(r10, Address(rscratch1, ClassLoaderData::holder_offset()));

  __ resolve_weak_handle(r10, rscratch1, rscratch2);
  __ mov(rscratch1, r10);
  __ pop(RegSet::of(r10), sp);
  __ cbnz(rscratch1, method_live);

  __ bind(bad_call);

  __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
  __ bind(method_live);
}

void BarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register tmp1, Register tmp2, Label& error) {
  // Check if the oop is in the right area of memory
  __ mov(tmp2, (intptr_t) Universe::verify_oop_mask());
  __ andr(tmp1, obj, tmp2);
  __ mov(tmp2, (intptr_t) Universe::verify_oop_bits());

  // Compare tmp1 and tmp2.  We don't use a compare
  // instruction here because the flags register is live.
  __ eor(tmp1, tmp1, tmp2);
  __ cbnz(tmp1, error);

  // make sure klass is 'reasonable', i.e. not null.
  __ load_klass(obj, obj); // get klass
  __ cbz(obj, error);      // if klass is null it is broken
}

#ifdef COMPILER2

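// OptoReg assigns every FP/vector register four 32-bit slots. Since a
// FP/vector register shows up as a single slot in the stub's preserve set,
// the two low bits of that slot are reused below to encode the register's
// access size, so that SaveLiveRegisters can later recover how much to save.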
OptoReg::Name BarrierSetAssembler::encode_float_vector_register_size(const Node* node, OptoReg::Name opto_reg) {
  switch (node->ideal_reg()) {
    case Op_RegF:
      // No need to refine; the original encoding already distinguishes this case.
      assert(opto_reg % 4 == 0, "Float register should only occupy a single slot");
      break;
    // Use different encoding values of the same fp/vector register to help distinguish different sizes.
    // For example, for V16 the OptoReg names and their corresponding slot values are
    // "V16": 64, "V16_H": 65, "V16_J": 66, "V16_K": 67.
    case Op_RegD:
    case Op_VecD:
      opto_reg &= ~3;
      opto_reg |= 1;
      break;
    case Op_VecX:
      opto_reg &= ~3;
      opto_reg |= 2;
      break;
    case Op_VecA:
      opto_reg &= ~3;
      opto_reg |= 3;
      break;
    default:
      assert(false, "unexpected ideal register");
      ShouldNotReachHere();
  }
  return opto_reg;
}

OptoReg::Name BarrierSetAssembler::refine_register(const Node* node, OptoReg::Name opto_reg) {
  if (!OptoReg::is_reg(opto_reg)) {
    return OptoReg::Bad;
  }

  const VMReg vm_reg = OptoReg::as_VMReg(opto_reg);
  if (vm_reg->is_FloatRegister()) {
    opto_reg = encode_float_vector_register_size(node, opto_reg);
  }

  return opto_reg;
}

#undef __
#define __ _masm->

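// Walk the stub's preserve mask and bucket each live register by kind (GPR,
// FP/NEON/SVE, predicate) so that each set can be pushed and popped with the
// matching flavor of push/pop below.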
void SaveLiveRegisters::initialize(BarrierStubC2* stub) {
  int index = -1;
  GrowableArray<RegisterData> registers;
  VMReg prev_vm_reg = VMRegImpl::Bad();

  RegMaskIterator rmi(stub->preserve_set());
  while (rmi.has_next()) {
    OptoReg::Name opto_reg = rmi.next();
    VMReg vm_reg = OptoReg::as_VMReg(opto_reg);

    if (vm_reg->is_Register()) {
      // A GPR may have one or two slots in the regmask.
      // Determine whether the current vm_reg is the same physical register as the previous one.
      if (is_same_register(vm_reg, prev_vm_reg)) {
        registers.at(index)._slots++;
      } else {
        RegisterData reg_data = { vm_reg, 1 };
        index = registers.append(reg_data);
      }
    } else if (vm_reg->is_FloatRegister()) {
      // The OptoRegs in stub->preserve_set() carry a size encoding; after
      // encoding, a float/NEON/SVE register has only one slot in the regmask.
      // Decode it to get the actual size.
      VMReg vm_reg_base = vm_reg->as_FloatRegister()->as_VMReg();
      int slots = decode_float_vector_register_size(opto_reg);
      RegisterData reg_data = { vm_reg_base, slots };
      index = registers.append(reg_data);
    } else if (vm_reg->is_PRegister()) {
      // PRegister has only one slot in regmask
      RegisterData reg_data = { vm_reg, 1 };
      index = registers.append(reg_data);
    } else {
      assert(false, "Unknown register type");
      ShouldNotReachHere();
    }
    prev_vm_reg = vm_reg;
  }

  // Record registers that need to be saved/restored
  for (GrowableArrayIterator<RegisterData> it = registers.begin(); it != registers.end(); ++it) {
    RegisterData reg_data = *it;
    VMReg vm_reg = reg_data._reg;
    int slots = reg_data._slots;
    if (vm_reg->is_Register()) {
      assert(slots == 1 || slots == 2, "Unexpected register save size");
      _gp_regs += RegSet::of(vm_reg->as_Register());
    } else if (vm_reg->is_FloatRegister()) {
      if (slots == 1 || slots == 2) {
        _fp_regs += FloatRegSet::of(vm_reg->as_FloatRegister());
      } else if (slots == 4) {
        _neon_regs += FloatRegSet::of(vm_reg->as_FloatRegister());
      } else {
        assert(slots == Matcher::scalable_vector_reg_size(T_FLOAT), "Unexpected register save size");
        _sve_regs += FloatRegSet::of(vm_reg->as_FloatRegister());
      }
    } else {
      assert(vm_reg->is_PRegister() && slots == 1, "Unknown register type");
      _p_regs += PRegSet::of(vm_reg->as_PRegister());
    }
  }

  // Remove C-ABI SOE registers and scratch regs
  _gp_regs -= RegSet::range(r19, r30) + RegSet::of(r8, r9);

  // Remove C-ABI SOE fp registers
  _fp_regs -= FloatRegSet::range(v8, v15);
}

enum RC SaveLiveRegisters::rc_class(VMReg reg) {
  if (reg->is_reg()) {
    if (reg->is_Register()) {
      return rc_int;
    } else if (reg->is_FloatRegister()) {
      return rc_float;
    } else if (reg->is_PRegister()) {
      return rc_predicate;
    }
  }
  if (reg->is_stack()) {
    return rc_stack;
  }
  return rc_bad;
}

bool SaveLiveRegisters::is_same_register(VMReg reg1, VMReg reg2) {
  if (reg1 == reg2) {
    return true;
  }
  if (rc_class(reg1) == rc_class(reg2)) {
    if (reg1->is_Register()) {
      return reg1->as_Register() == reg2->as_Register();
    } else if (reg1->is_FloatRegister()) {
      return reg1->as_FloatRegister() == reg2->as_FloatRegister();
    } else if (reg1->is_PRegister()) {
      return reg1->as_PRegister() == reg2->as_PRegister();
    }
  }
  return false;
}

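// Inverse of encode_float_vector_register_size: map the two low bits of the
// OptoReg back to the number of 32-bit slots that need to be saved
// (1, 2, 4, or the scalable SVE size).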
int SaveLiveRegisters::decode_float_vector_register_size(OptoReg::Name opto_reg) {
  switch (opto_reg & 3) {
    case 0:
      return 1;
    case 1:
      return 2;
    case 2:
      return 4;
    case 3:
      return Matcher::scalable_vector_reg_size(T_FLOAT);
    default:
      ShouldNotReachHere();
      return 0;
  }
}

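// Saves the stub's live registers around a runtime call. The destructor pops
// everything in exactly the reverse order of these pushes, so the two must be
// kept in sync.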
SaveLiveRegisters::SaveLiveRegisters(MacroAssembler* masm, BarrierStubC2* stub)
  : _masm(masm),
    _gp_regs(),
    _fp_regs(),
    _neon_regs(),
    _sve_regs(),
    _p_regs() {

  // Figure out what registers to save/restore
  initialize(stub);

  // Save registers
  __ push(_gp_regs, sp);
  __ push_fp(_fp_regs, sp, MacroAssembler::PushPopFp);
  __ push_fp(_neon_regs, sp, MacroAssembler::PushPopNeon);
  __ push_fp(_sve_regs, sp, MacroAssembler::PushPopSVE);
  __ push_p(_p_regs, sp);
}

SaveLiveRegisters::~SaveLiveRegisters() {
  // Restore registers
  __ pop_p(_p_regs, sp);
  __ pop_fp(_sve_regs, sp, MacroAssembler::PushPopSVE);
  __ pop_fp(_neon_regs, sp, MacroAssembler::PushPopNeon);
  __ pop_fp(_fp_regs, sp, MacroAssembler::PushPopFp);

  // External runtime call may clobber ptrue reg
  __ reinitialize_ptrue();

  __ pop(_gp_regs, sp);
}

#endif // COMPILER2