/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/codeCache.hpp"
#include "code/nativeInst.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/registerMap.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmciRuntime.hpp"
#endif

static int slow_path_size(nmethod* nm) {
  // With C2 the slow path code is emitted out of line in a stub, so it adds no
  // instructions here; for other compilers it is 6 instructions inline.
  return nm->is_compiled_by_c2() ? 0 : 6;
}

// This is the offset of the entry barrier relative to where the frame is completed.
// If any code changes between the end of the verified entry, where the entry
// barrier resides, and the completion of the frame, then
// NativeNMethodBarrier::verify() will immediately complain when it does not
// find the expected native instruction at this offset; the offset then needs
// updating.
// Note that this offset is invariant of PreserveFramePointer.
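// The returned offset is a whole number of 4-byte instructions; the instruction
// count depends on the nmethod patching type and on whether the slow path is
// emitted inline (see slow_path_size()).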
static int entry_barrier_offset(nmethod* nm) {
  BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
  switch (bs_asm->nmethod_patching_type()) {
  case NMethodPatchingType::stw_instruction_and_data_patch:
    return -4 * (4 + slow_path_size(nm));
  case NMethodPatchingType::conc_instruction_and_data_patch:
    return -4 * (10 + slow_path_size(nm));
  case NMethodPatchingType::conc_data_patch:
    return -4 * (5 + slow_path_size(nm));
  }
  ShouldNotReachHere();
  return 0;
}

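// Wrapper around the nmethod entry barrier instruction sequence of a particular
// entry point: it locates the guard word (embedded in the barrier code, or out
// of line and found via relocations) and provides atomic access to it.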
class NativeNMethodBarrier {
  address  _instruction_address;
  int*     _guard_addr;
  nmethod* _nm;

public:
  address instruction_address() const { return _instruction_address; }

  int* guard_addr() {
    return _guard_addr;
  }

  int local_guard_offset(nmethod* nm) {
    // It's the last instruction
    return (-entry_barrier_offset(nm)) - 4;
  }

  NativeNMethodBarrier(nmethod* nm, address alt_entry_instruction_address = nullptr): _nm(nm) {
#if INCLUDE_JVMCI
    if (nm->is_compiled_by_jvmci()) {
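      // JVMCI compiled code records the location of the guard word with a
      // section_word relocation at the entry patch offset, so we read it from
      // the relocation rather than computing a fixed offset.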
      assert(alt_entry_instruction_address == nullptr, "invariant");
      address pc = nm->code_begin() + nm->jvmci_nmethod_data()->nmethod_entry_patch_offset();
      RelocIterator iter(nm, pc, pc + 4);
      guarantee(iter.next(), "missing relocs");
      guarantee(iter.type() == relocInfo::section_word_type, "unexpected reloc");

      _guard_addr = (int*) iter.section_word_reloc()->target();
      _instruction_address = pc;
    } else
#endif
      {
        _instruction_address = (alt_entry_instruction_address != nullptr) ? alt_entry_instruction_address :
          nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm);
        if (nm->is_compiled_by_c2()) {
          // With c2 compiled code, the guard is out-of-line in a stub.
          // We find it using the RelocIterator.
          RelocIterator iter(nm);
          while (iter.next()) {
            if (iter.type() == relocInfo::entry_guard_type) {
              entry_guard_Relocation* const reloc = iter.entry_guard_reloc();
              _guard_addr = reinterpret_cast<int*>(reloc->addr());
              return;
            }
          }
          ShouldNotReachHere();
        }
        _guard_addr = reinterpret_cast<int*>(instruction_address() + local_guard_offset(nm));
      }
  }

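  // The guard value is read with acquire and written with release semantics so
  // that a disarming store is only observed after everything published before
  // it (in particular the patching epoch, see set_guard_value()).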
  int get_value() {
    return Atomic::load_acquire(guard_addr());
  }

  void set_value(int value) {
    Atomic::release_store(guard_addr(), value);
  }

  bool check_barrier(err_msg& msg) const;
  void verify() const {
    err_msg msg("%s", "");
    assert(check_barrier(msg), "%s", msg.buffer());
  }
};

// The first instruction of the nmethod entry barrier is an ldr (literal)
// instruction. Verify that it's really there, so the offsets are not skewed.
bool NativeNMethodBarrier::check_barrier(err_msg& msg) const {
  uint32_t* addr = (uint32_t*) instruction_address();
  uint32_t inst = *addr;
  if ((inst & 0xff000000) != 0x18000000) {
    msg.print("Nmethod entry barrier did not start with ldr (literal) as expected. "
              "Addr: " PTR_FORMAT " Code: " UINT32_FORMAT, p2i(addr), inst);
    return false;
  }
  return true;
}

/* We're called from an nmethod when we need to deoptimize it. We do
   this by throwing away the nmethod's frame and jumping to the
   ic_miss stub. This looks like there has been an IC miss at the
   entry of the nmethod, so we resolve the call, which will fall back
   to the interpreter if the nmethod has been unloaded. */
void BarrierSetNMethod::deoptimize(nmethod* nm, address* return_address_ptr) {

  typedef struct {
    intptr_t *sp; intptr_t *fp; address lr; address pc;
  } frame_pointers_t;

  frame_pointers_t *new_frame = (frame_pointers_t *)(return_address_ptr - 5);

  JavaThread *thread = JavaThread::current();
  RegisterMap reg_map(thread,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
  frame frame = thread->last_frame();

  assert(frame.is_compiled_frame() || frame.is_native_frame(), "must be");
  assert(frame.cb() == nm, "must be");
  frame = frame.sender(&reg_map);

  LogTarget(Trace, nmethod, barrier) out;
  if (out.is_enabled()) {
    ResourceMark mark;
    log_trace(nmethod, barrier)("deoptimize(nmethod: %s(%p), return_addr: %p, osr: %d, thread: %p(%s), making sp: %p) -> %p",
                                nm->method()->name_and_sig_as_C_string(),
                                nm, *(address *) return_address_ptr, nm->is_osr_method(), thread,
                                thread->name(), frame.sp(), nm->verified_entry_point());
  }

  new_frame->sp = frame.sp();
  new_frame->fp = frame.fp();
  new_frame->lr = frame.pc();
  new_frame->pc = SharedRuntime::get_handle_wrong_method_stub();
}

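// Arm/disarm every entry barrier of the nmethod. With scalarized (inline type)
// arguments an nmethod has several verified entry points, each with its own
// barrier, and all of them must be set to the same value.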
static void set_value(nmethod* nm, jint val) {
  NativeNMethodBarrier cmp1 = NativeNMethodBarrier(nm);
  cmp1.set_value(val);

  if (!nm->is_osr_method() && nm->method()->has_scalarized_args()) {
    // nmethods with scalarized arguments have multiple entry points, each with its own nmethod entry barrier
    assert(nm->verified_entry_point() != nm->verified_inline_entry_point(), "scalarized entry point not found");
    address method_body = nm->is_compiled_by_c1() ? nm->verified_inline_entry_point() : nm->verified_entry_point();
    address entry_point2 = nm->is_compiled_by_c1() ? nm->verified_entry_point() : nm->verified_inline_entry_point();

    int barrier_offset = cmp1.instruction_address() - method_body;
    NativeNMethodBarrier cmp2 = NativeNMethodBarrier(nm, entry_point2 + barrier_offset);
    assert(cmp1.instruction_address() != cmp2.instruction_address(), "sanity");
    debug_only(cmp2.verify());
    cmp2.set_value(val);

    if (method_body != nm->verified_inline_ro_entry_point() && entry_point2 != nm->verified_inline_ro_entry_point()) {
      NativeNMethodBarrier cmp3 = NativeNMethodBarrier(nm, nm->verified_inline_ro_entry_point() + barrier_offset);
      assert(cmp1.instruction_address() != cmp3.instruction_address() && cmp2.instruction_address() != cmp3.instruction_address(), "sanity");
      debug_only(cmp3.verify());
      cmp3.set_value(val);
    }
  }
}

void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) {
  if (!supports_entry_barrier(nm)) {
    return;
  }

  if (value == disarmed_guard_value()) {
    // The patching epoch is incremented before the nmethod is disarmed. Disarming
    // is performed with a release store. In the nmethod entry barrier, the values
    // are read in the opposite order, such that the load of the nmethod guard
    // acquires the patching epoch. This way, the guard is guaranteed to block
    // entries to the nmethod, until it has safely published the requirement for
    // further fencing by mutators, before they are allowed to enter.
    BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
    bs_asm->increment_patching_epoch();
  }

  set_value(nm, value);
}

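// Read the current guard value. nmethods without an entry barrier are always
// treated as disarmed.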
int BarrierSetNMethod::guard_value(nmethod* nm) {
  if (!supports_entry_barrier(nm)) {
    return disarmed_guard_value();
  }

  NativeNMethodBarrier barrier(nm);
  return barrier.get_value();
}

#if INCLUDE_JVMCI
bool BarrierSetNMethod::verify_barrier(nmethod* nm, err_msg& msg) {
  NativeNMethodBarrier barrier(nm);
  return barrier.check_barrier(msg);
}
#endif