// Note that this offset is invariant of PreserveFramePointer.
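// On AArch64 every instruction is 4 bytes, so -4 * n places the barrier n
// instructions before the frame-complete point; the instruction count differs
// per patching scheme, plus the size of the out-of-line slow path.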
static int entry_barrier_offset(nmethod* nm) {
  BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
  switch (bs_asm->nmethod_patching_type()) {
  case NMethodPatchingType::stw_instruction_and_data_patch:
    return -4 * (4 + slow_path_size(nm));
  case NMethodPatchingType::conc_instruction_and_data_patch:
    return -4 * (10 + slow_path_size(nm));
  case NMethodPatchingType::conc_data_patch:
    return -4 * (5 + slow_path_size(nm));
  }
  ShouldNotReachHere();
  return 0;
}

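// NativeNMethodBarrier describes one copy of the nmethod entry barrier: the
// instruction sequence at _instruction_address and the guard word it tests.
// As the constructor below shows, the guard lives inline at the end of the
// barrier by default, out-of-line in a stub (found via an entry_guard
// relocation) for C2 code, or behind a section_word relocation for JVMCI code.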
class NativeNMethodBarrier {
  address  _instruction_address;
  int*     _guard_addr;
  nmethod* _nm;

  address instruction_address() const { return _instruction_address; }

  int* guard_addr() {
    return _guard_addr;
  }

  int local_guard_offset(nmethod* nm) {
    // The guard is the last 4-byte word of the barrier sequence.
    return (-entry_barrier_offset(nm)) - 4;
  }

public:
  NativeNMethodBarrier(nmethod* nm): _nm(nm) {
#if INCLUDE_JVMCI
    if (nm->is_compiled_by_jvmci()) {
      address pc = nm->code_begin() + nm->jvmci_nmethod_data()->nmethod_entry_patch_offset();
      RelocIterator iter(nm, pc, pc + 4);
      guarantee(iter.next(), "missing relocs");
      guarantee(iter.type() == relocInfo::section_word_type, "unexpected reloc");

      _guard_addr = (int*) iter.section_word_reloc()->target();
      _instruction_address = pc;
    } else
#endif
    {
      _instruction_address = nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm);
      if (nm->is_compiled_by_c2()) {
        // With C2-compiled code, the guard is out-of-line in a stub.
        // We find it using the RelocIterator.
        RelocIterator iter(nm);
        while (iter.next()) {
          if (iter.type() == relocInfo::entry_guard_type) {
            entry_guard_Relocation* const reloc = iter.entry_guard_reloc();
            _guard_addr = reinterpret_cast<int*>(reloc->addr());
            return;
          }
        }
        ShouldNotReachHere();
      }
      _guard_addr = reinterpret_cast<int*>(instruction_address() + local_guard_offset(nm));
    }
  }

  int get_value() {
    return Atomic::load_acquire(guard_addr());
  }

  // ... [remaining members of NativeNMethodBarrier and the head of
  // BarrierSetNMethod::deoptimize elided] ...

  assert(frame.is_compiled_frame() || frame.is_native_frame(), "must be");
  assert(frame.cb() == nm, "must be");
  frame = frame.sender(&reg_map);

  LogTarget(Trace, nmethod, barrier) out;
  if (out.is_enabled()) {
    ResourceMark mark;
    log_trace(nmethod, barrier)("deoptimize(nmethod: %s(%p), return_addr: %p, osr: %d, thread: %p(%s), making rsp: %p) -> %p",
                                nm->method()->name_and_sig_as_C_string(),
                                nm, *(address *) return_address_ptr, nm->is_osr_method(), thread,
                                thread->name(), frame.sp(), nm->verified_entry_point());
  }

  new_frame->sp = frame.sp();
  new_frame->fp = frame.fp();
  new_frame->lr = frame.pc();
  new_frame->pc = SharedRuntime::get_handle_wrong_method_stub();
}

void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) {
  if (!supports_entry_barrier(nm)) {
    return;
  }

  if (value == disarmed_guard_value()) {
    // The patching epoch is incremented before the nmethod is disarmed. Disarming
    // is performed with a release store. In the nmethod entry barrier, the values
    // are read in the opposite order, such that the load of the nmethod guard
    // acquires the patching epoch. This way, the guard is guaranteed to block
    // entries to the nmethod until the requirement for further fencing by
    // mutators has been safely published, before they are allowed to enter.
    BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
    bs_asm->increment_patching_epoch();
  }

  NativeNMethodBarrier barrier(nm);
  barrier.set_value(value);
}
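
// A minimal sketch of the ordering described above (not HotSpot code; the
// names epoch and guard are illustrative):
//
//   disarming thread:                      entering mutator:
//     epoch.store(e + 1);   // plain         g = guard.load_acquire();
//     guard.release_store(DISARMED);         e = epoch.load();
//
// Because the store to the guard is a release and the mutator's load of the
// guard is an acquire, a mutator that observes the disarmed guard also
// observes the incremented epoch, and thus performs whatever fencing the
// epoch change requires before entering the nmethod.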

int BarrierSetNMethod::guard_value(nmethod* nm) {
  if (!supports_entry_barrier(nm)) {
    return disarmed_guard_value();
  }

  NativeNMethodBarrier barrier(nm);
  return barrier.get_value();
}

#if INCLUDE_JVMCI
bool BarrierSetNMethod::verify_barrier(nmethod* nm, err_msg& msg) {
  NativeNMethodBarrier barrier(nm);
  return barrier.check_barrier(msg);
}
#endif

// ============================================================================
// Variant supporting multiple verified entry points, for nmethods with
// scalarized inline-type arguments:
// ============================================================================

// Note that this offset is invariant of PreserveFramePointer.
static int entry_barrier_offset(nmethod* nm) {
  BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
  switch (bs_asm->nmethod_patching_type()) {
  case NMethodPatchingType::stw_instruction_and_data_patch:
    return -4 * (4 + slow_path_size(nm));
  case NMethodPatchingType::conc_instruction_and_data_patch:
    return -4 * (10 + slow_path_size(nm));
  case NMethodPatchingType::conc_data_patch:
    return -4 * (5 + slow_path_size(nm));
  }
  ShouldNotReachHere();
  return 0;
}

class NativeNMethodBarrier {
  address  _instruction_address;
  int*     _guard_addr;
  nmethod* _nm;

public:
  address instruction_address() const { return _instruction_address; }

  int* guard_addr() {
    return _guard_addr;
  }

  int local_guard_offset(nmethod* nm) {
    // The guard is the last 4-byte word of the barrier sequence.
    return (-entry_barrier_offset(nm)) - 4;
  }

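  // The optional alt_entry_instruction_address selects the copy of the barrier
  // at an alternate verified entry point (see set_value() below); the default
  // of nullptr means the barrier at the primary verified entry point.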
  NativeNMethodBarrier(nmethod* nm, address alt_entry_instruction_address = nullptr): _nm(nm) {
#if INCLUDE_JVMCI
    if (nm->is_compiled_by_jvmci()) {
      assert(alt_entry_instruction_address == nullptr, "invariant");
      address pc = nm->code_begin() + nm->jvmci_nmethod_data()->nmethod_entry_patch_offset();
      RelocIterator iter(nm, pc, pc + 4);
      guarantee(iter.next(), "missing relocs");
      guarantee(iter.type() == relocInfo::section_word_type, "unexpected reloc");

      _guard_addr = (int*) iter.section_word_reloc()->target();
      _instruction_address = pc;
    } else
#endif
    {
      _instruction_address = (alt_entry_instruction_address != nullptr) ? alt_entry_instruction_address :
                             nm->code_begin() + nm->frame_complete_offset() + entry_barrier_offset(nm);
      if (nm->is_compiled_by_c2()) {
        // With C2-compiled code, the guard is out-of-line in a stub.
        // We find it using the RelocIterator.
        RelocIterator iter(nm);
        while (iter.next()) {
          if (iter.type() == relocInfo::entry_guard_type) {
            entry_guard_Relocation* const reloc = iter.entry_guard_reloc();
            _guard_addr = reinterpret_cast<int*>(reloc->addr());
            return;
          }
        }
        ShouldNotReachHere();
      }
      _guard_addr = reinterpret_cast<int*>(instruction_address() + local_guard_offset(nm));
    }
  }

  int get_value() {
    return Atomic::load_acquire(guard_addr());
  }

  // ... [remaining members of NativeNMethodBarrier and the head of
  // BarrierSetNMethod::deoptimize elided] ...

  assert(frame.is_compiled_frame() || frame.is_native_frame(), "must be");
  assert(frame.cb() == nm, "must be");
  frame = frame.sender(&reg_map);

  LogTarget(Trace, nmethod, barrier) out;
  if (out.is_enabled()) {
    ResourceMark mark;
    log_trace(nmethod, barrier)("deoptimize(nmethod: %s(%p), return_addr: %p, osr: %d, thread: %p(%s), making rsp: %p) -> %p",
                                nm->method()->name_and_sig_as_C_string(),
                                nm, *(address *) return_address_ptr, nm->is_osr_method(), thread,
                                thread->name(), frame.sp(), nm->verified_entry_point());
  }

  new_frame->sp = frame.sp();
  new_frame->fp = frame.fp();
  new_frame->lr = frame.pc();
  new_frame->pc = SharedRuntime::get_handle_wrong_method_stub();
}

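// With scalarized inline-type arguments, an nmethod exposes several verified
// entry points (a scalarized and a non-scalarized one, plus a read-only
// variant), and each entry point is preceded by its own copy of the entry
// barrier. All copies must be armed and disarmed together, which is what the
// helper below does.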
static void set_value(nmethod* nm, jint val) {
  NativeNMethodBarrier cmp1 = NativeNMethodBarrier(nm);
  cmp1.set_value(val);

  if (!nm->is_osr_method() && nm->method()->has_scalarized_args()) {
    // nmethods with scalarized arguments have multiple entry points that each
    // have their own nmethod entry barrier.
    assert(nm->verified_entry_point() != nm->verified_inline_entry_point(), "scalarized entry point not found");
    address method_body = nm->is_compiled_by_c1() ? nm->verified_inline_entry_point() : nm->verified_entry_point();
    address entry_point2 = nm->is_compiled_by_c1() ? nm->verified_entry_point() : nm->verified_inline_entry_point();

    int barrier_offset = cmp1.instruction_address() - method_body;
    NativeNMethodBarrier cmp2 = NativeNMethodBarrier(nm, entry_point2 + barrier_offset);
    assert(cmp1.instruction_address() != cmp2.instruction_address(), "sanity");
    debug_only(cmp2.verify());
    cmp2.set_value(val);
    if (method_body != nm->verified_inline_ro_entry_point() && entry_point2 != nm->verified_inline_ro_entry_point()) {
      NativeNMethodBarrier cmp3 = NativeNMethodBarrier(nm, nm->verified_inline_ro_entry_point() + barrier_offset);
      assert(cmp1.instruction_address() != cmp3.instruction_address() && cmp2.instruction_address() != cmp3.instruction_address(), "sanity");
      debug_only(cmp3.verify());
      cmp3.set_value(val);
    }
  }
}

void BarrierSetNMethod::set_guard_value(nmethod* nm, int value) {
  if (!supports_entry_barrier(nm)) {
    return;
  }

  if (value == disarmed_guard_value()) {
    // The patching epoch is incremented before the nmethod is disarmed. Disarming
    // is performed with a release store. In the nmethod entry barrier, the values
    // are read in the opposite order, such that the load of the nmethod guard
    // acquires the patching epoch. This way, the guard is guaranteed to block
    // entries to the nmethod until the requirement for further fencing by
    // mutators has been safely published, before they are allowed to enter.
    BarrierSetAssembler* bs_asm = BarrierSet::barrier_set()->barrier_set_assembler();
    bs_asm->increment_patching_epoch();
  }

  set_value(nm, value);
}

int BarrierSetNMethod::guard_value(nmethod* nm) {
  if (!supports_entry_barrier(nm)) {
    return disarmed_guard_value();
  }

  NativeNMethodBarrier barrier(nm);
  return barrier.get_value();
}

#if INCLUDE_JVMCI
bool BarrierSetNMethod::verify_barrier(nmethod* nm, err_msg& msg) {
  NativeNMethodBarrier barrier(nm);
  return barrier.check_barrier(msg);
}
#endif