                  (int)(masm->pc() - s->code_begin()),
                  stub_length,
                  (int)(s->code_end() - masm->pc()));
  }
  guarantee(masm->pc() <= s->code_end(), "%s #%d: overflowed buffer, estimated len: %d, actual len: %d, overrun: %d",
            name, index, stub_length,
            (int)(masm->pc() - s->code_begin()),
            (int)(masm->pc() - s->code_end()));
  assert((masm->pc() + index_dependent_slop) <= s->code_end(), "%s #%d: spare space for 32-bit offset: required = %d, available = %d",
         name, index, index_dependent_slop,
         (int)(s->code_end() - masm->pc()));

  // After the first vtable/itable stub is generated, we have a much
  // better estimate for the stub size. Remember/update this
  // estimate after some sanity checks.
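  // (Concretely, the call below takes the measured code size, masm->offset(),
  // plus the slop_bytes safety margin as the basis for the updated limit.)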
  check_and_set_size_limit(is_vtable_stub, masm->offset(), slop_bytes);
  s->set_exception_points(npe_addr, ame_addr);
}


address VtableStubs::find_stub(bool is_vtable_stub, int vtable_index, bool caller_is_c1) {
  assert(vtable_index >= 0, "must be non-negative");

  VtableStub* s;
  {
    MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
    s = lookup(is_vtable_stub, vtable_index, caller_is_c1);
    if (s == nullptr) {
      if (is_vtable_stub) {
        s = create_vtable_stub(vtable_index, caller_is_c1);
      } else {
        s = create_itable_stub(vtable_index, caller_is_c1);
      }

      // Creation of a vtable or itable stub can fail if there is not enough free space in the code cache.
      if (s == nullptr) {
        return nullptr;
      }

      enter(is_vtable_stub, vtable_index, caller_is_c1, s);
      if (PrintAdapterHandlers) {
        tty->print_cr("Decoding VtableStub (%s) %s[%d]@" PTR_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT "] (%zu bytes)",
                      caller_is_c1 ? "c1" : "full opt",
                      is_vtable_stub ? "vtbl" : "itbl", vtable_index, p2i(VtableStub::receiver_location()),
                      p2i(s->code_begin()), p2i(s->code_end()), pointer_delta(s->code_end(), s->code_begin(), 1));
        Disassembler::decode(s->code_begin(), s->code_end());
      }
      // Notify JVMTI about this stub. The event will be recorded by the enclosing
      // JvmtiDynamicCodeEventCollector and posted when this thread has released
      // all locks. Only post this event if a new state is not required. Creating a new state
      // would cause a safepoint, and the caller of this code has a NoSafepointVerifier.
      if (JvmtiExport::should_post_dynamic_code_generated()) {
        JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub ? "vtable stub" : "itable stub", // FIXME: need to pass caller_is_c1??
                                                                     s->code_begin(), s->code_end());
      }
    }
  }
  return s->entry_point();
}
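// Illustrative use (a sketch only; the real call sites live in the
// inline-cache resolution machinery and are not shown here):
//
//   address entry = VtableStubs::find_stub(/*is_vtable_stub=*/ true,
//                                          vtable_index, caller_is_c1);
//   if (entry == nullptr) {
//     // Stub creation failed because the code cache is full; the caller
//     // must bail out (or retry once space has been reclaimed).
//   }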


inline uint VtableStubs::hash(bool is_vtable_stub, int vtable_index, bool caller_is_c1) {
  // Assumption: receiver_location < 4 in most cases.
  int hash = ((vtable_index << 2) ^ VtableStub::receiver_location()->value()) + vtable_index;
  if (caller_is_c1) {
    hash = 7 - hash;
  }
  return (is_vtable_stub ? ~hash : hash) & mask;
}
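// Worked example (assuming VtableStub::receiver_location()->value() == 0,
// the common case noted above): with vtable_index == 5,
//   hash = ((5 << 2) ^ 0) + 5 = 25
// A C1 caller flips that to 7 - 25 = -18, and a vtable stub then takes
// ~(-18) = 17 before the final "& mask" folds it into the table's range.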


inline uint VtableStubs::unsafe_hash(address entry_point, bool caller_is_c1) {
  // The entry point may or may not be a VtableStub. Generate a hash as if it was.
  address vtable_stub_addr = entry_point - VtableStub::entry_offset();
  assert(CodeCache::contains(vtable_stub_addr), "assumed to always be the case");
  address vtable_type_addr = vtable_stub_addr + offset_of(VtableStub, _type);
  address vtable_index_addr = vtable_stub_addr + offset_of(VtableStub, _index);
  bool is_vtable_stub = *vtable_type_addr == static_cast<uint8_t>(VtableStub::Type::vtable_stub);
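  // Note: entry_point may not belong to a real VtableStub at all, so _index
  // is read with memcpy below rather than through a typed short load, which
  // also sidesteps alignment and strict-aliasing concerns at a raw address.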
  short vtable_index;
  static_assert(sizeof(VtableStub::_index) == sizeof(vtable_index), "precondition");
  memcpy(&vtable_index, vtable_index_addr, sizeof(vtable_index));
  return hash(is_vtable_stub, vtable_index, caller_is_c1);
}

VtableStub* VtableStubs::lookup(bool is_vtable_stub, int vtable_index, bool caller_is_c1) {
  assert_lock_strong(VtableStubs_lock);
  unsigned hash = VtableStubs::hash(is_vtable_stub, vtable_index, caller_is_c1);
  VtableStub* s = Atomic::load(&_table[hash]);
  while (s != nullptr && !s->matches(is_vtable_stub, vtable_index, caller_is_c1)) s = s->next();
  return s;
}


void VtableStubs::enter(bool is_vtable_stub, int vtable_index, bool caller_is_c1, VtableStub* s) {
  assert_lock_strong(VtableStubs_lock);
  assert(s->matches(is_vtable_stub, vtable_index, caller_is_c1), "bad vtable stub");
  unsigned int h = VtableStubs::hash(is_vtable_stub, vtable_index, caller_is_c1);
  // Insert s at the beginning of the corresponding list.
  s->set_next(Atomic::load(&_table[h]));
  // Make sure that concurrent readers not taking the mutex observe the write to "next".
  Atomic::release_store(&_table[h], s);
}
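// Design note: the release_store above pairs with the load_acquire in
// stub_containing() below, so a lock-free reader that observes the new list
// head also observes its initialized "next" field. Readers that hold
// VtableStubs_lock (lookup() and entry_point()) only need a plain load.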

VtableStub* VtableStubs::entry_point(address pc) {
  // The pc may or may not be the entry point of a VtableStub. Use unsafe_hash
  // to generate the hash that would have been used if it were. The lookup in
  // _table can only succeed if there is a VtableStub with an entry point at
  // the pc.
  MutexLocker ml(VtableStubs_lock, Mutex::_no_safepoint_check_flag);
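  // As in unsafe_hash(), speculatively treat the memory preceding pc as a
  // VtableStub so its caller_is_c1 flag can be read; the value is only
  // meaningful if pc really is a stub entry point, which the table lookup
  // below verifies.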
  VtableStub* stub = (VtableStub*)(pc - VtableStub::entry_offset());
  uint hash = VtableStubs::unsafe_hash(pc, stub->caller_is_c1());
  VtableStub* s;
  for (s = Atomic::load(&_table[hash]); s != nullptr && s->entry_point() != pc; s = s->next()) {}
  return (s != nullptr && s->entry_point() == pc) ? s : nullptr;
}

bool VtableStubs::contains(address pc) {
  // Simple solution for now; we may want a faster implementation
  // if this function is called often.
  return stub_containing(pc) != nullptr;
}


VtableStub* VtableStubs::stub_containing(address pc) {
  for (int i = 0; i < N; i++) {
    for (VtableStub* s = Atomic::load_acquire(&_table[i]); s != nullptr; s = s->next()) {
      if (s->contains(pc)) return s;
    }
  }
  return nullptr;
}
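// A note on the lock-free traversal above: load_acquire pairs with the
// release_store in enter(), and (as far as this file is concerned) stubs are
// only ever added to a bucket, never unlinked, so an unlocked reader always
// sees a consistent prefix of each list.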