src/hotspot/share/runtime/stubRoutines.cpp

// Return true if pc lies inside one of the recorded unsafe-copy stub ranges.
bool UnsafeMemoryAccess::contains_pc(address pc) {
  for (int i = 0; i < UnsafeMemoryAccess::_table_length; i++) {
    UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[i];
    if (pc >= entry->start_pc() && pc < entry->end_pc()) {
      return true;
    }
  }
  return false;
}

// Return the pc at which to resume after a page error (e.g. SIGBUS) raised
// inside one of the recorded ranges, or nullptr if pc is not covered.
address UnsafeMemoryAccess::page_error_continue_pc(address pc) {
  for (int i = 0; i < UnsafeMemoryAccess::_table_length; i++) {
    UnsafeMemoryAccess* entry = &UnsafeMemoryAccess::_table[i];
    if (pc >= entry->start_pc() && pc < entry->end_pc()) {
      return entry->error_exit_pc();
    }
  }
  return nullptr;
}

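Both lookups are linear scans of the same table of half-open [start_pc, end_pc) ranges: a fault handler first asks contains_pc and, on a hit, resumes execution at that entry's error_exit_pc instead of treating the fault as fatal. A self-contained sketch of the lookup logic, using a hypothetical Entry type and made-up addresses rather than HotSpot's actual types:

#include <cstdio>

// Hypothetical stand-ins for HotSpot's address type and table entry; the
// real UnsafeMemoryAccess entries are registered by the stub generator.
typedef unsigned char* address;

struct Entry {
  address start_pc;
  address end_pc;        // exclusive, matching the pc < end_pc() test above
  address error_exit_pc; // where execution resumes after a fault in range
};

static unsigned char code[64];          // stand-in for generated stub code
static Entry table[] = {
  { code +  0, code + 16, code + 48 },  // first copy stub
  { code + 16, code + 32, code + 56 },  // second copy stub
};
static const int table_length = sizeof(table) / sizeof(table[0]);

// Same shape as UnsafeMemoryAccess::page_error_continue_pc: linear scan,
// half-open interval test, nullptr when pc is outside every range.
static address continue_pc(address pc) {
  for (int i = 0; i < table_length; i++) {
    if (pc >= table[i].start_pc && pc < table[i].end_pc) {
      return table[i].error_exit_pc;
    }
  }
  return nullptr;
}

int main() {
  address faulting_pc = code + 20;      // inside the second range
  address resume = continue_pc(faulting_pc);
  std::printf("resume at offset %td\n", resume ? resume - code : -1); // 56
  return 0;
}

A nullptr result tells the caller the pc belongs to no unsafe-copy stub, so the fault is handled through the normal (fatal) path.
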
// Allocate a BufferBlob for one group of stubs, generate the stubs into it,
// and check that some headroom remains.
static BufferBlob* initialize_stubs(StubCodeGenerator::StubsKind kind,
                                    int code_size, int max_aligned_stubs,
                                    const char* timer_msg,
                                    const char* buffer_name,
                                    const char* assert_msg) {
  ResourceMark rm;
  TraceTime timer(timer_msg, TRACETIME_LOG(Info, startuptime));
  // Add extra space for large CodeEntryAlignment
  int size = code_size + CodeEntryAlignment * max_aligned_stubs;
  BufferBlob* stubs_code = BufferBlob::create(buffer_name, size);
  if (stubs_code == nullptr) {
    vm_exit_out_of_memory(code_size, OOM_MALLOC_ERROR, "CodeCache: no room for %s", buffer_name);
  }
  CodeBuffer buffer(stubs_code);
  StubGenerator_generate(&buffer, kind);
  // When new stubs are added we need to make sure there is some space left
  // to catch the situation where we should increase the size again.
  assert(code_size == 0 || buffer.insts_remaining() > 200, "increase %s", assert_msg);

  LogTarget(Info, stubs) lt;
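
The helper is expected to be called once per stub group. A plausible caller shape, shown as a hypothetical sketch (the field name, size constant, stub count, and message strings are illustrative, not taken from this excerpt):

// Hypothetical caller sketch: argument values are illustrative only.
void StubRoutines::initialize_initial_stubs() {
  if (_initial_stubs_code == nullptr) {
    _initial_stubs_code = initialize_stubs(StubCodeGenerator::Initial_stubs,
                                           _initial_stubs_code_size,
                                           10 /* max_aligned_stubs */,
                                           "StubRoutines generation initial stubs",
                                           "StubRoutines (initial stubs)",
                                           "_initial_stubs_code_size");
  }
}

The CodeEntryAlignment * max_aligned_stubs slack covers the padding that aligning each alignment-sensitive stub entry can cost: with CodeEntryAlignment at, say, 64 bytes and ten such stubs, the blob is reserved 640 bytes beyond code_size.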
