/*
 * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OOPS_STACKCHUNKOOP_INLINE_HPP
#define SHARE_OOPS_STACKCHUNKOOP_INLINE_HPP

#include "oops/stackChunkOop.hpp"

#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetStackChunk.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
#include "memory/memRegion.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/instanceStackChunkKlass.inline.hpp"
#include "runtime/continuationJavaClasses.inline.hpp"
#include "runtime/frame.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/registerMap.hpp"
#include "runtime/smallRegisterMap.inline.hpp"
#include "utilities/macros.hpp"
#include CPU_HEADER_INLINE(stackChunkOop)

DEF_HANDLE_CONSTR(stackChunk, is_stackChunk_noinline)

inline stackChunkOop stackChunkOopDesc::cast(oop obj) {
  assert(obj == nullptr || obj->is_stackChunk(), "Wrong type");
  return stackChunkOop(obj);
}

inline stackChunkOop stackChunkOopDesc::parent() const { return stackChunkOopDesc::cast(jdk_internal_vm_StackChunk::parent(as_oop())); }
inline void stackChunkOopDesc::set_parent(stackChunkOop value) { jdk_internal_vm_StackChunk::set_parent(this, value); }
template<typename P>
inline void stackChunkOopDesc::set_parent_raw(oop value) { jdk_internal_vm_StackChunk::set_parent_raw<P>(this, value); }
template<DecoratorSet decorators>
inline void stackChunkOopDesc::set_parent_access(oop value) { jdk_internal_vm_StackChunk::set_parent_access<decorators>(this, value); }

inline int stackChunkOopDesc::stack_size() const { return jdk_internal_vm_StackChunk::size(as_oop()); }

inline int stackChunkOopDesc::bottom() const { return jdk_internal_vm_StackChunk::bottom(as_oop()); }
inline void stackChunkOopDesc::set_bottom(int value) { jdk_internal_vm_StackChunk::set_bottom(this, value); }

inline int stackChunkOopDesc::sp() const { return jdk_internal_vm_StackChunk::sp(as_oop()); }
inline void stackChunkOopDesc::set_sp(int value) { jdk_internal_vm_StackChunk::set_sp(this, value); }

inline address stackChunkOopDesc::pc() const { return jdk_internal_vm_StackChunk::pc(as_oop()); }
inline void stackChunkOopDesc::set_pc(address value) { jdk_internal_vm_StackChunk::set_pc(this, value); }
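
// Note on units: stack_size(), bottom() and sp() above are measured in words
// within the chunk's stack area, not in bytes or raw addresses; the
// *_address() accessors below translate them via start_address(). A minimal
// illustrative round-trip (a sketch, using only functions defined in this file):
//
//   stackChunkOop chunk = ...;
//   intptr_t* sp = chunk->sp_address();               // start_address() + sp()
//   assert(chunk->to_offset(sp) == chunk->sp(), "offset/address round-trip");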
inline uint8_t stackChunkOopDesc::flags() const { return jdk_internal_vm_StackChunk::flags(as_oop()); }
inline void stackChunkOopDesc::set_flags(uint8_t value) { jdk_internal_vm_StackChunk::set_flags(this, value); }

inline uint8_t stackChunkOopDesc::flags_acquire() const { return jdk_internal_vm_StackChunk::flags_acquire(as_oop()); }

inline void stackChunkOopDesc::release_set_flags(uint8_t value) {
  jdk_internal_vm_StackChunk::release_set_flags(this, value);
}

inline bool stackChunkOopDesc::try_set_flags(uint8_t prev_flags, uint8_t new_flags) {
  return jdk_internal_vm_StackChunk::try_set_flags(this, prev_flags, new_flags);
}

inline int stackChunkOopDesc::max_thawing_size() const { return jdk_internal_vm_StackChunk::maxThawingSize(as_oop()); }
inline void stackChunkOopDesc::set_max_thawing_size(int value) {
  assert(value >= 0, "size must be >= 0");
  jdk_internal_vm_StackChunk::set_maxThawingSize(this, (jint)value);
}

inline uint8_t stackChunkOopDesc::lockstack_size() const { return jdk_internal_vm_StackChunk::lockStackSize(as_oop()); }
inline void stackChunkOopDesc::set_lockstack_size(uint8_t value) { jdk_internal_vm_StackChunk::set_lockStackSize(this, value); }

inline oop stackChunkOopDesc::cont() const { return jdk_internal_vm_StackChunk::cont(as_oop()); }
inline void stackChunkOopDesc::set_cont(oop value) { jdk_internal_vm_StackChunk::set_cont(this, value); }
template<typename P>
inline void stackChunkOopDesc::set_cont_raw(oop value) { jdk_internal_vm_StackChunk::set_cont_raw<P>(this, value); }
template<DecoratorSet decorators>
inline void stackChunkOopDesc::set_cont_access(oop value) { jdk_internal_vm_StackChunk::set_cont_access<decorators>(this, value); }

inline int stackChunkOopDesc::argsize() const {
  assert(!is_empty(), "should not ask for argsize in empty chunk");
  return stack_size() - bottom() - frame::metadata_words_at_top;
}

inline HeapWord* stackChunkOopDesc::start_of_stack() const {
  return (HeapWord*)(cast_from_oop<intptr_t>(as_oop()) + InstanceStackChunkKlass::offset_of_stack());
}

inline intptr_t* stackChunkOopDesc::start_address() const { return (intptr_t*)start_of_stack(); }
inline intptr_t* stackChunkOopDesc::end_address() const { return start_address() + stack_size(); }
inline intptr_t* stackChunkOopDesc::bottom_address() const { return start_address() + bottom(); }
inline intptr_t* stackChunkOopDesc::sp_address() const { return start_address() + sp(); }

inline int stackChunkOopDesc::to_offset(intptr_t* p) const {
  assert(is_in_chunk(p)
         || (p >= start_address() && (p - start_address()) <= stack_size() + frame::metadata_words),
         "p: " PTR_FORMAT " start: " PTR_FORMAT " end: " PTR_FORMAT, p2i(p), p2i(start_address()), p2i(bottom_address()));
  return (int)(p - start_address());
}

inline intptr_t* stackChunkOopDesc::from_offset(int offset) const {
  assert(offset <= stack_size(), "");
  return start_address() + offset;
}

inline bool stackChunkOopDesc::is_empty() const {
  assert(sp() <= bottom(), "");
  return sp() == bottom();
}

inline bool stackChunkOopDesc::is_in_chunk(void* p) const {
  HeapWord* start = (HeapWord*)start_address();
  HeapWord* end = start + stack_size();
  return (HeapWord*)p >= start && (HeapWord*)p < end;
}

inline bool stackChunkOopDesc::is_usable_in_chunk(void* p) const {
  HeapWord* start = (HeapWord*)start_address() + sp() - frame::metadata_words_at_bottom;
  HeapWord* end = start + stack_size();
  return (HeapWord*)p >= start && (HeapWord*)p < end;
}
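
// Flag predicates and mutators. The FLAG_* bit constants are declared in
// stackChunkOop.hpp. Note that set_flag() below is a plain (non-atomic)
// read-modify-write; updates that may race with concurrent readers should go
// through try_set_flags() or release_set_flags() above instead.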
inline bool stackChunkOopDesc::is_flag(uint8_t flag) const {
  return (flags() & flag) != 0;
}
inline bool stackChunkOopDesc::is_flag_acquire(uint8_t flag) const {
  return (flags_acquire() & flag) != 0;
}
inline void stackChunkOopDesc::set_flag(uint8_t flag, bool value) {
  uint32_t flags = this->flags();
  set_flags((uint8_t)(value ? flags |= flag : flags &= ~flag));
}

inline bool stackChunkOopDesc::has_mixed_frames() const { return is_flag(FLAG_HAS_INTERPRETED_FRAMES); }
inline void stackChunkOopDesc::set_has_mixed_frames(bool value) {
  assert((flags() & ~(FLAG_HAS_INTERPRETED_FRAMES | FLAG_PREEMPTED)) == 0, "other flags should not be set");
  set_flag(FLAG_HAS_INTERPRETED_FRAMES, value);
}

inline bool stackChunkOopDesc::preempted() const { return is_flag(FLAG_PREEMPTED); }
inline void stackChunkOopDesc::set_preempted(bool value) {
  assert(preempted() != value, "");
  set_flag(FLAG_PREEMPTED, value);
}

inline bool stackChunkOopDesc::at_klass_init() const { return jdk_internal_vm_StackChunk::atKlassInit(as_oop()); }
inline void stackChunkOopDesc::set_at_klass_init(bool value) {
  assert(at_klass_init() != value, "");
  jdk_internal_vm_StackChunk::set_atKlassInit(this, value);
}

inline bool stackChunkOopDesc::has_args_at_top() const { return jdk_internal_vm_StackChunk::hasArgsAtTop(as_oop()); }
inline void stackChunkOopDesc::set_has_args_at_top(bool value) {
  assert(has_args_at_top() != value, "");
  jdk_internal_vm_StackChunk::set_hasArgsAtTop(this, value);
}

inline bool stackChunkOopDesc::has_lockstack() const { return is_flag(FLAG_HAS_LOCKSTACK); }
inline void stackChunkOopDesc::set_has_lockstack(bool value) { set_flag(FLAG_HAS_LOCKSTACK, value); }

inline bool stackChunkOopDesc::is_gc_mode() const { return is_flag(FLAG_GC_MODE); }
inline bool stackChunkOopDesc::is_gc_mode_acquire() const { return is_flag_acquire(FLAG_GC_MODE); }
inline void stackChunkOopDesc::set_gc_mode(bool value) { set_flag(FLAG_GC_MODE, value); }

inline bool stackChunkOopDesc::has_bitmap() const { return is_flag(FLAG_HAS_BITMAP); }
inline void stackChunkOopDesc::set_has_bitmap(bool value) { set_flag(FLAG_HAS_BITMAP, value); }

inline bool stackChunkOopDesc::has_thaw_slowpath_condition() const { return flags() != 0; }

inline bool stackChunkOopDesc::requires_barriers() {
  return Universe::heap()->requires_barriers(this);
}

template <stackChunkOopDesc::BarrierType barrier, ChunkFrames frame_kind, typename RegisterMapT>
void stackChunkOopDesc::do_barriers(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
  if (frame_kind == ChunkFrames::Mixed) {
    // we could freeze deopted frames in slow mode.
    f.handle_deopted();
  }
  do_barriers0<barrier>(f, map);
}

template <typename OopT, class StackChunkLockStackClosureType>
inline void stackChunkOopDesc::iterate_lockstack(StackChunkLockStackClosureType* closure) {
  assert(LockingMode == LM_LIGHTWEIGHT, "");
  int cnt = lockstack_size();
  intptr_t* lockstart_addr = start_address();
  for (int i = 0; i < cnt; i++) {
    closure->do_oop((OopT*)&lockstart_addr[i]);
  }
}

template <class StackChunkFrameClosureType>
inline void stackChunkOopDesc::iterate_stack(StackChunkFrameClosureType* closure) {
  has_mixed_frames() ? iterate_stack<ChunkFrames::Mixed>(closure)
                     : iterate_stack<ChunkFrames::CompiledOnly>(closure);
}
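
// Illustrative closure shape for iterate_stack() (a sketch inferred from the
// template below, not a separate interface type; the names here are
// hypothetical):
//
//   class CountFramesClosure {
//     int _count = 0;
//    public:
//     template <ChunkFrames frame_kind, typename RegisterMapT>
//     bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
//       _count++;
//       return true; // return false to stop the iteration early
//     }
//   };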
template <ChunkFrames frame_kind, class StackChunkFrameClosureType>
inline void stackChunkOopDesc::iterate_stack(StackChunkFrameClosureType* closure) {
  const auto* map = SmallRegisterMap::instance_no_args();
  assert(!map->in_cont(), "");

  StackChunkFrameStream<frame_kind> f(this);
  bool should_continue = true;

  if (f.is_stub()) {
    RegisterMap full_map(nullptr,
                         RegisterMap::UpdateMap::include,
                         RegisterMap::ProcessFrames::skip,
                         RegisterMap::WalkContinuation::include);
    full_map.set_include_argument_oops(false);
    closure->do_frame(f, map);

    f.next(&full_map);
    assert(!f.is_done(), "");
    assert(f.is_compiled(), "");

    should_continue = closure->do_frame(f, &full_map);
    f.next(map);
  } else if (frame_kind == ChunkFrames::Mixed && f.is_interpreted() && has_args_at_top()) {
    should_continue = closure->do_frame(f, SmallRegisterMap::instance_with_args());
    f.next(map);
  }
  assert(!f.is_stub(), "");

  for (; should_continue && !f.is_done(); f.next(map)) {
    if (frame_kind == ChunkFrames::Mixed) {
      // in slow mode we might freeze deoptimized frames
      f.handle_deopted();
    }
    should_continue = closure->do_frame(f, map);
  }
}

inline frame stackChunkOopDesc::relativize(frame fr) const { relativize_frame(fr); return fr; }
inline frame stackChunkOopDesc::derelativize(frame fr) const { derelativize_frame(fr); return fr; }

inline void* stackChunkOopDesc::gc_data() const {
  int stack_sz = stack_size();
  assert(stack_sz != 0, "stack should not be empty");

  // The gc data is located after the stack.
  return start_of_stack() + stack_sz;
}

inline BitMapView stackChunkOopDesc::bitmap() const {
  HeapWord* bitmap_addr = static_cast<HeapWord*>(gc_data());
  int stack_sz = stack_size();
  size_t bitmap_size_in_bits = InstanceStackChunkKlass::bitmap_size_in_bits(stack_sz);

  BitMapView bitmap((BitMap::bm_word_t*)bitmap_addr, bitmap_size_in_bits);

  DEBUG_ONLY(bitmap.verify_range(bit_index_for(start_address()), bit_index_for(end_address()));)

  return bitmap;
}

inline BitMap::idx_t stackChunkOopDesc::bit_index_for(address p) const {
  return UseCompressedOops ? bit_index_for((narrowOop*)p) : bit_index_for((oop*)p);
}

template <typename OopT>
inline BitMap::idx_t stackChunkOopDesc::bit_index_for(OopT* p) const {
  assert(is_aligned(p, alignof(OopT)), "should be aligned: " PTR_FORMAT, p2i(p));
  assert(p >= (OopT*)start_address(), "Address not in chunk");
  return p - (OopT*)start_address();
}

inline intptr_t* stackChunkOopDesc::address_for_bit(BitMap::idx_t index) const {
  return UseCompressedOops ? (intptr_t*)address_for_bit<narrowOop>(index) : (intptr_t*)address_for_bit<oop>(index);
}

template <typename OopT>
inline OopT* stackChunkOopDesc::address_for_bit(BitMap::idx_t index) const {
  return (OopT*)start_address() + index;
}
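
// The bitmap uses one bit per oop-sized slot (narrowOop-sized with compressed
// oops) of the stack area, so bit_index_for() and address_for_bit() are
// inverses of each other. A sketch of the invariant, for an oop slot p inside
// the chunk:
//
//   BitMap::idx_t idx = chunk->bit_index_for((oop*)p);  // p - start_address()
//   assert(chunk->address_for_bit<oop>(idx) == (oop*)p, "round-trip");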
inline MemRegion stackChunkOopDesc::range() {
  return MemRegion((HeapWord*)this, size());
}

inline int stackChunkOopDesc::relativize_usp_offset(const frame& fr, const int usp_offset_in_bytes) const {
  assert(fr.is_compiled_frame() || fr.cb()->is_runtime_stub(), "");
  assert(is_in_chunk(fr.unextended_sp()), "");

  intptr_t* base = fr.real_fp(); // equal to the caller's sp
  intptr_t* loc = (intptr_t*)((address)fr.unextended_sp() + usp_offset_in_bytes);
  assert(base > loc, "");
  return (int)(base - loc);
}

inline address stackChunkOopDesc::usp_offset_to_location(const frame& fr, const int usp_offset_in_bytes) const {
  assert(fr.is_compiled_frame(), "");
  return (address)derelativize_address(fr.offset_unextended_sp()) + usp_offset_in_bytes;
}

inline address stackChunkOopDesc::reg_to_location(const frame& fr, const RegisterMap* map, VMReg reg) const {
  assert(fr.is_compiled_frame(), "");
  assert(map != nullptr, "");
  assert(map->stack_chunk() == as_oop(), "");

  // the offsets are saved in the map after going through relativize_usp_offset, so they are sp - loc, in words
  intptr_t offset = (intptr_t)map->location(reg, nullptr); // see usp_offset_to_index for the chunk case
  intptr_t* base = derelativize_address(fr.offset_sp());
  return (address)(base - offset);
}

inline Method* stackChunkOopDesc::interpreter_frame_method(const frame& fr) {
  return derelativize(fr).interpreter_frame_method();
}

inline address stackChunkOopDesc::interpreter_frame_bcp(const frame& fr) {
  return derelativize(fr).interpreter_frame_bcp();
}

inline intptr_t* stackChunkOopDesc::interpreter_frame_expression_stack_at(const frame& fr, int index) const {
  frame heap_frame = derelativize(fr);
  assert(heap_frame.is_heap_frame(), "must be");
  return heap_frame.interpreter_frame_expression_stack_at(index);
}

inline intptr_t* stackChunkOopDesc::interpreter_frame_local_at(const frame& fr, int index) const {
  frame heap_frame = derelativize(fr);
  assert(heap_frame.is_heap_frame(), "must be");
  return heap_frame.interpreter_frame_local_at(index);
}

inline void stackChunkOopDesc::copy_from_stack_to_chunk(intptr_t* from, intptr_t* to, int size) {
  log_develop_trace(continuations)("Copying from v: " PTR_FORMAT " - " PTR_FORMAT " (%d words, %d bytes)",
                                   p2i(from), p2i(from + size), size, size << LogBytesPerWord);
  log_develop_trace(continuations)("Copying to h: " PTR_FORMAT "(" INTPTR_FORMAT "," INTPTR_FORMAT ") - " PTR_FORMAT "(" INTPTR_FORMAT "," INTPTR_FORMAT ") (%d words, %d bytes)",
                                   p2i(to), to - start_address(), relative_base() - to, p2i(to + size), to + size - start_address(),
                                   relative_base() - (to + size), size, size << LogBytesPerWord);

  assert(to >= start_address(), "Chunk underflow");
  assert(to + size <= end_address(), "Chunk overflow");

#if !(defined(AMD64) || defined(AARCH64) || defined(RISCV64) || defined(PPC64)) || defined(ZERO)
  // Suppress compilation warning-as-error on unimplemented architectures
  // that stub out arch-specific methods. Some compilers are smart enough
  // to figure out the argument is always null and then warn about it.
  if (to != nullptr)
#endif
  memcpy(to, from, size << LogBytesPerWord);
}

inline void stackChunkOopDesc::copy_from_chunk_to_stack(intptr_t* from, intptr_t* to, int size) {
  log_develop_trace(continuations)("Copying from h: " PTR_FORMAT "(" INTPTR_FORMAT "," INTPTR_FORMAT ") - " PTR_FORMAT "(" INTPTR_FORMAT "," INTPTR_FORMAT ") (%d words, %d bytes)",
                                   p2i(from), from - start_address(), relative_base() - from, p2i(from + size), from + size - start_address(),
                                   relative_base() - (from + size), size, size << LogBytesPerWord);
  log_develop_trace(continuations)("Copying to v: " PTR_FORMAT " - " PTR_FORMAT " (%d words, %d bytes)", p2i(to),
                                   p2i(to + size), size, size << LogBytesPerWord);

  assert(from >= start_address(), "");
  assert(from + size <= end_address(), "");

#if !(defined(AMD64) || defined(AARCH64) || defined(RISCV64) || defined(PPC64)) || defined(ZERO)
  // Suppress compilation warning-as-error on unimplemented architectures
  // that stub out arch-specific methods. Some compilers are smart enough
  // to figure out the argument is always null and then warn about it.
  if (to != nullptr)
#endif
  memcpy(to, from, size << LogBytesPerWord);
}

template <typename OopT>
inline oop stackChunkOopDesc::load_oop(OopT* addr) {
  return BarrierSet::barrier_set()->barrier_set_stack_chunk()->load_oop(this, addr);
}

inline intptr_t* stackChunkOopDesc::relative_base() const {
  // we relativize with respect to end rather than start because GC might compact the chunk
  return end_address() + frame::metadata_words;
}
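
// Worked example of the relativization scheme (a sketch; p stands for any
// in-range address): relativize_address(p) stores the word distance
// relative_base() - p, and derelativize_address() inverts it, so
//
//   int off = chunk->relativize_address(p);
//   assert(chunk->derelativize_address(off) == p, "round-trip");
//
// holds for any p with start_address() <= p <= relative_base().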
inline intptr_t* stackChunkOopDesc::derelativize_address(int offset) const {
  intptr_t* base = relative_base();
  intptr_t* p = base - offset;
  assert(start_address() <= p && p <= base, "start_address: " PTR_FORMAT " p: " PTR_FORMAT " base: " PTR_FORMAT,
         p2i(start_address()), p2i(p), p2i(base));
  return p;
}

inline int stackChunkOopDesc::relativize_address(intptr_t* p) const {
  intptr_t* base = relative_base();
  intptr_t offset = base - p;
  assert(start_address() <= p && p <= base, "start_address: " PTR_FORMAT " p: " PTR_FORMAT " base: " PTR_FORMAT,
         p2i(start_address()), p2i(p), p2i(base));
  assert(0 <= offset && offset <= std::numeric_limits<int>::max(), "offset: " PTR_FORMAT, offset);
  return (int)offset;
}

inline void stackChunkOopDesc::relativize_frame(frame& fr) const {
  fr.set_offset_sp(relativize_address(fr.sp()));
  fr.set_offset_unextended_sp(relativize_address(fr.unextended_sp()));
  relativize_frame_pd(fr);
}

inline void stackChunkOopDesc::derelativize_frame(frame& fr) const {
  fr.set_sp(derelativize_address(fr.offset_sp()));
  fr.set_unextended_sp(derelativize_address(fr.offset_unextended_sp()));
  derelativize_frame_pd(fr);
  fr.set_frame_index(-1); // for the sake of assertions in frame
}

#endif // SHARE_OOPS_STACKCHUNKOOP_INLINE_HPP