1 /* 2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#include "code/debugInfo.hpp"
#include "oops/access.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/stackValue.hpp"
#if INCLUDE_ZGC
#include "gc/z/zBarrier.inline.hpp"
#endif
#if INCLUDE_SHENANDOAHGC
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#endif

class RegisterMap;
class SmallRegisterMap;


// Load the oop stored at an oop-sized location, which may live either on a
// thread stack (addr points into the stack) or inside a continuation stack
// chunk (chunk != nullptr).  Handles the compressed-oops subtleties: a
// transformed chunk with a bitmap stores narrow oops, and on 64-bit a slot
// may hold the narrow-oop base produced by an implicit null check.
static oop oop_from_oop_location(stackChunkOop chunk, void* addr) {
  if (addr == nullptr) {
    return nullptr;
  }

  if (UseCompressedOops) {
    // When compressed oops is enabled, an oop location may
    // contain narrow oop values - we deal with that here

    if (chunk != nullptr && chunk->has_bitmap()) {
      // Transformed stack chunk with narrow oops
      return chunk->load_oop((narrowOop*)addr);
    }

#ifdef _LP64
    if (CompressedOops::is_base(*(void**)addr)) {
      // Compiled code may produce decoded oop = narrow_oop_base
      // when a narrow oop implicit null check is used.
      // The narrow_oop_base could be null or be the address
      // of the page below heap. Use null value for both cases.
      return nullptr;
    }
#endif
  }

  if (chunk != nullptr) {
    // Load oop from chunk
    return chunk->load_oop((oop*)addr);
  }

  // Load oop from stack
  oop val = *(oop*)addr;

#if INCLUDE_SHENANDOAHGC
  if (UseShenandoahGC) {
    // Pass the value through the barrier to avoid capturing bad oops as
    // stack values. Note: do not heal the location, to avoid accidentally
    // corrupting the stack. Stack watermark barriers are supposed to handle
    // the healing.
    val = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(val);
  }
#endif

  return val;
}

// Load and decode the narrow oop stored at addr, which may live on a thread
// stack or inside a continuation stack chunk.  If the location is a saved
// register slot, the narrow oop occupies only half of the saved word; on
// big-endian machines that is the second (higher-addressed) half, hence the
// BIG_ENDIAN_ONLY adjustment below.
static oop oop_from_narrowOop_location(stackChunkOop chunk, void* addr, bool is_register) {
  assert(UseCompressedOops, "Narrow oops should not exist");
  assert(addr != nullptr, "Not expecting null address");
  narrowOop* narrow_addr;
  if (is_register) {
    // The callee has no clue whether the register holds an int,
    // long or is unused. He always saves a long. Here we know
    // a long was saved, but we only want an int back. Narrow the
    // saved long to the int that the JVM wants. We can't just
    // use narrow_oop_cast directly, because we don't know what
    // the high bits of the value might be.
    narrow_addr = ((narrowOop*)addr) BIG_ENDIAN_ONLY(+ 1);
  } else {
    narrow_addr = (narrowOop*)addr;
  }

  if (chunk != nullptr) {
    // Load oop from chunk
    return chunk->load_oop(narrow_addr);
  }

  // Load oop from stack
  oop val = CompressedOops::decode(*narrow_addr);

#if INCLUDE_SHENANDOAHGC
  if (UseShenandoahGC) {
    // Pass the value through the barrier to avoid capturing bad oops as
    // stack values. Note: do not heal the location, to avoid accidentally
    // corrupting the stack. Stack watermark barriers are supposed to handle
    // the healing.
    val = ShenandoahBarrierSet::barrier_set()->load_reference_barrier(val);
  }
#endif

  return val;
}

// Wrap the oop found at an oop location in a Handle-based StackValue.
StackValue* StackValue::create_stack_value_from_oop_location(stackChunkOop chunk, void* addr) {
  oop val = oop_from_oop_location(chunk, addr);
  assert(oopDesc::is_oop_or_null(val), "bad oop found at " INTPTR_FORMAT " in_cont: %d compressed: %d",
         p2i(addr), chunk != nullptr, chunk != nullptr && chunk->has_bitmap() && UseCompressedOops);
  Handle h(Thread::current(), val); // Wrap a handle around the oop
  return new StackValue(h);
}

// Wrap the oop found at a narrow-oop location in a Handle-based StackValue.
StackValue* StackValue::create_stack_value_from_narrowOop_location(stackChunkOop chunk, void* addr, bool is_register) {
  oop val = oop_from_narrowOop_location(chunk, addr, is_register);
  assert(oopDesc::is_oop_or_null(val), "bad oop found at " INTPTR_FORMAT " in_cont: %d compressed: %d",
         p2i(addr), chunk != nullptr, chunk != nullptr && chunk->has_bitmap() && UseCompressedOops);
  Handle h(Thread::current(), val); // Wrap a handle around the oop
  return new StackValue(h);
}


// Explicit instantiations for the two register-map flavors used by callers.
template StackValue* StackValue::create_stack_value(const frame* fr, const RegisterMap* reg_map, ScopeValue* sv);
template StackValue* StackValue::create_stack_value(const frame* fr, const SmallRegisterMap* reg_map, ScopeValue* sv);

// Materialize a StackValue for the given ScopeValue in the given frame.
// Location-type scope values are read from the physical register/stack slot
// (resolved via stack_value_address); constants and scalar-replaced objects
// are materialized directly from the debug info.
template<typename RegisterMapT>
StackValue* StackValue::create_stack_value(const frame* fr, const RegisterMapT* reg_map, ScopeValue* sv) {
  address value_addr = stack_value_address(fr, reg_map, sv);
  stackChunkOop chunk = reg_map->stack_chunk()();
  if (sv->is_location()) {
    // Stack or register value
    Location loc = ((LocationValue *)sv)->location();

    // Then package it right depending on type
    // Note: the transfer of the data is thru a union that contains
    // an intptr_t. This is because an interpreter stack slot is
    // really an intptr_t. The use of a union containing an intptr_t
    // ensures that on a 64 bit platform we have proper alignment
    // and that we store the value where the interpreter will expect
    // to find it (i.e. proper endian). Similarly on a 32bit platform
    // using the intptr_t ensures that when a value is larger than
    // a stack slot (jlong/jdouble) that we capture the proper part
    // of the value for the stack slot in question.
    //
    switch( loc.type() ) {
    case Location::float_in_dbl: { // Holds a float in a double register?
      // The callee has no clue whether the register holds a float,
      // double or is unused. He always saves a double. Here we know
      // a double was saved, but we only want a float back. Narrow the
      // saved double to the float that the JVM wants.
      assert( loc.is_register(), "floats always saved to stack in 1 word" );
      union { intptr_t p; jfloat jf; } value;
      value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF); // poison unwritten bits
      value.jf = (jfloat) *(jdouble*) value_addr;
      return new StackValue(value.p); // 64-bit high half is stack junk
    }
    case Location::int_in_long: { // Holds an int in a long register?
      // The callee has no clue whether the register holds an int,
      // long or is unused. He always saves a long. Here we know
      // a long was saved, but we only want an int back. Narrow the
      // saved long to the int that the JVM wants.
      assert( loc.is_register(), "ints always saved to stack in 1 word" );
      union { intptr_t p; jint ji;} value;
      value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF); // poison unwritten bits
      value.ji = (jint) *(jlong*) value_addr;
      return new StackValue(value.p); // 64-bit high half is stack junk
    }
#ifdef _LP64
    case Location::dbl:
      // Double value in an aligned adjacent pair
      return new StackValue(*(intptr_t*)value_addr);
    case Location::lng:
      // Long value in an aligned adjacent pair
      return new StackValue(*(intptr_t*)value_addr);
    case Location::narrowoop:
      return create_stack_value_from_narrowOop_location(reg_map->stack_chunk()(), (void*)value_addr, loc.is_register());
#endif
    case Location::oop:
      return create_stack_value_from_oop_location(reg_map->stack_chunk()(), (void*)value_addr);
    case Location::addr: {
      loc.print_on(tty);
      ShouldNotReachHere(); // both C1 and C2 now inline jsrs
    }
    case Location::normal: {
      // Just copy all other bits straight through
      union { intptr_t p; jint ji;} value;
      value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF); // poison unwritten bits
      value.ji = *(jint*)value_addr;
      return new StackValue(value.p);
    }
    case Location::invalid: {
      return new StackValue();
    }
    case Location::vector: {
      loc.print_on(tty);
      ShouldNotReachHere(); // should be handled by VectorSupport::allocate_vector()
    }
    default:
      loc.print_on(tty);
      ShouldNotReachHere();
    }

  } else if (sv->is_constant_int()) {
    // Constant int: treat same as register int.
    union { intptr_t p; jint ji;} value;
    value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF); // poison unwritten bits
    value.ji = (jint)((ConstantIntValue*)sv)->value();
    return new StackValue(value.p);
  } else if (sv->is_constant_oop()) {
    // constant oop
    return new StackValue(sv->as_ConstantOopReadValue()->value());
#ifdef _LP64
  } else if (sv->is_constant_double()) {
    // Constant double in a single stack slot
    union { intptr_t p; double d; } value;
    value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF); // poison unwritten bits
    value.d = ((ConstantDoubleValue *)sv)->value();
    return new StackValue(value.p);
  } else if (sv->is_constant_long()) {
    // Constant long in a single stack slot
    union { intptr_t p; jlong jl; } value;
    value.p = (intptr_t) CONST64(0xDEADDEAFDEADDEAF); // poison unwritten bits
    value.jl = ((ConstantLongValue *)sv)->value();
    return new StackValue(value.p);
#endif
  } else if (sv->is_object()) { // Scalar replaced object in compiled frame
    ObjectValue* ov = (ObjectValue *)sv;
    Handle hdl = ov->value();
    bool scalar_replaced = hdl.is_null() && ov->is_scalar_replaced();
    if (ov->maybe_null()) {
      // Don't treat inline type as scalar replaced if it is null
      jint is_init = StackValue::create_stack_value(fr, reg_map, ov->is_init())->get_jint();
      scalar_replaced &= (is_init != 0);
    }
    return new StackValue(hdl, scalar_replaced ? 1 : 0);
  } else if (sv->is_marker()) {
    // Should never need to directly construct a marker.
    ShouldNotReachHere();
  }
  // Unknown ScopeValue type
  ShouldNotReachHere();
  return new StackValue((intptr_t) 0);   // dummy
}

// Explicit instantiations for the two register-map flavors used by callers.
template address StackValue::stack_value_address(const frame* fr, const RegisterMap* reg_map, ScopeValue* sv);
template address StackValue::stack_value_address(const frame* fr, const SmallRegisterMap* reg_map, ScopeValue* sv);

// Resolve the address where a location-type ScopeValue's payload lives:
// a callee-saved register spill slot looked up through the register map,
// a stack slot relative to the frame's unextended sp, or - when the frame
// is part of a continuation (reg_map->in_cont()) - the corresponding
// location inside the continuation's stack chunk.  Returns nullptr for
// non-location or invalid-location scope values.
template<typename RegisterMapT>
address StackValue::stack_value_address(const frame* fr, const RegisterMapT* reg_map, ScopeValue* sv) {
  if (!sv->is_location()) {
    return nullptr;
  }
  Location loc = ((LocationValue *)sv)->location();
  if (loc.type() == Location::invalid) {
    return nullptr;
  }

  if (!reg_map->in_cont()) {
    address value_addr = loc.is_register()
      // Value was in a callee-save register
      ? reg_map->location(VMRegImpl::as_VMReg(loc.register_number()), fr->sp())
      // Else value was directly saved on the stack. The frame's original stack pointer,
      // before any extension by its callee (due to Compiler1 linkage on SPARC), must be used.
      : ((address)fr->unextended_sp()) + loc.stack_offset();

    assert(value_addr == nullptr || reg_map->thread() == nullptr || reg_map->thread()->is_in_usable_stack(value_addr), INTPTR_FORMAT, p2i(value_addr));
    return value_addr;
  }

  address value_addr = loc.is_register()
    ? reg_map->as_RegisterMap()->stack_chunk()->reg_to_location(*fr, reg_map->as_RegisterMap(), VMRegImpl::as_VMReg(loc.register_number()))
    : reg_map->as_RegisterMap()->stack_chunk()->usp_offset_to_location(*fr, loc.stack_offset());

  assert(value_addr == nullptr || Continuation::is_in_usable_stack(value_addr, reg_map->as_RegisterMap()) || (reg_map->thread() != nullptr && reg_map->thread()->is_in_usable_stack(value_addr)), INTPTR_FORMAT, p2i(value_addr));
  return value_addr;
}

// Compute the address of a BasicLock stored in the frame's stack, given its
// debug-info Location (which must be a stack location).
BasicLock* StackValue::resolve_monitor_lock(const frame& fr, Location location) {
  assert(location.is_stack(), "for now we only look at the stack");
  int word_offset = location.stack_offset() / wordSize;
  // (stack picture)
  // high: [   ]  word_offset + 1
  // low   [   ]  word_offset
  //
  //       [   ]  0
  // sp->
  // the word_offset is the distance from the stack pointer to the lowest address
  // The frame's original stack pointer, before any extension by its callee
  // (due to Compiler1 linkage on SPARC), must be used.
  return (BasicLock*) (fr.unextended_sp() + word_offset);
}


#ifndef PRODUCT

// Debug printing: show the stack value according to its stored type tag.
void StackValue::print_on(outputStream* st) const {
  switch(_type) {
    case T_INT:
      // Print the same 32-bit payload reinterpreted three ways.
      st->print("%d (int) %f (float) %x (hex)", *(int *)&_integer_value, *(float *)&_integer_value, *(int *)&_integer_value);
      break;

    case T_OBJECT:
      if (_handle_value() != nullptr) {
        _handle_value()->print_value_on(st);
      } else {
        st->print("null");
      }
      st->print(" <" INTPTR_FORMAT ">", p2i(_handle_value()));
      break;

    case T_CONFLICT:
      st->print("conflict");
      break;

    default:
      ShouldNotReachHere();
  }
}

#endif