/*
 * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/ciInlineKlass.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/gc_globals.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
#include "opto/convertnode.hpp"
#include "opto/graphKit.hpp"
#include "opto/idealKit.hpp"
#include "opto/inlinetypenode.hpp"
#include "opto/movenode.hpp"
#include "opto/narrowptrnode.hpp"
#include "opto/rootnode.hpp"
#include "opto/phaseX.hpp"

// Clones the inline type to handle control flow merges involving multiple inline types.
// The inputs are replaced by PhiNodes to represent the merged values for the given region.
InlineTypeNode* InlineTypeNode::clone_with_phis(PhaseGVN* gvn, Node* region, SafePointNode* map, bool is_init) {
  InlineTypeNode* vt = clone_if_required(gvn, map);
  const Type* t = Type::get_const_type(inline_klass());
  gvn->set_type(vt, t);
  vt->as_InlineType()->set_type(t);

  // Create a PhiNode for merging the oop values
  PhiNode* oop = PhiNode::make(region, vt->get_oop(), t);
  gvn->set_type(oop, t);
  gvn->record_for_igvn(oop);
  vt->set_oop(*gvn, oop);

  // Create a PhiNode for merging the is_buffered values
  t = Type::get_const_basic_type(T_BOOLEAN);
  Node* is_buffered_node = PhiNode::make(region, vt->get_is_buffered(), t);
  gvn->set_type(is_buffered_node, t);
  gvn->record_for_igvn(is_buffered_node);
  vt->set_req(IsBuffered, is_buffered_node);

  // Create a PhiNode for merging the is_init values
  Node* is_init_node;
  if (is_init) {
    is_init_node = gvn->intcon(1);
  } else {
    t = Type::get_const_basic_type(T_BOOLEAN);
    is_init_node = PhiNode::make(region, vt->get_is_init(), t);
    gvn->set_type(is_init_node, t);
    gvn->record_for_igvn(is_init_node);
  }
  vt->set_req(IsInit, is_init_node);

  // Create a PhiNode each for merging the field values
  for (uint i = 0; i < vt->field_count(); ++i) {
    ciType* type = vt->field_type(i);
    Node* value = vt->field_value(i);
    // We limit scalarization for inline types with circular fields and can therefore observe nodes
    // of the same type but with different scalarization depth during GVN. To avoid inconsistencies
    // during merging, make sure that we only create Phis for fields that are guaranteed to be scalarized.
    bool no_circularity = !gvn->C->has_circular_inline_type() || field_is_flat(i);
    if (type->is_inlinetype() && no_circularity) {
      // Handle inline type fields recursively
      value = value->as_InlineType()->clone_with_phis(gvn, region, map);
    } else {
      t = Type::get_const_type(type);
      value = PhiNode::make(region, value, t);
      gvn->set_type(value, t);
      gvn->record_for_igvn(value);
    }
    vt->set_field_value(i, value);
  }
  gvn->record_for_igvn(vt);
  return vt;
}

// Checks if the inputs of the InlineTypeNode were replaced by PhiNodes
// for the given region (see InlineTypeNode::clone_with_phis).
bool InlineTypeNode::has_phi_inputs(Node* region) {
  // Check oop input
  bool result = get_oop()->is_Phi() && get_oop()->as_Phi()->region() == region;
#ifdef ASSERT
  if (result) {
    // Check all field value inputs for consistency
    for (uint i = Values; i < field_count(); ++i) {
      Node* n = in(i);
      if (n->is_InlineType()) {
        assert(n->as_InlineType()->has_phi_inputs(region), "inconsistent phi inputs");
      } else {
        assert(n->is_Phi() && n->as_Phi()->region() == region, "inconsistent phi inputs");
      }
    }
  }
#endif
  return result;
}

// Merges 'this' with 'other' by updating the input PhiNodes added by 'clone_with_phis'
InlineTypeNode* InlineTypeNode::merge_with(PhaseGVN* gvn, const InlineTypeNode* other, int pnum, bool transform) {
  assert(inline_klass() == other->inline_klass(), "Merging incompatible types");

  // Merge oop inputs
  PhiNode* phi = get_oop()->as_Phi();
  phi->set_req(pnum, other->get_oop());
  if (transform) {
    set_oop(*gvn, gvn->transform(phi));
  }

  // Merge is_buffered inputs
  phi = get_is_buffered()->as_Phi();
  phi->set_req(pnum, other->get_is_buffered());
  if (transform) {
    set_req(IsBuffered, gvn->transform(phi));
  }

  // Merge is_init inputs
  Node* is_init = get_is_init();
  if (is_init->is_Phi()) {
    phi = is_init->as_Phi();
    phi->set_req(pnum, other->get_is_init());
    if (transform) {
      set_req(IsInit, gvn->transform(phi));
    }
  } else {
    assert(is_init->find_int_con(0) == 1, "only with a non null inline type");
  }

  // Merge field values
  for (uint i = 0; i < field_count(); ++i) {
    Node* val1 = field_value(i);
    Node* val2 = other->field_value(i);
    if (val1->is_InlineType()) {
      if (val2->is_Phi()) {
        val2 = gvn->transform(val2);
      }
      val1->as_InlineType()->merge_with(gvn, val2->as_InlineType(), pnum, transform);
    } else {
      assert(val1->is_Phi(), "must be a phi node");
      val1->set_req(pnum, val2);
    }
    if (transform) {
      set_field_value(i, gvn->transform(val1));
    }
  }
  return this;
}

// Adds a new merge path to an inline type node with phi inputs
void InlineTypeNode::add_new_path(Node* region) {
  assert(has_phi_inputs(region), "must have phi inputs");

  PhiNode* phi = get_oop()->as_Phi();
  phi->add_req(nullptr);
  assert(phi->req() == region->req(), "must be same size as region");

  phi = get_is_buffered()->as_Phi();
  phi->add_req(nullptr);
  assert(phi->req() == region->req(), "must be same size as region");

  phi = get_is_init()->as_Phi();
  phi->add_req(nullptr);
  assert(phi->req() == region->req(), "must be same size as region");

  for (uint i = 0; i < field_count(); ++i) {
    Node* val = field_value(i);
    if (val->is_InlineType()) {
      val->as_InlineType()->add_new_path(region);
    } else {
      val->as_Phi()->add_req(nullptr);
      assert(val->req() == region->req(), "must be same size as region");
    }
  }
}

Node* InlineTypeNode::field_value(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return in(Values + index);
}

// Get the value of the null marker at the given offset.
Node* InlineTypeNode::null_marker_by_offset(int offset, int holder_offset) const {
  // Search through the null markers of all flat fields
  for (uint i = 0; i < field_count(); ++i) {
    if (field_is_flat(i)) {
      InlineTypeNode* value = field_value(i)->as_InlineType();
      if (!field_is_null_free(i)) {
        int nm_offset = holder_offset + field_null_marker_offset(i);
        if (nm_offset == offset) {
          return value->get_is_init();
        }
      }
      int flat_holder_offset = holder_offset + field_offset(i) - value->inline_klass()->payload_offset();
      Node* nm_value = value->null_marker_by_offset(offset, flat_holder_offset);
      if (nm_value != nullptr) {
        return nm_value;
      }
    }
  }
  return nullptr;
}

// Get the value of the field at the given offset.
// If 'recursive' is true, flat inline type fields will be resolved recursively.
Node* InlineTypeNode::field_value_by_offset(int offset, bool recursive, bool search_null_marker) const {
  // First check if we are loading a null marker which is not a real field
  if (recursive && search_null_marker) {
    Node* value = null_marker_by_offset(offset);
    if (value != nullptr) {
      return value;
    }
  }

  // If the field at 'offset' belongs to a flat inline type field, 'index' refers to the
  // corresponding InlineTypeNode input and 'sub_offset' is the offset in the flattened inline type.
  int index = inline_klass()->field_index_by_offset(offset);
  int sub_offset = offset - field_offset(index);
  Node* value = field_value(index);
  assert(value != nullptr, "field value not found");
  if (recursive && value->is_InlineType()) {
    if (field_is_flat(index)) {
      // Flat inline type field
      InlineTypeNode* vt = value->as_InlineType();
      sub_offset += vt->inline_klass()->payload_offset(); // Add header size
      return vt->field_value_by_offset(sub_offset, recursive, false);
    } else {
      assert(sub_offset == 0, "should not have a sub offset");
      return value;
    }
  }
  assert(!(recursive && value->is_InlineType()), "should not be an inline type");
  assert(sub_offset == 0, "offset mismatch");
  return value;
}

void InlineTypeNode::set_field_value(uint index, Node* value) {
  assert(index < field_count(), "index out of bounds");
  set_req(Values + index, value);
}

void InlineTypeNode::set_field_value_by_offset(int offset, Node* value) {
  set_field_value(field_index(offset), value);
}

int InlineTypeNode::field_offset(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return inline_klass()->declared_nonstatic_field_at(index)->offset_in_bytes();
}

uint InlineTypeNode::field_index(int offset) const {
  uint i = 0;
  for (; i < field_count() && field_offset(i) != offset; i++) { }
  assert(i < field_count(), "field not found");
  return i;
}

ciType* InlineTypeNode::field_type(uint index) const {
  assert(index < field_count(), "index out of bounds");
  return inline_klass()->declared_nonstatic_field_at(index)->type();
}

bool InlineTypeNode::field_is_flat(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_flat();
}

bool InlineTypeNode::field_is_null_free(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_null_free();
}

bool InlineTypeNode::field_is_volatile(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(!field->is_flat() || field->type()->is_inlinetype(), "must be an inline type");
  return field->is_volatile();
}

int InlineTypeNode::field_null_marker_offset(uint index) const {
  assert(index < field_count(), "index out of bounds");
  ciField* field = inline_klass()->declared_nonstatic_field_at(index);
  assert(field->is_flat(), "must be an inline type");
  return field->null_marker_offset();
}

uint InlineTypeNode::add_fields_to_safepoint(Unique_Node_List& worklist, Node_List& null_markers, SafePointNode* sfpt) {
  uint cnt = 0;
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    if (field_is_flat(i)) {
      InlineTypeNode* vt = value->as_InlineType();
      cnt += vt->add_fields_to_safepoint(worklist, null_markers, sfpt);
      if (!field_is_null_free(i)) {
        null_markers.push(vt->get_is_init());
        cnt++;
      }
      continue;
    }
    if (value->is_InlineType()) {
      // Add inline type to the worklist to process later
      worklist.push(value);
    }
    sfpt->add_req(value);
    cnt++;
  }
  return cnt;
}

void InlineTypeNode::make_scalar_in_safepoint(PhaseIterGVN* igvn, Unique_Node_List& worklist, SafePointNode* sfpt) {
  // We should not scalarize larvals in debug info of their constructor calls because their fields could still be
  // updated. If we scalarize and update the fields in the constructor, the updates won't be visible in the caller after
  // deoptimization because the scalarized field values are local to the caller. We need to use a buffer to make the
  // updates visible to the outside.
  if (is_larval() && sfpt->is_CallJava() && sfpt->as_CallJava()->method() != nullptr &&
      sfpt->as_CallJava()->method()->is_object_constructor() && bottom_type()->is_inlinetypeptr() &&
      sfpt->in(TypeFunc::Parms) == this) {
    // Receiver is always buffered because it's passed as oop, see special case in CompiledEntrySignature::compute_calling_conventions().
    assert(is_allocated(igvn), "receiver must be allocated");
    return;
  }

  JVMState* jvms = sfpt->jvms();
  assert(jvms != nullptr, "missing JVMS");
  uint first_ind = (sfpt->req() - jvms->scloff());

  // Iterate over the inline type fields in order of increasing offset and add the
  // field values to the safepoint. Nullable inline types have an IsInit field that
  // needs to be checked before using the field values.
  const TypeInt* tinit = igvn->type(get_is_init())->isa_int();
  if (tinit != nullptr && !tinit->is_con(1)) {
    sfpt->add_req(get_is_init());
  } else {
    sfpt->add_req(igvn->C->top());
  }
  Node_List null_markers;
  uint nfields = add_fields_to_safepoint(worklist, null_markers, sfpt);
  // Add null markers after the field values
  for (uint i = 0; i < null_markers.size(); ++i) {
    sfpt->add_req(null_markers.at(i));
  }
  jvms->set_endoff(sfpt->req());
  // Replace safepoint edge by SafePointScalarObjectNode
  SafePointScalarObjectNode* sobj = new SafePointScalarObjectNode(type()->isa_instptr(),
                                                                  nullptr,
                                                                  first_ind,
                                                                  sfpt->jvms()->depth(),
                                                                  nfields);
  sobj->init_req(0, igvn->C->root());
  sobj = igvn->transform(sobj)->as_SafePointScalarObject();
  igvn->rehash_node_delayed(sfpt);
  for (uint i = jvms->debug_start(); i < jvms->debug_end(); i++) {
    Node* debug = sfpt->in(i);
    if (debug != nullptr && debug->uncast() == this) {
      sfpt->set_req(i, sobj);
    }
  }
}

void InlineTypeNode::make_scalar_in_safepoints(PhaseIterGVN* igvn, bool allow_oop) {
  // If the inline type has a constant or loaded oop, use the oop instead of scalarization
  // in the safepoint to avoid keeping field loads live just for the debug info.
  Node* oop = get_oop();
  bool use_oop = false;
  if (allow_oop && is_allocated(igvn) && oop->is_Phi()) {
    Unique_Node_List worklist;
    VectorSet visited;
    visited.set(oop->_idx);
    worklist.push(oop);
    use_oop = true;
    while (worklist.size() > 0 && use_oop) {
      Node* n = worklist.pop();
      for (uint i = 1; i < n->req(); i++) {
        Node* in = n->in(i);
        if (in->is_Phi() && !visited.test_set(in->_idx)) {
          worklist.push(in);
        } else if (!(in->is_Con() || in->is_Parm())) {
          use_oop = false;
          break;
        }
      }
    }
  } else {
    use_oop = allow_oop && is_allocated(igvn) &&
              (oop->is_Con() || oop->is_Parm() || oop->is_Load() || (oop->isa_DecodeN() && oop->in(1)->is_Load()));
  }

  ResourceMark rm;
  Unique_Node_List safepoints;
  Unique_Node_List vt_worklist;
  Unique_Node_List worklist;
  worklist.push(this);
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
      Node* use = n->fast_out(i);
      if (use->is_SafePoint() && !use->is_CallLeaf() && (!use->is_Call() || use->as_Call()->has_debug_use(n))) {
        safepoints.push(use);
      } else if (use->is_ConstraintCast()) {
        worklist.push(use);
      }
    }
  }

  // Process all safepoint uses and scalarize inline type
  while (safepoints.size() > 0) {
    SafePointNode* sfpt = safepoints.pop()->as_SafePoint();
    if (use_oop) {
      for (uint i = sfpt->jvms()->debug_start(); i < sfpt->jvms()->debug_end(); i++) {
        Node* debug = sfpt->in(i);
        if (debug != nullptr && debug->uncast() == this) {
          sfpt->set_req(i, get_oop());
        }
      }
      igvn->rehash_node_delayed(sfpt);
    } else {
      make_scalar_in_safepoint(igvn, vt_worklist, sfpt);
    }
  }
  // Now scalarize non-flat fields
  for (uint i = 0; i < vt_worklist.size(); ++i) {
    InlineTypeNode* vt = vt_worklist.at(i)->isa_InlineType();
    vt->make_scalar_in_safepoints(igvn);
  }
  if (outcnt() == 0) {
    igvn->record_for_igvn(this);
  }
}

const TypePtr* InlineTypeNode::field_adr_type(Node* base, int offset, ciInstanceKlass* holder, DecoratorSet decorators, PhaseGVN& gvn) const {
  const TypeAryPtr* ary_type = gvn.type(base)->isa_aryptr();
  const TypePtr* adr_type = nullptr;
  bool is_array = ary_type != nullptr;
  if ((decorators & C2_MISMATCHED) != 0) {
    adr_type = TypeRawPtr::BOTTOM;
  } else if (is_array) {
    // In the case of a flat inline type array, each field has its own slice
    adr_type = ary_type->with_field_offset(offset)->add_offset(Type::OffsetBot);
  } else {
    ciField* field = holder->get_field_by_offset(offset, false);
    assert(field != nullptr, "field not found");
    adr_type = gvn.C->alias_type(field)->adr_type();
  }
  return adr_type;
}

// We limit scalarization for inline types with circular fields and can therefore observe nodes
// of the same type but with different scalarization depth during GVN. This method adjusts the
// scalarization depth to avoid inconsistencies during merging.
InlineTypeNode* InlineTypeNode::adjust_scalarization_depth(GraphKit* kit) {
  if (!kit->C->has_circular_inline_type()) {
    return this;
  }
  GrowableArray<ciType*> visited;
  visited.push(inline_klass());
  return adjust_scalarization_depth_impl(kit, visited);
}

InlineTypeNode* InlineTypeNode::adjust_scalarization_depth_impl(GraphKit* kit, GrowableArray<ciType*>& visited) {
  InlineTypeNode* val = this;
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    Node* new_value = value;
    ciType* ft = field_type(i);
    if (value->is_InlineType()) {
      if (!field_is_flat(i) && visited.contains(ft)) {
        new_value = value->as_InlineType()->buffer(kit)->get_oop();
      } else {
        int old_len = visited.length();
        visited.push(ft);
        new_value = value->as_InlineType()->adjust_scalarization_depth_impl(kit, visited);
        visited.trunc_to(old_len);
      }
    } else if (ft->is_inlinetype() && !visited.contains(ft)) {
      int old_len = visited.length();
      visited.push(ft);
      new_value = make_from_oop_impl(kit, value, ft->as_inline_klass(), visited);
      visited.trunc_to(old_len);
    }
    if (value != new_value) {
      if (val == this) {
        val = clone_if_required(&kit->gvn(), kit->map());
      }
      val->set_field_value(i, new_value);
    }
  }
  return (val == this) ? this : kit->gvn().transform(val)->as_InlineType();
}

void InlineTypeNode::load(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, GrowableArray<ciType*>& visited, int holder_offset, DecoratorSet decorators) {
  // Initialize the inline type by loading its field values from
  // memory and adding the values as input edges to the node.
  for (uint i = 0; i < field_count(); ++i) {
    int offset = holder_offset + field_offset(i);
    Node* value = nullptr;
    ciType* ft = field_type(i);
    bool null_free = field_is_null_free(i);
    if (null_free && ft->as_inline_klass()->is_empty()) {
      // Loading from a field of an empty inline type. Just return the all-zero instance.
      value = make_all_zero_impl(kit->gvn(), ft->as_inline_klass(), visited);
    } else if (field_is_flat(i)) {
      // Recursively load the flat inline type field
      int nm_offset = null_free ? -1 : (holder_offset + field_null_marker_offset(i));
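      // Illustrative note on the convention assumed here (mirrored by make_from_flat_impl below):
      // an 'nm_offset' of -1 signals a null-free flat field, i.e. there is no separate null
      // marker to load for it; otherwise nm_offset is the absolute offset of the null marker.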
      value = make_from_flat_impl(kit, ft->as_inline_klass(), base, ptr, nullptr, holder, offset, /* atomic */ false, nm_offset, decorators, visited);
    } else {
      const TypeOopPtr* oop_ptr = kit->gvn().type(base)->isa_oopptr();
      bool is_array = (oop_ptr->isa_aryptr() != nullptr);
      bool mismatched = (decorators & C2_MISMATCHED) != 0;
      if (base->is_Con() && oop_ptr->is_inlinetypeptr() && !is_array && !mismatched) {
        // If the oop to the inline type is constant (static final field), we can
        // also treat the fields as constants because the inline type is immutable.
        ciObject* constant_oop = oop_ptr->const_oop();
        ciField* field = holder->get_field_by_offset(offset, false);
        assert(field != nullptr, "field not found");
        ciConstant constant = constant_oop->as_instance()->field_value(field);
        const Type* con_type = Type::make_from_constant(constant, /*require_const=*/ true);
        assert(con_type != nullptr, "type not found");
        value = kit->gvn().transform(kit->makecon(con_type));
        // Check type of constant which might be more precise than the static field type
        if (con_type->is_inlinetypeptr() && !con_type->is_zero_type()) {
          ft = con_type->inline_klass();
        }
      } else {
        // Load field value from memory
        const TypePtr* adr_type = field_adr_type(base, offset, holder, decorators, kit->gvn());
        Node* adr = kit->basic_plus_adr(base, ptr, offset);
        BasicType bt = type2field[ft->basic_type()];
        assert(is_java_primitive(bt) || adr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
        const Type* val_type = Type::get_const_type(ft);
        if (null_free) {
          val_type = val_type->join_speculative(TypePtr::NOTNULL);
        }
        value = kit->access_load_at(base, adr, adr_type, val_type, bt, is_array ? (decorators | IS_ARRAY) : decorators);
      }
      // Loading a non-flattened inline type from memory
      if (visited.contains(ft)) {
        kit->C->set_has_circular_inline_type(true);
      } else if (ft->is_inlinetype()) {
        int old_len = visited.length();
        visited.push(ft);
        value = make_from_oop_impl(kit, value, ft->as_inline_klass(), visited);
        visited.trunc_to(old_len);
      }
    }
    set_field_value(i, value);
  }
}

// Get a field value from the payload by shifting it according to the offset
static Node* get_payload_value(PhaseGVN* gvn, Node* payload, BasicType bt, BasicType val_bt, int offset) {
  // Shift to the right position in the long value
  assert((offset + type2aelembytes(val_bt)) <= type2aelembytes(bt), "Value does not fit into payload");
  Node* value = nullptr;
  Node* shift_val = gvn->intcon(offset << LogBitsPerByte);
  if (bt == T_LONG) {
    value = gvn->transform(new URShiftLNode(payload, shift_val));
    value = gvn->transform(new ConvL2INode(value));
  } else {
    value = gvn->transform(new URShiftINode(payload, shift_val));
  }

  if (val_bt == T_INT || val_bt == T_OBJECT || val_bt == T_ARRAY) {
    return value;
  } else {
    // Make sure to zero unused bits in the 32-bit value
    return Compile::narrow_value(val_bt, value, nullptr, gvn, true);
  }
}

// Convert a payload value to field values
void InlineTypeNode::convert_from_payload(GraphKit* kit, BasicType bt, Node* payload, int holder_offset, bool null_free, int null_marker_offset) {
  PhaseGVN* gvn = &kit->gvn();
  Node* value = nullptr;
  if (!null_free) {
    // Get the null marker
    value = get_payload_value(gvn, payload, bt, T_BOOLEAN, null_marker_offset);
    set_req(IsInit, value);
  }
  // Iterate over the fields and get their values from the payload
  for (uint i = 0; i < field_count(); ++i) {
    ciType* ft = field_type(i);
    bool field_null_free = field_is_null_free(i);
    int offset = holder_offset + field_offset(i) - inline_klass()->payload_offset();
    if (field_is_flat(i)) {
      null_marker_offset = holder_offset + field_null_marker_offset(i) - inline_klass()->payload_offset();
      InlineTypeNode* vt = make_uninitialized(*gvn, ft->as_inline_klass(), field_null_free);
      vt->convert_from_payload(kit, bt, payload, offset, field_null_free, null_marker_offset);
      value = gvn->transform(vt);
    } else {
      value = get_payload_value(gvn, payload, bt, ft->basic_type(), offset);
      if (!ft->is_primitive_type()) {
        // Narrow oop field
        assert(UseCompressedOops && bt == T_LONG, "Naturally atomic");
        const Type* val_type = Type::get_const_type(ft);
        if (field_null_free) {
          val_type = val_type->join_speculative(TypePtr::NOTNULL);
        }
        value = gvn->transform(new CastI2NNode(kit->control(), value));
        value = gvn->transform(new DecodeNNode(value, val_type->make_narrowoop()));
        value = gvn->transform(new CastPPNode(kit->control(), value, val_type, ConstraintCastNode::UnconditionalDependency));

        // Similar to CheckCastPP nodes with raw input, CastI2N nodes require special handling in 'PhaseCFG::schedule_late' to ensure the
        // register allocator does not move the CastI2N below a safepoint. This is necessary to avoid having the raw pointer span a safepoint,
        // making it opaque to the GC. Unlike CheckCastPPs, which need extra handling in 'Scheduling::ComputeRegisterAntidependencies' due to
        // scalarization, CastI2N nodes are always used by a load if scalarization happens which inherently keeps them pinned above the safepoint.

        if (ft->is_inlinetype()) {
          GrowableArray<ciType*> visited;
          value = make_from_oop_impl(kit, value, ft->as_inline_klass(), visited);
        }
      }
    }
    set_field_value(i, value);
  }
}

// Set a field value in the payload by shifting it according to the offset
static Node* set_payload_value(PhaseGVN* gvn, Node* payload, BasicType bt, Node* value, BasicType val_bt, int offset) {
  assert((offset + type2aelembytes(val_bt)) <= type2aelembytes(bt), "Value does not fit into payload");

  // Make sure to zero unused bits in the 32-bit value
  if (val_bt == T_BYTE || val_bt == T_BOOLEAN) {
    value = gvn->transform(new AndINode(value, gvn->intcon(0xFF)));
  } else if (val_bt == T_CHAR || val_bt == T_SHORT) {
    value = gvn->transform(new AndINode(value, gvn->intcon(0xFFFF)));
  } else if (val_bt == T_FLOAT) {
    value = gvn->transform(new MoveF2INode(value));
  } else {
    assert(val_bt == T_INT, "Unsupported type: %s", type2name(val_bt));
  }

  Node* shift_val = gvn->intcon(offset << LogBitsPerByte);
  if (bt == T_LONG) {
    // Convert to long and remove the sign bit (the backend will fold this and emit a zero extend i2l)
    value = gvn->transform(new ConvI2LNode(value));
    value = gvn->transform(new AndLNode(value, gvn->longcon(0xFFFFFFFF)));

    Node* shift_value = gvn->transform(new LShiftLNode(value, shift_val));
    payload = new OrLNode(shift_value, payload);
  } else {
    Node* shift_value = gvn->transform(new LShiftINode(value, shift_val));
    payload = new OrINode(shift_value, payload);
  }
  return gvn->transform(payload);
}

// Convert the field values to a payload value of type 'bt'
Node* InlineTypeNode::convert_to_payload(GraphKit* kit, BasicType bt, Node* payload, int holder_offset, bool null_free, int null_marker_offset, int& oop_off_1, int& oop_off_2) const {
  PhaseGVN* gvn = &kit->gvn();
  Node* value = nullptr;
  if (!null_free) {
    // Set the null marker
    value = get_is_init();
    payload = set_payload_value(gvn, payload, bt, value, T_BOOLEAN, null_marker_offset);
  }
  // Iterate over the fields and add their values to the payload
  for (uint i = 0; i < field_count(); ++i) {
    value = field_value(i);
    int inner_offset = field_offset(i) - inline_klass()->payload_offset();
    int offset = holder_offset + inner_offset;
    if (field_is_flat(i)) {
      null_marker_offset = holder_offset + field_null_marker_offset(i) - inline_klass()->payload_offset();
      payload = value->as_InlineType()->convert_to_payload(kit, bt, payload, offset, field_is_null_free(i), null_marker_offset, oop_off_1, oop_off_2);
    } else {
      ciType* ft = field_type(i);
      BasicType field_bt = ft->basic_type();
      if (!ft->is_primitive_type()) {
        // Narrow oop field
        assert(UseCompressedOops && bt == T_LONG, "Naturally atomic");
        assert(inner_offset != -1, "sanity");
        if (oop_off_1 == -1) {
          oop_off_1 = inner_offset;
        } else {
          assert(oop_off_2 == -1, "already set");
          oop_off_2 = inner_offset;
        }
        const Type* val_type = Type::get_const_type(ft)->make_narrowoop();
        if (value->is_InlineType()) {
          PreserveReexecuteState preexecs(kit);
          kit->jvms()->set_should_reexecute(true);
          value = value->as_InlineType()->buffer(kit, false);
        }
        value = gvn->transform(new EncodePNode(value, val_type));
        value = gvn->transform(new CastP2XNode(kit->control(), value));
        value = gvn->transform(new ConvL2INode(value));
        field_bt = T_INT;
      }
      payload = set_payload_value(gvn, payload, bt, value, field_bt, offset);
    }
  }
  return payload;
}

void InlineTypeNode::store_flat(GraphKit* kit, Node* base, Node* ptr, Node* idx, ciInstanceKlass* holder, int holder_offset, bool atomic, int null_marker_offset, DecoratorSet decorators) const {
  if (kit->gvn().type(base)->isa_aryptr()) {
    kit->C->set_flat_accesses();
  }
  ciInlineKlass* vk = inline_klass();
  bool null_free = (null_marker_offset == -1);

  if (atomic) {
    bool is_array = (kit->gvn().type(base)->isa_aryptr() != nullptr);
#ifdef ASSERT
    bool is_naturally_atomic = (!is_array && vk->is_empty()) || (null_free && vk->nof_declared_nonstatic_fields() == 1);
    assert(!is_naturally_atomic, "No atomic access required");
#endif
    // Convert to a payload value <= 64-bit and write atomically.
    // The payload might contain at most two oop fields that must be narrow because otherwise they would be 64-bit
    // in size and would then be written by a "normal" oop store. If the payload contains oops, its size is always
    // 64-bit because the next smaller (power-of-two) size would be 32-bit which could only hold one narrow oop that
    // would then be written by a normal narrow oop store. These properties are asserted in 'convert_to_payload'.
    BasicType bt = vk->atomic_size_to_basic_type(null_free);
    Node* payload = (bt == T_LONG) ? kit->longcon(0) : kit->intcon(0);
    int oop_off_1 = -1;
    int oop_off_2 = -1;
    payload = convert_to_payload(kit, bt, payload, 0, null_free, null_marker_offset - holder_offset, oop_off_1, oop_off_2);

    if (!UseG1GC || oop_off_1 == -1) {
      // No oop fields or no late barrier expansion. Emit an atomic store of the payload and add GC barriers if needed.
      assert(oop_off_2 == -1 || !UseG1GC, "sanity");
      // ZGC does not support compressed oops, so only one oop can be in the payload which is written by a "normal" oop store.
      assert((oop_off_1 == -1 && oop_off_2 == -1) || !UseZGC, "ZGC does not support embedded oops in flat fields");
      const Type* val_type = Type::get_const_basic_type(bt);

      if (!is_array) {
        Node* adr = kit->basic_plus_adr(base, ptr, holder_offset);
        kit->insert_mem_bar(Op_MemBarCPUOrder);
        kit->access_store_at(base, adr, TypeRawPtr::BOTTOM, payload, val_type, bt, decorators | C2_MISMATCHED | (is_array ? IS_ARRAY : 0), true, this);
        kit->insert_mem_bar(Op_MemBarCPUOrder);
      } else {
        assert(holder_offset == 0, "sanity");

        RegionNode* region = new RegionNode(3);
        kit->gvn().set_type(region, Type::CONTROL);
        kit->record_for_igvn(region);

        Node* bol = kit->null_free_array_test(base); // Argument evaluation order is undefined in C++ and since this sets control, it needs to come first
        IfNode* iff = kit->create_and_map_if(kit->control(), bol, PROB_FAIR, COUNT_UNKNOWN);

        Node* input_memory_state = kit->reset_memory();
        kit->set_all_memory(input_memory_state);

        Node* mem = PhiNode::make(region, input_memory_state, Type::MEMORY, TypePtr::BOTTOM);
        kit->gvn().set_type(mem, Type::MEMORY);
        kit->record_for_igvn(mem);

        PhiNode* io = PhiNode::make(region, kit->i_o(), Type::ABIO);
        kit->gvn().set_type(io, Type::ABIO);
        kit->record_for_igvn(io);

        // Nullable
        kit->set_control(kit->IfFalse(iff));
        if (!kit->stopped()) {
          assert(!null_free && vk->has_nullable_atomic_layout(), "Flat array can't be nullable");
          kit->insert_mem_bar(Op_MemBarCPUOrder);
          kit->access_store_at(base, ptr, TypeRawPtr::BOTTOM, payload, val_type, bt, decorators | C2_MISMATCHED | (is_array ? IS_ARRAY : 0), true, this);
          kit->insert_mem_bar(Op_MemBarCPUOrder);
          mem->init_req(1, kit->reset_memory());
          io->init_req(1, kit->i_o());
        }
        region->init_req(1, kit->control());

        // Null-free
        kit->set_control(kit->IfTrue(iff));
        if (!kit->stopped()) {
          kit->set_all_memory(input_memory_state);

          // Check if it's atomic
          RegionNode* region_null_free = new RegionNode(3);
          kit->gvn().set_type(region_null_free, Type::CONTROL);
          kit->record_for_igvn(region_null_free);

          Node* mem_null_free = PhiNode::make(region_null_free, input_memory_state, Type::MEMORY, TypePtr::BOTTOM);
          kit->gvn().set_type(mem_null_free, Type::MEMORY);
          kit->record_for_igvn(mem_null_free);

          PhiNode* io_null_free = PhiNode::make(region_null_free, kit->i_o(), Type::ABIO);
          kit->gvn().set_type(io_null_free, Type::ABIO);
          kit->record_for_igvn(io_null_free);

          Node* bol = kit->null_free_atomic_array_test(base, vk);
          IfNode* iff = kit->create_and_map_if(kit->control(), bol, PROB_FAIR, COUNT_UNKNOWN);

          // Atomic
          kit->set_control(kit->IfTrue(iff));
          if (!kit->stopped()) {
            BasicType bt_null_free = vk->atomic_size_to_basic_type(/* null_free */ true);
            const Type* val_type_null_free = Type::get_const_basic_type(bt_null_free);
            kit->set_all_memory(input_memory_state);

            if (bt == T_LONG && bt_null_free != T_LONG) {
              payload = kit->gvn().transform(new ConvL2INode(payload));
            }

            Node* cast = base;
            Node* adr = kit->flat_array_element_address(cast, idx, vk, /* null_free */ true, /* not_null_free */ false, /* atomic */ true);
            kit->insert_mem_bar(Op_MemBarCPUOrder);
            kit->access_store_at(cast, adr, TypeRawPtr::BOTTOM, payload, val_type_null_free, bt_null_free, decorators | C2_MISMATCHED | (is_array ? IS_ARRAY : 0), true, this);
            kit->insert_mem_bar(Op_MemBarCPUOrder);
            mem_null_free->init_req(1, kit->reset_memory());
            io_null_free->init_req(1, kit->i_o());
          }
          region_null_free->init_req(1, kit->control());

          // Non-Atomic
          kit->set_control(kit->IfFalse(iff));
          if (!kit->stopped()) {
            kit->set_all_memory(input_memory_state);

            Node* cast = base;
            Node* adr = kit->flat_array_element_address(cast, idx, vk, /* null_free */ true, /* not_null_free */ false, /* atomic */ false);
            store(kit, cast, adr, holder, holder_offset - vk->payload_offset(), -1, decorators);
            mem_null_free->init_req(2, kit->reset_memory());
            io_null_free->init_req(2, kit->i_o());
          }
          region_null_free->init_req(2, kit->control());

          mem->init_req(2, kit->gvn().transform(mem_null_free));
          io->init_req(2, kit->gvn().transform(io_null_free));
          region->init_req(2, kit->gvn().transform(region_null_free));
        }

        kit->set_control(kit->gvn().transform(region));
        kit->set_all_memory(kit->gvn().transform(mem));
        kit->set_i_o(kit->gvn().transform(io));
      }
    } else {
      if (oop_off_2 == -1 && UseCompressedOops && vk->nof_declared_nonstatic_fields() == 1) {
        // TODO 8350865 Implement this
        // If null free, it's not a long but an int store. Deoptimize for now.
        BuildCutout unless(kit, kit->null_free_array_test(base, /* null_free = */ false), PROB_MAX);
        kit->uncommon_trap_exact(Deoptimization::Reason_unhandled, Deoptimization::Action_none);
      }

      // Contains oops and requires late barrier expansion. Emit a special store node that allows to emit GC barriers in the backend.
      assert(UseG1GC, "Unexpected GC");
      assert(bt == T_LONG, "Unexpected payload type");
      // If one oop, set the offset (if no offset is set, two oops are assumed by the backend)
      Node* oop_offset = (oop_off_2 == -1) ? kit->intcon(oop_off_1) : nullptr;
      Node* adr = kit->basic_plus_adr(base, ptr, holder_offset);
      kit->insert_mem_bar(Op_MemBarCPUOrder);
      Node* mem = kit->reset_memory();
      kit->set_all_memory(mem);
      Node* st = kit->gvn().transform(new StoreLSpecialNode(kit->control(), mem, adr, TypeRawPtr::BOTTOM, payload, oop_offset, MemNode::unordered));
      kit->set_memory(st, TypeRawPtr::BOTTOM);
      kit->insert_mem_bar(Op_MemBarCPUOrder);
    }
    return;
  }

  // The inline type is embedded into the object without an oop header. Subtract the
  // offset of the first field to account for the missing header when storing the values.
  holder_offset -= vk->payload_offset();

  if (!null_free) {
    bool is_array = (kit->gvn().type(base)->isa_aryptr() != nullptr);
    Node* adr = kit->basic_plus_adr(base, ptr, null_marker_offset);
    kit->access_store_at(base, adr, TypeRawPtr::BOTTOM, get_is_init(), TypeInt::BOOL, T_BOOLEAN, is_array ? (decorators | IS_ARRAY) : decorators);
  }
  store(kit, base, ptr, holder, holder_offset, -1, decorators);
}

void InlineTypeNode::store(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset, int offsetOnly, DecoratorSet decorators) const {
  // Write field values to memory
  for (uint i = 0; i < field_count(); ++i) {
    if (offsetOnly != -1 && offsetOnly != field_offset(i)) continue;
    int offset = holder_offset + field_offset(i);
    Node* value = field_value(i);
    ciType* ft = field_type(i);
    if (field_is_flat(i)) {
      // Recursively store the flat inline type field
      int nm_offset = field_is_null_free(i) ? -1 : (holder_offset + field_null_marker_offset(i));
      value->as_InlineType()->store_flat(kit, base, ptr, nullptr, holder, offset, /* atomic */ false, nm_offset, decorators);
    } else {
      // Store field value to memory
      const TypePtr* adr_type = field_adr_type(base, offset, holder, decorators, kit->gvn());
      Node* adr = kit->basic_plus_adr(base, ptr, offset);
      BasicType bt = type2field[ft->basic_type()];
      assert(is_java_primitive(bt) || adr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent");
      const Type* val_type = Type::get_const_type(ft);
      bool is_array = (kit->gvn().type(base)->isa_aryptr() != nullptr);
      kit->access_store_at(base, adr, adr_type, value, val_type, bt, is_array ? (decorators | IS_ARRAY) : decorators);
    }
  }
}

InlineTypeNode* InlineTypeNode::buffer(GraphKit* kit, bool safe_for_replace, bool must_init) {
  if (kit->gvn().find_int_con(get_is_buffered(), 0) == 1) {
    // Already buffered
    return this;
  }

  // Check if inline type is already buffered
  Node* not_buffered_ctl = kit->top();
  Node* not_null_oop = kit->null_check_oop(get_oop(), &not_buffered_ctl, /* never_see_null = */ false, safe_for_replace);
  if (not_buffered_ctl->is_top()) {
    // Already buffered
    InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map(), safe_for_replace);
    vt->set_is_buffered(kit->gvn());
    vt = kit->gvn().transform(vt)->as_InlineType();
    if (safe_for_replace) {
      kit->replace_in_map(this, vt);
    }
    return vt;
  }
  Node* buffered_ctl = kit->control();
  kit->set_control(not_buffered_ctl);

  // Inline type is not buffered, check if it is null.
  Node* null_ctl = kit->top();
  kit->null_check_common(get_is_init(), T_INT, false, &null_ctl);
  bool null_free = null_ctl->is_top();

  RegionNode* region = new RegionNode(4);
  PhiNode* oop = PhiNode::make(region, not_null_oop, type()->join_speculative(null_free ? TypePtr::NOTNULL : TypePtr::BOTTOM));

  // InlineType is already buffered
  region->init_req(1, buffered_ctl);
  oop->init_req(1, not_null_oop);

  // InlineType is null
  region->init_req(2, null_ctl);
  oop->init_req(2, kit->gvn().zerocon(T_OBJECT));

  PhiNode* io = PhiNode::make(region, kit->i_o(), Type::ABIO);
  PhiNode* mem = PhiNode::make(region, kit->merged_memory(), Type::MEMORY, TypePtr::BOTTOM);

  if (!kit->stopped()) {
    assert(!is_allocated(&kit->gvn()), "already buffered");
    PreserveJVMState pjvms(kit);
    ciInlineKlass* vk = inline_klass();
    // Allocate and initialize buffer, re-execute on deoptimization.
    kit->jvms()->set_bci(kit->bci());
    kit->jvms()->set_should_reexecute(true);
    kit->kill_dead_locals();
    Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
    Node* alloc_oop = kit->new_instance(klass_node, nullptr, nullptr, /* deoptimize_on_exception */ true, this);

    if (must_init) {
      // Either not a larval or a larval receiver on which we are about to invoke an abstract value class constructor
      // or the Object constructor which is not inlined. It is therefore escaping, and we must initialize the buffer
      // because we have not done this, yet, for larvals (see else case).
      store(kit, alloc_oop, alloc_oop, vk);

      // Do not let stores that initialize this buffer be reordered with a subsequent
      // store that would make this buffer accessible by other threads.
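      // Illustrative note (intent inferred from the code below): the MemBarStoreStore is attached
      // to the allocation's raw address projection so that the initializing stores above cannot
      // float below a later store that publishes 'alloc_oop' to another thread.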
      AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop);
      assert(alloc != nullptr, "must have an allocation node");
      kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));
    } else {
      // We do not need to initialize the buffer because a larval could still be updated which will create a new buffer.
      // Once the larval escapes, we will initialize the buffer (must_init set).
      assert(is_larval(), "only larvals can possibly skip the initialization of their buffer");
    }
    oop->init_req(3, alloc_oop);
    region->init_req(3, kit->control());
    io->init_req(3, kit->i_o());
    mem->init_req(3, kit->merged_memory());
  }

  // Update GraphKit
  kit->set_control(kit->gvn().transform(region));
  kit->set_i_o(kit->gvn().transform(io));
  kit->set_all_memory(kit->gvn().transform(mem));
  kit->record_for_igvn(region);
  kit->record_for_igvn(oop);
  kit->record_for_igvn(io);
  kit->record_for_igvn(mem);

  // Use cloned InlineTypeNode to propagate oop from now on
  Node* res_oop = kit->gvn().transform(oop);
  InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map(), safe_for_replace);
  vt->set_oop(kit->gvn(), res_oop);
  vt->set_is_buffered(kit->gvn());
  vt = kit->gvn().transform(vt)->as_InlineType();
  if (safe_for_replace) {
    kit->replace_in_map(this, vt);
  }
  // InlineTypeNode::remove_redundant_allocations piggybacks on split if.
  // Make sure it gets a chance to remove this allocation.
  kit->C->set_has_split_ifs(true);
  return vt;
}

bool InlineTypeNode::is_allocated(PhaseGVN* phase) const {
  if (phase->find_int_con(get_is_buffered(), 0) == 1) {
    return true;
  }
  Node* oop = get_oop();
  const Type* oop_type = (phase != nullptr) ? phase->type(oop) : oop->bottom_type();
  return !oop_type->maybe_null();
}

static void replace_proj(Compile* C, CallNode* call, uint& proj_idx, Node* value, BasicType bt) {
  ProjNode* pn = call->proj_out_or_null(proj_idx);
  if (pn != nullptr) {
    C->gvn_replace_by(pn, value);
    C->initial_gvn()->hash_delete(pn);
    pn->set_req(0, C->top());
  }
  proj_idx += type2size[bt];
}

// When a call returns multiple values, it has several result
// projections, one per field. Replacing the result of the call by an
// inline type node (after late inlining) requires that for each result
// projection, we find the corresponding inline type field.
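// Illustration with a hypothetical value class (not taken from these sources): for a scalarized
// return of 'value class Point { int x; int y; }' the projections would be consumed roughly as
//   Parms+0: oop, Parms+1: x, Parms+2: y, Parms+3: is_init
// matching the oop / field / is_init replacement order in replace_call_results() below.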
void InlineTypeNode::replace_call_results(GraphKit* kit, CallNode* call, Compile* C) {
  uint proj_idx = TypeFunc::Parms;
  // Replace oop projection
  replace_proj(C, call, proj_idx, get_oop(), T_OBJECT);
  // Replace field projections
  replace_field_projs(C, call, proj_idx);
  // Replace is_init projection
  replace_proj(C, call, proj_idx, get_is_init(), T_BOOLEAN);
  assert(proj_idx == call->tf()->range_cc()->cnt(), "missed a projection");
}

void InlineTypeNode::replace_field_projs(Compile* C, CallNode* call, uint& proj_idx) {
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    if (field_is_flat(i)) {
      InlineTypeNode* vt = value->as_InlineType();
      // Replace field projections for flat field
      vt->replace_field_projs(C, call, proj_idx);
      if (!field_is_null_free(i)) {
        // Replace is_init projection for nullable field
        replace_proj(C, call, proj_idx, vt->get_is_init(), T_BOOLEAN);
      }
      continue;
    }
    // Replace projection for field value
    replace_proj(C, call, proj_idx, value, field_type(i)->basic_type());
  }
}

Node* InlineTypeNode::allocate_fields(GraphKit* kit) {
  InlineTypeNode* vt = clone_if_required(&kit->gvn(), kit->map());
  for (uint i = 0; i < field_count(); i++) {
    Node* value = field_value(i);
    if (field_is_flat(i)) {
      // Flat inline type field
      vt->set_field_value(i, value->as_InlineType()->allocate_fields(kit));
    } else if (value->is_InlineType()) {
      // Non-flat inline type field
      vt->set_field_value(i, value->as_InlineType()->buffer(kit));
    }
  }
  vt = kit->gvn().transform(vt)->as_InlineType();
  kit->replace_in_map(this, vt);
  return vt;
}

// Replace a buffer allocation by a dominating allocation
static void replace_allocation(PhaseIterGVN* igvn, Node* res, Node* dom) {
  // Remove initializing stores and GC barriers
  for (DUIterator_Fast imax, i = res->fast_outs(imax); i < imax; i++) {
    Node* use = res->fast_out(i);
    if (use->is_AddP()) {
      for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
        Node* store = use->fast_out(j)->isa_Store();
        if (store != nullptr) {
          igvn->rehash_node_delayed(store);
          igvn->replace_in_uses(store, store->in(MemNode::Memory));
        }
      }
    } else if (use->Opcode() == Op_CastP2X) {
      if (UseG1GC && use->find_out_with(Op_XorX)->in(1) != use) {
        // The G1 pre-barrier uses a CastP2X both for the pointer of the object
        // we store into, as well as the value we are storing. Skip if this is a
        // barrier for storing 'res' into another object.
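        // (Assumption behind the check above: the barrier's XorX combines the destination address
        //  as its first input with the stored value, so when 'use' is not that first input, 'res'
        //  is the value being stored here rather than the object being written to.)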
        continue;
      }
      BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
      bs->eliminate_gc_barrier(igvn, use);
      --i; --imax;
    }
  }
  igvn->replace_node(res, dom);
}

Node* InlineTypeNode::Ideal(PhaseGVN* phase, bool can_reshape) {
  Node* oop = get_oop();
  if (oop->isa_InlineType() && !phase->type(oop)->maybe_null()) {
    InlineTypeNode* vtptr = oop->as_InlineType();
    set_oop(*phase, vtptr->get_oop());
    set_is_buffered(*phase);
    set_is_init(*phase);
    for (uint i = Values; i < vtptr->req(); ++i) {
      set_req(i, vtptr->in(i));
    }
    return this;
  }

  // Use base oop if fields are loaded from memory
  Node* base = is_loaded(phase);
  if (base != nullptr && get_oop() != base && !phase->type(base)->maybe_null()) {
    set_oop(*phase, base);
    assert(is_allocated(phase), "should now be allocated");
    return this;
  }

  if (can_reshape) {
    PhaseIterGVN* igvn = phase->is_IterGVN();
    if (is_allocated(phase)) {
      // Search for and remove re-allocations of this inline type. Ignore scalar replaceable ones,
      // they will be removed anyway and changing the memory chain will confuse other optimizations.
      // This can happen with late inlining when we first allocate an inline type argument
      // but later decide to inline the call after the callee code also triggered allocation.
      for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
        AllocateNode* alloc = fast_out(i)->isa_Allocate();
        if (alloc != nullptr && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
          // Found a re-allocation
          Node* res = alloc->result_cast();
          if (res != nullptr && res->is_CheckCastPP()) {
            // Replace allocation by oop and unlink AllocateNode
            replace_allocation(igvn, res, oop);
            igvn->replace_input_of(alloc, AllocateNode::InlineType, igvn->C->top());
            --i; --imax;
          }
        }
      }
    }
  }

  return nullptr;
}

InlineTypeNode* InlineTypeNode::make_uninitialized(PhaseGVN& gvn, ciInlineKlass* vk, bool null_free) {
  // Create a new InlineTypeNode with uninitialized values and nullptr oop
  InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), null_free);
  vt->set_is_buffered(gvn, false);
  vt->set_is_init(gvn);
  return vt;
}

InlineTypeNode* InlineTypeNode::make_all_zero(PhaseGVN& gvn, ciInlineKlass* vk, bool is_larval) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_all_zero_impl(gvn, vk, visited, is_larval);
}

InlineTypeNode* InlineTypeNode::make_all_zero_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited, bool is_larval) {
  // Create a new InlineTypeNode initialized with all zero
  InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), /* null_free= */ true);
  vt->set_is_buffered(gvn, false);
  vt->set_is_init(gvn);
  vt->set_is_larval(is_larval);
  for (uint i = 0; i < vt->field_count(); ++i) {
    ciType* ft = vt->field_type(i);
    Node* value = gvn.zerocon(ft->basic_type());
    if (!vt->field_is_flat(i) && visited.contains(ft)) {
      gvn.C->set_has_circular_inline_type(true);
    } else if (ft->is_inlinetype()) {
      int old_len = visited.length();
      visited.push(ft);
      ciInlineKlass* vk = ft->as_inline_klass();
      if (vt->field_is_null_free(i)) {
        value = make_all_zero_impl(gvn, vk, visited);
      } else {
        value = make_null_impl(gvn, vk, visited);
      }
      visited.trunc_to(old_len);
    }
    vt->set_field_value(i, value);
  }
  vt = gvn.transform(vt)->as_InlineType();
  assert(vt->is_all_zero(&gvn), "must be the all-zero inline type");
  return vt;
}

bool InlineTypeNode::is_all_zero(PhaseGVN* gvn, bool flat) const {
  const TypeInt* tinit = gvn->type(get_is_init())->isa_int();
  if (tinit == nullptr || !tinit->is_con(1)) {
    return false; // May be null
  }
  for (uint i = 0; i < field_count(); ++i) {
    Node* value = field_value(i);
    if (field_is_null_free(i)) {
      // Null-free value class field must have the all-zero value. If 'flat' is set,
      // reject non-flat fields because they need to be initialized with an oop to a buffer.
      if (!value->is_InlineType() || !value->as_InlineType()->is_all_zero(gvn) || (flat && !field_is_flat(i))) {
        return false;
      }
      continue;
    } else if (value->is_InlineType()) {
      // Nullable value class field must be null
      tinit = gvn->type(value->as_InlineType()->get_is_init())->isa_int();
      if (tinit != nullptr && tinit->is_con(0)) {
        continue;
      }
      return false;
    } else if (!gvn->type(value)->is_zero_type()) {
      return false;
    }
  }
  return true;
}

InlineTypeNode* InlineTypeNode::make_from_oop(GraphKit* kit, Node* oop, ciInlineKlass* vk, bool is_larval) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_from_oop_impl(kit, oop, vk, visited, is_larval);
}

InlineTypeNode* InlineTypeNode::make_from_oop_impl(GraphKit* kit, Node* oop, ciInlineKlass* vk, GrowableArray<ciType*>& visited, bool is_larval) {
  PhaseGVN& gvn = kit->gvn();

  // Create and initialize an InlineTypeNode by loading all field
  // values from a heap-allocated version and also save the oop.
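  // Informal summary of the three cases handled below: 'oop' is already an InlineTypeNode
  // (reuse it, cloning only to mark it larval), 'oop' may be null (null-check, load the fields,
  // and merge with the null instance), or 'oop' is known to be non-null (load the fields directly).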
  InlineTypeNode* vt = nullptr;

  if (oop->isa_InlineType()) {
    // TODO 8335256 Re-enable assert and fix OSR code
    // Issue triggers with TestValueConstruction.java and -XX:Tier0BackedgeNotifyFreqLog=0 -XX:Tier2BackedgeNotifyFreqLog=0 -XX:Tier3BackedgeNotifyFreqLog=0 -XX:Tier2BackEdgeThreshold=1 -XX:Tier3BackEdgeThreshold=1 -XX:Tier4BackEdgeThreshold=1 -Xbatch -XX:-TieredCompilation
    // assert(!is_larval || oop->as_InlineType()->is_larval(), "must be larval");
    if (is_larval && !oop->as_InlineType()->is_larval()) {
      vt = oop->clone()->as_InlineType();
      vt->set_is_larval(true);
      return gvn.transform(vt)->as_InlineType();
    }
    return oop->as_InlineType();
  } else if (gvn.type(oop)->maybe_null()) {
    // Add a null check because the oop may be null
    Node* null_ctl = kit->top();
    Node* not_null_oop = kit->null_check_oop(oop, &null_ctl);
    if (kit->stopped()) {
      // Constant null
      kit->set_control(null_ctl);
      vt = make_null_impl(gvn, vk, visited);
      kit->record_for_igvn(vt);
      return vt;
    }
    vt = new InlineTypeNode(vk, not_null_oop, /* null_free= */ false);
    vt->set_is_buffered(gvn);
    vt->set_is_init(gvn);
    vt->set_is_larval(is_larval);
    vt->load(kit, not_null_oop, not_null_oop, vk, visited);

    if (null_ctl != kit->top()) {
      InlineTypeNode* null_vt = make_null_impl(gvn, vk, visited);
      Node* region = new RegionNode(3);
      region->init_req(1, kit->control());
      region->init_req(2, null_ctl);
      vt = vt->clone_with_phis(&gvn, region, kit->map());
      vt->merge_with(&gvn, null_vt, 2, true);
      vt->set_oop(gvn, oop);
      kit->set_control(gvn.transform(region));
    }
  } else {
    // Oop can never be null
    vt = new InlineTypeNode(vk, oop, /* null_free= */ true);
    Node* init_ctl = kit->control();
    vt->set_is_buffered(gvn);
    vt->set_is_init(gvn);
    vt->set_is_larval(is_larval);
    vt->load(kit, oop, oop, vk, visited);
    // TODO 8284443
    // assert(!null_free || vt->as_InlineType()->is_all_zero(&gvn) || init_ctl != kit->control() || !gvn.type(oop)->is_inlinetypeptr() || oop->is_Con() || oop->Opcode() == Op_InlineType ||
    //        AllocateNode::Ideal_allocation(oop, &gvn) != nullptr || vt->as_InlineType()->is_loaded(&gvn) == oop, "inline type should be loaded");
  }
  assert(vt->is_allocated(&gvn), "inline type should be allocated");
  kit->record_for_igvn(vt);
  return gvn.transform(vt)->as_InlineType();
}

InlineTypeNode* InlineTypeNode::make_from_flat(GraphKit* kit, ciInlineKlass* vk, Node* obj, Node* ptr, Node* idx, ciInstanceKlass* holder, int holder_offset,
                                               bool atomic, int null_marker_offset, DecoratorSet decorators) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_from_flat_impl(kit, vk, obj, ptr, idx, holder, holder_offset, atomic, null_marker_offset, decorators, visited);
}

// GraphKit wrapper for the 'make_from_flat' method
InlineTypeNode* InlineTypeNode::make_from_flat_impl(GraphKit* kit, ciInlineKlass* vk, Node* obj, Node* ptr, Node* idx, ciInstanceKlass* holder, int holder_offset,
                                                    bool atomic, int null_marker_offset, DecoratorSet decorators, GrowableArray<ciType*>& visited) {
  if (kit->gvn().type(obj)->isa_aryptr()) {
    kit->C->set_flat_accesses();
  }
  // Create and initialize an InlineTypeNode by loading all field values from
  // a flat inline type field at 'holder_offset' or from an inline type array.
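  // Convention assumed here and in store_flat(): a 'null_marker_offset' of -1 means the flat
  // layout is null-free and there is no null marker to read.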
  bool null_free = (null_marker_offset == -1);
  InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk, null_free);

  if (atomic) {
    // Read atomically and convert from payload
    bool is_array = (kit->gvn().type(obj)->isa_aryptr() != nullptr);
#ifdef ASSERT
    bool is_naturally_atomic = (!is_array && vk->is_empty()) || (null_free && vk->nof_declared_nonstatic_fields() == 1);
    assert(!is_naturally_atomic, "No atomic access required");
#endif
    BasicType bt = vk->atomic_size_to_basic_type(null_free);
    decorators |= C2_MISMATCHED | C2_CONTROL_DEPENDENT_LOAD;
    const Type* val_type = Type::get_const_basic_type(bt);

    Node* payload = nullptr;
    if (!is_array) {
      Node* adr = kit->basic_plus_adr(obj, ptr, holder_offset);
      payload = kit->access_load_at(obj, adr, TypeRawPtr::BOTTOM, val_type, bt, is_array ? (decorators | IS_ARRAY) : decorators, kit->control());
    } else {
      assert(holder_offset == 0, "sanity");

      RegionNode* region = new RegionNode(3);
      kit->gvn().set_type(region, Type::CONTROL);
      kit->record_for_igvn(region);

      payload = PhiNode::make(region, nullptr, val_type);
      kit->gvn().set_type(payload, val_type);
      kit->record_for_igvn(payload);

      Node* input_memory_state = kit->reset_memory();
      kit->set_all_memory(input_memory_state);

      Node* mem = PhiNode::make(region, input_memory_state, Type::MEMORY, TypePtr::BOTTOM);
      kit->gvn().set_type(mem, Type::MEMORY);
      kit->record_for_igvn(mem);

      PhiNode* io = PhiNode::make(region, kit->i_o(), Type::ABIO);
      kit->gvn().set_type(io, Type::ABIO);
      kit->record_for_igvn(io);

      Node* bol = kit->null_free_array_test(obj); // Argument evaluation order is undefined in C++ and since this sets control, it needs to come first
      IfNode* iff = kit->create_and_map_if(kit->control(), bol, PROB_FAIR, COUNT_UNKNOWN);

      kit->set_control(kit->IfFalse(iff));
      region->init_req(1, kit->control());

      // Nullable
      if (!kit->stopped()) {
        assert(!null_free && vk->has_nullable_atomic_layout(), "Flat array can't be nullable");
        Node* load = kit->access_load_at(obj, ptr, TypeRawPtr::BOTTOM, val_type, bt, is_array ? (decorators | IS_ARRAY) : decorators, kit->control());
        payload->init_req(1, load);
        mem->init_req(1, kit->reset_memory());
        io->init_req(1, kit->i_o());
      }

      kit->set_control(kit->IfTrue(iff));

      // Null-free
      if (!kit->stopped()) {
        kit->set_all_memory(input_memory_state);

        // Check if it's atomic
        RegionNode* region_null_free = new RegionNode(3);
        kit->gvn().set_type(region_null_free, Type::CONTROL);
        kit->record_for_igvn(region_null_free);

        Node* payload_null_free = PhiNode::make(region_null_free, nullptr, val_type);
        kit->gvn().set_type(payload_null_free, val_type);
        kit->record_for_igvn(payload_null_free);

        Node* mem_null_free = PhiNode::make(region_null_free, input_memory_state, Type::MEMORY, TypePtr::BOTTOM);
        kit->gvn().set_type(mem_null_free, Type::MEMORY);
        kit->record_for_igvn(mem_null_free);

        PhiNode* io_null_free = PhiNode::make(region_null_free, kit->i_o(), Type::ABIO);
        kit->gvn().set_type(io_null_free, Type::ABIO);
        kit->record_for_igvn(io_null_free);

        bol = kit->null_free_atomic_array_test(obj, vk);
        IfNode* iff = kit->create_and_map_if(kit->control(), bol, PROB_FAIR, COUNT_UNKNOWN);

        // Atomic
        kit->set_control(kit->IfTrue(iff));
        if (!kit->stopped()) {
          BasicType bt_null_free = vk->atomic_size_to_basic_type(/* null_free */ true);
          const Type* val_type_null_free = Type::get_const_basic_type(bt_null_free);
          kit->set_all_memory(input_memory_state);

          Node* cast = obj;
          Node* adr = kit->flat_array_element_address(cast, idx, vk, /* null_free */ true, /* not_null_free */ false, /* atomic */ true);
          Node* load = kit->access_load_at(cast, adr, TypeRawPtr::BOTTOM, val_type_null_free, bt_null_free, is_array ? (decorators | IS_ARRAY) : decorators, kit->control());
          if (bt == T_LONG && bt_null_free != T_LONG) {
            load = kit->gvn().transform(new ConvI2LNode(load));
          }
          // Set the null marker if not known to be null-free
          if (!null_free) {
            load = set_payload_value(&kit->gvn(), load, bt, kit->intcon(1), T_BOOLEAN, null_marker_offset);
          }
          payload_null_free->init_req(1, load);
          mem_null_free->init_req(1, kit->reset_memory());
          io_null_free->init_req(1, kit->i_o());
        }
        region_null_free->init_req(1, kit->control());

        // Non-Atomic
        kit->set_control(kit->IfFalse(iff));
        if (!kit->stopped()) {
          // TODO 8350865 Is the conversion to/from payload folded? We should wire this directly.
          // Also remove the PreserveReexecuteState in Parse::array_load when buffering is no longer possible.
          kit->set_all_memory(input_memory_state);

          InlineTypeNode* vt_atomic = make_uninitialized(kit->gvn(), vk, true);
          Node* cast = obj;
          Node* adr = kit->flat_array_element_address(cast, idx, vk, /* null_free */ true, /* not_null_free */ false, /* atomic */ false);
          vt_atomic->load(kit, cast, adr, holder, visited, holder_offset - vk->payload_offset(), decorators);

          Node* tmp_payload = (bt == T_LONG) ? kit->longcon(0) : kit->intcon(0);
          int oop_off_1 = -1;
          int oop_off_2 = -1;
          tmp_payload = vt_atomic->convert_to_payload(kit, bt, tmp_payload, 0, null_free, null_marker_offset, oop_off_1, oop_off_2);

          payload_null_free->init_req(2, tmp_payload);
          mem_null_free->init_req(2, kit->reset_memory());
          io_null_free->init_req(2, kit->i_o());
        }
        region_null_free->init_req(2, kit->control());

        region->init_req(2, kit->gvn().transform(region_null_free));
        payload->init_req(2, kit->gvn().transform(payload_null_free));
        mem->init_req(2, kit->gvn().transform(mem_null_free));
        io->init_req(2, kit->gvn().transform(io_null_free));
      }

      kit->set_control(kit->gvn().transform(region));
      kit->set_all_memory(kit->gvn().transform(mem));
      kit->set_i_o(kit->gvn().transform(io));
    }

    vt->convert_from_payload(kit, bt, kit->gvn().transform(payload), 0, null_free, null_marker_offset - holder_offset);
    return kit->gvn().transform(vt)->as_InlineType();
  }

  // The inline type is embedded into the object without an oop header. Subtract the
  // offset of the first field to account for the missing header when loading the values.
  holder_offset -= vk->payload_offset();

  if (!null_free) {
    bool is_array = (kit->gvn().type(obj)->isa_aryptr() != nullptr);
    Node* adr = kit->basic_plus_adr(obj, ptr, null_marker_offset);
    Node* nm_value = kit->access_load_at(obj, adr, TypeRawPtr::BOTTOM, TypeInt::BOOL, T_BOOLEAN, is_array ? (decorators | IS_ARRAY) : decorators);
    vt->set_req(IsInit, nm_value);
  }
  vt->load(kit, obj, ptr, holder, visited, holder_offset, decorators);

  assert(vt->is_loaded(&kit->gvn()) != obj, "holder oop should not be used as flattened inline type oop");
  return kit->gvn().transform(vt)->as_InlineType();
}

// Creates an InlineTypeNode whose field values are taken from the inputs (method entry)
// or projections (call return) of 'multi', consuming argument slots starting at 'base_input'.
InlineTypeNode* InlineTypeNode::make_from_multi(GraphKit* kit, MultiNode* multi, ciInlineKlass* vk, uint& base_input, bool in, bool null_free) {
  InlineTypeNode* vt = make_uninitialized(kit->gvn(), vk, null_free);
  if (!in) {
    // Keep track of the oop. The returned inline type might already be buffered.
    Node* oop = kit->gvn().transform(new ProjNode(multi, base_input++));
    vt->set_oop(kit->gvn(), oop);
  }
  GrowableArray<ciType*> visited;
  visited.push(vk);
  vt->initialize_fields(kit, multi, base_input, in, null_free, nullptr, visited);
  return kit->gvn().transform(vt)->as_InlineType();
}

// Returns a copy of this inline type in larval state. If 'allocate' is set, the copy is
// buffered into a new allocation that is marked as larval.
InlineTypeNode* InlineTypeNode::make_larval(GraphKit* kit, bool allocate) const {
  ciInlineKlass* vk = inline_klass();
  InlineTypeNode* res = make_uninitialized(kit->gvn(), vk);
  for (uint i = 1; i < req(); ++i) {
    res->set_req(i, in(i));
  }

  if (allocate) {
    // Re-execute if buffering triggers deoptimization
    PreserveReexecuteState preexecs(kit);
    kit->jvms()->set_should_reexecute(true);
    Node* klass_node = kit->makecon(TypeKlassPtr::make(vk));
    Node* alloc_oop = kit->new_instance(klass_node, nullptr, nullptr, true);
    AllocateNode* alloc = AllocateNode::Ideal_allocation(alloc_oop);
    alloc->_larval = true;

    store(kit, alloc_oop, alloc_oop, vk);
    res->set_oop(kit->gvn(), alloc_oop);
  }
  // TODO 8239003
  //res->set_type(TypeInlineType::make(vk, true));
  res = kit->gvn().transform(res)->as_InlineType();
  assert(!allocate || res->is_allocated(&kit->gvn()), "must be allocated");
  return res;
}

// Finishes the larval phase: clears the larval bit in the mark word of the buffer
// and returns a non-larval copy of this inline type.
InlineTypeNode* InlineTypeNode::finish_larval(GraphKit* kit) const {
  Node* obj = get_oop();
  Node* mark_addr = kit->basic_plus_adr(obj, oopDesc::mark_offset_in_bytes());
  Node* mark = kit->make_load(nullptr, mark_addr, TypeX_X, TypeX_X->basic_type(), MemNode::unordered);
  mark = kit->gvn().transform(new AndXNode(mark, kit->MakeConX(~markWord::larval_bit_in_place)));
  kit->store_to_memory(kit->control(), mark_addr, mark, TypeX_X->basic_type(), MemNode::unordered);

  // Do not let stores that initialize this buffer be reordered with a subsequent
  // store that would make this buffer accessible by other threads.
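  // The MemBarStoreStore inserted below, anchored on the allocation's raw address
  // projection, enforces this ordering.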
  AllocateNode* alloc = AllocateNode::Ideal_allocation(obj);
  assert(alloc != nullptr, "must have an allocation node");
  kit->insert_mem_bar(Op_MemBarStoreStore, alloc->proj_out_or_null(AllocateNode::RawAddress));

  ciInlineKlass* vk = inline_klass();
  InlineTypeNode* res = make_uninitialized(kit->gvn(), vk);
  for (uint i = 1; i < req(); ++i) {
    res->set_req(i, in(i));
  }
  // TODO 8239003
  //res->set_type(TypeInlineType::make(vk, false));
  res = kit->gvn().transform(res)->as_InlineType();
  return res;
}

// Returns true if this inline type is backed by an allocation that is still in larval state.
bool InlineTypeNode::is_larval(PhaseGVN* gvn) const {
  if (!is_allocated(gvn)) {
    return false;
  }

  Node* oop = get_oop();
  AllocateNode* alloc = AllocateNode::Ideal_allocation(oop);
  return alloc != nullptr && alloc->_larval;
}

// Checks if all field values are loads from 'base' at offsets matching the layout of
// inline type 'vk'. Returns the base oop if so, nullptr otherwise.
Node* InlineTypeNode::is_loaded(PhaseGVN* phase, ciInlineKlass* vk, Node* base, int holder_offset) {
  if (is_larval() || is_larval(phase)) {
    return nullptr;
  }
  if (vk == nullptr) {
    vk = inline_klass();
  }
  for (uint i = 0; i < field_count(); ++i) {
    int offset = holder_offset + field_offset(i);
    Node* value = field_value(i);
    if (value->is_InlineType()) {
      InlineTypeNode* vt = value->as_InlineType();
      if (vt->type()->inline_klass()->is_empty()) {
        continue;
      } else if (field_is_flat(i) && vt->is_InlineType()) {
        // Check inline type field load recursively
        base = vt->as_InlineType()->is_loaded(phase, vk, base, offset - vt->type()->inline_klass()->payload_offset());
        if (base == nullptr) {
          return nullptr;
        }
        continue;
      } else {
        value = vt->get_oop();
        if (value->Opcode() == Op_CastPP) {
          // Skip CastPP
          value = value->in(1);
        }
      }
    }
    if (value->isa_DecodeN()) {
      // Skip DecodeN
      value = value->in(1);
    }
    if (value->isa_Load()) {
      // Check if base and offset of the field load match the inline type layout
      intptr_t loffset = 0;
      Node* lbase = AddPNode::Ideal_base_and_offset(value->in(MemNode::Address), phase, loffset);
      if (lbase == nullptr || (lbase != base && base != nullptr) || loffset != offset) {
        return nullptr;
      } else if (base == nullptr) {
        // Set base and check if pointer type matches
        base = lbase;
        const TypeInstPtr* vtptr = phase->type(base)->isa_instptr();
        if (vtptr == nullptr || !vtptr->instance_klass()->equals(vk)) {
          return nullptr;
        }
      }
    } else {
      return nullptr;
    }
  }
  return base;
}

// Returns the constant klass pointer of 'vk' with its lowest bit set (tagged klass).
Node* InlineTypeNode::tagged_klass(ciInlineKlass* vk, PhaseGVN& gvn) {
  const TypeKlassPtr* tk = TypeKlassPtr::make(vk);
  intptr_t bits = tk->get_con();
  set_nth_bit(bits, 0);
  return gvn.longcon((jlong)bits);
}

// Passes the scalarized field values of this inline type to the call or return node 'n',
// filling its inputs starting at 'base_input'.
void InlineTypeNode::pass_fields(GraphKit* kit, Node* n, uint& base_input, bool in, bool null_free) {
  if (!null_free && in) {
    n->init_req(base_input++, get_is_init());
  }
  for (uint i = 0; i < field_count(); i++) {
    Node* arg = field_value(i);
    if (field_is_flat(i)) {
      // Flat inline type field
      arg->as_InlineType()->pass_fields(kit, n, base_input, in);
      if (!field_is_null_free(i)) {
        assert(field_null_marker_offset(i) != -1, "inconsistency");
        n->init_req(base_input++, arg->as_InlineType()->get_is_init());
      }
    } else {
      if (arg->is_InlineType()) {
        // Non-flat inline type field
        InlineTypeNode* vt = arg->as_InlineType();
        assert(n->Opcode() != Op_Return || vt->is_allocated(&kit->gvn()), "inline type field should be allocated on return");
        arg = vt->buffer(kit);
      }
      // Initialize call/return arguments
      n->init_req(base_input++, arg);
      if (field_type(i)->size() == 2) {
        n->init_req(base_input++, kit->top());
      }
    }
  }
  // The last argument is used to pass IsInit information to compiled code and is not required here.
  if (!null_free && !in) {
    n->init_req(base_input++, kit->top());
  }
}

// Initializes the field values of this inline type from the inputs (method entry) or
// projections (call return) of 'multi', consuming argument slots starting at 'base_input'.
void InlineTypeNode::initialize_fields(GraphKit* kit, MultiNode* multi, uint& base_input, bool in, bool null_free, Node* null_check_region, GrowableArray<ciType*>& visited) {
  PhaseGVN& gvn = kit->gvn();
  Node* is_init = nullptr;
  if (!null_free) {
    // Nullable inline type
    if (in) {
      // Set IsInit field
      if (multi->is_Start()) {
        is_init = gvn.transform(new ParmNode(multi->as_Start(), base_input));
      } else {
        is_init = multi->as_Call()->in(base_input);
      }
      set_req(IsInit, is_init);
      base_input++;
    }
    // Add a null check to make subsequent loads dependent on it
    assert(null_check_region == nullptr, "already set");
    if (is_init == nullptr) {
      // Will only be initialized below, use dummy node for now
      is_init = new Node(1);
      is_init->init_req(0, kit->control()); // Add an input to prevent dummy from being dead
      gvn.set_type_bottom(is_init);
    }
    Node* null_ctrl = kit->top();
    kit->null_check_common(is_init, T_INT, false, &null_ctrl);
    Node* non_null_ctrl = kit->control();
    null_check_region = new RegionNode(3);
    null_check_region->init_req(1, non_null_ctrl);
    null_check_region->init_req(2, null_ctrl);
    null_check_region = gvn.transform(null_check_region);
    kit->set_control(null_check_region);
  }

  for (uint i = 0; i < field_count(); ++i) {
    ciType* type = field_type(i);
    Node* parm = nullptr;
    if (field_is_flat(i)) {
      // Flat inline type field
      InlineTypeNode* vt = make_uninitialized(gvn, type->as_inline_klass(), field_is_null_free(i));
      vt->initialize_fields(kit, multi, base_input, in, true, null_check_region, visited);
      if (!field_is_null_free(i)) {
        assert(field_null_marker_offset(i) != -1, "inconsistency");
        Node* is_init = nullptr;
        if (multi->is_Start()) {
          is_init = gvn.transform(new ParmNode(multi->as_Start(), base_input));
        } else if (in) {
          is_init = multi->as_Call()->in(base_input);
        } else {
          is_init = gvn.transform(new ProjNode(multi->as_Call(), base_input));
        }
        vt->set_req(IsInit, is_init);
        base_input++;
      }
      parm = gvn.transform(vt);
    } else {
      if (multi->is_Start()) {
        assert(in, "return from start?");
        parm = gvn.transform(new ParmNode(multi->as_Start(), base_input));
      } else if (in) {
        parm = multi->as_Call()->in(base_input);
      } else {
        parm = gvn.transform(new ProjNode(multi->as_Call(), base_input));
      }
      bool null_free = field_is_null_free(i);
      // Non-flat inline type field
      if (type->is_inlinetype()) {
        if (null_check_region != nullptr) {
          // We limit scalarization for inline types with circular fields and can therefore observe nodes
          // of the same type but with different scalarization depth during GVN. To avoid inconsistencies
          // during merging, make sure that we only create Phis for fields that are guaranteed to be scalarized.
          if (parm->is_InlineType() && kit->C->has_circular_inline_type()) {
            parm = parm->as_InlineType()->get_oop();
          }
          // Holder is nullable, set field to nullptr if holder is nullptr to avoid loading from uninitialized memory
          parm = PhiNode::make(null_check_region, parm, TypeInstPtr::make(TypePtr::BotPTR, type->as_inline_klass()));
          parm->set_req(2, kit->zerocon(T_OBJECT));
          parm = gvn.transform(parm);
          null_free = false;
        }
        if (visited.contains(type)) {
          kit->C->set_has_circular_inline_type(true);
        } else if (!parm->is_InlineType()) {
          int old_len = visited.length();
          visited.push(type);
          if (null_free) {
            parm = kit->cast_not_null(parm);
          }
          parm = make_from_oop_impl(kit, parm, type->as_inline_klass(), visited);
          visited.trunc_to(old_len);
        }
      }
      base_input += type->size();
    }
    assert(parm != nullptr, "should never be null");
    assert(field_value(i) == nullptr, "already set");
    set_field_value(i, parm);
    gvn.record_for_igvn(parm);
  }
  // The last argument is used to pass IsInit information to compiled code
  if (!null_free && !in) {
    Node* cmp = is_init->raw_out(0);
    is_init = gvn.transform(new ProjNode(multi->as_Call(), base_input));
    set_req(IsInit, is_init);
    gvn.hash_delete(cmp);
    cmp->set_req(1, is_init);
    gvn.hash_find_insert(cmp);
    gvn.record_for_igvn(cmp);
    base_input++;
  }
}

// Search for multiple allocations of this inline type and try to replace them by dominating allocations.
// Equivalent InlineTypeNodes are merged by GVN, so we just need to search for AllocateNode users to find redundant allocations.
void InlineTypeNode::remove_redundant_allocations(PhaseIdealLoop* phase) {
  // TODO 8332886 Really needed? GVN is disabled anyway.
  if (is_larval()) {
    return;
  }
  PhaseIterGVN* igvn = &phase->igvn();
  // Search for allocations of this inline type. Ignore scalar replaceable ones, they
  // will be removed anyway and changing the memory chain will confuse other optimizations.
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    AllocateNode* alloc = fast_out(i)->isa_Allocate();
    if (alloc != nullptr && alloc->in(AllocateNode::InlineType) == this && !alloc->_is_scalar_replaceable) {
      Node* res = alloc->result_cast();
      if (res == nullptr || !res->is_CheckCastPP()) {
        break; // No unique CheckCastPP
      }
      // Search for a dominating allocation of the same inline type
      Node* res_dom = res;
      for (DUIterator_Fast jmax, j = fast_outs(jmax); j < jmax; j++) {
        AllocateNode* alloc_other = fast_out(j)->isa_Allocate();
        if (alloc_other != nullptr && alloc_other->in(AllocateNode::InlineType) == this && !alloc_other->_is_scalar_replaceable) {
          Node* res_other = alloc_other->result_cast();
          if (res_other != nullptr && res_other->is_CheckCastPP() && res_other != res_dom &&
              phase->is_dominator(res_other->in(0), res_dom->in(0))) {
            res_dom = res_other;
          }
        }
      }
      if (res_dom != res) {
        // Replace allocation by dominating one.
        replace_allocation(igvn, res, res_dom);
        // The result of the dominated allocation is now unused and will be removed
        // later in PhaseMacroExpand::eliminate_allocate_node to not confuse loop opts.
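        // Enqueue the dominated allocation so IGVN revisits it now that its result has been rewired.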
        igvn->_worklist.push(alloc);
      }
    }
  }
}

InlineTypeNode* InlineTypeNode::make_null(PhaseGVN& gvn, ciInlineKlass* vk, bool transform) {
  GrowableArray<ciType*> visited;
  visited.push(vk);
  return make_null_impl(gvn, vk, visited, transform);
}

// Creates an InlineTypeNode representing the null value: IsInit is 0 and all field
// values are set to their zero/default values.
InlineTypeNode* InlineTypeNode::make_null_impl(PhaseGVN& gvn, ciInlineKlass* vk, GrowableArray<ciType*>& visited, bool transform) {
  InlineTypeNode* vt = new InlineTypeNode(vk, gvn.zerocon(T_OBJECT), /* null_free= */ false);
  vt->set_is_buffered(gvn);
  vt->set_is_init(gvn, false);
  for (uint i = 0; i < vt->field_count(); i++) {
    ciType* ft = vt->field_type(i);
    Node* value = gvn.zerocon(ft->basic_type());
    if (!vt->field_is_flat(i) && visited.contains(ft)) {
      gvn.C->set_has_circular_inline_type(true);
    } else if (ft->is_inlinetype()) {
      int old_len = visited.length();
      visited.push(ft);
      value = make_null_impl(gvn, ft->as_inline_klass(), visited);
      visited.trunc_to(old_len);
    }
    vt->set_field_value(i, value);
  }
  return transform ? gvn.transform(vt)->as_InlineType() : vt;
}

// Returns a clone if this node is used by nodes other than 'map' or if it is not safe to
// replace. Otherwise, unregisters this node from GVN so it can be modified in place.
InlineTypeNode* InlineTypeNode::clone_if_required(PhaseGVN* gvn, SafePointNode* map, bool safe_for_replace) {
  if (!safe_for_replace || (map == nullptr && outcnt() != 0)) {
    return clone()->as_InlineType();
  }
  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    if (fast_out(i) != map) {
      return clone()->as_InlineType();
    }
  }
  gvn->hash_delete(this);
  return this;
}

const Type* InlineTypeNode::Value(PhaseGVN* phase) const {
  Node* oop = get_oop();
  const Type* toop = phase->type(oop);
#ifdef ASSERT
  if (oop->is_Con() && toop->is_zero_type() && _type->isa_oopptr()->is_known_instance()) {
    // We are not allocated (anymore) and should therefore not have an instance id
    dump(1);
    assert(false, "Unbuffered inline type should not have known instance id");
  }
#endif
  const Type* t = toop->filter_speculative(_type);
  if (t->singleton()) {
    // Don't replace InlineType by a constant
    t = _type;
  }
  const Type* tinit = phase->type(in(IsInit));
  if (tinit == Type::TOP) {
    return Type::TOP;
  }
  if (tinit->isa_int() && tinit->is_int()->is_con(1)) {
    t = t->join_speculative(TypePtr::NOTNULL);
  }
  return t;
}

#ifndef PRODUCT
void InlineTypeNode::dump_spec(outputStream* st) const {
  if (_is_larval) {
    st->print(" #larval");
  }
}
#endif // NOT PRODUCT