/*
 * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "opto/locknode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"

//=============================================================================
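// A BoxLockNode is pinned to its monitor's stack slot: the input mask
// holds exactly that slot, while the result may live in any pointer
// register.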
const RegMask &BoxLockNode::in_RegMask(uint i) const {
  return _inmask;
}

const RegMask &BoxLockNode::out_RegMask() const {
  return *Matcher::idealreg2regmask[Op_RegP];
}

uint BoxLockNode::size_of() const { return sizeof(*this); }

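// A BoxLockNode records the stack slot assigned to a monitor. It hangs off
// the root node and is marked rematerializable, so the register allocator
// can recreate it instead of spilling it; compilation bails out if the
// slot cannot be represented in a register mask.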
BoxLockNode::BoxLockNode( int slot ) : Node( Compile::current()->root() ),
                                       _slot(slot), _kind(BoxLockNode::Regular) {
  init_class_id(Class_BoxLock);
  init_flags(Flag_rematerialize);
  OptoReg::Name reg = OptoReg::stack2reg(_slot);
  if (!RegMask::can_represent(reg, Compile::current()->sync_stack_slots())) {
    Compile::current()->record_method_not_compilable("must be able to represent all monitor slots in reg mask");
    return;
  }
  _inmask.Insert(reg);
}

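// GVN hashing: when nested lock elimination is enabled, each locked
// region must keep its own BoxLock node, so hashing is disabled and a
// node compares equal only to itself. A sketch of the Java pattern
// EliminateNestedLocks targets:
//   synchronized (obj) {
//     synchronized (obj) { ... } // inner (nested) lock on the same object
//   }
// Commoning the two boxes here would conflate the distinct regions.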
uint BoxLockNode::hash() const {
  if (EliminateNestedLocks) {
    return NO_HASH; // Each locked region has its own BoxLock node
  }
  return Node::hash() + _slot + (is_eliminated() ? Compile::current()->fixed_slots() : 0);
}

bool BoxLockNode::cmp( const Node &n ) const {
  if (EliminateNestedLocks) {
    return (&n == this); // Always fail except on self
  }
  const BoxLockNode &bn = (const BoxLockNode &)n;
  return (bn._slot == _slot) && (bn.is_eliminated() == is_eliminated());
}

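// With nested lock elimination off, BoxLock nodes for the same stack slot
// may be commoned; the surviving node inherits the Unbalanced or Coarsened
// status of the node it replaces.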
Node* BoxLockNode::Identity(PhaseGVN* phase) {
  if (!EliminateNestedLocks && !this->is_eliminated()) {
    Node* n = phase->hash_find(this);
    if (n == nullptr || n == this) {
      return this;
    }
    BoxLockNode* old_box = n->as_BoxLock();
    // Set corresponding status (_kind) when commoning BoxLock nodes.
    if (this->_kind != old_box->_kind) {
      if (this->is_unbalanced()) {
        old_box->set_unbalanced();
      }
      if (!old_box->is_unbalanced()) {
        // Only Regular or Coarsened status should be here:
        // Nested and Local are set only when EliminateNestedLocks is on.
        if (old_box->is_regular()) {
          assert(this->is_coarsened(), "unexpected kind: %s", _kind_name[(int)this->_kind]);
          old_box->set_coarsened();
        } else {
          assert(this->is_regular(), "unexpected kind: %s", _kind_name[(int)this->_kind]);
          assert(old_box->is_coarsened(), "unexpected kind: %s", _kind_name[(int)old_box->_kind]);
        }
      }
    }
    return old_box;
  }
  return this;
}

BoxLockNode* BoxLockNode::box_node(Node* box) {
  // Chase down the BoxLock after RA, which may have spilled box nodes.
  while (!box->is_BoxLock()) {
    assert(box->is_SpillCopy() || box->is_Phi(), "Bad spill of Lock.");
    // Only BoxLock nodes with the same stack slot are merged.
    // So it is enough to trace one path to find the slot value.
    box = box->in(1);
  }
  return box->as_BoxLock();
}

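// Return the stack slot (as an OptoReg) assigned to the given box,
// looking through any spill copies first.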
OptoReg::Name BoxLockNode::reg(Node* box) {
  return box_node(box)->in_RegMask(0).find_first_elem();
}

// Is a BoxLock node used for one simple lock region (same box and obj)?
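// A "simple" region is one in which every Lock and Unlock using this box
// operates on the same object; such regions are the candidates for lock
// coarsening and lock elimination. A lock on a different object is
// reported through bad_lock.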
bool BoxLockNode::is_simple_lock_region(LockNode** unique_lock, Node* obj, Node** bad_lock) {
  if (is_unbalanced()) {
    return false;
  }
  LockNode* lock = nullptr;
  bool has_one_lock = false;
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    assert(!n->is_Phi(), "should not merge BoxLock nodes");
    if (n->is_AbstractLock()) {
      AbstractLockNode* alock = n->as_AbstractLock();
      // Check the lock's box since the box could be referenced by the lock's debug info.
      if (alock->box_node() == this) {
        if (alock->obj_node()->eqv_uncast(obj)) {
          if ((unique_lock != nullptr) && alock->is_Lock()) {
            if (lock == nullptr) {
              lock = alock->as_Lock();
              has_one_lock = true;
            } else if (lock != alock->as_Lock()) {
              has_one_lock = false;
              if (bad_lock != nullptr) {
                *bad_lock = alock;
              }
            }
          }
        } else {
          if (bad_lock != nullptr) {
            *bad_lock = alock;
          }
          return false; // Different objects
        }
      }
    }
  }
#ifdef ASSERT
  // Verify that FastLock and Safepoint reference only this lock region.
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    if (n->is_FastLock()) {
      FastLockNode* flock = n->as_FastLock();
      assert((flock->box_node() == this) && flock->obj_node()->eqv_uncast(obj), "");
    }
    // Don't check monitor info in safepoints since the referenced object
    // could differ from the locked object: it could be a Phi of different
    // cast nodes which all point to this locked object.
    // We assume that no other objects are referenced in the monitor info
    // associated with this BoxLock node because all associated locks and
    // unlocks reference only this one object.
  }
#endif
  if (unique_lock != nullptr && has_one_lock) {
    *unique_lock = lock;
  }
  return true;
}

//=============================================================================
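// FastLock and FastUnlock nodes guard individual monitor operations and
// are never commoned by GVN: each node compares equal only to itself.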
//-----------------------------hash--------------------------------------------
uint FastLockNode::hash() const { return NO_HASH; }

uint FastLockNode::size_of() const { return sizeof(*this); }

//------------------------------cmp--------------------------------------------
bool FastLockNode::cmp( const Node &n ) const {
  return (&n == this);                // Always fail except on self
}

//=============================================================================
//-----------------------------hash--------------------------------------------
uint FastUnlockNode::hash() const { return NO_HASH; }

//------------------------------cmp--------------------------------------------
bool FastUnlockNode::cmp( const Node &n ) const {
  return (&n == this);                // Always fail except on self
}

//=============================================================================
//------------------------------do_monitor_enter-------------------------------
void Parse::do_monitor_enter() {
  kill_dead_locals();

  // Null check; get the cast pointer.
  Node* obj = null_check(peek());
  // Check for locking a null object.
  if (stopped()) return;

  // The monitor object is not part of the debug-info expression stack.
  pop();

  // Insert a FastLockNode which takes as arguments the current thread pointer,
  // the obj pointer & the address of the stack slot pair used for the lock.
  shared_lock(obj);
}
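
// For example, javac compiles
//   synchronized (obj) { ... }
// into paired monitorenter/monitorexit bytecodes; do_monitor_enter() above
// and do_monitor_exit() below turn such a pair into FastLock/FastUnlock
// nodes that share one BoxLockNode for the region's monitor stack slot.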

//------------------------------do_monitor_exit--------------------------------
void Parse::do_monitor_exit() {
  kill_dead_locals();

  pop();                        // Pop oop to unlock
  // Because monitors are guaranteed paired (else we bail out), we know
  // the matching Lock for this Unlock.  Hence there is no need for a
  // null check on Unlock.
  shared_unlock(map()->peek_monitor_box(), map()->peek_monitor_obj());
}