/*
 * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "classfile/vmSymbols.hpp"
#include "jfrfiles/jfrEventClasses.hpp"
#include "logging/log.hpp"
#include "memory/allStatic.hpp"
#include "memory/resourceArea.hpp"
#include "nmt/memTag.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/basicLock.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/lightweightSynchronizer.hpp"
#include "runtime/lockStack.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/synchronizer.inline.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/trimNativeHeap.hpp"
#include "utilities/concurrentHashTable.inline.hpp"
#include "utilities/concurrentHashTableTasks.inline.hpp"
#include "utilities/globalDefinitions.hpp"

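// Returns the identity hash of obj for use as the ObjectMonitorTable key.
// The object is expected to already have a hash installed.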
static uintx objhash(oop obj) {
  if (UseCompactObjectHeaders) {
    uintx hash = LightweightSynchronizer::get_hash(obj->mark(), obj);
    assert(hash != 0, "should have a hash");
    return hash;
  } else {
    uintx hash = obj->mark().hash();
    assert(hash != 0, "should have a hash");
    return hash;
  }
}

// ConcurrentHashTable storing links from objects to ObjectMonitors
class ObjectMonitorTable : AllStatic {
  struct Config {
    using Value = ObjectMonitor*;
    static uintx get_hash(Value const& value, bool* is_dead) {
      return (uintx)value->hash();
    }
    static void* allocate_node(void* context, size_t size, Value const& value) {
      ObjectMonitorTable::inc_items_count();
      return AllocateHeap(size, mtObjectMonitor);
    }
    static void free_node(void* context, void* memory, Value const& value) {
      ObjectMonitorTable::dec_items_count();
      FreeHeap(memory);
    }
  };
  using ConcurrentTable = ConcurrentHashTable<Config, mtObjectMonitor>;

  static ConcurrentTable* _table;
  static volatile size_t _items_count;
  static size_t _table_size;
  static volatile bool _resize;

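  // Lookup of a table entry by its associated object. Entries found this
  // way are never treated as dead; liveness is only checked when looking
  // up by monitor (see LookupMonitor below).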
  class Lookup : public StackObj {
    oop _obj;

   public:
    explicit Lookup(oop obj) : _obj(obj) {}

    uintx get_hash() const {
      return objhash(_obj);
    }

    bool equals(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return (*value)->object_refers_to(_obj);
    }

    bool is_dead(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return false;
    }
  };

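  // Lookup of a table entry by its ObjectMonitor*, used when removing
  // entries. An entry is dead once its monitor's object has been collected.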
  class LookupMonitor : public StackObj {
    ObjectMonitor* _monitor;

   public:
    explicit LookupMonitor(ObjectMonitor* monitor) : _monitor(monitor) {}

    uintx get_hash() const {
      return _monitor->hash();
    }

    bool equals(ObjectMonitor** value) {
      return (*value) == _monitor;
    }

    bool is_dead(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return (*value)->object_is_dead();
    }
  };

  static void inc_items_count() {
    Atomic::inc(&_items_count);
  }

  static void dec_items_count() {
    Atomic::dec(&_items_count);
  }

  static double get_load_factor() {
    return (double)_items_count / (double)_table_size;
  }

  static size_t table_size(Thread* current = Thread::current()) {
    return ((size_t)1) << _table->get_size_log2(current);
  }

  static size_t max_log_size() {
    // TODO[OMTable]: Evaluate the max size.
    // TODO[OMTable]: Need to fix init order to use Universe::heap()->max_capacity();
    //                Using MaxHeapSize directly this early may be wrong, and there
    //                are definitely rounding errors (alignment).
    const size_t max_capacity = MaxHeapSize;
    const size_t min_object_size = CollectedHeap::min_dummy_object_size() * HeapWordSize;
    const size_t max_objects = max_capacity / MAX2(MinObjAlignmentInBytes, checked_cast<int>(min_object_size));
    const size_t log_max_objects = log2i_graceful(max_objects);

    return MAX2(MIN2<size_t>(SIZE_BIG_LOG2, log_max_objects), min_log_size());
  }

  static size_t min_log_size() {
    // ~= log(AvgMonitorsPerThreadEstimate default)
    return 10;
  }

  template<typename V>
  static size_t clamp_log_size(V log_size) {
    return MAX2(MIN2(log_size, checked_cast<V>(max_log_size())), checked_cast<V>(min_log_size()));
  }

  static size_t initial_log_size() {
    const size_t estimate = log2i(MAX2(os::processor_count(), 1)) + log2i(MAX2(AvgMonitorsPerThreadEstimate, size_t(1)));
    return clamp_log_size(estimate);
  }

  static size_t grow_hint() {
    return ConcurrentTable::DEFAULT_GROW_HINT;
  }

 public:
  static void create() {
    _table = new ConcurrentTable(initial_log_size(), max_log_size(), grow_hint());
    _items_count = 0;
    _table_size = table_size();
    _resize = false;
  }

  static void verify_monitor_get_result(oop obj, ObjectMonitor* monitor) {
#ifdef ASSERT
    if (SafepointSynchronize::is_at_safepoint()) {
      bool has_monitor = obj->mark().has_monitor();
      assert(has_monitor == (monitor != nullptr),
          "Inconsistency between markWord and ObjectMonitorTable has_monitor: %s monitor: " PTR_FORMAT,
          BOOL_TO_STR(has_monitor), p2i(monitor));
    }
#endif
  }

  static ObjectMonitor* monitor_get(Thread* current, oop obj) {
    ObjectMonitor* result = nullptr;
    Lookup lookup_f(obj);
    auto found_f = [&](ObjectMonitor** found) {
      assert((*found)->object_peek() == obj, "must be");
      result = *found;
    };
    _table->get(current, lookup_f, found_f);
    verify_monitor_get_result(obj, result);
    return result;
  }

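  // Requests a grow of the table by setting the _resize hint and waking
  // the thread waiting on Service_lock, which performs the actual resize.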
  static void try_notify_grow() {
    if (!_table->is_max_size_reached() && !Atomic::load(&_resize)) {
      Atomic::store(&_resize, true);
      if (Service_lock->try_lock()) {
        Service_lock->notify();
        Service_lock->unlock();
      }
    }
  }

  static bool should_shrink() {
    // Not implemented.
    return false;
  }

  static constexpr double GROW_LOAD_FACTOR = 0.75;

  static bool should_grow() {
    return get_load_factor() > GROW_LOAD_FACTOR && !_table->is_max_size_reached();
  }

  static bool should_resize() {
    return should_grow() || should_shrink() || Atomic::load(&_resize);
  }

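  // Runs a ConcurrentHashTable task (grow or bulk delete) in steps. Between
  // steps the task is paused and the thread passes through a ThreadBlockInVM
  // so that pending safepoints can make progress.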
  template<typename Task, typename... Args>
  static bool run_task(JavaThread* current, Task& task, const char* task_name, Args&... args) {
    if (task.prepare(current)) {
      log_trace(monitortable)("Started to %s", task_name);
      TraceTime timer(task_name, TRACETIME_LOG(Debug, monitortable, perf));
      while (task.do_task(current, args...)) {
        task.pause(current);
        {
          ThreadBlockInVM tbivm(current);
        }
        task.cont(current);
      }
      task.done(current);
      return true;
    }
    return false;
  }

  static bool grow(JavaThread* current) {
    ConcurrentTable::GrowTask grow_task(_table);
    if (run_task(current, grow_task, "Grow")) {
      _table_size = table_size(current);
      log_info(monitortable)("Grown to size: %zu", _table_size);
      return true;
    }
    return false;
  }

  static bool clean(JavaThread* current) {
    ConcurrentTable::BulkDeleteTask clean_task(_table);
    auto is_dead = [&](ObjectMonitor** monitor) {
      return (*monitor)->object_is_dead();
    };
    auto do_nothing = [&](ObjectMonitor** monitor) {};
    NativeHeapTrimmer::SuspendMark sm("ObjectMonitorTable");
    return run_task(current, clean_task, "Clean", is_dead, do_nothing);
  }

  static bool resize(JavaThread* current) {
    LogTarget(Info, monitortable) lt;
    bool success = false;

    if (should_grow()) {
      lt.print("Start growing with load factor %f", get_load_factor());
      success = grow(current);
    } else {
      if (!_table->is_max_size_reached() && Atomic::load(&_resize)) {
        lt.print("WARNING: Getting resize hints with load factor %f", get_load_factor());
      }
      lt.print("Start cleaning with load factor %f", get_load_factor());
      success = clean(current);
    }

    Atomic::store(&_resize, false);

    return success;
  }

  static ObjectMonitor* monitor_put_get(Thread* current, ObjectMonitor* monitor, oop obj) {
    // Enter the monitor into the concurrent hashtable.
    ObjectMonitor* result = monitor;
    Lookup lookup_f(obj);
    auto found_f = [&](ObjectMonitor** found) {
      assert((*found)->object_peek() == obj, "must be");
      assert(objhash(obj) == (uintx)(*found)->hash(), "hash must match");
      result = *found;
    };
    bool grow;
    _table->insert_get(current, lookup_f, monitor, found_f, &grow);
    verify_monitor_get_result(obj, result);
    if (grow) {
      try_notify_grow();
    }
    return result;
  }

  static bool remove_monitor_entry(Thread* current, ObjectMonitor* monitor) {
    LookupMonitor lookup_f(monitor);
    return _table->remove(current, lookup_f);
  }

  static bool contains_monitor(Thread* current, ObjectMonitor* monitor) {
    LookupMonitor lookup_f(monitor);
    bool result = false;
    auto found_f = [&](ObjectMonitor** found) {
      result = true;
    };
    _table->get(current, lookup_f, found_f);
    return result;
  }

  static void print_on(outputStream* st) {
    auto printer = [&] (ObjectMonitor** entry) {
       ObjectMonitor* om = *entry;
       oop obj = om->object_peek();
       st->print("monitor=" PTR_FORMAT ", ", p2i(om));
       st->print("object=" PTR_FORMAT, p2i(obj));
       assert(objhash(obj) == (uintx)om->hash(), "hash must match");
       st->cr();
       return true;
    };
    if (SafepointSynchronize::is_at_safepoint()) {
      _table->do_safepoint_scan(printer);
    } else {
      _table->do_scan(Thread::current(), printer);
    }
  }
};

ObjectMonitorTable::ConcurrentTable* ObjectMonitorTable::_table = nullptr;
volatile size_t ObjectMonitorTable::_items_count = 0;
size_t ObjectMonitorTable::_table_size = 0;
volatile bool ObjectMonitorTable::_resize = false;

ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");

  ObjectMonitor* monitor = get_monitor_from_table(current, object);
  if (monitor != nullptr) {
    *inserted = false;
    return monitor;
  }

  ObjectMonitor* alloced_monitor = new ObjectMonitor(object);
  alloced_monitor->set_anonymous_owner();

  // Try to insert the monitor. If another thread won the race, use its monitor.
  monitor = add_monitor(current, alloced_monitor, object);

  *inserted = alloced_monitor == monitor;
  if (!*inserted) {
    delete alloced_monitor;
  }

  return monitor;
}

static void log_inflate(Thread* current, oop object, ObjectSynchronizer::InflateCause cause) {
  if (log_is_enabled(Trace, monitorinflation)) {
    ResourceMark rm(current);
    log_trace(monitorinflation)("inflate: object=" INTPTR_FORMAT ", mark="
                                INTPTR_FORMAT ", type='%s' cause=%s", p2i(object),
                                object->mark().value(), object->klass()->external_name(),
                                ObjectSynchronizer::inflate_cause_name(cause));
  }
}

static void post_monitor_inflate_event(EventJavaMonitorInflate* event,
                                       const oop obj,
                                       ObjectSynchronizer::InflateCause cause) {
  assert(event != nullptr, "invariant");
  const Klass* monitor_klass = obj->klass();
  if (ObjectMonitor::is_jfr_excluded(monitor_klass)) {
    return;
  }
  event->set_monitorClass(monitor_klass);
  event->set_address((uintptr_t)(void*)obj);
  event->set_cause((u1)cause);
  event->commit();
}

ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor(oop object, JavaThread* current, ObjectSynchronizer::InflateCause cause) {
  assert(UseObjectMonitorTable, "must be");

  EventJavaMonitorInflate event;

  bool inserted;
  ObjectMonitor* monitor = get_or_insert_monitor_from_table(object, current, &inserted);

  if (inserted) {
    log_inflate(current, object, cause);
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }

    // The monitor has an anonymous owner so it is safe from async deflation.
    ObjectSynchronizer::_in_use_list.add(monitor);
  }

  return monitor;
}

// Set the hashcode on the monitor to match the object's hash and insert
// the monitor into the hashtable.
ObjectMonitor* LightweightSynchronizer::add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj) {
  assert(UseObjectMonitorTable, "must be");
  assert(obj == monitor->object(), "must be");

  intptr_t hash = objhash(obj);
  assert(hash != 0, "must be set when claiming the object monitor");
  monitor->set_hash(hash);

  return ObjectMonitorTable::monitor_put_get(current, monitor, obj);
}

bool LightweightSynchronizer::remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj) {
  assert(UseObjectMonitorTable, "must be");
  assert(monitor->object_peek() == obj, "must be, cleared objects are removed by is_dead");

  return ObjectMonitorTable::remove_monitor_entry(current, monitor);
}

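// Restores the mark word of a deflated object to the unlocked state,
// preserving the hash bits. Loops because unrelated parts of the mark
// word may be updated concurrently.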
void LightweightSynchronizer::deflate_mark_word(oop obj) {
  assert(UseObjectMonitorTable, "must be");

  markWord mark = obj->mark_acquire();
  assert(!mark.has_no_hash(), "obj with inflated monitor must have had a hash");

  while (mark.has_monitor()) {
    const markWord new_mark = mark.clear_lock_bits().set_unlocked();
    mark = obj->cas_set_mark(new_mark, mark);
  }
}

void LightweightSynchronizer::initialize() {
  if (!UseObjectMonitorTable) {
    return;
  }
  ObjectMonitorTable::create();
}

bool LightweightSynchronizer::needs_resize() {
  if (!UseObjectMonitorTable) {
    return false;
  }
  return ObjectMonitorTable::should_resize();
}

bool LightweightSynchronizer::resize_table(JavaThread* current) {
  if (!UseObjectMonitorTable) {
    return true;
  }
  return ObjectMonitorTable::resize(current);
}

class LightweightSynchronizer::LockStackInflateContendedLocks : private OopClosure {
 private:
  oop _contended_oops[LockStack::CAPACITY];
  int _length;

  void do_oop(oop* o) final {
    oop obj = *o;
    if (obj->mark_acquire().has_monitor()) {
      if (_length > 0 && _contended_oops[_length - 1] == obj) {
        // Recursive
        return;
      }
      _contended_oops[_length++] = obj;
    }
  }

  void do_oop(narrowOop* o) final {
    ShouldNotReachHere();
  }

 public:
  LockStackInflateContendedLocks() :
    _contended_oops(),
    _length(0) {}

  void inflate(JavaThread* current) {
    assert(current == JavaThread::current(), "must be");
    current->lock_stack().oops_do(this);
    for (int i = 0; i < _length; i++) {
      LightweightSynchronizer::
        inflate_fast_locked_object(_contended_oops[i], ObjectSynchronizer::inflate_cause_vm_internal, current, current);
    }
  }
};

void LightweightSynchronizer::ensure_lock_stack_space(JavaThread* current) {
  assert(current == JavaThread::current(), "must be");
  LockStack& lock_stack = current->lock_stack();

  // Make room on lock_stack
  if (lock_stack.is_full()) {
    // Inflate contended objects
    LockStackInflateContendedLocks().inflate(current);
    if (lock_stack.is_full()) {
      // Inflate the oldest object
      inflate_fast_locked_object(lock_stack.bottom(), ObjectSynchronizer::inflate_cause_vm_internal, current, current);
    }
  }
}

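// RAII helper that, when the table is in use, publishes the ObjectMonitor
// observed during an enter to the BasicLock and the thread-local monitor
// cache, so that subsequent operations can avoid a table lookup.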
class LightweightSynchronizer::CacheSetter : StackObj {
  JavaThread* const _thread;
  BasicLock* const _lock;
  ObjectMonitor* _monitor;

  NONCOPYABLE(CacheSetter);

 public:
  CacheSetter(JavaThread* thread, BasicLock* lock) :
    _thread(thread),
    _lock(lock),
    _monitor(nullptr) {}

  ~CacheSetter() {
    // Only use the cache if using the table.
    if (UseObjectMonitorTable) {
      if (_monitor != nullptr) {
        _thread->om_set_monitor_cache(_monitor);
        _lock->set_object_monitor_cache(_monitor);
      } else {
        _lock->clear_object_monitor_cache();
      }
    }
  }

  void set_monitor(ObjectMonitor* monitor) {
    assert(_monitor == nullptr, "only set once");
    _monitor = monitor;
  }
};

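// Debug helper verifying that when locking on behalf of another thread,
// that thread is suspended (and thus cannot run concurrently), in which
// case no safepoint may occur during the operation.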
class LightweightSynchronizer::VerifyThreadState {
  bool _no_safepoint;

 public:
  VerifyThreadState(JavaThread* locking_thread, JavaThread* current) : _no_safepoint(locking_thread != current) {
    assert(current == Thread::current(), "must be");
    assert(locking_thread == current || locking_thread->is_obj_deopt_suspend(), "locking_thread may not run concurrently");
    if (_no_safepoint) {
      DEBUG_ONLY(JavaThread::current()->inc_no_safepoint_count();)
    }
  }
  ~VerifyThreadState() {
    if (_no_safepoint) {
      DEBUG_ONLY(JavaThread::current()->dec_no_safepoint_count();)
    }
  }
};

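// Attempts to fast-lock obj by CASing its mark word from unlocked to
// fast-locked and pushing obj onto the lock stack. Returns false if the
// mark is (or concurrently becomes) fast-locked or inflated.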
inline bool LightweightSynchronizer::fast_lock_try_enter(oop obj, LockStack& lock_stack, JavaThread* current) {
  markWord mark = obj->mark();
  while (mark.is_unlocked()) {
    ensure_lock_stack_space(current);
    assert(!lock_stack.is_full(), "must have made room on the lock stack");
    assert(!lock_stack.contains(obj), "thread must not already hold the lock");
    // Try to swing into 'fast-locked' state.
    markWord locked_mark = mark.set_fast_locked();
    markWord old_mark = mark;
    mark = obj->cas_set_mark(locked_mark, old_mark);
    if (old_mark == mark) {
      // Successfully fast-locked, push object to lock-stack and return.
      lock_stack.push(obj);
      return true;
    }
  }
  return false;
}

bool LightweightSynchronizer::fast_lock_spin_enter(oop obj, LockStack& lock_stack, JavaThread* current, bool observed_deflation) {
  assert(UseObjectMonitorTable, "must be");
  // Spins with exponential backoff, for a cumulative O(2^log_spin_limit) spins.
  const int log_spin_limit = os::is_MP() ? LightweightFastLockingSpins : 1;
  const int log_min_safepoint_check_interval = 10;

  markWord mark = obj->mark();
  const auto should_spin = [&]() {
    if (!mark.has_monitor()) {
      // Spin while not inflated.
      return true;
    } else if (observed_deflation) {
      // Spin while monitor is being deflated.
      ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark);
      return monitor == nullptr || monitor->is_being_async_deflated();
    }
    // Else stop spinning.
    return false;
  };
  // Always attempt to lock once, even when safepoint synchronization is in progress.
  bool should_process = false;
  for (int i = 0; should_spin() && !should_process && i < log_spin_limit; i++) {
    // Spin with exponential backoff.
    const int total_spin_count = 1 << i;
    const int inner_spin_count = MIN2(1 << log_min_safepoint_check_interval, total_spin_count);
    const int outer_spin_count = total_spin_count / inner_spin_count;
    for (int outer = 0; outer < outer_spin_count; outer++) {
      should_process = SafepointMechanism::should_process(current);
      if (should_process) {
        // Stop spinning for safepoint.
        break;
      }
      for (int inner = 1; inner < inner_spin_count; inner++) {
        SpinPause();
      }
    }

    if (fast_lock_try_enter(obj, lock_stack, current)) return true;
  }
  return false;
}

void LightweightSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  JavaThread* current = JavaThread::current();
  VerifyThreadState vts(locking_thread, current);

  if (obj->klass()->is_value_based()) {
    ObjectSynchronizer::handle_sync_on_value_based_class(obj, locking_thread);
  }

  CacheSetter cache_setter(locking_thread, lock);

  LockStack& lock_stack = locking_thread->lock_stack();

  ObjectMonitor* monitor = nullptr;
  if (lock_stack.contains(obj())) {
    monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current);
    bool entered = monitor->enter_for(locking_thread);
    assert(entered, "recursive ObjectMonitor::enter_for must succeed");
  } else {
    do {
      // It is assumed that enter_for must enter on an object without contention.
      monitor = inflate_and_enter(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, locking_thread, current);
      // But there may still be a race with deflation.
    } while (monitor == nullptr);
  }

  assert(monitor != nullptr, "LightweightSynchronizer::enter_for must succeed");
  cache_setter.set_monitor(monitor);
}

void LightweightSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  assert(current == JavaThread::current(), "must be");

  if (obj->klass()->is_value_based()) {
    ObjectSynchronizer::handle_sync_on_value_based_class(obj, current);
  }

  CacheSetter cache_setter(current, lock);

  // Used when deflation is observed. Progress here requires progress
  // from the deflator. After observing that the deflator is not
  // making progress (after two yields), switch to sleeping.
  SpinYield spin_yield(0, 2);
  bool observed_deflation = false;

  LockStack& lock_stack = current->lock_stack();

  if (!lock_stack.is_full() && lock_stack.try_recursive_enter(obj())) {
    // Recursively fast locked
    return;
  }

  if (lock_stack.contains(obj())) {
    ObjectMonitor* monitor = inflate_fast_locked_object(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, current, current);
    bool entered = monitor->enter(current);
    assert(entered, "recursive ObjectMonitor::enter must succeed");
    cache_setter.set_monitor(monitor);
    return;
  }

  while (true) {
    // Fast-locking does not use the 'lock' argument.
    // Fast-lock spinning to avoid inflating for short critical sections.
    // The goal is to only inflate when the extra cost of using ObjectMonitors
    // is worth it.
    // If deflation has been observed, we also spin while deflation is ongoing.
    if (fast_lock_try_enter(obj(), lock_stack, current)) {
      return;
    } else if (UseObjectMonitorTable && fast_lock_spin_enter(obj(), lock_stack, current, observed_deflation)) {
      return;
    }

    if (observed_deflation) {
      spin_yield.wait();
    }

    ObjectMonitor* monitor = inflate_and_enter(obj(), ObjectSynchronizer::inflate_cause_monitor_enter, current, current);
    if (monitor != nullptr) {
      cache_setter.set_monitor(monitor);
      return;
    }

    // If inflate_and_enter returns nullptr, it is because a deflated monitor
    // was encountered. Fall back to fast locking. The deflater is responsible
    // for clearing out the monitor and transitioning the markWord back to
    // fast locking.
    observed_deflation = true;
  }
}

void LightweightSynchronizer::exit(oop object, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  assert(current == Thread::current(), "must be");

  markWord mark = object->mark();
  assert(!mark.is_unlocked(), "must be");

  LockStack& lock_stack = current->lock_stack();
  if (mark.is_fast_locked()) {
    if (lock_stack.try_recursive_exit(object)) {
      // This is a recursive exit which succeeded
      return;
    }
    if (lock_stack.is_recursive(object)) {
      // Must inflate recursive locks if try_recursive_exit fails.
      // This happens for unstructured unlocks; try_recursive_exit
      // could potentially be fixed to handle these.
      inflate_fast_locked_object(object, ObjectSynchronizer::inflate_cause_vm_internal, current, current);
    }
  }

  while (mark.is_fast_locked()) {
    markWord unlocked_mark = mark.set_unlocked();
    markWord old_mark = mark;
    mark = object->cas_set_mark(unlocked_mark, old_mark);
    if (old_mark == mark) {
      // CAS successful, remove from lock_stack
      size_t recursion = lock_stack.remove(object) - 1;
      assert(recursion == 0, "Should not have unlocked here");
      return;
    }
  }

  assert(mark.has_monitor(), "must be");
  // The monitor exists
  ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, object, mark);
  if (monitor->has_anonymous_owner()) {
    assert(current->lock_stack().contains(object), "current must have object on its lock stack");
    monitor->set_owner_from_anonymous(current);
    monitor->set_recursions(current->lock_stack().remove(object) - 1);
  }

  monitor->exit(current);
}

// LightweightSynchronizer::inflate_locked_or_imse is used to get an inflated
// ObjectMonitor* with LM_LIGHTWEIGHT. It is used from contexts which require
// an inflated ObjectMonitor* for a monitor and expect to throw a
// java.lang.IllegalMonitorStateException if it is not held by the current
// thread, such as notify/wait and jni_exit. LM_LIGHTWEIGHT maintains the
// invariant that it only inflates if the object is already locked by the
// current thread or the current thread is in the process of entering. To
// maintain this invariant we need to throw a
// java.lang.IllegalMonitorStateException before inflating if the current
// thread is not the owner. LightweightSynchronizer::inflate_locked_or_imse
// facilitates this.
ObjectMonitor* LightweightSynchronizer::inflate_locked_or_imse(oop obj, ObjectSynchronizer::InflateCause cause, TRAPS) {
  assert(LockingMode == LM_LIGHTWEIGHT, "must be");
  JavaThread* current = THREAD;

  for (;;) {
    markWord mark = obj->mark_acquire();
    if (mark.is_unlocked()) {
      // No lock, IMSE.
      THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                 "current thread is not owner", nullptr);
    }

    if (mark.is_fast_locked()) {
      if (!current->lock_stack().contains(obj)) {
        // Fast locked by other thread, IMSE.
        THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                   "current thread is not owner", nullptr);
      } else {
        // Current thread owns the lock, must inflate
        return inflate_fast_locked_object(obj, cause, current, current);
      }
    }

    assert(mark.has_monitor(), "must be");
    ObjectMonitor* monitor = ObjectSynchronizer::read_monitor(current, obj, mark);
    if (monitor != nullptr) {
      if (monitor->has_anonymous_owner()) {
        LockStack& lock_stack = current->lock_stack();
        if (lock_stack.contains(obj)) {
          // Current thread owns the lock but someone else inflated it.
          // Fix owner and pop lock stack.
          monitor->set_owner_from_anonymous(current);
          monitor->set_recursions(lock_stack.remove(obj) - 1);
        } else {
          // Fast locked (and inflated) by other thread, or deflation in progress, IMSE.
          THROW_MSG_(vmSymbols::java_lang_IllegalMonitorStateException(),
                     "current thread is not owner", nullptr);
        }
      }
      return monitor;
    }
  }
}

ObjectMonitor* LightweightSynchronizer::inflate_into_object_header(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, Thread* current) {
  // The JavaThread* locking_thread parameter is only used by LM_LIGHTWEIGHT and requires
  // that the locking_thread == Thread::current() or is suspended throughout the call by
  // some other mechanism.
  // Even with LM_LIGHTWEIGHT the thread might be nullptr when called from a
  // non-JavaThread (as may still be the case from FastHashCode). However, it is only
  // important for the correctness of the LM_LIGHTWEIGHT algorithm that the thread
  // is set when called from ObjectSynchronizer::enter from the owning thread,
  // ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
  EventJavaMonitorInflate event;

  for (;;) {
    const markWord mark = object->mark_acquire();

    // The mark can be in one of the following states:
    // *  inflated     - Just return if using stack-locking.
    //                   If using fast-locking and the ObjectMonitor owner
    //                   is anonymous and the locking_thread owns the
    //                   object lock, then we make the locking_thread
    //                   the ObjectMonitor owner and remove the lock from
    //                   the locking_thread's lock stack.
    // *  fast-locked  - Coerce it to inflated from fast-locked.
    // *  unlocked     - Aggressively inflate the object.

    // CASE: inflated
    if (mark.has_monitor()) {
      ObjectMonitor* inf = mark.monitor();
      markWord dmw = inf->header();
      assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
      if (inf->has_anonymous_owner() &&
          locking_thread != nullptr && locking_thread->lock_stack().contains(object)) {
        inf->set_owner_from_anonymous(locking_thread);
        size_t removed = locking_thread->lock_stack().remove(object);
        inf->set_recursions(removed - 1);
      }
      return inf;
    }

    // CASE: fast-locked
    // Could be fast-locked either by the locking_thread or by some other thread.
    //
    // Note that we allocate the ObjectMonitor speculatively, _before_
    // attempting to set the object's mark to the new ObjectMonitor. If
    // the locking_thread owns the monitor, then we set the ObjectMonitor's
    // owner to the locking_thread. Otherwise, we set the ObjectMonitor's owner
    // to anonymous. If we lose the race to set the object's mark to the
    // new ObjectMonitor, then we just delete it and loop around again.
    //
    if (mark.is_fast_locked()) {
      ObjectMonitor* monitor = new ObjectMonitor(object);
      monitor->set_header(mark.set_unlocked());
      bool own = locking_thread != nullptr && locking_thread->lock_stack().contains(object);
      if (own) {
        // Owned by locking_thread.
        monitor->set_owner(locking_thread);
      } else {
        // Owned by somebody else.
        monitor->set_anonymous_owner();
      }
      markWord monitor_mark = markWord::encode(monitor);
      markWord old_mark = object->cas_set_mark(monitor_mark, mark);
      if (old_mark == mark) {
        // Success! Return inflated monitor.
        if (own) {
          size_t removed = locking_thread->lock_stack().remove(object);
          monitor->set_recursions(removed - 1);
        }
        // Once the ObjectMonitor is configured and the object is associated
        // with the ObjectMonitor, it is safe to allow async deflation:
        ObjectSynchronizer::_in_use_list.add(monitor);

        log_inflate(current, object, cause);
        if (event.should_commit()) {
          post_monitor_inflate_event(&event, object, cause);
        }
        return monitor;
      } else {
        delete monitor;
        continue;  // Interference -- just retry
      }
    }

    // CASE: unlocked
    // TODO-FIXME: for entry we currently inflate and then try to CAS _owner.
    // If we know we're inflating for entry it's better to inflate by swinging a
    // pre-locked ObjectMonitor pointer into the object header. A successful
    // CAS inflates the object *and* confers ownership to the inflating thread.
    // In the current implementation we use a 2-step mechanism where we CAS()
    // to inflate and then CAS() again to try to swing _owner from null to current.
    // An inflateTry() method that we could call from enter() would be useful.

    assert(mark.is_unlocked(), "invariant: header=" INTPTR_FORMAT, mark.value());
    ObjectMonitor* m = new ObjectMonitor(object);
    // Prepare m for installation - set monitor to initial state.
    m->set_header(mark);

    if (object->cas_set_mark(markWord::encode(m), mark) != mark) {
      delete m;
      m = nullptr;
      continue;
      // interference - the markword changed - just retry.
      // The state-transitions are one-way, so there's no chance of
      // live-lock -- "Inflated" is an absorbing state.
    }

    // Once the ObjectMonitor is configured and the object is associated
    // with the ObjectMonitor, it is safe to allow async deflation:
    ObjectSynchronizer::_in_use_list.add(m);

    log_inflate(current, object, cause);
    if (event.should_commit()) {
      post_monitor_inflate_event(&event, object, cause);
    }
    return m;
  }
}

ObjectMonitor* LightweightSynchronizer::inflate_fast_locked_object(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used for lightweight");
  VerifyThreadState vts(locking_thread, current);
  assert(locking_thread->lock_stack().contains(object), "locking_thread must have object on its lock stack");

  ObjectMonitor* monitor;

  if (!UseObjectMonitorTable) {
    return inflate_into_object_header(object, cause, locking_thread, current);
  }

  // Inflating requires a hash code
  ObjectSynchronizer::FastHashCode(current, object);

  markWord mark = object->mark_acquire();
  assert(!mark.is_unlocked(), "Cannot be unlocked");

  for (;;) {
    // Fetch the monitor from the table
    monitor = get_or_insert_monitor(object, current, cause);

    // ObjectMonitors are always inserted as anonymously owned, and this
    // thread is the current holder of the lock. So unless the entry is stale
    // and contains a deflating monitor it must be anonymously owned.
    if (monitor->has_anonymous_owner()) {
      // The monitor must be anonymously owned if it was added
      assert(monitor == get_monitor_from_table(current, object), "The monitor must be found");
      // New fresh monitor
      break;
    }

    // If the monitor was not anonymously owned then we got a deflating monitor
    // from the table. We need to let the deflator make progress and remove this
    // entry before we are allowed to add a new one.
    os::naked_yield();
    assert(monitor->is_being_async_deflated(), "Should be the reason");
  }

  // Set the mark word; loop to handle concurrent updates to other parts of the mark word
  while (mark.is_fast_locked()) {
    mark = object->cas_set_mark(mark.set_has_monitor(), mark);
  }

  // Indicate that the monitor now has a known owner
  monitor->set_owner_from_anonymous(locking_thread);

  // Remove the entry from the thread's lock stack
  monitor->set_recursions(locking_thread->lock_stack().remove(object) - 1);

  if (locking_thread == current) {
    // Only change the thread local state of the current thread.
    locking_thread->om_set_monitor_cache(monitor);
  }

  return monitor;
}

ObjectMonitor* LightweightSynchronizer::inflate_and_enter(oop object, ObjectSynchronizer::InflateCause cause, JavaThread* locking_thread, JavaThread* current) {
  assert(LockingMode == LM_LIGHTWEIGHT, "only used for lightweight");
  VerifyThreadState vts(locking_thread, current);

  // Note: In some paths (deoptimization) the 'current' thread inflates and
  // enters the lock on behalf of the 'locking_thread' thread.

  ObjectMonitor* monitor = nullptr;

  if (!UseObjectMonitorTable) {
    // Do the old inflate and enter.
    monitor = inflate_into_object_header(object, cause, locking_thread, current);

    bool entered;
    if (locking_thread == current) {
      entered = monitor->enter(locking_thread);
    } else {
      entered = monitor->enter_for(locking_thread);
    }

    // enter returns false for deflation found.
    return entered ? monitor : nullptr;
  }

  NoSafepointVerifier nsv;

  // Lightweight monitors require that hash codes are installed first
  ObjectSynchronizer::FastHashCode(locking_thread, object);

  // Try to get the monitor from the thread-local cache.
  // There's no need to use the cache if we are locking
  // on behalf of another thread.
  if (current == locking_thread) {
    monitor = current->om_get_from_monitor_cache(object);
  }

  // Get or create the monitor
  if (monitor == nullptr) {
    monitor = get_or_insert_monitor(object, current, cause);
  }

  if (monitor->try_enter(locking_thread)) {
    return monitor;
  }

  // Holds is_being_async_deflated() stable throughout this function.
  ObjectMonitorContentionMark contention_mark(monitor);

  // First handle the case where the monitor from the table is deflated
  if (monitor->is_being_async_deflated()) {
    // The MonitorDeflation thread is deflating the monitor. The locking thread
    // must spin until further progress has been made.

    const markWord mark = object->mark_acquire();

    if (mark.has_monitor()) {
      // Waiting on the deflation thread to remove the deflated monitor from the table.
      os::naked_yield();

    } else if (mark.is_fast_locked()) {
      // Some other thread managed to fast-lock the lock, or this is a
      // recursive lock from the same thread; yield for the deflation
      // thread to remove the deflated monitor from the table.
      os::naked_yield();

    } else {
      assert(mark.is_unlocked(), "Implied");
      // Retry immediately
    }

    // Retry
    return nullptr;
  }

  for (;;) {
    const markWord mark = object->mark_acquire();
    // The mark can be in one of the following states:
    // *  inflated     - If the ObjectMonitor owner is anonymous
    //                   and the locking_thread owns the object
    //                   lock, then we make the locking_thread
    //                   the ObjectMonitor owner and remove the
    //                   lock from the locking_thread's lock stack.
    // *  fast-locked  - Coerce it to inflated from fast-locked.
    // *  neutral      - Inflate the object. A successful CAS
    //                   means the locking_thread owns the lock.

    // CASE: inflated
    if (mark.has_monitor()) {
      LockStack& lock_stack = locking_thread->lock_stack();
      if (monitor->has_anonymous_owner() && lock_stack.contains(object)) {
        // The lock is fast-locked by the locking thread,
        // convert it to a held monitor with a known owner.
        monitor->set_owner_from_anonymous(locking_thread);
        monitor->set_recursions(lock_stack.remove(object) - 1);
      }

      break; // Success
    }

    // CASE: fast-locked
    // Could be fast-locked either by locking_thread or by some other thread.
    //
    if (mark.is_fast_locked()) {
      markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark);
      if (old_mark != mark) {
        // CAS failed
        continue;
      }

      // Success! Return inflated monitor.
      LockStack& lock_stack = locking_thread->lock_stack();
      if (lock_stack.contains(object)) {
        // The lock is fast-locked by the locking thread,
        // convert it to a held monitor with a known owner.
        monitor->set_owner_from_anonymous(locking_thread);
        monitor->set_recursions(lock_stack.remove(object) - 1);
      }

      break; // Success
    }

    // CASE: neutral (unlocked)

    // Catch if the object's header is not neutral (not locked and
    // not marked is what we care about here).
    assert(mark.is_neutral(), "invariant: header=" INTPTR_FORMAT, mark.value());
    markWord old_mark = object->cas_set_mark(mark.set_has_monitor(), mark);
    if (old_mark != mark) {
      // CAS failed
      continue;
    }

    // Transitioned from unlocked to monitor means locking_thread owns the lock.
    monitor->set_owner_from_anonymous(locking_thread);

    return monitor;
  }

  if (current == locking_thread) {
    // One round of spinning
    if (monitor->spin_enter(locking_thread)) {
      return monitor;
    }

    // Monitor is contended, take the time before entering to fix the lock stack.
    LockStackInflateContendedLocks().inflate(current);
  }

  // enter can block for safepoints; clear the unhandled object oop
  PauseNoSafepointVerifier pnsv(&nsv);
  object = nullptr;

  if (current == locking_thread) {
    monitor->enter_with_contention_mark(locking_thread, contention_mark);
  } else {
    monitor->enter_for_with_contention_mark(locking_thread, contention_mark);
  }

  return monitor;
}

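// Called during deflation to restore the object's mark word (if the object
// is still alive) and remove the monitor's entry from the table. Entries
// whose object has died are instead purged via LookupMonitor::is_dead.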
void LightweightSynchronizer::deflate_monitor(Thread* current, oop obj, ObjectMonitor* monitor) {
  if (obj != nullptr) {
    deflate_mark_word(obj);
  }
  bool removed = remove_monitor(current, monitor, obj);
  if (obj != nullptr) {
    assert(removed, "Should have removed the entry if obj was alive");
  }
}

ObjectMonitor* LightweightSynchronizer::get_monitor_from_table(Thread* current, oop obj) {
  assert(UseObjectMonitorTable, "must be");
  return ObjectMonitorTable::monitor_get(current, obj);
}

bool LightweightSynchronizer::contains_monitor(Thread* current, ObjectMonitor* monitor) {
  assert(UseObjectMonitorTable, "must be");
  return ObjectMonitorTable::contains_monitor(current, monitor);
}

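// Quick attempt to enter the monitor from thread state _thread_in_Java
// without blocking or safepointing. Returns true if the lock was acquired;
// returning false directs the caller to the slow path.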
bool LightweightSynchronizer::quick_enter(oop obj, BasicLock* lock, JavaThread* current) {
  assert(current->thread_state() == _thread_in_Java, "must be");
  assert(obj != nullptr, "must be");
  NoSafepointVerifier nsv;

  // If quick_enter succeeds with entering, the cache should be in a valid initialized state.
  CacheSetter cache_setter(current, lock);

  LockStack& lock_stack = current->lock_stack();
  if (lock_stack.is_full()) {
    // Always go into runtime if the lock stack is full.
    return false;
  }

  const markWord mark = obj->mark();

#ifndef _LP64
  // Only for 32-bit, which has limited support for fast locking outside the runtime.
  if (lock_stack.try_recursive_enter(obj)) {
    // Recursive lock successful.
    return true;
  }

  if (mark.is_unlocked()) {
    markWord locked_mark = mark.set_fast_locked();
    if (obj->cas_set_mark(locked_mark, mark) == mark) {
      // Successfully fast-locked, push object to lock-stack and return.
      lock_stack.push(obj);
      return true;
    }
  }
#endif

  if (mark.has_monitor()) {
    ObjectMonitor* const monitor = UseObjectMonitorTable ? current->om_get_from_monitor_cache(obj) :
                                                           ObjectSynchronizer::read_monitor(mark);

    if (monitor == nullptr) {
      // Take the slow-path on a cache miss.
      return false;
    }

    if (monitor->try_enter(current)) {
      // ObjectMonitor enter successful.
      cache_setter.set_monitor(monitor);
      return true;
    }
  }

  // Slow-path.
  return false;
}

uint32_t LightweightSynchronizer::get_hash(markWord mark, oop obj, Klass* klass) {
  assert(UseCompactObjectHeaders, "Only with compact i-hash");
  //assert(mark.is_neutral() | mark.is_fast_locked(), "only from neutral or fast-locked mark: " INTPTR_FORMAT, mark.value());
  assert(mark.is_hashed(), "only from hashed or copied object");
  if (mark.is_hashed_expanded()) {
    return obj->int_field(klass->hash_offset_in_bytes(obj, mark));
  } else {
    assert(mark.is_hashed_not_expanded(), "must be hashed");
    assert(hashCode == 6 || hashCode == 2, "must have idempotent hashCode");
    // Already marked as hashed, but not yet copied. Recompute hash and return it.
    return ObjectSynchronizer::get_next_hash(nullptr, obj); // recompute hash
  }
}

uint32_t LightweightSynchronizer::get_hash(markWord mark, oop obj) {
  return get_hash(mark, obj, mark.klass());
}