 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/epsilon/epsilonInitLogger.hpp"
#include "gc/epsilon/epsilonMemoryPool.hpp"
#include "gc/epsilon/epsilonThreadLocalData.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"

jint EpsilonHeap::initialize() {
  size_t align = HeapAlignment;
  size_t init_byte_size = align_up(InitialHeapSize, align);
  size_t max_byte_size  = align_up(MaxHeapSize, align);

  // Initialize backing storage
  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
  _virtual_space.initialize(heap_rs, init_byte_size);

  MemRegion committed_region((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high());

  initialize_reserved_region(heap_rs);

  _space = new ContiguousSpace();
  _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);

  // Precompute hot fields
  _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), align_object_size(EpsilonMaxTLABSize / HeapWordSize));
  _step_counter_update = MIN2<size_t>(max_byte_size / 16, EpsilonUpdateCountersStep);
  _step_heap_print = (EpsilonPrintHeapSteps == 0) ? SIZE_MAX : (max_byte_size / EpsilonPrintHeapSteps);
  _decay_time_ns = (int64_t) EpsilonTLABDecayTime * NANOSECS_PER_MILLISEC;

  // Enable monitoring
  _monitoring_support = new EpsilonMonitoringSupport(this);
  _last_counter_update = 0;
  _last_heap_print = 0;

  // Install barrier set
  BarrierSet::set_barrier_set(new EpsilonBarrierSet());

  // All done, print out the configuration
  EpsilonInitLogger::print();

  return JNI_OK;
}
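
// Note: Epsilon is an experimental collector, enabled with:
//   java -XX:+UnlockExperimentalVMOptions -XX:+UseEpsilonGC ...
// Since nothing is ever reclaimed, running with -Xms equal to -Xmx (and
// optionally -XX:+AlwaysPreTouch) avoids heap commit hiccups at runtime.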

void EpsilonHeap::initialize_serviceability() {
  _pool = new EpsilonMemoryPool(this);
  _memory_manager.add_pool(_pool);
}

GrowableArray<GCMemoryManager*> EpsilonHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(1);
  memory_managers.append(&_memory_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> EpsilonHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_pool);
  return memory_pools;
}

// ...
  assert(min_size <= size,
         "Size honors min size: " SIZE_FORMAT " <= " SIZE_FORMAT, min_size, size);
  assert(size <= _max_tlab_size,
         "Size honors max size: " SIZE_FORMAT " <= " SIZE_FORMAT, size, _max_tlab_size);
  assert(size <= CollectedHeap::max_tlab_size(),
         "Size honors global max size: " SIZE_FORMAT " <= " SIZE_FORMAT, size, CollectedHeap::max_tlab_size());

  if (log_is_enabled(Trace, gc)) {
    ResourceMark rm;
    log_trace(gc)("TLAB size for \"%s\" (Requested: " SIZE_FORMAT "K, Min: " SIZE_FORMAT
                  "K, Max: " SIZE_FORMAT "K, Ergo: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
                  thread->name(),
                  requested_size * HeapWordSize / K,
                  min_size * HeapWordSize / K,
                  _max_tlab_size * HeapWordSize / K,
                  ergo_tlab * HeapWordSize / K,
                  size * HeapWordSize / K);
  }

  // All prepared, let's do it!
  HeapWord* res = allocate_work(size);

  if (res != nullptr) {
    // Allocation successful
    *actual_size = size;
    if (EpsilonElasticTLABDecay) {
      EpsilonThreadLocalData::set_last_tlab_time(thread, time);
    }
    if (EpsilonElasticTLAB && !fits) {
      // If we requested expansion, this is our new ergonomic TLAB size
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, size);
    }
  } else {
    // Allocation failed, reset ergonomics to try to fit smaller TLABs
    if (EpsilonElasticTLAB) {
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
    }
  }

  return res;
}
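
// A note on elastic TLABs (a sketch of the flags involved, see
// epsilon_globals.hpp): with -XX:+EpsilonElasticTLAB the ergonomic TLAB size
// starts small and expands multiplicatively on each refill; with
// -XX:+EpsilonElasticTLABDecay, a thread that has not allocated for
// EpsilonTLABDecayTime ms gets its ergonomic size reset, so bursty threads
// do not keep oversized TLABs.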

HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
  *gc_overhead_limit_was_exceeded = false;
  return allocate_work(size);
}

HeapWord* EpsilonHeap::allocate_loaded_archive_space(size_t size) {
  // Cannot use verbose=true because Metaspace is not initialized
  return allocate_work(size, /* verbose = */ false);
}

void EpsilonHeap::collect(GCCause::Cause cause) {
  switch (cause) {
    case GCCause::_metadata_GC_threshold:
    case GCCause::_metadata_GC_clear_soft_refs:
      // Receiving these causes means the VM itself entered the safepoint for metadata collection.
      // While Epsilon does not do GC, it has to perform sizing adjustments, otherwise we would
      // re-enter the safepoint very soon.

      assert(SafepointSynchronize::is_at_safepoint(), "Expected at safepoint");
      log_info(gc)("GC request for \"%s\" is handled", GCCause::to_string(cause));
      MetaspaceGC::compute_new_size();
      print_metaspace_info();
      break;
    default:
      log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
  }
  _monitoring_support->update_counters();
}
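
// Note: in this baseline version, explicit GC requests (System.gc(),
// jcmd GC.run, and similar) fall into the default branch above and are
// ignored; only the metadata-driven causes get any handling.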

void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
  collect(gc_cause());
}

void EpsilonHeap::object_iterate(ObjectClosure *cl) {
  _space->object_iterate(cl);
}

void EpsilonHeap::print_on(outputStream *st) const {
  st->print_cr("Epsilon Heap");

  _virtual_space.print_on(st);

  if (_space != nullptr) {
    st->print_cr("Allocation space:");
    _space->print_on(st);
  }

  MetaspaceUtils::print_on(st);
}

bool EpsilonHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<EpsilonHeap>::print_location(st, addr);
}

void EpsilonHeap::print_tracing_info() const {
  print_heap_info(used());
  print_metaspace_info();
}

void EpsilonHeap::print_heap_info(size_t used) const {
  size_t reserved  = max_capacity();
  size_t committed = capacity();

  if (reserved != 0) {
    log_info(gc)("Heap: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                 SIZE_FORMAT "%s (%.2f%%) used",
                 byte_size_in_proper_unit(reserved),  proper_unit_for_byte_size(reserved),
                 byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
                 committed * 100.0 / reserved,
                 byte_size_in_proper_unit(used), proper_unit_for_byte_size(used),
                 used * 100.0 / reserved);
  } else {
    log_info(gc)("Heap: no reliable data");
  }
}

void EpsilonHeap::print_metaspace_info() const {
  MetaspaceCombinedStats stats = MetaspaceUtils::get_combined_statistics();
  size_t reserved  = stats.reserved();
  size_t committed = stats.committed();
  size_t used      = stats.used();

  if (reserved != 0) {
    log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                            SIZE_FORMAT "%s (%.2f%%) used",
                            byte_size_in_proper_unit(reserved),  proper_unit_for_byte_size(reserved),
                            byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
                            committed * 100.0 / reserved,
                            byte_size_in_proper_unit(used), proper_unit_for_byte_size(used),
                            used * 100.0 / reserved);
  } else {
    log_info(gc, metaspace)("Metaspace: no reliable data");
  }
}
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "code/codeCache.hpp"
#include "gc/epsilon/epsilonHeap.hpp"
#include "gc/epsilon/epsilonInitLogger.hpp"
#include "gc/epsilon/epsilonMemoryPool.hpp"
#include "gc/epsilon/epsilonThreadLocalData.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/locationPrinter.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/oopStorageSet.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metaspaceUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "nmt/memTracker.hpp"
#include "oops/compressedOops.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/thread.hpp"
#include "runtime/threads.hpp"
#include "runtime/vmOperations.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "utilities/stack.inline.hpp"

jint EpsilonHeap::initialize() {
  size_t align = HeapAlignment;
  size_t init_byte_size = align_up(InitialHeapSize, align);
  size_t max_byte_size  = align_up(MaxHeapSize, align);

  // Initialize backing storage
  ReservedHeapSpace heap_rs = Universe::reserve_heap(max_byte_size, align);
  _virtual_space.initialize(heap_rs, init_byte_size);

  MemRegion committed_region((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high());

  initialize_reserved_region(heap_rs);

  _space = new ContiguousSpace();
  _space->initialize(committed_region, /* clear_space = */ true, /* mangle_space = */ true);

  // Precompute hot fields
  _max_tlab_size = MIN2(CollectedHeap::max_tlab_size(), align_object_size(EpsilonMaxTLABSize / HeapWordSize));
  _step_counter_update = MIN2<size_t>(max_byte_size / 16, EpsilonUpdateCountersStep);
  _step_heap_print = (EpsilonPrintHeapSteps == 0) ? SIZE_MAX : (max_byte_size / EpsilonPrintHeapSteps);
  _decay_time_ns = (int64_t) EpsilonTLABDecayTime * NANOSECS_PER_MILLISEC;

  // Enable monitoring
  _monitoring_support = new EpsilonMonitoringSupport(this);
  _last_counter_update = 0;
  _last_heap_print = 0;

  // Install barrier set
  BarrierSet::set_barrier_set(new EpsilonBarrierSet());

  // Initialize the marking bitmap, but do not commit it yet. Only reserve
  // it when the sliding GC is enabled, so the default configuration pays
  // no address-space cost for it.
  if (EpsilonSlidingGC) {
    size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
    size_t bitmap_size = align_up(MarkBitMap::compute_size(heap_rs.size()), bitmap_page_size);

    ReservedSpace bitmap(bitmap_size, bitmap_page_size);
    MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
    _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
    MemRegion heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);
    _bitmap.initialize(heap_region, _bitmap_region);
  }

  // All done, print out the configuration
  EpsilonInitLogger::print();

  return JNI_OK;
}
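
// Note: the bitmap above is only reserved here; entry_collect() commits it at
// the start of a cycle and uncommits it afterwards, so applications that never
// GC do not pay the bitmap memory cost. The sliding GC itself is opt-in, e.g.:
//   java -XX:+UnlockExperimentalVMOptions -XX:+UseEpsilonGC \
//        -XX:+EpsilonSlidingGC ...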

void EpsilonHeap::initialize_serviceability() {
  _pool = new EpsilonMemoryPool(this);
  _memory_manager.add_pool(_pool);
}

GrowableArray<GCMemoryManager*> EpsilonHeap::memory_managers() {
  GrowableArray<GCMemoryManager*> memory_managers(1);
  memory_managers.append(&_memory_manager);
  return memory_managers;
}

GrowableArray<MemoryPool*> EpsilonHeap::memory_pools() {
  GrowableArray<MemoryPool*> memory_pools(1);
  memory_pools.append(_pool);
  return memory_pools;
}

// ...
  assert(min_size <= size,
         "Size honors min size: " SIZE_FORMAT " <= " SIZE_FORMAT, min_size, size);
  assert(size <= _max_tlab_size,
         "Size honors max size: " SIZE_FORMAT " <= " SIZE_FORMAT, size, _max_tlab_size);
  assert(size <= CollectedHeap::max_tlab_size(),
         "Size honors global max size: " SIZE_FORMAT " <= " SIZE_FORMAT, size, CollectedHeap::max_tlab_size());

  if (log_is_enabled(Trace, gc)) {
    ResourceMark rm;
    log_trace(gc)("TLAB size for \"%s\" (Requested: " SIZE_FORMAT "K, Min: " SIZE_FORMAT
                  "K, Max: " SIZE_FORMAT "K, Ergo: " SIZE_FORMAT "K) -> " SIZE_FORMAT "K",
                  thread->name(),
                  requested_size * HeapWordSize / K,
                  min_size * HeapWordSize / K,
                  _max_tlab_size * HeapWordSize / K,
                  ergo_tlab * HeapWordSize / K,
                  size * HeapWordSize / K);
  }

  // All prepared, let's do it!
  HeapWord* res = allocate_or_collect_work(size);

  if (res != nullptr) {
    // Allocation successful
    *actual_size = size;
    if (EpsilonElasticTLABDecay) {
      EpsilonThreadLocalData::set_last_tlab_time(thread, time);
    }
    if (EpsilonElasticTLAB && !fits) {
      // If we requested expansion, this is our new ergonomic TLAB size
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, size);
    }
  } else {
    // Allocation failed, reset ergonomics to try to fit smaller TLABs
    if (EpsilonElasticTLAB) {
      EpsilonThreadLocalData::set_ergo_tlab_size(thread, 0);
    }
  }

  return res;
}

HeapWord* EpsilonHeap::mem_allocate(size_t size, bool *gc_overhead_limit_was_exceeded) {
  *gc_overhead_limit_was_exceeded = false;
  return allocate_or_collect_work(size);
}

HeapWord* EpsilonHeap::allocate_loaded_archive_space(size_t size) {
  // Cannot use verbose=true because Metaspace is not initialized
  return allocate_work(size, /* verbose = */ false);
}

void EpsilonHeap::collect(GCCause::Cause cause) {
  switch (cause) {
    case GCCause::_metadata_GC_threshold:
    case GCCause::_metadata_GC_clear_soft_refs:
      // Receiving these causes means the VM itself entered the safepoint for metadata collection.
      // While Epsilon does not collect the heap for these causes, it has to perform sizing
      // adjustments, otherwise we would re-enter the safepoint very soon.

      assert(SafepointSynchronize::is_at_safepoint(), "Expected at safepoint");
      log_info(gc)("GC request for \"%s\" is handled", GCCause::to_string(cause));
      MetaspaceGC::compute_new_size();
      print_metaspace_info();
      break;
    default:
      if (EpsilonSlidingGC) {
        // Sliding GC is enabled: run the collection. If we are already at a
        // safepoint, collect right away; otherwise go through the VM operation
        // that brings us to a safepoint first.
        if (SafepointSynchronize::is_at_safepoint()) {
          entry_collect(cause);
        } else {
          vmentry_collect(cause);
        }
      } else {
        log_info(gc)("GC request for \"%s\" is ignored", GCCause::to_string(cause));
      }
  }
  _monitoring_support->update_counters();
}
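
// Note: with -XX:+EpsilonSlidingGC, explicit GC requests (System.gc() and
// friends) are no longer ignored: they funnel into the mark-compact cycle
// below. Allocation failures do the same when -XX:+EpsilonImplicitGC is also
// set (see allocate_or_collect_work()).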

void EpsilonHeap::do_full_collection(bool clear_all_soft_refs) {
  collect(gc_cause());
}

void EpsilonHeap::object_iterate(ObjectClosure *cl) {
  _space->object_iterate(cl);
}

void EpsilonHeap::print_on(outputStream *st) const {
  st->print_cr("Epsilon Heap");

  _virtual_space.print_on(st);

  if (_space != nullptr) {
    st->print_cr("Allocation space:");
    _space->print_on(st);
  }

  MetaspaceUtils::print_on(st);
}

bool EpsilonHeap::print_location(outputStream* st, void* addr) const {
  return BlockLocationPrinter<EpsilonHeap>::print_location(st, addr);
}

void EpsilonHeap::print_tracing_info() const {
  print_heap_info(used());
  print_metaspace_info();
}

void EpsilonHeap::print_heap_info(size_t used) const {
  size_t reserved  = max_capacity();
  size_t committed = capacity();

  if (reserved != 0) {
    log_info(gc)("Heap: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                 SIZE_FORMAT "%s (%.2f%%) used",
                 byte_size_in_proper_unit(reserved),  proper_unit_for_byte_size(reserved),
                 byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
                 percent_of(committed, reserved),
                 byte_size_in_proper_unit(used), proper_unit_for_byte_size(used),
                 percent_of(used, reserved));
  } else {
    log_info(gc)("Heap: no reliable data");
  }
}

void EpsilonHeap::print_metaspace_info() const {
  MetaspaceCombinedStats stats = MetaspaceUtils::get_combined_statistics();
  size_t reserved  = stats.reserved();
  size_t committed = stats.committed();
  size_t used      = stats.used();

  if (reserved != 0) {
    log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "%s reserved, " SIZE_FORMAT "%s (%.2f%%) committed, "
                            SIZE_FORMAT "%s (%.2f%%) used",
                            byte_size_in_proper_unit(reserved),  proper_unit_for_byte_size(reserved),
                            byte_size_in_proper_unit(committed), proper_unit_for_byte_size(committed),
                            percent_of(committed, reserved),
                            byte_size_in_proper_unit(used), proper_unit_for_byte_size(used),
                            percent_of(used, reserved));
  } else {
    log_info(gc, metaspace)("Metaspace: no reliable data");
  }
}

// ------------------ EXPERIMENTAL MARK-COMPACT -------------------------------
//
// This implements a trivial Lisp2-style sliding collector:
//     https://en.wikipedia.org/wiki/Mark-compact_algorithm#LISP2_algorithm
//
// The goal for this implementation is to be as simple as possible, ignoring
// non-trivial performance optimizations. This collector does not implement
// reference processing: no soft/weak/phantom/finalizable references are ever
// cleared. It also does not implement class unloading and other runtime
// cleanups.
//
// VM operation that executes the collection cycle under safepoint
class VM_EpsilonCollect : public VM_Operation {
private:
  const GCCause::Cause _cause;
  EpsilonHeap* const _heap;
  static size_t _req_id;
public:
  VM_EpsilonCollect(GCCause::Cause cause) : VM_Operation(),
                                            _cause(cause),
                                            _heap(EpsilonHeap::heap()) {}

  VM_Operation::VMOp_Type type() const { return VMOp_EpsilonCollect; }
  const char* name()             const { return "Epsilon Collection"; }

  virtual bool doit_prologue() {
    size_t id = Atomic::load_acquire(&_req_id);

    // Need to take the Heap lock before managing backing storage.
    Heap_lock->lock();

    // The Heap lock also naturally serializes GC requests, and allows us to
    // coalesce back-to-back GC requests from many threads. Skip this GC if
    // another request was already handled while we were waiting for the lock.
    if (id < Atomic::load_acquire(&_req_id)) {
      Heap_lock->unlock();
      return false;
    }

    // No contenders. Start handling a new GC request.
    Atomic::inc(&_req_id);
    return true;
  }

  virtual void doit() {
    _heap->entry_collect(_cause);
  }

  virtual void doit_epilogue() {
    Heap_lock->unlock();
  }
};

size_t VM_EpsilonCollect::_req_id = 0;
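
// Worked example of the coalescing above: if ten threads fail allocation at
// about the same time, each submits a VM_EpsilonCollect. The first one to take
// the Heap lock bumps _req_id and runs the GC; the other nine then observe a
// newer _req_id in doit_prologue() and bail out, instead of running nine
// redundant back-to-back cycles.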

void EpsilonHeap::vmentry_collect(GCCause::Cause cause) {
  VM_EpsilonCollect vmop(cause);
  VMThread::execute(&vmop);
}

HeapWord* EpsilonHeap::allocate_or_collect_work(size_t size, bool verbose) {
  HeapWord* res = allocate_work(size);
  if (res == nullptr && EpsilonSlidingGC && EpsilonImplicitGC) {
    vmentry_collect(GCCause::_allocation_failure);
    // TODO: This looks incorrect
    GCLocker::stall_until_clear();
    res = allocate_work(size, verbose);
  }
  return res;
}
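
// Note: an allocation failure triggers at most one implicit GC attempt here;
// if the retry fails too, the caller observes nullptr and the regular
// OutOfMemoryError path applies.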

typedef Stack<oop, mtGC> EpsilonMarkStack;

void EpsilonHeap::process_roots(OopClosure* cl) {
  // Need to tell the runtime we are about to walk the roots with 1 thread
  StrongRootsScope scope(1);

  // Need to adapt oop closure for some special root types.
  CLDToOopClosure clds(cl, ClassLoaderData::_claim_none);
  MarkingCodeBlobClosure blobs(cl, CodeBlobToOopClosure::FixRelocations, true); // TODO: Keepalive?

  // Strong roots: always reachable roots

  // General strong roots that are registered in OopStorages
  for (auto id : EnumRange<OopStorageSet::StrongId>()) {
    OopStorageSet::storage(id)->oops_do(cl);
  }

  // Subsystems that still have their own root handling
  ClassLoaderDataGraph::cld_do(&clds);
  Threads::possibly_parallel_oops_do(false, cl, &blobs);

  {
    MutexLocker lock(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    CodeCache::blobs_do(&blobs);
  }

  // Weak roots: in an advanced GC these roots would be skipped during
  // the initial scan, and walked again after the marking is complete.
  // Then, we could discover which roots are not actually pointing
  // to surviving Java objects, and either clean the roots, or mark them.
  // The current simple implementation does not handle weak roots specially,
  // and therefore we mark through them as if they were strong roots.
  for (auto id : EnumRange<OopStorageSet::WeakId>()) {
    OopStorageSet::storage(id)->oops_do(cl);
  }
}
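
// Note: because weak roots are marked as strong above, nothing weakly
// referenced ever dies either, which is consistent with the "no reference
// processing" caveat in the section header.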

// Walk the marking bitmap and call object closure on every marked object.
// This is much faster than walking a (very sparse) parsable heap, but it
// takes up to 1/64-th of heap size for the bitmap: for example, a 64 GB
// heap needs up to 1 GB of bitmap.
void EpsilonHeap::walk_bitmap(ObjectClosure* cl) {
  HeapWord* limit = _space->top();
  HeapWord* addr = _bitmap.get_next_marked_addr(_space->bottom(), limit);
  while (addr < limit) {
    oop obj = cast_to_oop(addr);
    assert(_bitmap.is_marked(obj), "sanity");
    cl->do_object(obj);
    // Step over the current mark, then search for the next marked address
    addr += 1;
    if (addr < limit) {
      addr = _bitmap.get_next_marked_addr(addr, limit);
    }
  }
}

class EpsilonScanOopClosure : public BasicOopIterateClosure {
private:
  EpsilonMarkStack* const _stack;
  MarkBitMap* const _bitmap;

  template <class T>
  void do_oop_work(T* p) {
    // p is the pointer to the memory location where the oop is; load the
    // value from it, unpacking the compressed reference, if needed:
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);

      // Object is discovered. See if it is marked already. If not,
      // mark and push it on the mark stack for further traversal. A
      // non-atomic check-and-set would do, as this closure is called
      // by a single thread.
      if (!_bitmap->is_marked(obj)) {
        _bitmap->mark(obj);
        _stack->push(obj);
      }
    }
  }

public:
  EpsilonScanOopClosure(EpsilonMarkStack* stack, MarkBitMap* bitmap) :
          _stack(stack), _bitmap(bitmap) {}
  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};

class EpsilonCalcNewLocationObjectClosure : public ObjectClosure {
private:
  HeapWord* _compact_point;
  PreservedMarks* const _preserved_marks;

public:
  EpsilonCalcNewLocationObjectClosure(HeapWord* start, PreservedMarks* pm) :
          _compact_point(start),
          _preserved_marks(pm) {}

  void do_object(oop obj) {
    // Record the new location of the object: it is the current compaction
    // point. If the object stays at the same location (which is true for
    // objects in the dense prefix, which we would normally get), do not
    // bother recording the move, letting downstream code ignore it.
    if (obj != cast_to_oop(_compact_point)) {
      markWord mark = obj->mark();
      _preserved_marks->push_if_necessary(obj, mark);
      obj->forward_to(cast_to_oop(_compact_point));
    }
    _compact_point += obj->size();
  }

  HeapWord* compact_point() {
    return _compact_point;
  }
};
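
// Illustrative example (hypothetical addresses): with bottom at 0x1000 and
// live objects A at 0x1000 and B at 0x3000, A coincides with the compaction
// point and keeps its mark word untouched, while B gets forwarded to
// 0x1000 + size-in-bytes(A). This is exactly the LISP2 "slide left".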

class EpsilonAdjustPointersOopClosure : public BasicOopIterateClosure {
private:
  template <class T>
  void do_oop_work(T* p) {
    // p is the pointer to the memory location where the oop is; load the
    // value from it, unpacking the compressed reference, if needed:
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);

      // Rewrite the current pointer to the object with its forwardee.
      // Skip the write if an update is not needed.
      if (obj->is_forwarded()) {
        oop fwd = obj->forwardee();
        assert(fwd != nullptr, "just checking");
        RawAccess<>::oop_store(p, fwd);
      }
    }
  }

public:
  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};

class EpsilonAdjustPointersObjectClosure : public ObjectClosure {
private:
  EpsilonAdjustPointersOopClosure _cl;
public:
  void do_object(oop obj) {
    // Apply the updates to all references reachable from the current object:
    obj->oop_iterate(&_cl);
  }
};

class EpsilonMoveObjectsObjectClosure : public ObjectClosure {
private:
  size_t _moved;
public:
  EpsilonMoveObjectsObjectClosure() : ObjectClosure(), _moved(0) {}

  void do_object(oop obj) {
    // Copy the object to its new location, if needed. This is the final
    // step, so we have to re-initialize its new mark word, dropping the
    // forwardee data from it. Objects only slide towards lower addresses,
    // and we visit them in ascending address order, so the conjoint copy
    // never clobbers a not-yet-moved object.
    if (obj->is_forwarded()) {
      oop fwd = obj->forwardee();
      assert(fwd != nullptr, "just checking");
      Copy::aligned_conjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(fwd), obj->size());
      fwd->init_mark();
      _moved++;
    }
  }

  size_t moved() {
    return _moved;
  }
};

class EpsilonVerifyOopClosure : public BasicOopIterateClosure {
private:
  EpsilonHeap* const _heap;
  EpsilonMarkStack* const _stack;
  MarkBitMap* const _bitmap;

  template <class T>
  void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      if (!_bitmap->is_marked(obj)) {
        _bitmap->mark(obj);

        guarantee(_heap->is_in(obj),        "Is in heap: "   PTR_FORMAT, p2i(obj));
        guarantee(oopDesc::is_oop(obj),     "Is an object: " PTR_FORMAT, p2i(obj));
        guarantee(!obj->mark().is_marked(), "Mark is gone: " PTR_FORMAT, p2i(obj));

        _stack->push(obj);
      }
    }
  }

public:
  EpsilonVerifyOopClosure(EpsilonMarkStack* stack, MarkBitMap* bitmap) :
          _heap(EpsilonHeap::heap()), _stack(stack), _bitmap(bitmap) {}
  virtual void do_oop(oop* p)       { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};

void EpsilonHeap::entry_collect(GCCause::Cause cause) {
  if (GCLocker::check_active_before_gc()) {
    return;
  }

  GCIdMark mark;
  GCTraceTime(Info, gc) time("Lisp2-style Mark-Compact", nullptr, cause, true);

  // Some statistics, for fun and profit:
  size_t stat_reachable_roots = 0;
  size_t stat_reachable_heap = 0;
  size_t stat_moved = 0;
  size_t stat_preserved_marks = 0;

  {
    GCTraceTime(Info, gc) time("Step 0: Prologue", nullptr);

    // Commit marking bitmap memory. There are several upsides of doing this
    // before the cycle: no memory is taken if GC is not happening, the memory
    // is "cleared" on first touch, and untouched parts of the bitmap are
    // mapped to the zero page, boosting performance on sparse heaps.
    if (!os::commit_memory((char*)_bitmap_region.start(), _bitmap_region.byte_size(), false)) {
      log_warning(gc)("Could not commit native memory for marking bitmap, GC failed");
      return;
    }

    // We do not need a parsable heap for this algorithm to work, but we want
    // threads to give up their TLABs.
    ensure_parsability(true);

#if COMPILER2_OR_JVMCI
    // Derived pointers would be re-discovered during the mark.
    // Clear and activate the table for them.
    DerivedPointerTable::clear();
#endif
  }

  {
    GCTraceTime(Info, gc) time("Step 1: Mark", nullptr);

    // Marking stack and the closure that does most of the work. The closure
    // scans the outgoing references, marks them, and pushes newly-marked
    // objects to the stack for further processing.
    EpsilonMarkStack stack;
    EpsilonScanOopClosure cl(&stack, &_bitmap);

    // Seed the marking with roots.
    process_roots(&cl);
    stat_reachable_roots = stack.size();

    // Scan the rest of the heap until we run out of objects. Termination is
    // guaranteed, because all reachable objects would be marked eventually.
    while (!stack.is_empty()) {
      oop obj = stack.pop();
      obj->oop_iterate(&cl);
      stat_reachable_heap++;
    }

#if COMPILER2_OR_JVMCI
    // No more derived pointers discovered after marking is done.
    DerivedPointerTable::set_active(false);
#endif
  }

  // We are going to store forwarding information (where the new copy resides)
  // in mark words. Some of those mark words need to be carefully preserved.
  // This is a utility that maintains the list of those special mark words.
  PreservedMarks preserved_marks;

  // New top of the allocated space.
  HeapWord* new_top;

  {
    GCTraceTime(Info, gc) time("Step 2: Calculate new locations", nullptr);

    // Walk all alive objects, compute their new addresses and store those
    // addresses in mark words. Optionally preserve some marks.
    EpsilonCalcNewLocationObjectClosure cl(_space->bottom(), &preserved_marks);
    walk_bitmap(&cl);

    // After addresses are calculated, we know the new top for the allocated
    // space. We cannot set it just yet, because some asserts check that objects
    // are "in heap" based on the current "top".
    new_top = cl.compact_point();

    stat_preserved_marks = preserved_marks.size();
  }

  {
    GCTraceTime(Info, gc) time("Step 3: Adjust pointers", nullptr);

    // Walk all alive objects _and their reference fields_, and put "new
    // addresses" there. We know the new addresses from the forwarding data
    // in mark words. Take care of the heap objects first.
    EpsilonAdjustPointersObjectClosure cl;
    walk_bitmap(&cl);

    // Now do the same, but for all VM roots, which reference the objects on
    // their own: their references should also be updated.
    EpsilonAdjustPointersOopClosure cli;
    process_roots(&cli);

    // Finally, make sure preserved marks know the objects are about to move.
    preserved_marks.adjust_during_full_gc();
  }

  {
    GCTraceTime(Info, gc) time("Step 4: Move objects", nullptr);

    // Move all alive objects to their new locations. All the references are
    // already adjusted at the previous step.
    EpsilonMoveObjectsObjectClosure cl;
    walk_bitmap(&cl);
    stat_moved = cl.moved();

    // Now that all objects are moved to their proper locations, we can retract
    // the "top" of the allocation space to the end of the compacted prefix.
    _space->set_top(new_top);
  }

  {
    GCTraceTime(Info, gc) time("Step 5: Epilogue", nullptr);

    // Restore all special mark words.
    preserved_marks.restore();

#if COMPILER2_OR_JVMCI
    // Tell the rest of runtime we have finished the GC.
    DerivedPointerTable::update_pointers();
#endif

    // Verification code walks the entire heap and verifies nothing is broken.
    if (EpsilonVerify) {
      // The basic implementation turns the heap into an entirely parsable one
      // with only alive objects, which means we could just walk the heap object
      // by object and verify it. But it would be inconvenient for verification
      // to assume the heap has only alive objects. Any future change that leaves
      // at least one dead object with dead outgoing references would fail the
      // verification. Therefore, it makes more sense to mark through the heap
      // again, not assuming objects are all alive.
      EpsilonMarkStack stack;
      EpsilonVerifyOopClosure cl(&stack, &_bitmap);

      _bitmap.clear();

      // Verify all roots are correct, and that we have the same number of
      // objects reachable from roots.
      process_roots(&cl);

      size_t verified_roots = stack.size();
      guarantee(verified_roots == stat_reachable_roots,
                "Verification discovered " SIZE_FORMAT " roots out of " SIZE_FORMAT,
                verified_roots, stat_reachable_roots);

      // Verify the rest of the heap is correct, and that we have the same
      // number of objects reachable from the heap.
      size_t verified_heap = 0;
      while (!stack.is_empty()) {
        oop obj = stack.pop();
        obj->oop_iterate(&cl);
        verified_heap++;
      }

      guarantee(verified_heap == stat_reachable_heap,
                "Verification discovered " SIZE_FORMAT " heap objects out of " SIZE_FORMAT,
                verified_heap, stat_reachable_heap);

      // Ask parts of runtime to verify themselves too
      Universe::verify("Epsilon");
    }

    // Marking bitmap is not needed anymore
    if (!os::uncommit_memory((char*)_bitmap_region.start(), _bitmap_region.byte_size())) {
      log_warning(gc)("Could not uncommit native memory for marking bitmap");
    }

    // Return all memory back if so requested. On large heaps, this would
    // take a while.
    if (EpsilonUncommit) {
      _virtual_space.shrink_by((_space->end() - new_top) * HeapWordSize);
      _space->set_end((HeapWord*)_virtual_space.high());
    }
  }

  size_t stat_reachable = stat_reachable_roots + stat_reachable_heap;
  log_info(gc)("GC Stats: " SIZE_FORMAT " (%.2f%%) reachable from roots, " SIZE_FORMAT " (%.2f%%) reachable from heap, "
               SIZE_FORMAT " (%.2f%%) moved, " SIZE_FORMAT " (%.2f%%) markwords preserved",
               stat_reachable_roots, percent_of(stat_reachable_roots, stat_reachable),
               stat_reachable_heap,  percent_of(stat_reachable_heap,  stat_reachable),
               stat_moved,           percent_of(stat_moved,           stat_reachable),
               stat_preserved_marks, percent_of(stat_preserved_marks, stat_reachable));

  print_heap_info(used());
  print_metaspace_info();
}

void EpsilonHeap::pin_object(JavaThread* thread, oop obj) {
  GCLocker::lock_critical(thread);
}

void EpsilonHeap::unpin_object(JavaThread* thread, oop obj) {
  GCLocker::unlock_critical(thread);
}
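
// Note: object pinning piggybacks on GCLocker critical sections; while any
// thread holds one, entry_collect() backs off via check_active_before_gc(),
// so pinned objects are never moved.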