                                                 bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.

  assert(thread == rthread, "must be");

  Label done;
  Label runtime;

  assert_different_registers(obj, pre_val, tmp1, tmp2);
  assert(pre_val != noreg && tmp1 != noreg && tmp2 != noreg, "expecting a register");

  generate_pre_barrier_fast_path(masm, thread, tmp1);
  // If marking is not active (*(mark queue active address) == 0), jump to done
  __ cbzw(tmp1, done);
  generate_pre_barrier_slow_path(masm, obj, pre_val, thread, tmp1, tmp2, done, runtime);
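
  // The fast/slow path pair above is logically equivalent to the following
  // sketch (illustrative pseudocode; the helper names are not the exact VM
  // internals):
  //
  //   if (thread->satb_mark_queue().is_active()) {   // tmp1 != 0
  //     if (obj != noreg) pre_val = *obj;            // load previous value
  //     if (pre_val != nullptr)
  //       satb_enqueue(pre_val);                     // may fall into runtime
  //   }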

  __ bind(runtime);

  // save the live input values
  RegSet saved = RegSet::of(pre_val);
  FloatRegSet fsaved;

  // Barriers might be emitted when converting between (scalarized) calling
  // conventions for inline types. Save all argument registers before calling
  // into the runtime.
  if (EnableValhalla && InlineTypePassFieldsAsArgs) {
    if (tosca_live) saved += RegSet::of(r0);
    if (obj != noreg) saved += RegSet::of(obj);
    saved += RegSet::of(j_rarg0, j_rarg1, j_rarg2, j_rarg3);
    saved += RegSet::of(j_rarg4, j_rarg5, j_rarg6, j_rarg7);

    fsaved += FloatRegSet::of(j_farg0, j_farg1, j_farg2, j_farg3);
    fsaved += FloatRegSet::of(j_farg4, j_farg5, j_farg6, j_farg7);

    __ push(saved, sp);
    __ push_fp(fsaved, sp);
  } else {
    __ push_call_clobbered_registers();
  }

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that *(rfp + frame::interpreter_frame_last_sp) == nullptr.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then rfp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.

  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
  }

  if (EnableValhalla && InlineTypePassFieldsAsArgs) {
    __ pop_fp(fsaved, sp);
    __ pop(saved, sp);
  } else {
    __ pop_call_clobbered_registers();
  }

  __ bind(done);
}

static void generate_post_barrier_fast_path(MacroAssembler* masm,
                                            const Register store_addr,
                                            const Register new_val,
                                            const Register tmp1,
                                            const Register tmp2,
                                            Label& done,
                                            bool new_val_may_be_null) {
  // Does store cross heap regions?
  __ eor(tmp1, store_addr, new_val);                   // tmp1 := store address ^ new value
  __ lsr(tmp1, tmp1, G1HeapRegion::LogOfHRGrainBytes); // tmp1 := ((store address ^ new value) >> LogOfHRGrainBytes)
  __ cbz(tmp1, done);
  // Crosses regions, storing null?
  if (new_val_may_be_null) {
    __ cbz(new_val, done);
  }
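
  // The two filters above implement this check (illustrative C-style
  // sketch): regions are aligned at grain-size boundaries, so the XOR is
  // zero above the low LogOfHRGrainBytes bits exactly when both addresses
  // fall into the same heap region.
  //
  //   if ((((uintptr_t)store_addr ^ (uintptr_t)new_val)
  //        >> G1HeapRegion::LogOfHRGrainBytes) == 0) goto done; // same region
  //   if (new_val == nullptr) goto done;                        // null store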

// ...

                                                Register store_addr,
                                                Register new_val,
                                                Register thread,
                                                Register tmp1,
                                                Register tmp2) {
  assert(thread == rthread, "must be");
  assert_different_registers(store_addr, new_val, thread, tmp1, tmp2,
                             rscratch1);
  assert(store_addr != noreg && new_val != noreg && tmp1 != noreg
         && tmp2 != noreg, "expecting a register");

  Label done;
  Label runtime;

  generate_post_barrier_fast_path(masm, store_addr, new_val, tmp1, tmp2, done, true /* new_val_may_be_null */);
  // If card is young, jump to done
  __ br(Assembler::EQ, done);
  generate_post_barrier_slow_path(masm, thread, tmp1, tmp2, done, runtime);
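
  // Logically the emitted post barrier does roughly the following
  // (illustrative sketch; the card table details live in the elided
  // helpers above):
  //
  //   card = card_table_base + ((uintptr_t)store_addr >> card_shift);
  //   if (*card == g1_young_card_val) goto done;   // fast path above
  //   membar(StoreLoad);
  //   if (*card != dirty_card_val) {
  //     *card = dirty_card_val;
  //     enqueue(card);                             // may fall into runtime
  //   }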

  __ bind(runtime);

  // save the live input values
  RegSet saved = RegSet::of(store_addr);
  FloatRegSet fsaved;

  // Barriers might be emitted when converting between (scalarized) calling
  // conventions for inline types. Save all argument registers before calling
  // into the runtime.
  if (EnableValhalla && InlineTypePassFieldsAsArgs) {
    saved += RegSet::of(j_rarg0, j_rarg1, j_rarg2, j_rarg3);
    saved += RegSet::of(j_rarg4, j_rarg5, j_rarg6, j_rarg7);

    fsaved += FloatRegSet::of(j_farg0, j_farg1, j_farg2, j_farg3);
    fsaved += FloatRegSet::of(j_farg4, j_farg5, j_farg6, j_farg7);
  }

  __ push(saved, sp);
  __ push_fp(fsaved, sp);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), tmp1, thread);
  __ pop_fp(fsaved, sp);
  __ pop(saved, sp);

  __ bind(done);
}

#if defined(COMPILER2)

static void generate_c2_barrier_runtime_call(MacroAssembler* masm, G1BarrierStubC2* stub, const Register arg, const address runtime_path) {
  SaveLiveRegisters save_registers(masm, stub);
  if (c_rarg0 != arg) {
    __ mov(c_rarg0, arg);
  }
  __ mov(c_rarg1, rthread);
  __ mov(rscratch1, runtime_path);
  __ blr(rscratch1);
}
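
// The helper above assumes the runtime entry has the two-argument C
// signature used by G1BarrierSetRuntime::write_ref_field_{pre,post}_entry:
// the payload (pre_val or card address) goes in c_rarg0 and the current
// thread in c_rarg1.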

void G1BarrierSetAssembler::g1_write_barrier_pre_c2(MacroAssembler* masm,
                                                    Register obj,
                                                    Register pre_val,
// ...

  ModRefBarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp2);
  if (on_oop && on_reference) {
    // LR is live. It must be saved around calls.
    __ enter(/*strip_ret_addr*/true); // barrier may call runtime
    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    g1_write_barrier_pre(masm /* masm */,
                         noreg /* obj */,
                         dst /* pre_val */,
                         rthread /* thread */,
                         tmp1 /* tmp1 */,
                         tmp2 /* tmp2 */,
                         true /* tosca_live */,
                         true /* expand_call */);
    __ leave();
  }
}
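
// Example (illustrative): the intrinsified Reference.get() loads the
// referent field with weak-reference decorators and ends up here, so the
// value just loaded into dst is logged in the SATB buffer and stays live
// for the current concurrent marking cycle.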

void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                         Address dst, Register val, Register tmp1, Register tmp2, Register tmp3) {

  bool in_heap = (decorators & IN_HEAP) != 0;
  bool as_normal = (decorators & AS_NORMAL) != 0;
  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;

  bool needs_pre_barrier = as_normal && !dest_uninitialized;
  bool needs_post_barrier = (val != noreg && in_heap);

  assert_different_registers(val, tmp1, tmp2, tmp3);

  // flatten object address if needed
  if (dst.index() == noreg && dst.offset() == 0) {
    if (dst.base() != tmp3) {
      __ mov(tmp3, dst.base());
    }
  } else {
    __ lea(tmp3, dst);
  }
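
  // E.g. (illustrative) a store to Address(obj, 16) materializes the exact
  // destination via lea, while a plain Address(obj) only copies the base
  // register, so the barriers below always see one flat address in tmp3.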

  if (needs_pre_barrier) {
    g1_write_barrier_pre(masm,
                         tmp3 /* obj */,
                         tmp2 /* pre_val */,
                         rthread /* thread */,
                         tmp1 /* tmp1 */,
                         rscratch2 /* tmp2 */,
                         val != noreg /* tosca_live */,
                         false /* expand_call */);
  }

  if (val == noreg) {
    BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), noreg, noreg, noreg, noreg);
  } else {
    // G1 barrier needs uncompressed oop for region cross check.
    Register new_val = val;
    if (needs_post_barrier) {
      if (UseCompressedOops) {
        new_val = rscratch2;
        __ mov(new_val, val);
      }
    }
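
    // With compressed oops the store below writes a narrow oop, but the
    // region cross check in the post barrier compares full addresses, so
    // an uncompressed copy of val is kept in rscratch2 for that check.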

    BarrierSetAssembler::store_at(masm, decorators, type, Address(tmp3, 0), val, noreg, noreg, noreg);
    if (needs_post_barrier) {
      g1_write_barrier_post(masm,
                            tmp3 /* store_addr */,
                            new_val /* new_val */,
                            rthread /* thread */,
                            tmp1 /* tmp1 */,
                            tmp2 /* tmp2 */);
    }
  }
}

#ifdef COMPILER1

#undef __
#define __ ce->masm()->

void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());

  assert(stub->pre_val()->is_register(), "Precondition.");