  // (Note: We could improve performance by ignoring the low bits of size,
  // and putting a short cleanup loop after each bulk copy loop.
  // There are plenty of other ways to make this faster also,
  // and it's a slippery slope. For now, let's keep this code simple
  // since the simplicity helps clarify the atomicity semantics of
  // this operation. There are also CPU-specific assembly versions
  // which may or may not want to include such optimizations.)

  if (bits % sizeof(jlong) == 0) {
    Copy::conjoint_jlongs_atomic((const jlong*) from, (jlong*) to, size / sizeof(jlong));
  } else if (bits % sizeof(jint) == 0) {
    Copy::conjoint_jints_atomic((const jint*) from, (jint*) to, size / sizeof(jint));
  } else if (bits % sizeof(jshort) == 0) {
    Copy::conjoint_jshorts_atomic((const jshort*) from, (jshort*) to, size / sizeof(jshort));
  } else {
    // Not aligned, so no need to be atomic.
    Copy::conjoint_jbytes((const void*) from, (void*) to, size);
  }
}
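
// Worked example (illustrative, not part of the original file), assuming
// `bits` is the bitwise OR of the source address, destination address, and
// byte count, as in copy_value_content below. For from = 0x1000,
// to = 0x2008, size = 16:
//   bits = 0x1000 | 0x2008 | 0x10 = 0x3018, and 0x3018 % 8 == 0,
// so the jlong branch copies two 64-bit units, each read and written
// atomically. With to = 0x2004 instead, bits % 8 == 4 but bits % 4 == 0,
// and the jint branch copies four 32-bit units.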

#define COPY_ALIGNED_SEGMENT(t) \
  if (bits % sizeof(t) == 0) { \
    size_t segment = remain / sizeof(t); \
    if (segment > 0) { \
      Copy::conjoint_##t##s_atomic((const t*) cursor_from, (t*) cursor_to, segment); \
      remain -= segment * sizeof(t); \
      cursor_from = (void*)(((char*)cursor_from) + segment * sizeof(t)); \
      cursor_to = (void*)(((char*)cursor_to) + segment * sizeof(t)); \
    } \
  }

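// For reference, COPY_ALIGNED_SEGMENT(jint) expands to roughly the following
// (illustrative expansion, not part of the original file):
//
//   if (bits % sizeof(jint) == 0) {
//     size_t segment = remain / sizeof(jint);
//     if (segment > 0) {
//       Copy::conjoint_jints_atomic((const jint*) cursor_from, (jint*) cursor_to, segment);
//       remain -= segment * sizeof(jint);
//       cursor_from = (void*)(((char*)cursor_from) + segment * sizeof(jint));
//       cursor_to = (void*)(((char*)cursor_to) + segment * sizeof(jint));
//     }
//   }
//
// Each invocation drains as many whole elements of type t as fit in the
// remaining byte count, then advances both cursors so the next, smaller
// element size can pick up where it left off.
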
void Copy::copy_value_content(const void* from, void* to, size_t size) {
  // Simple cases first: both addresses and the size share a common alignment.
  uintptr_t bits = (uintptr_t) from | (uintptr_t) to | (uintptr_t) size;
  if (bits % sizeof(jlong) == 0) {
    Copy::conjoint_jlongs_atomic((const jlong*) from, (jlong*) to, size / sizeof(jlong));
    return;
  } else if (bits % sizeof(jint) == 0) {
    Copy::conjoint_jints_atomic((const jint*) from, (jint*) to, size / sizeof(jint));
    return;
  } else if (bits % sizeof(jshort) == 0) {
    Copy::conjoint_jshorts_atomic((const jshort*) from, (jshort*) to, size / sizeof(jshort));
    return;
  }

  // Complex cases: there is no common alignment across both addresses and
  // the size, so judge alignment on the addresses alone and peel off the
  // largest aligned segments first; any unaligned tail falls through to a
  // plain byte copy.
  bits = (uintptr_t) from | (uintptr_t) to;
  const void* cursor_from = from;
  void* cursor_to = to;
  size_t remain = size;
  COPY_ALIGNED_SEGMENT(jlong)
  COPY_ALIGNED_SEGMENT(jint)
  COPY_ALIGNED_SEGMENT(jshort)
  if (remain > 0) {
    Copy::conjoint_jbytes((const void*) cursor_from, (void*) cursor_to, remain);
  }
}

#undef COPY_ALIGNED_SEGMENT
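
// Worked example (illustrative, not part of the original file): suppose from
// and to are both 4-byte aligned but not 8-byte aligned, and size == 15.
// The simple cases fail because (from | to | size) is odd. In the complex
// path, bits is recomputed from the addresses alone and is 4-aligned, so:
//   COPY_ALIGNED_SEGMENT(jlong)  -> skipped, bits % 8 != 0
//   COPY_ALIGNED_SEGMENT(jint)   -> copies 3 jints (12 bytes), remain = 3
//   COPY_ALIGNED_SEGMENT(jshort) -> copies 1 jshort (2 bytes), remain = 1
// and the final conjoint_jbytes call moves the last byte. Every 4- and
// 2-byte element is still copied with its natural atomicity.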

class CopySwap : AllStatic {
public:
  /**
   * Copy and optionally byte swap elements
   *
   * <swap> - true if elements should be byte swapped
   *
   * @param src address of source
   * @param dst address of destination
   * @param byte_count number of bytes to copy
   * @param elem_size size of the elements to copy-swap
   */
  template<bool swap>
  static void conjoint_swap_if_needed(const void* src, void* dst, size_t byte_count, size_t elem_size) {
    assert(src != nullptr, "address must not be null");
    assert(dst != nullptr, "address must not be null");
    assert(elem_size == 2 || elem_size == 4 || elem_size == 8,
           "incorrect element size: %zu", elem_size);
    assert(is_aligned(byte_count, elem_size),
           "byte_count %zu must be multiple of element size %zu", byte_count, elem_size);