/*
 * Copyright (c) 2006, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/byteswap.hpp"
#include "utilities/copy.hpp"


// Copy bytes; larger units are copied atomically if everything is aligned.
void Copy::conjoint_memory_atomic(const void* from, void* to, size_t size) {
  uintptr_t bits = (uintptr_t) from | (uintptr_t) to | (uintptr_t) size;

  // (Note: We could improve performance by ignoring the low bits of size,
  // and putting a short cleanup loop after each bulk copy loop.
  // There are plenty of other ways to make this faster also,
  // and it's a slippery slope. For now, let's keep this code simple
  // since the simplicity helps clarify the atomicity semantics of
  // this operation. There are also CPU-specific assembly versions
  // which may or may not want to include such optimizations.)

  if (bits % sizeof(jlong) == 0) {
    Copy::conjoint_jlongs_atomic((const jlong*) from, (jlong*) to, size / sizeof(jlong));
  } else if (bits % sizeof(jint) == 0) {
    Copy::conjoint_jints_atomic((const jint*) from, (jint*) to, size / sizeof(jint));
  } else if (bits % sizeof(jshort) == 0) {
    Copy::conjoint_jshorts_atomic((const jshort*) from, (jshort*) to, size / sizeof(jshort));
  } else {
    // Not aligned, so no need to be atomic.
    Copy::conjoint_jbytes((const void*) from, (void*) to, size);
  }
}
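// For illustration only (not part of the original sources): a minimal sketch of
// how the dispatch above plays out, using a hypothetical buffer whose address
// and length are both multiples of 8. All three values are 8-byte aligned, so
// the jlong branch is taken and each 8-byte unit is copied atomically:
//
//   alignas(8) char src[24];
//   alignas(8) char dst[24];
//   Copy::conjoint_memory_atomic(src, dst, sizeof(src)); // three atomic jlong copies
//
// If any of from, to, or size is only 4-byte aligned, the jint branch is taken
// instead, and so on down to a plain byte copy.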

#define COPY_ALIGNED_SEGMENT(t) \
  if (bits % sizeof(t) == 0) { \
    size_t segment = remain / sizeof(t); \
    if (segment > 0) { \
      Copy::conjoint_##t##s_atomic((const t*) cursor_from, (t*) cursor_to, segment); \
      remain -= segment * sizeof(t); \
      cursor_from = (void*)(((char*)cursor_from) + segment * sizeof(t)); \
      cursor_to = (void*)(((char*)cursor_to) + segment * sizeof(t)); \
    } \
  } \

void Copy::copy_value_content(const void* from, void* to, size_t size) {
  // Simple cases first
  uintptr_t bits = (uintptr_t) from | (uintptr_t) to | (uintptr_t) size;
  if (bits % sizeof(jlong) == 0) {
    Copy::conjoint_jlongs_atomic((const jlong*) from, (jlong*) to, size / sizeof(jlong));
    return;
  } else if (bits % sizeof(jint) == 0) {
    Copy::conjoint_jints_atomic((const jint*) from, (jint*) to, size / sizeof(jint));
    return;
  } else if (bits % sizeof(jshort) == 0) {
    Copy::conjoint_jshorts_atomic((const jshort*) from, (jshort*) to, size / sizeof(jshort));
    return;
  }

  // Complex cases
  bits = (uintptr_t) from | (uintptr_t) to;
  const void* cursor_from = from;
  void* cursor_to = to;
  size_t remain = size;
  COPY_ALIGNED_SEGMENT(jlong)
  COPY_ALIGNED_SEGMENT(jint)
  COPY_ALIGNED_SEGMENT(jshort)
  if (remain > 0) {
    Copy::conjoint_jbytes((const void*) cursor_from, (void*) cursor_to, remain);
  }
}

#undef COPY_ALIGNED_SEGMENT

class CopySwap : AllStatic {
public:
  /**
   * Copy and optionally byte swap elements
   *
   * <swap> - true if elements should be byte swapped
   *
   * @param src address of source
   * @param dst address of destination
   * @param byte_count number of bytes to copy
   * @param elem_size size of the elements to copy-swap
   */
  template<bool swap>
  static void conjoint_swap_if_needed(const void* src, void* dst, size_t byte_count, size_t elem_size) {
    assert(src != nullptr, "address must not be null");
    assert(dst != nullptr, "address must not be null");
    assert(elem_size == 2 || elem_size == 4 || elem_size == 8,
           "incorrect element size: %zu", elem_size);
    assert(is_aligned(byte_count, elem_size),
           "byte_count %zu must be multiple of element size %zu", byte_count, elem_size);

    address src_end = (address)src + byte_count;

    if (dst <= src || dst >= src_end) {
      // dst is at or below src, or beyond the end of src: a forward copy
      // never overwrites source bytes that have not been read yet.
      do_conjoint_swap<RIGHT,swap>(src, dst, byte_count, elem_size);
    } else {
      // dst starts inside the source region: copy backward so that each
      // source element is read before it is overwritten.
      do_conjoint_swap<LEFT,swap>(src, dst, byte_count, elem_size);
    }
  }

private:
  enum CopyDirection {
    RIGHT, // lower -> higher address
    LEFT   // higher -> lower address
  };

  /**
   * Copy and byte swap elements
   *
   * <T> - type of element to copy
   * <D> - copy direction
   * <swap> - true if elements should be byte swapped
   * <is_src_aligned> - true if src argument is aligned to element size
   * <is_dst_aligned> - true if dst argument is aligned to element size
   *
   * @param src address of source
   * @param dst address of destination
   * @param byte_count number of bytes to copy
   */
  template <typename T, CopyDirection D, bool swap, bool is_src_aligned, bool is_dst_aligned>
  static void do_conjoint_swap(const void* src, void* dst, size_t byte_count) {
    const char* cur_src;
    char* cur_dst;

    switch (D) {
    case RIGHT:
      cur_src = (const char*)src;
      cur_dst = (char*)dst;
      break;
    case LEFT:
      cur_src = (const char*)src + byte_count - sizeof(T);
      cur_dst = (char*)dst + byte_count - sizeof(T);
      break;
    }

    for (size_t i = 0; i < byte_count / sizeof(T); i++) {
      T tmp;

      if (is_src_aligned) {
        tmp = *(T*)cur_src;
      } else {
        memcpy(&tmp, cur_src, sizeof(T));
      }

      if (swap) {
        tmp = byteswap(tmp);
      }

      if (is_dst_aligned) {
        *(T*)cur_dst = tmp;
      } else {
        memcpy(cur_dst, &tmp, sizeof(T));
      }

      switch (D) {
      case RIGHT:
        cur_src += sizeof(T);
        cur_dst += sizeof(T);
        break;
      case LEFT:
        cur_src -= sizeof(T);
        cur_dst -= sizeof(T);
        break;
      }
    }
  }

  /**
   * Copy and byte swap elements
   *
   * <T> - type of element to copy
   * <direction> - copy direction
   * <swap> - true if elements should be byte swapped
   *
   * @param src address of source
   * @param dst address of destination
   * @param byte_count number of bytes to copy
   */
  template <typename T, CopyDirection direction, bool swap>
  static void do_conjoint_swap(const void* src, void* dst, size_t byte_count) {
    if (is_aligned(src, sizeof(T))) {
      if (is_aligned(dst, sizeof(T))) {
        do_conjoint_swap<T,direction,swap,true,true>(src, dst, byte_count);
      } else {
        do_conjoint_swap<T,direction,swap,true,false>(src, dst, byte_count);
      }
    } else {
      if (is_aligned(dst, sizeof(T))) {
        do_conjoint_swap<T,direction,swap,false,true>(src, dst, byte_count);
      } else {
        do_conjoint_swap<T,direction,swap,false,false>(src, dst, byte_count);
      }
    }
  }


  /**
   * Copy and byte swap elements
   *
   * <D> - copy direction
   * <swap> - true if elements should be byte swapped
   *
   * @param src address of source
   * @param dst address of destination
   * @param byte_count number of bytes to copy
   * @param elem_size size of the elements to copy-swap
   */
  template <CopyDirection D, bool swap>
  static void do_conjoint_swap(const void* src, void* dst, size_t byte_count, size_t elem_size) {
    switch (elem_size) {
    case 2: do_conjoint_swap<uint16_t,D,swap>(src, dst, byte_count); break;
    case 4: do_conjoint_swap<uint32_t,D,swap>(src, dst, byte_count); break;
    case 8: do_conjoint_swap<uint64_t,D,swap>(src, dst, byte_count); break;
    default: guarantee(false, "do_conjoint_swap: Invalid elem_size %zu\n", elem_size);
    }
  }
};

void Copy::conjoint_copy(const void* src, void* dst, size_t byte_count, size_t elem_size) {
  CopySwap::conjoint_swap_if_needed<false>(src, dst, byte_count, elem_size);
}

void Copy::conjoint_swap(const void* src, void* dst, size_t byte_count, size_t elem_size) {
  CopySwap::conjoint_swap_if_needed<true>(src, dst, byte_count, elem_size);
}
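// For illustration only (not part of the original sources): a minimal sketch of
// Copy::conjoint_swap with hypothetical values. Each element is byte swapped
// independently while being copied:
//
//   uint32_t src[] = { 0x11223344, 0xAABBCCDD };
//   uint32_t dst[2];
//   Copy::conjoint_swap(src, dst, sizeof(src), sizeof(uint32_t));
//   // dst[0] == 0x44332211, dst[1] == 0xDDCCBBAA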

// Fill bytes; larger units are filled atomically if everything is aligned.
void Copy::fill_to_memory_atomic(void* to, size_t size, jubyte value) {
  address dst = (address)to;
  uintptr_t bits = (uintptr_t)to | (uintptr_t)size;
  if (bits % sizeof(jlong) == 0) {
    jlong fill = (julong)((jubyte)value); // zero-extend
    if (fill != 0) {
      fill += fill << 8;
      fill += fill << 16;
      fill += fill << 32;
    }
    // Copy::fill_to_jlongs_atomic((jlong*) dst, size / sizeof(jlong));
    for (uintptr_t off = 0; off < size; off += sizeof(jlong)) {
      *(jlong*)(dst + off) = fill;
    }
  } else if (bits % sizeof(jint) == 0) {
    jint fill = (juint)((jubyte)value); // zero-extend
    if (fill != 0) {
      fill += fill << 8;
      fill += fill << 16;
    }
    // Copy::fill_to_jints_atomic((jint*) dst, size / sizeof(jint));
    for (uintptr_t off = 0; off < size; off += sizeof(jint)) {
      *(jint*)(dst + off) = fill;
    }
  } else if (bits % sizeof(jshort) == 0) {
    jshort fill = (jushort)((jubyte)value); // zero-extend
    fill += (jshort)(fill << 8);
    // Copy::fill_to_jshorts_atomic((jshort*) dst, size / sizeof(jshort));
    for (uintptr_t off = 0; off < size; off += sizeof(jshort)) {
      *(jshort*)(dst + off) = fill;
    }
  } else {
    // Not aligned, so no need to be atomic.
#ifdef MUSL_LIBC
    // This code is used by Unsafe and may hit the next page after truncation
    // of mapped memory. Therefore, we use volatile to prevent compilers from
    // replacing the loop by memset which may not trigger SIGBUS as needed
    // (observed on Alpine Linux x86_64)
    jbyte fill = value;
    for (uintptr_t off = 0; off < size; off += sizeof(jbyte)) {
      *(volatile jbyte*)(dst + off) = fill;
    }
#else
    Copy::fill_to_bytes(dst, size, value);
#endif
  }
}
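// For illustration only (not part of the original sources): a minimal sketch of
// Copy::fill_to_memory_atomic with hypothetical values. For a destination
// address and size that are both multiples of 8, the shift-and-add sequence
// above replicates the byte 0xAB into the 64-bit pattern 0xABABABABABABABAB,
// which is then stored once per 8 bytes:
//
//   alignas(8) jbyte buf[32];
//   Copy::fill_to_memory_atomic(buf, sizeof(buf), 0xAB); // four 8-byte (jlong) stores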