/* SPDX-License-Identifier: LGPL-2.1-or-later OR (GPL-2.0-or-later WITH eCos-exception-2.0) */
/*
  This file is part of GNU libmicrohttpd.
  Copyright (C) 2019-2025 Karlson2k (Evgeny Grin)

  GNU libmicrohttpd is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public
  License as published by the Free Software Foundation; either
  version 2.1 of the License, or (at your option) any later version.

  GNU libmicrohttpd is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  Alternatively, you can redistribute GNU libmicrohttpd and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of
  the License, or (at your option) any later version, together
  with the eCos exception, as follows:

    As a special exception, if other files instantiate templates or
    use macros or inline functions from this file, or you compile this
    file and link it with other works to produce a work based on this
    file, this file does not by itself cause the resulting work to be
    covered by the GNU General Public License. However the source code
    for this file must still be made available in accordance with
    section (3) of the GNU General Public License v2.

    This exception does not invalidate any other reasons why a work
    based on this file might be covered by the GNU General Public
    License.

  You should have received copies of the GNU Lesser General Public
  License and the GNU General Public License along with this library;
  if not, see <https://www.gnu.org/licenses/>.
*/

/**
 * @file src/mhd2/mhd_bithelpers.h
 * @brief Bit manipulation helpers
 * @author Karlson2k (Evgeny Grin)
 */

#ifndef MHD_BITHELPERS_H
#define MHD_BITHELPERS_H 1

#include "mhd_sys_options.h"

#include "sys_base_types.h"
#include <string.h>

#include "mhd_assert.h"

#if defined(_MSC_FULL_VER)
/* Clang-cl produces a function call instead of intrinsics if optimisations
   are turned off. */
# if (! defined(__clang__)) || defined(__OPTIMIZE__)
/* Declarations for VC & Clang-cl built-ins */
# include <intrin.h>
# define mhd_HAS_VC_INTRINSICS 1
# endif /* (! __clang__) || (__OPTIMIZE__) */
#endif /* _MSC_FULL_VER */
#include "mhd_byteorder.h"

#ifdef CHAR_BIT
# if CHAR_BIT != 8
#error CHAR_BIT different from 8 is not supported
# endif
#endif

#ifndef __has_builtin
# define mhd_HAS_BUILTIN(x) (0)
#else
# define mhd_HAS_BUILTIN(x) __has_builtin (x)
#endif

#if defined(_MSC_FULL_VER)
#pragma warning(push)
/* Disable C4505 "unreferenced local function has been removed" */
#pragma warning(disable:4505)
#endif /* _MSC_FULL_VER */

mhd_DATA_TRUNCATION_RUNTIME_CHECK_DISABLE

#if defined(MHD_HAVE___BUILTIN_BSWAP16) || \
  mhd_HAS_BUILTIN (__builtin_bswap16)
# define mhd_BYTES_SWAP16(value16) \
  ((uint16_t) __builtin_bswap16 ((uint16_t) value16))
#elif defined(mhd_HAS_VC_INTRINSICS)
# ifndef __clang__
# pragma intrinsic(_byteswap_ushort)
# endif /* ! __clang__ */
# define mhd_BYTES_SWAP16(value16) \
  ((uint16_t) _byteswap_ushort ((uint16_t) value16))
#else  /* ! mhd_HAS_BUILTIN(__builtin_bswap16) */
mhd_static_inline uint16_t
mhd_BYTES_SWAP16 (uint16_t value16)
{
  return (uint16_t) ((value16 << 8u) | (value16 >> 8u));
}


#endif /* ! mhd_HAS_BUILTIN(__builtin_bswap16) */


#ifdef MHD_HAVE___BUILTIN_BSWAP32
# define mhd_BYTES_SWAP32(value32) \
  ((uint32_t) __builtin_bswap32 ((uint32_t) value32))
#elif defined(mhd_HAS_VC_INTRINSICS)
# ifndef __clang__
# pragma intrinsic(_byteswap_ulong)
# endif /* ! __clang__ */
# define mhd_BYTES_SWAP32(value32) \
  ((uint32_t) _byteswap_ulong ((uint32_t) value32))
#elif \
  mhd_HAS_BUILTIN (__builtin_bswap32)
# define mhd_BYTES_SWAP32(value32) \
  ((uint32_t) __builtin_bswap32 ((uint32_t) value32))
#else  /* ! mhd_HAS_BUILTIN(__builtin_bswap32) */
mhd_static_inline uint32_t
mhd_BYTES_SWAP32 (uint32_t value32)
{
  uint32_t ret;
  ret = (uint32_t) (value32 << 24u);
  ret |= (uint32_t) ((value32 << 8u) & (0x00FF0000u));
  ret |= (uint32_t) ((value32 >> 8u) & (0x0000FF00u));
  ret |= (uint32_t) (value32 >> 24u);
  return ret;
}


#endif /* ! mhd_HAS_BUILTIN(__builtin_bswap32) */


#ifdef MHD_HAVE___BUILTIN_BSWAP64
# define mhd_BYTES_SWAP64(value64) \
  ((uint64_t) __builtin_bswap64 ((uint64_t) value64))
#elif defined(mhd_HAS_VC_INTRINSICS)
# ifndef __clang__
# pragma intrinsic(_byteswap_uint64)
# endif /* ! __clang__ */
# define mhd_BYTES_SWAP64(value64) \
  ((uint64_t) _byteswap_uint64 ((uint64_t) value64))
#elif \
  mhd_HAS_BUILTIN (__builtin_bswap64)
# define mhd_BYTES_SWAP64(value64) \
  ((uint64_t) __builtin_bswap64 ((uint64_t) value64))
#else  /* ! mhd_HAS_BUILTIN(__builtin_bswap64) */
mhd_static_inline uint64_t
mhd_BYTES_SWAP64 (uint64_t value64)
{
  uint64_t ret;
  ret = (uint64_t) (value64 << 56u);
  ret |= (uint64_t) ((value64 << 40u) & (0x00FF000000000000u));
  ret |= (uint64_t) ((value64 << 24u) & (0x0000FF0000000000u));
  ret |= (uint64_t) ((value64 << 8u) & (0x000000FF00000000u));
  ret |= (uint64_t) ((value64 >> 8u) & (0x00000000FF000000u));
  ret |= (uint64_t) ((value64 >> 24u) & (0x0000000000FF0000u));
  ret |= (uint64_t) ((value64 >> 40u) & (0x000000000000FF00u));
  ret |= (uint64_t) (value64 >> 56u);
  return ret;
}
#endif /* ! mhd_HAS_BUILTIN(__builtin_bswap64) */

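/* Illustrative usage (editor's example, not part of the original header):
   each mhd_BYTES_SWAPxx() reverses the byte order of its fixed-width
   unsigned argument, e.g.:
     mhd_BYTES_SWAP16 (0x1234u)             == 0x3412u
     mhd_BYTES_SWAP32 (0x12345678u)         == 0x78563412u
     mhd_BYTES_SWAP64 (0x0102030405060708u) == 0x0807060504030201u
 */
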
#if defined(__SIZEOF_INT128__) || \
  (defined(BITINT_MAXWIDTH) && (BITINT_MAXWIDTH >= 128))
/* Swap bytes of a 128-bit value if supported by the compiler.
 * The parameter value128 must be an unsigned 128-bit constant or variable,
 * otherwise the macro will not work properly.
 * Warning: evaluates its argument multiple times!
 */
# define mhd_BYTES_SWAP128(value128) \
  (((((value128) >> 0u) & 0xFFu) << 120u) | \
   ((((value128) >> 8u) & 0xFFu) << 112u) | \
   ((((value128) >> 16u) & 0xFFu) << 104u) | \
   ((((value128) >> 24u) & 0xFFu) << 96u) | \
   ((((value128) >> 32u) & 0xFFu) << 88u) | \
   ((((value128) >> 40u) & 0xFFu) << 80u) | \
   ((((value128) >> 48u) & 0xFFu) << 72u) | \
   ((((value128) >> 56u) & 0xFFu) << 64u) | \
   ((((value128) >> 64u) & 0xFFu) << 56u) | \
   ((((value128) >> 72u) & 0xFFu) << 48u) | \
   ((((value128) >> 80u) & 0xFFu) << 40u) | \
   ((((value128) >> 88u) & 0xFFu) << 32u) | \
   ((((value128) >> 96u) & 0xFFu) << 24u) | \
   ((((value128) >> 104u) & 0xFFu) << 16u) | \
   ((((value128) >> 112u) & 0xFFu) << 8u) | \
   ((((value128) >> 120u) & 0xFFu) << 0u))
#endif

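/* Illustrative usage (editor's example, not part of the original header):
   the macro body expands its argument sixteen times, so pass only a plain
   variable or constant, never an expression with side effects:

     unsigned __int128 v = 1;
     unsigned __int128 swapped = mhd_BYTES_SWAP128 (v);   // OK
     // swapped = mhd_BYTES_SWAP128 (*p++);               // WRONG: p would be
     //                                                      incremented 16 times
 */
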
/* mhd_PUT_64BIT_LE (addr, value64)
 * put 64-bit value64 to addr in little-endian mode.
 */
/* Slow version that works with unaligned addr and with any byte order */
mhd_static_inline void
mhd_PUT_64BIT_LE_SLOW (void *addr, uint64_t value64)
{
  uint8_t *const dst = (uint8_t *) addr;
  dst[0] = (uint8_t) value64;
  dst[1] = (uint8_t) (value64 >> 8u);
  dst[2] = (uint8_t) (value64 >> 16u);
  dst[3] = (uint8_t) (value64 >> 24u);
  dst[4] = (uint8_t) (value64 >> 32u);
  dst[5] = (uint8_t) (value64 >> 40u);
  dst[6] = (uint8_t) (value64 >> 48u);
  dst[7] = (uint8_t) (value64 >> 56u);
}
#if mhd_BYTE_ORDER == mhd_LITTLE_ENDIAN
# define mhd_PUT_64BIT_LE(addr, value64) \
  ((*(uint64_t*) (addr)) = (uint64_t) (value64))
#elif mhd_BYTE_ORDER == mhd_BIG_ENDIAN
# define mhd_PUT_64BIT_LE(addr, value64) \
  ((*(uint64_t*) (addr)) = mhd_BYTES_SWAP64 (value64))
#else  /* mhd_BYTE_ORDER != mhd_BIG_ENDIAN */
/* Endianness was not detected or non-standard like PDP-endian */
# define mhd_PUT_64BIT_LE(addr, value64) \
  mhd_PUT_64BIT_LE_SLOW ((addr),(value64))
/* Indicate that mhd_PUT_64BIT_LE does not need aligned pointer */
# define mhd_PUT_64BIT_LE_ALLOW_UNALIGNED 1
#endif /* mhd_BYTE_ORDER != mhd_BIG_ENDIAN */

/* Put result safely to unaligned address */
#ifdef mhd_PUT_64BIT_LE_ALLOW_UNALIGNED
# define mhd_PUT_64BIT_LE_UNALIGN(addr, value64) \
  mhd_PUT_64BIT_LE ((addr),(value64))
#else  /* ! mhd_PUT_64BIT_LE_ALLOW_UNALIGNED */
# define mhd_PUT_64BIT_LE_UNALIGN(addr, value64) \
  do { uint64_t mhd__aligned_dst; \
       mhd_PUT_64BIT_LE (&mhd__aligned_dst, (value64)); \
       memcpy ((addr), &mhd__aligned_dst, \
               sizeof(mhd__aligned_dst)); } while (0)
#endif /* ! mhd_PUT_64BIT_LE_ALLOW_UNALIGNED */


/* mhd_PUT_32BIT_LE (addr, value32)
 * put 32-bit value32 to addr in little-endian mode.
 */
/* Slow version that works with unaligned addr and with any byte order */
mhd_static_inline void
mhd_PUT_32BIT_LE_SLOW (void *addr, uint32_t value32)
{
  uint8_t *const dst = (uint8_t *) addr;
  dst[0] = (uint8_t) value32;
  dst[1] = (uint8_t) (value32 >> 8u);
  dst[2] = (uint8_t) (value32 >> 16u);
  dst[3] = (uint8_t) (value32 >> 24u);
}
#if mhd_BYTE_ORDER == mhd_LITTLE_ENDIAN
# define mhd_PUT_32BIT_LE(addr, value32) \
  ((*(uint32_t*) (addr)) = (uint32_t) (value32))
#elif mhd_BYTE_ORDER == mhd_BIG_ENDIAN
# define mhd_PUT_32BIT_LE(addr, value32) \
  ((*(uint32_t*) (addr)) = mhd_BYTES_SWAP32 (value32))
#else  /* mhd_BYTE_ORDER != mhd_BIG_ENDIAN */
/* Endianness was not detected or non-standard like PDP-endian */
# define mhd_PUT_32BIT_LE(addr, value32) \
  mhd_PUT_32BIT_LE_SLOW ((addr),(value32))
/* Indicate that mhd_PUT_32BIT_LE does not need aligned pointer */
# define mhd_PUT_32BIT_LE_ALLOW_UNALIGNED 1
#endif /* mhd_BYTE_ORDER != mhd_BIG_ENDIAN */

/* Put result safely to unaligned address */
#ifdef mhd_PUT_32BIT_LE_ALLOW_UNALIGNED
# define mhd_PUT_32BIT_LE_UNALIGN(addr, value32) \
  mhd_PUT_32BIT_LE ((addr),(value32))
#else  /* ! mhd_PUT_32BIT_LE_ALLOW_UNALIGNED */
# define mhd_PUT_32BIT_LE_UNALIGN(addr, value32) \
  do { uint32_t mhd__aligned_dst; \
       mhd_PUT_32BIT_LE (&mhd__aligned_dst, (value32)); \
       memcpy ((addr), &mhd__aligned_dst, \
               sizeof(mhd__aligned_dst)); } while (0)
#endif /* ! mhd_PUT_32BIT_LE_ALLOW_UNALIGNED */


/* mhd_GET_32BIT_LE (addr)
 * get little-endian 32-bit value stored at addr
 * and return it in native-endian mode.
 */
/* Slow version that works with unaligned addr and with any byte order */
mhd_static_inline uint32_t
mhd_GET_32BIT_LE_SLOW (const void *addr)
{
  const uint8_t *const src = (const uint8_t *) addr;
  uint32_t ret;
  ret = (uint32_t) src[0];
  ret |= (uint32_t) (((uint32_t) src[1]) << 8u);
  ret |= (uint32_t) (((uint32_t) src[2]) << 16u);
  ret |= (uint32_t) (((uint32_t) src[3]) << 24u);
  return ret;
}
#if mhd_BYTE_ORDER == mhd_LITTLE_ENDIAN
# define mhd_GET_32BIT_LE(addr) \
  (*(const uint32_t*) (addr))
#elif mhd_BYTE_ORDER == mhd_BIG_ENDIAN
# define mhd_GET_32BIT_LE(addr) \
  mhd_BYTES_SWAP32 (*(const uint32_t*) (addr))
#else  /* mhd_BYTE_ORDER != mhd_BIG_ENDIAN */
/* Endianness was not detected or non-standard like PDP-endian */
# define mhd_GET_32BIT_LE(addr) mhd_GET_32BIT_LE_SLOW ((addr))
/* Indicate that mhd_GET_32BIT_LE does not need aligned pointer */
# define mhd_GET_32BIT_LE_ALLOW_UNALIGNED 1
#endif /* mhd_BYTE_ORDER != mhd_BIG_ENDIAN */


#ifdef mhd_GET_32BIT_LE_ALLOW_UNALIGNED
# define mhd_GET_32BIT_LE_UNALIGN(addr) mhd_GET_32BIT_LE (addr)
#else  /* ! mhd_GET_32BIT_LE_ALLOW_UNALIGNED */
/* Get value safely from an unaligned address */
mhd_static_inline MHD_FN_PAR_NONNULL_ALL_ uint32_t
mhd_GET_32BIT_LE_UNALIGN (const void *addr)
{
  uint32_t aligned_src;
  memcpy (&aligned_src, addr, sizeof(aligned_src));
  return mhd_GET_32BIT_LE (&aligned_src);
}
#endif /* ! mhd_GET_32BIT_LE_ALLOW_UNALIGNED */

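/* Illustrative usage (editor's example, not part of the original header):
   the *_UNALIGN variants are safe for arbitrary byte buffers, e.g.:

     uint8_t buf[4];
     mhd_PUT_32BIT_LE_UNALIGN (buf, 0x11223344u);
     // buf now holds 0x44, 0x33, 0x22, 0x11
     // mhd_GET_32BIT_LE_UNALIGN (buf) == 0x11223344u
 */
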
/* mhd_GET_64BIT_LE (addr)
 * get little-endian 64-bit value stored at addr
 * and return it in native-endian mode.
 */
/* Slow version that works with unaligned addr and with any byte order */
mhd_static_inline uint64_t
mhd_GET_64BIT_LE_SLOW (const void *addr)
{
  const uint8_t *const src = (const uint8_t *) addr;
  uint64_t ret;
  ret = (uint64_t) src[0];
  ret |= (uint64_t) src[1] << 8u;
  ret |= (uint64_t) src[2] << 16u;
  ret |= (uint64_t) src[3] << 24u;
  ret |= (uint64_t) src[4] << 32u;
  ret |= (uint64_t) src[5] << 40u;
  ret |= (uint64_t) src[6] << 48u;
  ret |= (uint64_t) src[7] << 56u;
  return ret;
}
#if mhd_BYTE_ORDER == mhd_LITTLE_ENDIAN
# define mhd_GET_64BIT_LE(addr) \
  (*(const uint64_t*) (addr))
#elif mhd_BYTE_ORDER == mhd_BIG_ENDIAN
# define mhd_GET_64BIT_LE(addr) \
  mhd_BYTES_SWAP64 (*(const uint64_t*) (addr))
#else  /* mhd_BYTE_ORDER != mhd_BIG_ENDIAN */
/* Endianness was not detected or non-standard like PDP-endian */
# define mhd_GET_64BIT_LE(addr) mhd_GET_64BIT_LE_SLOW ((addr))
/* Indicate that mhd_GET_64BIT_LE does not need aligned pointer */
# define mhd_GET_64BIT_LE_ALLOW_UNALIGNED 1
#endif /* mhd_BYTE_ORDER != mhd_BIG_ENDIAN */
#ifdef mhd_GET_64BIT_LE_ALLOW_UNALIGNED
# define mhd_GET_64BIT_LE_UNALIGN(addr) mhd_GET_64BIT_LE (addr)
#else  /* ! mhd_GET_64BIT_LE_ALLOW_UNALIGNED */
mhd_static_inline MHD_FN_PAR_NONNULL_ALL_ uint64_t
mhd_GET_64BIT_LE_UNALIGN (const void *addr)
{
  uint64_t aligned_src;
  memcpy (&aligned_src, addr, sizeof(aligned_src));
  return mhd_GET_64BIT_LE (&aligned_src);
}
#endif


/* mhd_GET_UINTFAST32_LE(addr)
 * Get uint_fast32_t value at the addr as little endian.
 */
#if SIZEOF_UINT_FAST32_T == 4
# define mhd_GET_UINTFAST32_LE(addr) \
  mhd_GET_32BIT_LE ((addr))
#elif SIZEOF_UINT_FAST32_T == 8
# define mhd_GET_UINTFAST32_LE(addr) \
  mhd_GET_64BIT_LE ((addr))
#else  /* future-proof */
mhd_static_inline uint_fast32_t
mhd_GET_UINTFAST32_LE (const void *addr)
{
  const uint8_t *const src = (const uint8_t *) addr;
  size_t i;
  uint_fast32_t ret = 0;
  for (i = 0; i < sizeof(ret); ++i)
    ret |= (uint_fast32_t) (((uint_fast32_t) src[i]) << (8u * i));
  return ret;
}
#endif

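/* Illustrative usage (editor's example, not part of the original header):
   reading the byte sequence 0x88 0x77 0x66 0x55 0x44 0x33 0x22 0x11 with
   mhd_GET_64BIT_LE_UNALIGN() yields 0x1122334455667788.
   Note that mhd_GET_UINTFAST32_LE() reads sizeof(uint_fast32_t) bytes
   (four or eight, depending on the platform), not always four. */
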
/* mhd_PUT_64BIT_BE (addr, value64)
 * put native-endian 64-bit value64 to addr
 * in big-endian mode.
 */
/* Slow version that works with unaligned addr and with any byte order */
mhd_static_inline void
mhd_PUT_64BIT_BE_SLOW (void *addr, uint64_t value64)
{
  uint8_t *const dst = (uint8_t *) addr;
  dst[0] = (uint8_t) (value64 >> 56u);
  dst[1] = (uint8_t) (value64 >> 48u);
  dst[2] = (uint8_t) (value64 >> 40u);
  dst[3] = (uint8_t) (value64 >> 32u);
  dst[4] = (uint8_t) (value64 >> 24u);
  dst[5] = (uint8_t) (value64 >> 16u);
  dst[6] = (uint8_t) (value64 >> 8u);
  dst[7] = (uint8_t) value64;
}
#if mhd_BYTE_ORDER == mhd_BIG_ENDIAN
# define mhd_PUT_64BIT_BE(addr, value64) \
  ((*(uint64_t*) (addr)) = (uint64_t) (value64))
#elif mhd_BYTE_ORDER == mhd_LITTLE_ENDIAN
# define mhd_PUT_64BIT_BE(addr, value64) \
  ((*(uint64_t*) (addr)) = mhd_BYTES_SWAP64 (value64))
#else  /* mhd_BYTE_ORDER != mhd_LITTLE_ENDIAN */
/* Endianness was not detected or non-standard like PDP-endian */
# define mhd_PUT_64BIT_BE(addr, value64) mhd_PUT_64BIT_BE_SLOW ((addr),(value64))
/* Indicate that mhd_PUT_64BIT_BE does not need aligned pointer */
# define mhd_PUT_64BIT_BE_ALLOW_UNALIGNED 1
#endif /* mhd_BYTE_ORDER != mhd_LITTLE_ENDIAN */

/* Put result safely to unaligned address */
#ifdef mhd_PUT_64BIT_BE_ALLOW_UNALIGNED
# define mhd_PUT_64BIT_BE_UNALIGN(addr, value64) \
  mhd_PUT_64BIT_BE ((addr),(value64))
#else  /* ! mhd_PUT_64BIT_BE_ALLOW_UNALIGNED */
# define mhd_PUT_64BIT_BE_UNALIGN(addr, value64) \
  do { uint64_t mhd__aligned_dst; \
       mhd_PUT_64BIT_BE (&mhd__aligned_dst, (value64)); \
       memcpy ((addr), &mhd__aligned_dst, \
               sizeof(mhd__aligned_dst)); } while (0)
#endif /* ! mhd_PUT_64BIT_BE_ALLOW_UNALIGNED */


/* mhd_GET_64BIT_BE (addr)
 * load 64-bit value located at addr in big endian mode.
 */
#if mhd_BYTE_ORDER == mhd_BIG_ENDIAN
# define mhd_GET_64BIT_BE(addr) \
  (*(const uint64_t*) (addr))
#elif mhd_BYTE_ORDER == mhd_LITTLE_ENDIAN
# define mhd_GET_64BIT_BE(addr) \
  mhd_BYTES_SWAP64 (*(const uint64_t*) (addr))
#else  /* mhd_BYTE_ORDER != mhd_LITTLE_ENDIAN */
/* Endianness was not detected or non-standard like PDP-endian */
mhd_static_inline uint64_t
mhd_GET_64BIT_BE (const void *addr)
{
  const uint8_t *const src = (const uint8_t *) addr;
  uint64_t ret;
  ret = (uint64_t) (((uint64_t) src[0]) << 56u);
  ret |= (uint64_t) (((uint64_t) src[1]) << 48u);
  ret |= (uint64_t) (((uint64_t) src[2]) << 40u);
  ret |= (uint64_t) (((uint64_t) src[3]) << 32u);
  ret |= (uint64_t) (((uint64_t) src[4]) << 24u);
  ret |= (uint64_t) (((uint64_t) src[5]) << 16u);
  ret |= (uint64_t) (((uint64_t) src[6]) << 8u);
  ret |= (uint64_t) src[7];
  return ret;
}
/* Indicate that mhd_GET_64BIT_BE does not need aligned pointer */
# define mhd_GET_64BIT_BE_ALLOW_UNALIGNED 1
#endif /* mhd_BYTE_ORDER != mhd_LITTLE_ENDIAN */

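/* Illustrative usage (editor's example, not part of the original header):
   big-endian ("network") order stores the most-significant byte first:

     uint64_t net;   // suitably aligned destination
     mhd_PUT_64BIT_BE (&net, 0x0102030405060708u);
     // memory at &net now holds 0x01, 0x02, ..., 0x08
     // mhd_GET_64BIT_BE (&net) == 0x0102030405060708u
 */
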
/* mhd_PUT_32BIT_BE (addr, value32)
 * put native-endian 32-bit value32 to addr
 * in big-endian mode.
 */
#if mhd_BYTE_ORDER == mhd_BIG_ENDIAN
# define mhd_PUT_32BIT_BE(addr, value32) \
  ((*(uint32_t*) (addr)) = (uint32_t) (value32))
#elif mhd_BYTE_ORDER == mhd_LITTLE_ENDIAN
# define mhd_PUT_32BIT_BE(addr, value32) \
  ((*(uint32_t*) (addr)) = mhd_BYTES_SWAP32 (value32))
#else  /* mhd_BYTE_ORDER != mhd_LITTLE_ENDIAN */
/* Endianness was not detected or non-standard like PDP-endian */
mhd_static_inline void
mhd_PUT_32BIT_BE (void *addr, uint32_t value32)
{
  uint8_t *const dst = (uint8_t *) addr;
  dst[0] = (uint8_t) (value32 >> 24u);
  dst[1] = (uint8_t) (value32 >> 16u);
  dst[2] = (uint8_t) (value32 >> 8u);
  dst[3] = (uint8_t) value32;
}
/* Indicate that mhd_PUT_32BIT_BE does not need aligned pointer */
# define mhd_PUT_32BIT_BE_ALLOW_UNALIGNED 1
#endif /* mhd_BYTE_ORDER != mhd_LITTLE_ENDIAN */

/* Put result safely to unaligned address */
#ifdef mhd_PUT_32BIT_BE_ALLOW_UNALIGNED
# define mhd_PUT_32BIT_BE_UNALIGN(addr, value32) \
  mhd_PUT_32BIT_BE ((addr),(value32))
#else  /* ! mhd_PUT_32BIT_BE_ALLOW_UNALIGNED */
# define mhd_PUT_32BIT_BE_UNALIGN(addr, value32) \
  do { uint32_t mhd__aligned_dst; \
       mhd_PUT_32BIT_BE (&mhd__aligned_dst, (value32)); \
       memcpy ((addr), &mhd__aligned_dst, \
               sizeof(mhd__aligned_dst)); } while (0)
#endif /* ! mhd_PUT_32BIT_BE_ALLOW_UNALIGNED */


/* mhd_GET_32BIT_BE (addr)
 * get big-endian 32-bit value stored at addr
 * and return it in native-endian mode.
 */
#if mhd_BYTE_ORDER == mhd_BIG_ENDIAN
# define mhd_GET_32BIT_BE(addr) \
  (*(const uint32_t*) (addr))
#elif mhd_BYTE_ORDER == mhd_LITTLE_ENDIAN
# define mhd_GET_32BIT_BE(addr) \
  mhd_BYTES_SWAP32 (*(const uint32_t*) (addr))
#else  /* mhd_BYTE_ORDER != mhd_LITTLE_ENDIAN */
/* Endianness was not detected or non-standard like PDP-endian */
mhd_static_inline uint32_t
mhd_GET_32BIT_BE (const void *addr)
{
  const uint8_t *const src = (const uint8_t *) addr;
  uint32_t ret;
  ret = (uint32_t) (((uint32_t) src[0]) << 24u);
  ret |= (uint32_t) (((uint32_t) src[1]) << 16u);
  ret |= (uint32_t) (((uint32_t) src[2]) << 8u);
  ret |= (uint32_t) src[3];
  return ret;
}
/* Indicate that mhd_GET_32BIT_BE does not need aligned pointer */
# define mhd_GET_32BIT_BE_ALLOW_UNALIGNED 1
#endif /* mhd_BYTE_ORDER != mhd_LITTLE_ENDIAN */

#ifdef mhd_GET_32BIT_BE_ALLOW_UNALIGNED
# define mhd_GET_32BIT_BE_UNALIGN(addr) mhd_GET_32BIT_BE ((addr))
#else  /* ! mhd_GET_32BIT_BE_ALLOW_UNALIGNED */
/* Get value safely from an unaligned address */
mhd_static_inline MHD_FN_PAR_NONNULL_ALL_ uint32_t
mhd_GET_32BIT_BE_UNALIGN (const void *addr)
{
  uint32_t aligned_src;
  memcpy (&aligned_src, addr, sizeof(aligned_src));
  return mhd_GET_32BIT_BE (&aligned_src);
}
#endif /* ! mhd_GET_32BIT_BE_ALLOW_UNALIGNED */

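/* Illustrative usage (editor's example, not part of the original header):
   reading the bytes 0xDE 0xAD 0xBE 0xEF with mhd_GET_32BIT_BE_UNALIGN()
   yields 0xDEADBEEF on any platform, while mhd_PUT_32BIT_BE_UNALIGN()
   stores 0xDEADBEEF back as the same byte sequence. */
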
/* mhd_PUT_UINTFAST32_BE(addr, uif32)
 * Put uint_fast32_t value to the addr in big endian mode.
 * The complete uint_fast32_t value is written, regardless of the number of
 * bits in uint_fast32_t (it can be more than 32 bits).
 */
#if SIZEOF_UINT_FAST32_T == 4
# define mhd_PUT_UINTFAST32_BE(addr, uif32) \
  mhd_PUT_32BIT_BE ((addr),(uif32))
#elif SIZEOF_UINT_FAST32_T == 8
# define mhd_PUT_UINTFAST32_BE(addr, uif32) \
  mhd_PUT_64BIT_BE ((addr),(uif32))
#else  /* future-proof */
mhd_static_inline void
mhd_PUT_UINTFAST32_BE (void *addr, uint_fast32_t uif32)
{
  uint8_t *const dst = (uint8_t *) addr;
  size_t i;
  for (i = 0; i < sizeof(uif32); ++i)
    dst[i] = (uint8_t) (uif32 >> ((sizeof(uif32) - 1 - i) * 8));
}
#endif

/* mhd_GET_UINTFAST32_BE(addr)
 * Get uint_fast32_t value at the addr as big endian.
 */
#if SIZEOF_UINT_FAST32_T == 4
# define mhd_GET_UINTFAST32_BE(addr) \
  mhd_GET_32BIT_BE ((addr))
#elif SIZEOF_UINT_FAST32_T == 8
# define mhd_GET_UINTFAST32_BE(addr) \
  mhd_GET_64BIT_BE ((addr))
#else  /* future-proof */
mhd_static_inline uint_fast32_t
mhd_GET_UINTFAST32_BE (const void *addr)
{
  size_t i;
  uint_fast32_t ret = 0;

  for (i = 0; i < sizeof(ret); ++i)
  {
    ret <<= 8u;
    ret |= (uint_fast32_t) (((const uint8_t*) addr)[i]);
  }
  return ret;
}
#endif

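/* Illustrative usage (editor's example, not part of the original header):
   the destination must hold sizeof(uint_fast32_t) bytes, which may be
   eight rather than four:

     uint_fast32_t dst;                 // suitably aligned destination
     mhd_PUT_UINTFAST32_BE (&dst, 1u);
     // viewed as bytes, the last byte of dst is 0x01 and all earlier
     // bytes are 0x00
 */
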
/* mhd_PUT_16BIT_BE (addr, value16)
 * put 16-bit value16 to addr
 * in big-endian mode.
 */
#if mhd_BYTE_ORDER == mhd_BIG_ENDIAN
# define mhd_PUT_16BIT_BE(addr, value16) \
  ((*(uint16_t*) (addr)) = (uint16_t) (value16))
#elif mhd_BYTE_ORDER == mhd_LITTLE_ENDIAN
# define mhd_PUT_16BIT_BE(addr, value16) \
  ((*(uint16_t*) (addr)) = mhd_BYTES_SWAP16 (value16))
#else  /* mhd_BYTE_ORDER != mhd_LITTLE_ENDIAN */
/* Endianness was not detected or non-standard like PDP-endian */
mhd_static_inline void
mhd_PUT_16BIT_BE (void *addr, uint16_t value16)
{
  uint8_t *const dst = (uint8_t *) addr;
  dst[0] = (uint8_t) (value16 >> 8u);
  dst[1] = (uint8_t) (value16 >> 0u);
}
/* Indicate that mhd_PUT_16BIT_BE does not need aligned pointer */
# define mhd_PUT_16BIT_BE_ALLOW_UNALIGNED 1
#endif /* mhd_BYTE_ORDER != mhd_LITTLE_ENDIAN */

/* Put result safely to unaligned address */
#ifdef mhd_PUT_16BIT_BE_ALLOW_UNALIGNED
# define mhd_PUT_16BIT_BE_UNALIGN(addr, value16) \
  mhd_PUT_16BIT_BE ((addr),(value16))
#else  /* ! mhd_PUT_16BIT_BE_ALLOW_UNALIGNED */
# define mhd_PUT_16BIT_BE_UNALIGN(addr, value16) \
  do { uint16_t mhd__aligned_dst; \
       mhd_PUT_16BIT_BE (&mhd__aligned_dst, (value16)); \
       memcpy ((addr), &mhd__aligned_dst, \
               sizeof(mhd__aligned_dst)); } while (0)
#endif /* ! mhd_PUT_16BIT_BE_ALLOW_UNALIGNED */


/* mhd_GET_16BIT_BE (addr)
 * get big-endian 16-bit value stored at addr
 */
#if mhd_BYTE_ORDER == mhd_BIG_ENDIAN
# define mhd_GET_16BIT_BE(addr) \
  (*(const uint16_t*) (addr))
#elif mhd_BYTE_ORDER == mhd_LITTLE_ENDIAN
# define mhd_GET_16BIT_BE(addr) \
  mhd_BYTES_SWAP16 (*(const uint16_t*) (addr))
#else  /* mhd_BYTE_ORDER != mhd_LITTLE_ENDIAN */
/* Endianness was not detected or non-standard like PDP-endian */
mhd_static_inline uint16_t
mhd_GET_16BIT_BE (const void *addr)
{
  const uint8_t *const src = (const uint8_t *) addr;
  return (uint16_t) ((((uint16_t) src[0]) << 8u) | ((uint16_t) src[1]));
}
/* Indicate that mhd_GET_16BIT_BE does not need aligned pointer */
# define mhd_GET_16BIT_BE_ALLOW_UNALIGNED 1
#endif /* mhd_BYTE_ORDER != mhd_LITTLE_ENDIAN */

#ifdef mhd_GET_16BIT_BE_ALLOW_UNALIGNED
# define mhd_GET_16BIT_BE_UNALIGN(addr) mhd_GET_16BIT_BE ((addr))
#else  /* ! mhd_GET_16BIT_BE_ALLOW_UNALIGNED */
/* Get value safely from an unaligned address */
mhd_static_inline MHD_FN_PAR_NONNULL_ALL_ uint16_t
mhd_GET_16BIT_BE_UNALIGN (const void *addr)
{
  uint16_t aligned_src;
  memcpy (&aligned_src, addr, sizeof(aligned_src));
  return mhd_GET_16BIT_BE (&aligned_src);
}
#endif /* ! mhd_GET_16BIT_BE_ALLOW_UNALIGNED */

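/* Illustrative usage (editor's example, not part of the original header):
   mhd_PUT_16BIT_BE_UNALIGN (buf, 0xABCDu) stores the bytes 0xAB, 0xCD and
   mhd_GET_16BIT_BE_UNALIGN() applied to the bytes 0x01, 0x02 returns
   0x0102, e.g. for reading or writing 16-bit protocol fields. */
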
/**
 * Rotate right 32-bit value by number of bits.
 */
#if defined(mhd_HAS_VC_INTRINSICS)
# ifndef __clang__
# pragma intrinsic(_rotr)
# endif /* ! __clang__ */
# define mhd_ROTR32(value32, bits) \
  ((uint32_t) _rotr ((uint32_t) (value32), (int) (bits)))
#elif mhd_HAS_BUILTIN (__builtin_stdc_rotate_right)
# define mhd_ROTR32(value32, bits) \
  (__builtin_stdc_rotate_right ((uint32_t) (value32), (bits)))
#elif mhd_HAS_BUILTIN (__builtin_rotateright32)
# define mhd_ROTR32(value32, bits) \
  ((uint32_t) __builtin_rotateright32 ((value32), (bits)))
#else  /* ! __builtin_rotateright32 */
mhd_static_inline uint32_t
mhd_ROTR32 (uint32_t value32, unsigned int bits)
{
  bits &= 31u;
  return (value32 >> bits) | (value32 << ((32u - bits) & 31u));
}


#endif /* ! __builtin_rotateright32 */


/**
 * Rotate left 32-bit value by number of bits.
 */
#if defined(mhd_HAS_VC_INTRINSICS)
# ifndef __clang__
# pragma intrinsic(_rotl)
# endif /* ! __clang__ */
# define mhd_ROTL32(value32, bits) \
  ((uint32_t) _rotl ((uint32_t) (value32), (int) (bits)))
#elif mhd_HAS_BUILTIN (__builtin_stdc_rotate_left)
# define mhd_ROTL32(value32, bits) \
  (__builtin_stdc_rotate_left ((uint32_t) (value32), (bits)))
#elif mhd_HAS_BUILTIN (__builtin_rotateleft32)
# define mhd_ROTL32(value32, bits) \
  ((uint32_t) __builtin_rotateleft32 ((value32), (bits)))
#else  /* ! __builtin_rotateleft32 */
mhd_static_inline uint32_t
mhd_ROTL32 (uint32_t value32, unsigned int bits)
{
  bits &= 31u;
  return (value32 << bits) | (value32 >> ((32u - bits) & 31u));
}


#endif /* ! __builtin_rotateleft32 */


/**
 * Rotate right 64-bit value by number of bits.
 */
#if defined(mhd_HAS_VC_INTRINSICS)
# ifndef __clang__
# pragma intrinsic(_rotr64)
# endif /* ! __clang__ */
# define mhd_ROTR64(value64, bits) \
  ((uint64_t) _rotr64 ((uint64_t) (value64), (int) (bits)))
#elif mhd_HAS_BUILTIN (__builtin_stdc_rotate_right)
# define mhd_ROTR64(value64, bits) \
  (__builtin_stdc_rotate_right ((uint64_t) (value64), (bits)))
#elif mhd_HAS_BUILTIN (__builtin_rotateright64)
# define mhd_ROTR64(value64, bits) \
  ((uint64_t) __builtin_rotateright64 ((value64), (bits)))
#else  /* ! __builtin_rotateright64 */
mhd_static_inline uint64_t
mhd_ROTR64 (uint64_t value64, unsigned int bits)
{
  bits &= 63u;
  return (value64 >> bits) | (value64 << ((64u - bits) & 63u));
}


#endif /* ! __builtin_rotateright64 */

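/* Illustrative usage (editor's example, not part of the original header):
   a rotation moves the bits shifted out of one end back in at the other:
     mhd_ROTR32 (0x00000001u, 1)          == 0x80000000u
     mhd_ROTL32 (0x80000000u, 1)          == 0x00000001u
     mhd_ROTR64 (0x0000000000000001u, 8)  == 0x0100000000000000u
 */
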
/**
 * @def mhd_LEADING_ZEROS32NZ
 * Count leading (most-significant) zero bits in a non-zero 32-bit value.
 * The result is undefined if the argument is zero or does not fit in 32 bits.
 */
#if defined(MHD_HAVE___BUILTIN_CLZ) && 4 == SIZEOF_UNSIGNED_INT
# define mhd_LEADING_ZEROS32NZ(val32) \
  ((uint_least8_t) __builtin_clz ((unsigned int) (val32)))
#elif defined(MHD_HAVE___BUILTIN_CLZL) && 4 == SIZEOF_UNSIGNED_LONG
# define mhd_LEADING_ZEROS32NZ(val32) \
  ((uint_least8_t) __builtin_clzl ((unsigned long) (val32)))
#elif defined(MHD_HAVE___BUILTIN_CLZG1) && 4 <= SIZEOF_UINT_LEAST32_T
# if 4 == SIZEOF_UINT_LEAST32_T
# define mhd_LEADING_ZEROS32NZ(val32) \
  ((uint_least8_t) __builtin_clzg ((uint_least32_t) (val32)))
# else  /* 4 < SIZEOF_UINT_LEAST32_T */
# define mhd_LEADING_ZEROS32NZ(val32) \
  ((uint_least8_t) (__builtin_clzg ((uint_least32_t) (val32)) \
                    - ((sizeof(uint_least32_t) - 4u) * 8u)))
# endif /* 4 < SIZEOF_UINT_LEAST32_T */
#endif /* MHD_HAVE___BUILTIN_CLZG1 && 4 <= SIZEOF_UINT_LEAST32_T */


/**
 * @def mhd_BIT_WIDTH32NZ
 * Return the smallest number of bits needed to represent the value.
 * The result is undefined if the argument is zero or does not fit in 32 bits.
 */
#if defined(mhd_HAS_VC_INTRINSICS) && 4 == SIZEOF_UNSIGNED_LONG && \
  (defined(_M_X64) || defined(_M_IX86) \
   || defined(_M_ARM) || defined(_M_ARM64) \
   || defined(__i386__) || defined(__x86_64__) \
   || defined(__arm__) || defined(__aarch64__))
# ifndef __clang__
# pragma intrinsic(_BitScanReverse)
# endif /* ! __clang__ */
mhd_static_inline uint_least8_t
mhd_bh_func_bit_width32nz (uint_least32_t val32)
{
  unsigned long idx;
  (void) _BitScanReverse (&idx, (unsigned long) val32);
  return (uint_least8_t) (idx + 1u);
}
# define mhd_BIT_WIDTH32NZ(val32) mhd_bh_func_bit_width32nz ((val32))
#endif /* mhd_HAS_VC_INTRINSICS && 4 == SIZEOF_UNSIGNED_LONG && (x86 || ARM) */


/**
 * @def mhd_LEADING_ZEROS32
 * Count leading (most-significant) zero bits in a 32-bit value.
 * If the argument is zero then 32 is returned.
 * The result is undefined if the argument does not fit in 32 bits.
 */
#if defined(MHD_HAVE___BUILTIN_CLZG2) && 4 <= SIZEOF_UINT_LEAST32_T
# if 4 == SIZEOF_UINT_LEAST32_T
# define mhd_LEADING_ZEROS32(val32) \
  ((uint_least8_t) __builtin_clzg ((uint_least32_t) (val32),32u))
# else  /* 4 < SIZEOF_UINT_LEAST32_T */
# define mhd_LEADING_ZEROS32(val32) \
  ((uint_least8_t) (__builtin_clzg ((uint_least32_t) (val32), \
                                    sizeof(uint_least32_t) * 8u) \
                    - ((sizeof(uint_least32_t) - 4u) * 8u)))
# endif /* 4 < SIZEOF_UINT_LEAST32_T */
#elif defined(mhd_HAS_VC_INTRINSICS) && 4 == SIZEOF_UNSIGNED_LONG && \
  (defined(_M_ARM) || defined(_M_ARM64) \
   || defined(__arm__) || defined(__aarch64__)) && \
  ( (! defined(__clang__)) \
    || (((__clang_major__ + 0) >= 18) && defined(__aarch64__)) )
/* Support for _CountLeadingZeros() was added only in clang 18 */
# ifndef __clang__
# pragma intrinsic(_CountLeadingZeros)
# endif /* ! __clang__ */
# define mhd_LEADING_ZEROS32(val32) \
  ((uint_least8_t) _CountLeadingZeros ((unsigned long) (val32)))
#elif mhd_HAS_BUILTIN (__builtin_stdc_leading_zeros)
# if 4 == SIZEOF_UINT_LEAST32_T
# define mhd_LEADING_ZEROS32(val32) \
  ((uint_least8_t) \
   __builtin_stdc_leading_zeros ((uint_least32_t) (val32)))
# else  /* 4 < SIZEOF_UINT_LEAST32_T */
# define mhd_LEADING_ZEROS32(val32) \
  ((uint_least8_t) \
   (__builtin_stdc_leading_zeros ((uint_least32_t) (val32)) \
    - ((sizeof(uint_least32_t) - 4u) * 8u)))
# endif /* 4 < SIZEOF_UINT_LEAST32_T */
#endif /* __builtin_stdc_leading_zeros */


/**
 * @def mhd_BIT_WIDTH32
 * Return the smallest number of bits needed to represent the value.
 * If the argument is zero then zero is returned.
 * The result is undefined if the argument does not fit in 32 bits.
 */
#if defined(mhd_HAS_VC_INTRINSICS) && 4 == SIZEOF_UNSIGNED_LONG && \
  (defined(_M_X64) || defined(_M_IX86) \
   || defined(_M_ARM) || defined(_M_ARM64) \
   || defined(__i386__) || defined(__x86_64__) \
   || defined(__arm__) || defined(__aarch64__))
# ifndef __clang__
# pragma intrinsic(_BitScanReverse)
# endif /* ! __clang__ */
mhd_static_inline uint_least8_t
mhd_bh_func_bit_width32 (uint_least32_t val32)
{
  unsigned long idx;
  if (0 == _BitScanReverse (&idx, (unsigned long) val32))
    return 0u;
  return (uint_least8_t) (idx + 1u);
}
# define mhd_BIT_WIDTH32(val32) mhd_bh_func_bit_width32 ((val32))
#elif mhd_HAS_BUILTIN (__builtin_stdc_bit_width)
# define mhd_BIT_WIDTH32(val32) \
  ((uint_least8_t) __builtin_stdc_bit_width ((uint_least32_t) (val32)))
#endif /* __builtin_stdc_bit_width */

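/* Illustrative values (editor's example, not part of the original header):
     mhd_BIT_WIDTH32NZ (0x1u)            == 1
     mhd_BIT_WIDTH32NZ (0xFFu)           == 8
     mhd_LEADING_ZEROS32NZ (0xFFu)       == 24
     mhd_LEADING_ZEROS32NZ (0x80000000u) == 0
   The "NZ" variants must not be given a zero argument; the plain
   mhd_LEADING_ZEROS32()/mhd_BIT_WIDTH32() macros handle zero as well. */
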
/* ** Use compiler-optimised implementation for missing functionality ** */
#ifndef mhd_LEADING_ZEROS32NZ
# ifdef mhd_BIT_WIDTH32NZ
# define mhd_LEADING_ZEROS32NZ(val32) \
  ((uint_least8_t) (32u - mhd_BIT_WIDTH32NZ ((val32))))
# endif /* mhd_BIT_WIDTH32NZ */
#endif /* ! mhd_LEADING_ZEROS32NZ */

#ifndef mhd_BIT_WIDTH32NZ
# ifdef mhd_LEADING_ZEROS32NZ
# define mhd_BIT_WIDTH32NZ(val32) \
  ((uint_least8_t) (32u - mhd_LEADING_ZEROS32NZ ((val32))))
# endif /* mhd_LEADING_ZEROS32NZ */
#endif /* ! mhd_BIT_WIDTH32NZ */

#ifndef mhd_LEADING_ZEROS32
# ifdef mhd_BIT_WIDTH32
# define mhd_LEADING_ZEROS32(val32) \
  ((uint_least8_t) (32u - mhd_BIT_WIDTH32 ((val32))))
# endif /* mhd_BIT_WIDTH32 */
#endif /* ! mhd_LEADING_ZEROS32 */

#ifndef mhd_BIT_WIDTH32
# ifdef mhd_LEADING_ZEROS32
# define mhd_BIT_WIDTH32(val32) \
  ((uint_least8_t) (32u - mhd_LEADING_ZEROS32 ((val32))))
# endif /* mhd_LEADING_ZEROS32 */
#endif /* ! mhd_BIT_WIDTH32 */

#if ! defined(mhd_LEADING_ZEROS32NZ)
# ifdef mhd_LEADING_ZEROS32
# define mhd_LEADING_ZEROS32NZ(val32) mhd_LEADING_ZEROS32 ((val32))
# define mhd_BIT_WIDTH32NZ(val32) mhd_BIT_WIDTH32 ((val32))
# endif /* mhd_LEADING_ZEROS32 */
#else  /* mhd_LEADING_ZEROS32NZ */
# if ! defined(mhd_LEADING_ZEROS32)

mhd_static_inline uint_least8_t
mhd_bh_func_leading_zeros32 (uint_least32_t val32)
{
  if (0u == val32)
    return 32u;
  return mhd_LEADING_ZEROS32NZ (val32);
}

mhd_static_inline uint_least8_t
mhd_bh_func_bit_width32 (uint_least32_t val32)
{
  if (0u == val32)
    return 0u;
  return mhd_BIT_WIDTH32NZ (val32);
}

# define mhd_LEADING_ZEROS32(val32) mhd_bh_func_leading_zeros32 ((val32))
# define mhd_BIT_WIDTH32(val32) mhd_bh_func_bit_width32 ((val32))
# endif /* ! mhd_LEADING_ZEROS32 */
#endif /* mhd_LEADING_ZEROS32NZ */

#if defined(mhd_LEADING_ZEROS32NZ) || defined(mhd_BIT_WIDTH32NZ) \
  || defined(mhd_LEADING_ZEROS32) || defined(mhd_BIT_WIDTH32)
/* If at least one compiler-optimised function is detected, all macros must be
   defined, as all of them can be based on a single base function. */
# if ! defined(mhd_LEADING_ZEROS32NZ)
#error mhd_LEADING_ZEROS32NZ() must be defined
# endif
# if ! defined(mhd_BIT_WIDTH32NZ)
#error mhd_BIT_WIDTH32NZ() must be defined
# endif
# if ! defined(mhd_LEADING_ZEROS32)
#error mhd_LEADING_ZEROS32() must be defined
# endif
# if ! defined(mhd_BIT_WIDTH32)
#error mhd_BIT_WIDTH32() must be defined
# endif
#else
/* No compiler-optimised base version. Use fallback implementation. */


mhd_static_inline uint_least8_t
mhd_bh_func_bit_width32 (uint_least32_t val32)
{
  uint_fast8_t cal_width = 0u;
  uint_fast8_t check_bits;
  uint_fast32_t val_left = (uint_fast32_t) (val32 & 0xFFFFFFFFu);
  mhd_assert (val32 == val_left);

  /* Branchless code without any tables.
     Should have good performance even with a cold cache. */
  check_bits = (uint_fast8_t) ((0 != (val_left >> 16u)) * 16u);
  cal_width += check_bits;
  val_left >>= check_bits;

  check_bits = (uint_fast8_t) ((0 != (val_left >> 8u)) * 8u);
  cal_width += check_bits;
  val_left >>= check_bits;

  check_bits = (uint_fast8_t) ((0 != (val_left >> 4u)) * 4u);
  cal_width += check_bits;
  val_left >>= check_bits;

  check_bits = (uint_fast8_t) ((0 != (val_left >> 2u)) * 2u);
  cal_width += check_bits;
  val_left >>= check_bits;

  check_bits = (uint_fast8_t) ((0 != (val_left >> 1u)) * 1u);
  cal_width += check_bits;
  val_left >>= check_bits;

  return (uint_least8_t) (cal_width + val_left);
}

# define mhd_LEADING_ZEROS32NZ(val32) \
  ((uint_least8_t) \
   (32u - mhd_bh_func_bit_width32 ((uint_least32_t) (val32))))
# define mhd_BIT_WIDTH32NZ(val32) mhd_bh_func_bit_width32 ((val32))
# define mhd_LEADING_ZEROS32(val32) \
  ((uint_least8_t) \
   (32u - mhd_bh_func_bit_width32 ((uint_least32_t) (val32))))
# define mhd_BIT_WIDTH32(val32) mhd_bh_func_bit_width32 ((val32))
#endif

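/* Worked example (editor's addition, not part of the original header):
   for val32 == 0x00012345 the fallback halves the search range at every
   step: the 16-bit probe finds set bits in the upper half (width += 16,
   the value is shifted down to 0x1), the 8/4/2/1-bit probes find nothing
   more, and adding the remaining bit gives a bit width of 17, i.e.
   mhd_BIT_WIDTH32 (0x00012345u) == 17 and
   mhd_LEADING_ZEROS32 (0x00012345u) == 15. */
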
/**
 * @def mhd_LEADING_ONES32
 * Count leading (most-significant) ones in a 32-bit value.
 * The argument is always treated as a 32-bit value; any higher-order bits
 * (if present) are ignored.
 * @note Unlike other related macros, this one explicitly trims (or extends)
 *       the argument to 32 bits. Do not use signed types or narrower types
 *       as the argument, as they may produce unexpected results.
 */
#if mhd_HAS_BUILTIN (__builtin_stdc_leading_ones) && \
  4 == SIZEOF_UINT_LEAST32_T
# define mhd_LEADING_ONES32(val32) \
  ((uint_least8_t) __builtin_stdc_leading_ones ((uint_least32_t) (val32)))
#else  /* ! __builtin_stdc_leading_ones || 4 != SIZEOF_UINT_LEAST32_T */
# define mhd_LEADING_ONES32(val32) \
  mhd_LEADING_ZEROS32 ((uint_least32_t) \
                       (0xFFFFFFFFu & ~((uint_least32_t) (val32))))
#endif /* ! __builtin_stdc_leading_ones || 4 != SIZEOF_UINT_LEAST32_T */


mhd_DATA_TRUNCATION_RUNTIME_CHECK_RESTORE

#if defined(_MSC_FULL_VER)
/* Restore warnings */
#pragma warning(pop)
#endif /* _MSC_FULL_VER */

#endif /* ! MHD_BITHELPERS_H */