| Directory: | cvmfs/ |
|---|---|
| File: | cvmfs/util/atomic.h |
| Date: | 2026-02-22 02:35:58 |
| Exec | Total | Coverage | |
|---|---|---|---|
| Lines: | 41 | 43 | 95.3% |
| Branches: | 8 | 8 | 100.0% |
| Line | Branch | Exec | Source |
|---|---|---|---|
| 1 | /** | ||
| 2 | * This file is part of the CernVM File System. | ||
| 3 | * | ||
| 4 | * Defines wrapper functions for atomic integer operations. Atomic operations | ||
| 5 | * are handled by GCC. | ||
| 6 | */ | ||
| 7 | |||
| 8 | #ifndef CVMFS_UTIL_ATOMIC_H_ | ||
| 9 | #define CVMFS_UTIL_ATOMIC_H_ | ||
| 10 | |||
| 11 | #include <stdint.h> | ||
| 12 | |||
| 13 | #ifdef CVMFS_NAMESPACE_GUARD | ||
| 14 | namespace CVMFS_NAMESPACE_GUARD { | ||
| 15 | #endif | ||
| 16 | |||
| 17 | typedef int32_t atomic_int32; | ||
| 18 | typedef int64_t atomic_int64; | ||
| 19 | |||
| 20 | 10384308 | static void inline __attribute__((used)) atomic_init32(atomic_int32 *a) { | |
| 21 | 10384308 | *a = 0; | |
| 22 | 10384308 | } | |
| 23 | |||
| 24 | 7104832 | static void inline __attribute__((used)) atomic_init64(atomic_int64 *a) { | |
| 25 | 7104832 | *a = 0; | |
| 26 | 7104832 | } | |
| 27 | |||
| 28 | 313695055 | static int32_t inline __attribute__((used)) atomic_read32(atomic_int32 *a) { | |
| 29 | 313695055 | return __sync_fetch_and_add(a, 0); | |
| 30 | } | ||
| 31 | |||
| 32 | 104783068 | static int64_t inline __attribute__((used)) atomic_read64(atomic_int64 *a) { | |
| 33 | 104783068 | return __sync_fetch_and_add(a, 0); | |
| 34 | } | ||
| 35 | |||
| 36 | 29028457 | static void inline __attribute__((used)) atomic_write32(atomic_int32 *a, | |
| 37 | int32_t value) { | ||
| 38 |
2/2✓ Branch 1 taken 59689710 times.
✓ Branch 2 taken 35389489 times.
|
88718167 | while (!__sync_bool_compare_and_swap(a, atomic_read32(a), value)) { |
| 39 | } | ||
| 40 | 35389489 | } | |
| 41 | |||
| 42 | 29174777 | static void inline __attribute__((used)) atomic_write64(atomic_int64 *a, | |
| 43 | int64_t value) { | ||
| 44 |
2/2✓ Branch 1 taken 59538754 times.
✓ Branch 2 taken 35569653 times.
|
88713531 | while (!__sync_bool_compare_and_swap(a, atomic_read64(a), value)) { |
| 45 | } | ||
| 46 | 35569653 | } | |
| 47 | |||
| 48 | 60165597 | static void inline __attribute__((used)) atomic_inc32(atomic_int32 *a) { | |
| 49 | 60165597 | (void)__sync_fetch_and_add(a, 1); | |
| 50 | 60165597 | } | |
| 51 | |||
| 52 | 117411761 | static void inline __attribute__((used)) atomic_inc64(atomic_int64 *a) { | |
| 53 | 117411761 | (void)__sync_fetch_and_add(a, 1); | |
| 54 | 117411761 | } | |
| 55 | |||
| 56 | 15337019 | static void inline __attribute__((used)) atomic_dec32(atomic_int32 *a) { | |
| 57 | 15337019 | (void)__sync_fetch_and_sub(a, 1); | |
| 58 | 15337019 | } | |
| 59 | |||
| 60 | 15116113 | static void inline __attribute__((used)) atomic_dec64(atomic_int64 *a) { | |
| 61 | 15116113 | (void)__sync_fetch_and_sub(a, 1); | |
| 62 | 15116113 | } | |
| 63 | |||
| 64 | 10056442 | static int32_t inline __attribute__((used)) atomic_xadd32(atomic_int32 *a, | |
| 65 | int32_t offset) { | ||
| 66 |
2/2✓ Branch 0 taken 1890076 times.
✓ Branch 1 taken 8166366 times.
|
10056442 | if (offset < 0) |
| 67 | 1890076 | return __sync_fetch_and_sub(a, -offset); | |
| 68 | 8166366 | return __sync_fetch_and_add(a, offset); | |
| 69 | } | ||
| 70 | |||
| 71 | 91862580 | static int64_t inline __attribute__((used)) atomic_xadd64(atomic_int64 *a, | |
| 72 | int64_t offset) { | ||
| 73 |
2/2✓ Branch 0 taken 21944281 times.
✓ Branch 1 taken 69918299 times.
|
91862580 | if (offset < 0) |
| 74 | 21944281 | return __sync_fetch_and_sub(a, -offset); | |
| 75 | 69918299 | return __sync_fetch_and_add(a, offset); | |
| 76 | } | ||
| 77 | |||
| 78 | 143523 | static bool inline __attribute__((used)) atomic_cas32(atomic_int32 *a, | |
| 79 | int32_t cmp, | ||
| 80 | int32_t newval) { | ||
| 81 | 143523 | return __sync_bool_compare_and_swap(a, cmp, newval); | |
| 82 | } | ||
| 83 | |||
| 84 | ✗ | static bool inline __attribute__((used)) atomic_cas64(atomic_int64 *a, | |
| 85 | int64_t cmp, | ||
| 86 | int64_t newval) { | ||
| 87 | // Clang 3.5 has a bug in optimized __sync_bool_compare_and_swap: | ||
| 88 | // https://bugs.llvm.org//show_bug.cgi?format=multiple&id=21499 | ||
| 89 | ✗ | return __sync_bool_compare_and_swap(a, cmp, newval); | |
| 90 | } | ||
| 91 | |||
| 92 | 42 | static void inline __attribute__((used)) MemoryFence() { | |
| 93 | 42 | asm __volatile__("" : : : "memory"); | |
| 94 | 42 | } | |
| 95 | |||
| 96 | #ifdef CVMFS_NAMESPACE_GUARD | ||
| 97 | } // namespace CVMFS_NAMESPACE_GUARD | ||
| 98 | #endif | ||
| 99 | |||
| 100 | #endif // CVMFS_UTIL_ATOMIC_H_ | ||
| 101 |