GCC Code Coverage Report
Directory: cvmfs/
File:      cvmfs/receiver/../atomic.h
Date:      2019-02-03 02:48:13

            Exec  Total  Coverage
Lines:         0     32     0.0 %
Branches:      0      8     0.0 %

Source:
/**
 * This file is part of the CernVM File System.
 *
 * Defines wrapper functions for atomic integer operations.  Atomic operations
 * are handled by GCC.
 */

#ifndef CVMFS_ATOMIC_H_
#define CVMFS_ATOMIC_H_

#include <stdint.h>

#ifdef CVMFS_NAMESPACE_GUARD
namespace CVMFS_NAMESPACE_GUARD {
#endif

typedef int32_t atomic_int32;
typedef int64_t atomic_int64;

static void inline __attribute__((used)) atomic_init32(atomic_int32 *a) {
  *a = 0;
}

static void inline __attribute__((used)) atomic_init64(atomic_int64 *a) {
  *a = 0;
}

// Atomic reads are implemented as fetch-and-add of zero, which returns the
// current value together with a full memory barrier.
static int32_t inline __attribute__((used)) atomic_read32(atomic_int32 *a) {
  return __sync_fetch_and_add(a, 0);
}

static int64_t inline __attribute__((used)) atomic_read64(atomic_int64 *a) {
  return __sync_fetch_and_add(a, 0);
}

// Atomic writes loop on compare-and-swap until the store takes effect,
// giving the write full-barrier semantics.
static void inline __attribute__((used))
atomic_write32(atomic_int32 *a, int32_t value) {
  while (!__sync_bool_compare_and_swap(a, atomic_read32(a), value)) {
  }
}

static void inline __attribute__((used))
atomic_write64(atomic_int64 *a, int64_t value) {
  while (!__sync_bool_compare_and_swap(a, atomic_read64(a), value)) {
  }
}

static void inline __attribute__((used)) atomic_inc32(atomic_int32 *a) {
  (void)__sync_fetch_and_add(a, 1);
}

static void inline __attribute__((used)) atomic_inc64(atomic_int64 *a) {
  (void)__sync_fetch_and_add(a, 1);
}

static void inline __attribute__((used)) atomic_dec32(atomic_int32 *a) {
  (void)__sync_fetch_and_sub(a, 1);
}

static void inline __attribute__((used)) atomic_dec64(atomic_int64 *a) {
  (void)__sync_fetch_and_sub(a, 1);
}

// Fetch-and-add returning the previous value; negative offsets are routed
// through fetch-and-sub.
static int32_t inline __attribute__((used))
atomic_xadd32(atomic_int32 *a, int32_t offset) {
  if (offset < 0) return __sync_fetch_and_sub(a, -offset);
  return __sync_fetch_and_add(a, offset);
}

static int64_t inline __attribute__((used))
atomic_xadd64(atomic_int64 *a, int64_t offset) {
  if (offset < 0) return __sync_fetch_and_sub(a, -offset);
  return __sync_fetch_and_add(a, offset);
}

static bool inline __attribute__((used))
atomic_cas32(atomic_int32 *a, int32_t cmp, int32_t newval) {
  return __sync_bool_compare_and_swap(a, cmp, newval);
}

static bool inline __attribute__((used))
atomic_cas64(atomic_int64 *a, int64_t cmp, int64_t newval) {
  // Clang 3.5 has a bug in optimized __sync_bool_compare_and_swap:
  // https://bugs.llvm.org//show_bug.cgi?format=multiple&id=21499
  return __sync_bool_compare_and_swap(a, cmp, newval);
}

// Compiler barrier: the empty asm statement keeps the compiler from
// reordering memory accesses across it; no fence instruction is emitted.
static void inline __attribute__((used)) MemoryFence() {
  asm __volatile__("" : : : "memory");
}
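
// If a full CPU-level fence were needed, a wrapper could be built the same
// way from GCC's __sync_synchronize() builtin.  Hypothetical sketch, not
// part of this header:
//
//   static void inline __attribute__((used)) HardwareFence() {
//     __sync_synchronize();  // full memory barrier in hardware
//   }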

#ifdef CVMFS_NAMESPACE_GUARD
}  // namespace CVMFS_NAMESPACE_GUARD
#endif

#endif  // CVMFS_ATOMIC_H_
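
Usage sketch, assuming only the interface above (the names Counter, RefInit,
RefAcquire, RefGet, and TryClaim are hypothetical illustrations, not
CernVM-FS code):

  #include <stdint.h>
  #include "atomic.h"

  // A shared counter built on the 32-bit wrappers.
  struct Counter {
    atomic_int32 refs;
  };

  void RefInit(Counter *c) { atomic_init32(&c->refs); }    // refs = 0
  void RefAcquire(Counter *c) { atomic_inc32(&c->refs); }  // ++refs
  int32_t RefGet(Counter *c) { return atomic_read32(&c->refs); }

  // One-shot claim: exactly one caller observes true (0 = free, 1 = claimed).
  bool TryClaim(atomic_int32 *state) {
    return atomic_cas32(state, 0, 1);
  }

Because the __sync builtins behind the wrappers are full barriers, every call
except the plain init stores also orders the surrounding memory accesses.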