/*-------------------------------------------------------------------------
 *
 * generic-xlc.h
 *    Atomic operations for IBM's CC
 *
 * Portions Copyright (c) 2013-2016, PostgreSQL Global Development Group
 *
 * NOTES:
 *
 * Documentation:
 * * Synchronization and atomic built-in functions
 *   http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.2/com.ibm.xlcpp131.aix.doc/compiler_ref/bifs_sync_atomic.html
 *
 * src/include/port/atomics/generic-xlc.h
 *
 *-------------------------------------------------------------------------
 */
#if defined(HAVE_ATOMICS)

#define PG_HAVE_ATOMIC_U32_SUPPORT
typedef struct pg_atomic_uint32
{
    volatile uint32 value;
} pg_atomic_uint32;

/* 64bit atomics are only supported in 64bit mode */
#ifdef __64BIT__
#define PG_HAVE_ATOMIC_U64_SUPPORT
typedef struct pg_atomic_uint64
{
    volatile uint64 value pg_attribute_aligned(8);
} pg_atomic_uint64;
#endif /* __64BIT__ */
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
static inline bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
                                    uint32 *expected, uint32 newval)
{
    bool ret;

    /*
     * atomics.h specifies sequential consistency ("full barrier semantics")
     * for this interface.  Since "lwsync" provides acquire/release
     * consistency only, do not use it here.  GCC atomics observe the same
     * restriction; see its rs6000_pre_atomic_barrier().
     */
    __asm__ __volatile__ (" sync \n" ::: "memory");

    /*
     * XXX: __compare_and_swap is defined to take signed parameters, but that
     * shouldn't matter since we don't perform any arithmetic operations.
     */
    ret = __compare_and_swap((volatile int *) &ptr->value,
                             (int *) expected, (int) newval);

    /*
     * xlc's documentation tells us:
     * "If __compare_and_swap is used as a locking primitive, insert a call to
     * the __isync built-in function at the start of any critical sections."
     *
     * The critical section begins immediately after __compare_and_swap().
     */
    __isync();

    return ret;
}
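/*
 * Illustrative sketch, not part of the original header: how a caller-side
 * compare-exchange retry loop typically uses the function above.  Both
 * __compare_and_swap and the atomics.h interface write the currently stored
 * value back through "expected" on failure, so each retry recomputes the new
 * value from the refreshed copy.  example_fetch_or_u32 is a hypothetical
 * name, not a PostgreSQL API; the returned "old" is the value observed just
 * before the successful swap.
 *
 *    static inline uint32
 *    example_fetch_or_u32(volatile pg_atomic_uint32 *ptr, uint32 or_)
 *    {
 *        uint32 old = ptr->value;
 *
 *        while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, old | or_))
 *            ;
 *        return old;
 *    }
 */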
#define PG_HAVE_ATOMIC_FETCH_ADD_U32
static inline uint32
pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
{
    /*
     * __fetch_and_add() emits a leading "sync" and trailing "isync", thereby
     * providing sequential consistency.  This is undocumented.
     */
    return __fetch_and_add((volatile int *) &ptr->value, add_);
}
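/*
 * Usage sketch, not part of the original file: like __fetch_and_add, the
 * function above returns the value held before the addition.  "counter" and
 * "prev" are hypothetical names for illustration only.
 *
 *    pg_atomic_uint32 counter = { 0 };
 *    uint32 prev = pg_atomic_fetch_add_u32_impl(&counter, 1);
 *
 * Here prev is 0, and counter.value is now 1.
 */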
#ifdef PG_HAVE_ATOMIC_U64_SUPPORT

#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64
static inline bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
                                    uint64 *expected, uint64 newval)
{
    bool ret;

    __asm__ __volatile__ (" sync \n" ::: "memory");

    ret = __compare_and_swaplp((volatile long *) &ptr->value,
                               (long *) expected, (long) newval);

    __isync();

    return ret;
}

#define PG_HAVE_ATOMIC_FETCH_ADD_U64
static inline uint64
pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
{
    return __fetch_and_addlp((volatile long *) &ptr->value, add_);
}

#endif /* PG_HAVE_ATOMIC_U64_SUPPORT */

#endif /* defined(HAVE_ATOMICS) */