/*-------------------------------------------------------------------------
 *
 * lwlock.h
 *    Lightweight lock manager
 *
 *
 * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/storage/lwlock.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef LWLOCK_H
#define LWLOCK_H

#ifdef FRONTEND
#error "lwlock.h may not be included from frontend code"
#endif

#include "lib/ilist.h"
#include "storage/s_lock.h"
#include "port/atomics.h"

struct PGPROC;

/*
 * Prior to PostgreSQL 9.4, every lightweight lock in the system was stored
 * in a single array. For convenience and for compatibility with past
 * releases, we still have a main array, but it's now also permissible to
 * store LWLocks elsewhere in the main shared memory segment or in a dynamic
 * shared memory segment. Each array of lwlocks forms a separate "tranche".
 *
 * It's occasionally necessary to identify a particular LWLock "by name"; e.g.
 * because we wish to report the lock to dtrace. We could store a name or
 * other identifying information in the lock itself, but since it's common
 * to have many nearly-identical locks (e.g. one per buffer) this would end
 * up wasting significant amounts of memory. Instead, each lwlock stores a
 * tranche ID which tells us which array it's part of. Based on that, we can
 * figure out where the lwlock lies within the array using the data structure
 * shown below; the lock is then identified based on the tranche name and
 * computed array index. We need the array stride because the array might not
 * be an array of lwlocks, but rather some larger data structure that includes
 * one or more lwlocks per element.
 */
typedef struct LWLockTranche
{
    const char *name;
    void       *array_base;
    Size        array_stride;
} LWLockTranche;
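
/*
 * For illustration, the index computation described above amounts to pointer
 * arithmetic over array_base and array_stride; a sketch of that arithmetic
 * (not a function declared by this header):
 *
 *      int
 *      lock_index_in_tranche(const LWLockTranche *tranche, const LWLock *lock)
 *      {
 *          return ((const char *) lock - (const char *) tranche->array_base) /
 *              tranche->array_stride;
 *      }
 */
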
/*
 * Code outside of lwlock.c should not manipulate the contents of this
 * structure directly, but we have to declare it here to allow LWLocks to be
 * incorporated into other data structures.
 */
typedef struct LWLock
{
    uint16      tranche;        /* tranche ID */
    pg_atomic_uint32 state;     /* state of exclusive/nonexclusive lockers */
    dlist_head  waiters;        /* list of waiting PGPROCs */
#ifdef LOCK_DEBUG
    pg_atomic_uint32 nwaiters;  /* number of waiters */
    struct PGPROC *owner;       /* last exclusive owner of the lock */
#endif
} LWLock;

/*
 * In most cases, it's desirable to force each tranche of LWLocks to be aligned
 * on a cache line boundary and make the array stride a power of 2. This saves
 * a few cycles in indexing, but more importantly ensures that individual
 * LWLocks don't cross cache line boundaries. This reduces cache contention
 * problems, especially on AMD Opterons. In some cases, it's useful to add
 * even more padding so that each LWLock takes up an entire cache line; this is
 * useful, for example, in the main LWLock array, where the overall number of
 * locks is small but some are heavily contended.
 *
 * When allocating a tranche that contains data other than LWLocks, it is
 * probably best to include a bare LWLock and then pad the resulting structure
 * as necessary for performance. For an array that contains only LWLocks,
 * LWLockMinimallyPadded can be used for cases where we just want to ensure
 * that we don't cross cache line boundaries within a single lock, while
 * LWLockPadded can be used for cases where we want each lock to be an entire
 * cache line.
 *
 * On 32-bit platforms, an LWLockMinimallyPadded might actually contain more
 * than the absolute minimum amount of padding required to keep a lock from
 * crossing a cache line boundary, because an unpadded LWLock might fit into
 * 16 bytes. We ignore that possibility when determining the minimal amount
 * of padding. Older releases had larger LWLocks, so 32 really was the
 * minimum, and packing them in tighter might hurt performance.
 *
 * LWLOCK_MINIMAL_SIZE should be 32 on basically all common platforms, but
 * because slock_t is more than 2 bytes on some obscure platforms, we allow
 * for the possibility that it might be 64.
 */
#define LWLOCK_PADDED_SIZE  PG_CACHE_LINE_SIZE
#define LWLOCK_MINIMAL_SIZE (sizeof(LWLock) <= 32 ? 32 : 64)

/* LWLock, padded to a full cache line size */
typedef union LWLockPadded
{
    LWLock      lock;
    char        pad[LWLOCK_PADDED_SIZE];
} LWLockPadded;

/* LWLock, minimally padded */
typedef union LWLockMinimallyPadded
{
    LWLock      lock;
    char        pad[LWLOCK_MINIMAL_SIZE];
} LWLockMinimallyPadded;
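
/*
 * A minimal sketch of the embedding pattern recommended above, for a tranche
 * whose elements carry data besides the lock. The union name and fields here
 * are hypothetical, not declarations made by this header; a tranche built
 * from such elements would use array_stride = sizeof(MyShmemEntry):
 *
 *      typedef union MyShmemEntry
 *      {
 *          struct
 *          {
 *              LWLock      lock;
 *              int         nentries;
 *          }           d;
 *          char        pad[LWLOCK_PADDED_SIZE];
 *      } MyShmemEntry;
 *
 * Padding each element out to LWLOCK_PADDED_SIZE keeps any one element from
 * sharing a cache line with its neighbors, at the cost of some memory.
 */
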
extern PGDLLIMPORT LWLockPadded *MainLWLockArray;
extern char *MainLWLockNames[];

/* struct for storing named tranche information */
typedef struct NamedLWLockTranche
{
    LWLockTranche lwLockTranche;
    int         trancheId;
} NamedLWLockTranche;

extern PGDLLIMPORT NamedLWLockTranche *NamedLWLockTrancheArray;
extern PGDLLIMPORT int NamedLWLockTrancheRequests;

/* Names for fixed lwlocks */
#include "storage/lwlocknames.h"

/*
 * It's a bit odd to declare NUM_BUFFER_PARTITIONS and NUM_LOCK_PARTITIONS
 * here, but we need them to figure out offsets within MainLWLockArray, and
 * having this file include lock.h or bufmgr.h would be backwards.
 */

/* Number of partitions of the shared buffer mapping hashtable */
#define NUM_BUFFER_PARTITIONS  128

/* Number of partitions the shared lock tables are divided into */
#define LOG2_NUM_LOCK_PARTITIONS  4
#define NUM_LOCK_PARTITIONS  (1 << LOG2_NUM_LOCK_PARTITIONS)

/* Number of partitions the shared predicate lock tables are divided into */
#define LOG2_NUM_PREDICATELOCK_PARTITIONS  4
#define NUM_PREDICATELOCK_PARTITIONS  (1 << LOG2_NUM_PREDICATELOCK_PARTITIONS)

/* Offsets for various chunks of preallocated lwlocks. */
#define BUFFER_MAPPING_LWLOCK_OFFSET    NUM_INDIVIDUAL_LWLOCKS
#define LOCK_MANAGER_LWLOCK_OFFSET \
    (BUFFER_MAPPING_LWLOCK_OFFSET + NUM_BUFFER_PARTITIONS)
#define PREDICATELOCK_MANAGER_LWLOCK_OFFSET \
    (LOCK_MANAGER_LWLOCK_OFFSET + NUM_LOCK_PARTITIONS)
#define NUM_FIXED_LWLOCKS \
    (PREDICATELOCK_MANAGER_LWLOCK_OFFSET + NUM_PREDICATELOCK_PARTITIONS)
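
/*
 * For illustration, these offsets index MainLWLockArray. The lock guarding
 * buffer-mapping partition i (0 <= i < NUM_BUFFER_PARTITIONS) would be
 * reached as follows; this is a sketch of the arithmetic, not a macro
 * defined by this header:
 *
 *      LWLock *partition_lock =
 *          &MainLWLockArray[BUFFER_MAPPING_LWLOCK_OFFSET + i].lock;
 */
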
typedef enum LWLockMode
{
    LW_EXCLUSIVE,
    LW_SHARED,
    LW_WAIT_UNTIL_FREE          /* A special mode used in PGPROC->lwlockMode,
                                 * when waiting for lock to become free. Not
                                 * to be used as LWLockAcquire argument */
} LWLockMode;


#ifdef LOCK_DEBUG
extern bool Trace_lwlocks;
#endif

extern bool LWLockAcquire(LWLock *lock, LWLockMode mode);
extern bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode);
extern bool LWLockAcquireOrWait(LWLock *lock, LWLockMode mode);
extern void LWLockRelease(LWLock *lock);
extern void LWLockReleaseClearVar(LWLock *lock, uint64 *valptr, uint64 val);
extern void LWLockReleaseAll(void);
extern bool LWLockHeldByMe(LWLock *lock);
extern bool LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval);
extern void LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 value);

extern Size LWLockShmemSize(void);
extern void CreateLWLocks(void);
extern void InitLWLockAccess(void);

extern const char *GetLWLockIdentifier(uint8 classId, uint16 eventId);
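
/*
 * A brief usage sketch for the core API above. LWLockAcquire sleeps until
 * the lock is available in the requested mode, and the matching
 * LWLockRelease must be reached on the same code path; LWLockReleaseAll
 * exists so that error recovery can drop any locks a backend still holds.
 *
 *      LWLockAcquire(lock, LW_SHARED);
 *      ... read the structure the lock protects ...
 *      LWLockRelease(lock);
 *
 *      if (LWLockConditionalAcquire(lock, LW_EXCLUSIVE))
 *      {
 *          ... modify the structure the lock protects ...
 *          LWLockRelease(lock);
 *      }
 */
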
/*
 * Extensions (or core code) can obtain LWLocks by calling
 * RequestNamedLWLockTranche() during postmaster startup. Subsequently,
 * call GetNamedLWLockTranche() to obtain a pointer to an array containing
 * the requested number of LWLocks.
 */
extern void RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks);
extern LWLockPadded *GetNamedLWLockTranche(const char *tranche_name);
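
/*
 * A usage sketch for an extension loaded via shared_preload_libraries; the
 * tranche name "my_extension" and the lock count are hypothetical. The locks
 * are requested while the postmaster is starting up and fetched once shared
 * memory has been initialized:
 *
 *      void
 *      _PG_init(void)
 *      {
 *          RequestNamedLWLockTranche("my_extension", 4);
 *      }
 *
 *      ... later, once shared memory exists ...
 *      LWLockPadded *locks = GetNamedLWLockTranche("my_extension");
 *
 *      LWLockAcquire(&locks[0].lock, LW_EXCLUSIVE);
 *      ... touch the extension's shared state ...
 *      LWLockRelease(&locks[0].lock);
 */
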
/*
 * There is another, more flexible method of obtaining lwlocks. First, call
 * LWLockNewTrancheId just once to obtain a tranche ID; this allocates from
 * a shared counter. Next, each individual process using the tranche should
 * call LWLockRegisterTranche() to associate that tranche ID with appropriate
 * metadata. Finally, LWLockInitialize should be called just once per lwlock,
 * passing the tranche ID as an argument.
 *
 * It may seem strange that each process using the tranche must register it
 * separately, but dynamic shared memory segments aren't guaranteed to be
 * mapped at the same address in all coordinating backends, so storing the
 * registration in the main shared memory segment wouldn't work for that case.
 */
extern int  LWLockNewTrancheId(void);
extern void LWLockRegisterTranche(int tranche_id, LWLockTranche *tranche);
extern void LWLockInitialize(LWLock *lock, int tranche_id);
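
/*
 * A minimal sketch of that sequence. The names my_tranche, my_tranche_id and
 * my_locks are hypothetical; my_locks is assumed to be an LWLock array living
 * in a shared memory segment mapped by every participating process:
 *
 *      static LWLockTranche my_tranche;
 *      static int  my_tranche_id;
 *
 *      my_tranche_id = LWLockNewTrancheId();   -- once, in one process
 *      my_tranche.name = "my_tranche";
 *      my_tranche.array_base = my_locks;
 *      my_tranche.array_stride = sizeof(LWLock);
 *      LWLockRegisterTranche(my_tranche_id, &my_tranche);  -- in each process
 *      LWLockInitialize(&my_locks[0], my_tranche_id);      -- once per lock
 */
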
/*
 * We reserve a few predefined tranche IDs. A call to LWLockNewTrancheId
 * will never return a value less than LWTRANCHE_FIRST_USER_DEFINED.
 */
typedef enum BuiltinTrancheIds
{
    LWTRANCHE_MAIN,
    LWTRANCHE_CLOG_BUFFERS,
    LWTRANCHE_COMMITTS_BUFFERS,
    LWTRANCHE_SUBTRANS_BUFFERS,
    LWTRANCHE_MXACTOFFSET_BUFFERS,
    LWTRANCHE_MXACTMEMBER_BUFFERS,
    LWTRANCHE_ASYNC_BUFFERS,
    LWTRANCHE_OLDSERXID_BUFFERS,
    LWTRANCHE_WAL_INSERT,
    LWTRANCHE_BUFFER_CONTENT,
    LWTRANCHE_BUFFER_IO_IN_PROGRESS,
    LWTRANCHE_REPLICATION_ORIGIN,
    LWTRANCHE_REPLICATION_SLOT_IO_IN_PROGRESS,
    LWTRANCHE_PROC,
    LWTRANCHE_BUFFER_MAPPING,
    LWTRANCHE_LOCK_MANAGER,
    LWTRANCHE_PREDICATE_LOCK_MANAGER,
    LWTRANCHE_FIRST_USER_DEFINED
} BuiltinTrancheIds;

/*
 * Prior to PostgreSQL 9.4, we used an enum type called LWLockId to refer
 * to LWLocks. New code should instead use LWLock *. However, for the
 * convenience of third-party code, we include the following typedef.
 */
typedef LWLock *LWLockId;

#endif   /* LWLOCK_H */