// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc.  All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <google/protobuf/arena.h>

#ifdef ADDRESS_SANITIZER
#include <sanitizer/asan_interface.h>
#endif
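
// Typical usage (a sketch only; the templated Create/CreateMessage entry
// points are declared in arena.h, and MyMessage stands in for any
// arena-enabled generated message type):
//
//   google::protobuf::Arena arena;
//   MyMessage* msg =
//       google::protobuf::Arena::CreateMessage<MyMessage>(&arena);
//   // ... use msg; its memory is reclaimed when `arena` is reset or
//   // destroyed.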

namespace google {
namespace protobuf {

google::protobuf::internal::SequenceNumber Arena::lifecycle_id_generator_;

#ifdef PROTOBUF_USE_DLLS
Arena::ThreadCache& Arena::thread_cache() {
  static GOOGLE_THREAD_LOCAL ThreadCache thread_cache_ = { -1, NULL };
  return thread_cache_;
}
#elif defined(GOOGLE_PROTOBUF_NO_THREADLOCAL)
Arena::ThreadCache& Arena::thread_cache() {
  static internal::ThreadLocalStorage<ThreadCache>* thread_cache_ =
      new internal::ThreadLocalStorage<ThreadCache>();
  return *thread_cache_->Get();
}
#else
GOOGLE_THREAD_LOCAL Arena::ThreadCache Arena::thread_cache_ = { -1, NULL };
#endif
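
// Initializes a freshly constructed arena: assigns a new lifecycle id and
// clears the block list, the hint, and the cleanup list. If the caller
// supplied an initial block through ArenaOptions, that block is linked in as
// unowned so the arena never frees it.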
void Arena::Init() {
  lifecycle_id_ = lifecycle_id_generator_.GetNext();
  blocks_ = 0;
  hint_ = 0;
  owns_first_block_ = true;
  cleanup_list_ = 0;

  if (options_.initial_block != NULL && options_.initial_block_size > 0) {
    // Add first unowned block to list.
    Block* first_block = reinterpret_cast<Block*>(options_.initial_block);
    first_block->size = options_.initial_block_size;
    first_block->pos = kHeaderSize;
    first_block->next = NULL;
    // Thread which calls Init() owns the first block. This allows the
    // single-threaded case to allocate on the first block without taking any
    // locks.
    first_block->owner = &thread_cache();
    SetThreadCacheBlock(first_block);
    AddBlockInternal(first_block);
    owns_first_block_ = false;
  }

  // Call the initialization hook.
  if (options_.on_arena_init != NULL) {
    hooks_cookie_ = options_.on_arena_init(this);
  } else {
    hooks_cookie_ = NULL;
  }
}

Arena::~Arena() {
  uint64 space_allocated = ResetInternal();

  // Call the destruction hook.
  if (options_.on_arena_destruction != NULL) {
    options_.on_arena_destruction(this, hooks_cookie_, space_allocated);
  }
}

uint64 Arena::Reset() {
  // Invalidate any ThreadCaches pointing to any blocks we just destroyed.
  lifecycle_id_ = lifecycle_id_generator_.GetNext();
  return ResetInternal();
}

uint64 Arena::ResetInternal() {
  CleanupList();
  uint64 space_allocated = FreeBlocks();

  // Call the reset hook.
  if (options_.on_arena_reset != NULL) {
    options_.on_arena_reset(this, hooks_cookie_, space_allocated);
  }
  return space_allocated;
}
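
// Grows the arena by one heap block. The first block a thread allocates is
// start_block_size bytes; each later block doubles the size of that thread's
// most recent block, capped at max_block_size, and a request too large for
// the capped size simply gets a block sized to fit it. For example, with a
// 256-byte start size and an 8 KB cap (illustrative values only), successive
// blocks would be 256, 512, 1024, ... bytes.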
Arena::Block* Arena::NewBlock(void* me, Block* my_last_block, size_t n,
                              size_t start_block_size, size_t max_block_size) {
  size_t size;
  if (my_last_block != NULL) {
    // Double the current block size, up to a limit.
    size = 2 * (my_last_block->size);
    if (size > max_block_size) size = max_block_size;
  } else {
    size = start_block_size;
  }
  if (n > size - kHeaderSize) {
    // TODO(sanjay): Check if n + kHeaderSize would overflow
    size = kHeaderSize + n;
  }

  Block* b = reinterpret_cast<Block*>(options_.block_alloc(size));
  b->pos = kHeaderSize + n;
  b->size = size;
  if (b->avail() == 0) {
    // Do not attempt to reuse this block.
    b->owner = NULL;
  } else {
    b->owner = me;
  }
#ifdef ADDRESS_SANITIZER
  // Poison the rest of the block for ASAN. It was unpoisoned by the underlying
  // malloc but it's not yet usable until we return it as part of an allocation.
  ASAN_POISON_MEMORY_REGION(
      reinterpret_cast<char*>(b) + b->pos, b->size - b->pos);
#endif
  return b;
}
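
// Prepends a block to the arena's block list. AddBlock() takes blocks_lock_;
// AddBlockInternal() is used where the caller already has exclusive access
// (Init() and FreeBlocks()). The Release_Store of blocks_/hint_ pairs with
// the Acquire_Load in FindBlock() and AllocateAligned(), so other threads
// observe a fully initialized block.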
void Arena::AddBlock(Block* b) {
  MutexLock l(&blocks_lock_);
  AddBlockInternal(b);
}

void Arena::AddBlockInternal(Block* b) {
  b->next = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
  google::protobuf::internal::Release_Store(&blocks_, reinterpret_cast<google::protobuf::internal::AtomicWord>(b));
  if (b->avail() != 0) {
    // Direct future allocations to this block.
    google::protobuf::internal::Release_Store(&hint_, reinterpret_cast<google::protobuf::internal::AtomicWord>(b));
  }
}
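
// Registers a cleanup callback (typically a destructor call) for an object
// living on the arena. The Node itself is allocated from arena memory and is
// pushed onto the lock-free, singly linked cleanup list with an atomic
// exchange.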
void Arena::AddListNode(void* elem, void (*cleanup)(void*)) {
  Node* node = reinterpret_cast<Node*>(AllocateAligned(sizeof(Node)));
  node->elem = elem;
  node->cleanup = cleanup;
  node->next = reinterpret_cast<Node*>(
      google::protobuf::internal::NoBarrier_AtomicExchange(&cleanup_list_,
          reinterpret_cast<google::protobuf::internal::AtomicWord>(node)));
}
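
// Rounds the request up to an 8-byte boundary, fires the allocation hook if
// one is installed, then tries two lock-free fast paths before falling back
// to SlowAlloc(): (1) the block this thread last used in this arena, and
// (2) the arena-wide hint block, if this thread owns it. The rounding
// `(n + 7) & -8` clears the low three bits after adding 7, e.g. 13 -> 16 and
// 16 -> 16.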
void* Arena::AllocateAligned(const std::type_info* allocated, size_t n) {
  // Align n to next multiple of 8 (from Hacker's Delight, Chapter 3.)
  n = (n + 7) & -8;

  // Monitor allocation if needed.
  if (GOOGLE_PREDICT_FALSE(hooks_cookie_ != NULL) &&
      options_.on_arena_allocation != NULL) {
    options_.on_arena_allocation(allocated, n, hooks_cookie_);
  }

  // If this thread already owns a block in this arena then try to use that.
  // This fast path optimizes the case where multiple threads allocate from the
  // same arena.
  if (thread_cache().last_lifecycle_id_seen == lifecycle_id_ &&
      thread_cache().last_block_used_ != NULL) {
    if (thread_cache().last_block_used_->avail() < n) {
      return SlowAlloc(n);
    }
    return AllocFromBlock(thread_cache().last_block_used_, n);
  }

  // Check whether we own the last accessed block on this arena.
  // This fast path optimizes the case where a single thread uses multiple
  // arenas.
  void* me = &thread_cache();
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::Acquire_Load(&hint_));
  if (!b || b->owner != me || b->avail() < n) {
    return SlowAlloc(n);
  }
  return AllocFromBlock(b, n);
}

void* Arena::AllocFromBlock(Block* b, size_t n) {
  size_t p = b->pos;
  b->pos = p + n;
#ifdef ADDRESS_SANITIZER
  ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<char*>(b) + p, n);
#endif
  return reinterpret_cast<char*>(b) + p;
}
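
// Slow path: look for a block already owned by this thread; if it has room,
// cache it as this thread's block and allocate from it, otherwise allocate a
// fresh block via NewBlock() and add it to the arena's block list.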
void* Arena::SlowAlloc(size_t n) {
  void* me = &thread_cache();
  Block* b = FindBlock(me);  // Find block owned by me.
  // See if allocation fits in my latest block.
  if (b != NULL && b->avail() >= n) {
    SetThreadCacheBlock(b);
    google::protobuf::internal::NoBarrier_Store(&hint_, reinterpret_cast<google::protobuf::internal::AtomicWord>(b));
    return AllocFromBlock(b, n);
  }
  b = NewBlock(me, b, n, options_.start_block_size, options_.max_block_size);
  AddBlock(b);
  if (b->owner == me) {  // If this block can be reused (see NewBlock()).
    SetThreadCacheBlock(b);
  }
  return reinterpret_cast<char*>(b) + kHeaderSize;
}
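
// SpaceAllocated() reports the total size of all blocks held by the arena,
// including headers and unused tail space; SpaceUsed() reports only the bytes
// handed out past each block header. Both walk the block list without
// locking, so the results are approximate if other threads are allocating
// concurrently.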
uint64 Arena::SpaceAllocated() const {
  uint64 space_allocated = 0;
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
  while (b != NULL) {
    space_allocated += (b->size);
    b = b->next;
  }
  return space_allocated;
}

uint64 Arena::SpaceUsed() const {
  uint64 space_used = 0;
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
  while (b != NULL) {
    space_used += (b->pos - kHeaderSize);
    b = b->next;
  }
  return space_used;
}
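
// Returns all heap blocks to block_dealloc and reports how many bytes had
// been allocated. A first block supplied by the user through ArenaOptions is
// never freed; it is re-linked into the now-empty block list so the arena can
// be reused after Reset().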
uint64 Arena::FreeBlocks() {
  uint64 space_allocated = 0;
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::NoBarrier_Load(&blocks_));
  Block* first_block = NULL;
  while (b != NULL) {
    space_allocated += (b->size);
    Block* next = b->next;
    if (next != NULL) {
      options_.block_dealloc(b, b->size);
    } else {
      if (owns_first_block_) {
        options_.block_dealloc(b, b->size);
      } else {
        // User passed in the first block, skip free'ing the memory.
        first_block = b;
      }
    }
    b = next;
  }
  blocks_ = 0;
  hint_ = 0;
  if (!owns_first_block_) {
    // Make the first block that was passed in through ArenaOptions
    // available for reuse.
    first_block->pos = kHeaderSize;
    // Thread which calls Reset() owns the first block. This allows the
    // single-threaded case to allocate on the first block without taking any
    // locks.
    first_block->owner = &thread_cache();
    SetThreadCacheBlock(first_block);
    AddBlockInternal(first_block);
  }
  return space_allocated;
}
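
// Runs every registered cleanup callback (most recently added first, since
// nodes are prepended) and then clears the list. The nodes themselves live in
// arena memory and are reclaimed when the blocks are freed.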
void Arena::CleanupList() {
  Node* head =
      reinterpret_cast<Node*>(google::protobuf::internal::NoBarrier_Load(&cleanup_list_));
  while (head != NULL) {
    head->cleanup(head->elem);
    head = head->next;
  }
  cleanup_list_ = 0;
}

Arena::Block* Arena::FindBlock(void* me) {
  // TODO(sanjay): We might want to keep a separate list with one
  // entry per thread.
  Block* b = reinterpret_cast<Block*>(google::protobuf::internal::Acquire_Load(&blocks_));
  while (b != NULL && b->owner != me) {
    b = b->next;
  }
  return b;
}

}  // namespace protobuf
}  // namespace google