Quuxplusone created this revision.
Quuxplusone added a reviewer: EricWF.
Herald added a subscriber: cfe-commits.
Split out from https://reviews.llvm.org/D47090.
This patch is based on top of (depends on) https://reviews.llvm.org/D47111, but
I'm posting it now for review.
Repository:
rCXX libc++
https://reviews.llvm.org/D47358
Files:
include/experimental/memory_resource
src/experimental/memory_resource.cpp
test/std/experimental/memory/memory.resource.pool/synchronized_pool.pass.cpp
test/std/experimental/memory/memory.resource.pool/unsynchronized_pool.pass.cpp
test/support/count_new.hpp
Index: test/support/count_new.hpp
===================================================================
--- test/support/count_new.hpp
+++ test/support/count_new.hpp
@@ -211,6 +211,11 @@
return disable_checking || n != delete_called;
}
+ bool checkDeleteCalledGreaterThan(int n) const
+ {
+ return disable_checking || delete_called > n;
+ }
+
bool checkAlignedNewCalledEq(int n) const
{
return disable_checking || n == aligned_new_called;
Index: test/std/experimental/memory/memory.resource.pool/unsynchronized_pool.pass.cpp
===================================================================
--- /dev/null
+++ test/std/experimental/memory/memory.resource.pool/unsynchronized_pool.pass.cpp
@@ -0,0 +1,203 @@
+//===----------------------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++98, c++03
+
+// <experimental/memory_resource>
+
+// class unsynchronized_pool_resource
+
+#include <experimental/memory_resource>
+#include <new>
+#include <type_traits>
+#include <cassert>
+
+#include "count_new.hpp"
+
+struct assert_on_compare : public std::experimental::pmr::memory_resource
+{
+protected:
+    virtual void * do_allocate(size_t, size_t)
+    { assert(false); return nullptr; }
+
+    virtual void do_deallocate(void *, size_t, size_t)
+    { assert(false); }
+
+    virtual bool do_is_equal(std::experimental::pmr::memory_resource const &) const noexcept
+    { assert(false); return true; }
+};
+
+static bool is_aligned_to(void *p, size_t alignment)
+{
+ void *p2 = p;
+ size_t space = 1;
+ void *result = std::align(alignment, 1, p2, space);
+ return (result == p);
+}
+
+void test_construction_with_default_resource()
+{
+ std::experimental::pmr::memory_resource *expected = std::experimental::pmr::null_memory_resource();
+ std::experimental::pmr::set_default_resource(expected);
+ {
+ std::experimental::pmr::pool_options opts { 0, 0 };
+ std::experimental::pmr::unsynchronized_pool_resource r1;
+ std::experimental::pmr::unsynchronized_pool_resource r2(opts);
+ assert(r1.upstream_resource() == expected);
+ assert(r2.upstream_resource() == expected);
+ }
+
+ expected = std::experimental::pmr::new_delete_resource();
+ std::experimental::pmr::set_default_resource(expected);
+ {
+ std::experimental::pmr::pool_options opts { 1024, 2048 };
+ std::experimental::pmr::unsynchronized_pool_resource r1;
+ std::experimental::pmr::unsynchronized_pool_resource r2(opts);
+ assert(r1.upstream_resource() == expected);
+ assert(r2.upstream_resource() == expected);
+ }
+}
+
+void test_construction_does_not_allocate()
+{
+ // Constructing a unsynchronized_pool_resource should not cause allocations
+ // by itself; the resource should wait to allocate until an allocation is
+ // requested.
+
+ globalMemCounter.reset();
+ std::experimental::pmr::set_default_resource(std::experimental::pmr::new_delete_resource());
+
+ std::experimental::pmr::unsynchronized_pool_resource r1;
+ assert(globalMemCounter.checkNewCalledEq(0));
+
+ std::experimental::pmr::unsynchronized_pool_resource r2(std::experimental::pmr::pool_options{ 1024, 2048 });
+ assert(globalMemCounter.checkNewCalledEq(0));
+
+ std::experimental::pmr::unsynchronized_pool_resource r3(std::experimental::pmr::pool_options{ 1024, 2048 }, std::experimental::pmr::new_delete_resource());
+ assert(globalMemCounter.checkNewCalledEq(0));
+}
+
+void test_equality()
+{
+ // Same object
+ {
+ std::experimental::pmr::unsynchronized_pool_resource r1;
+ std::experimental::pmr::unsynchronized_pool_resource r2;
+ assert(r1 == r1);
+ assert(r1 != r2);
+
+ std::experimental::pmr::memory_resource & p1 = r1;
+ std::experimental::pmr::memory_resource & p2 = r2;
+ assert(p1 == p1);
+ assert(p1 != p2);
+ assert(p1 == r1);
+ assert(r1 == p1);
+ assert(p1 != r2);
+ assert(r2 != p1);
+ }
+ // Different types
+ {
+ std::experimental::pmr::unsynchronized_pool_resource unsync1;
+ std::experimental::pmr::memory_resource & r1 = unsync1;
+ assert_on_compare c;
+ std::experimental::pmr::memory_resource & r2 = c;
+ assert(r1 != r2);
+ assert(!(r1 == r2));
+ }
+}
+
+void test_allocate_deallocate()
+{
+ {
+ globalMemCounter.reset();
+
+ std::experimental::pmr::unsynchronized_pool_resource unsync1(std::experimental::pmr::new_delete_resource());
+ std::experimental::pmr::memory_resource & r1 = unsync1;
+
+ void *ret = r1.allocate(50);
+ assert(ret);
+ assert(globalMemCounter.checkNewCalledGreaterThan(0));
+ assert(globalMemCounter.checkDeleteCalledEq(0));
+
+ r1.deallocate(ret, 50);
+ unsync1.release();
+ assert(globalMemCounter.checkDeleteCalledGreaterThan(0));
+ assert(globalMemCounter.checkOutstandingNewEq(0));
+
+ globalMemCounter.reset();
+
+ ret = r1.allocate(500);
+ assert(ret);
+ assert(globalMemCounter.checkNewCalledGreaterThan(0));
+ assert(globalMemCounter.checkDeleteCalledEq(0));
+
+ // Check that the destructor calls release()
+ }
+ assert(globalMemCounter.checkDeleteCalledGreaterThan(0));
+ assert(globalMemCounter.checkOutstandingNewEq(0));
+}
+
+void test_overaligned_single_allocation()
+{
+ globalMemCounter.reset();
+ std::experimental::pmr::pool_options opts { 1, 1024 };
+ std::experimental::pmr::unsynchronized_pool_resource unsync1(opts, std::experimental::pmr::new_delete_resource());
+ std::experimental::pmr::memory_resource & r1 = unsync1;
+
+ constexpr size_t big_alignment = 8 * alignof(std::max_align_t);
+    static_assert(big_alignment > 4, "");
+
+ assert(globalMemCounter.checkNewCalledEq(0));
+
+ void *ret = r1.allocate(2048, big_alignment);
+ assert(ret != nullptr);
+ assert(is_aligned_to(ret, big_alignment));
+ assert(globalMemCounter.checkNewCalledGreaterThan(0));
+
+ ret = r1.allocate(16, 4);
+ assert(ret != nullptr);
+ assert(is_aligned_to(ret, 4));
+ assert(globalMemCounter.checkNewCalledGreaterThan(1));
+}
+
+void test_reuse()
+{
+ globalMemCounter.reset();
+ std::experimental::pmr::pool_options opts { 1, 256 };
+ std::experimental::pmr::unsynchronized_pool_resource unsync1(opts, std::experimental::pmr::new_delete_resource());
+ std::experimental::pmr::memory_resource & r1 = unsync1;
+
+ void *ret = r1.allocate(8);
+ assert(ret != nullptr);
+ assert(is_aligned_to(ret, 8));
+ assert(globalMemCounter.checkNewCalledGreaterThan(0));
+ int new_called = globalMemCounter.new_called;
+
+ // After deallocation, the pool for 8-byte blocks should have at least one vacancy.
+ r1.deallocate(ret, 8);
+ assert(globalMemCounter.new_called == new_called);
+ assert(globalMemCounter.checkDeleteCalledEq(0));
+
+ // This should return an existing block from the pool: no new allocations.
+ ret = r1.allocate(8);
+ assert(ret != nullptr);
+ assert(is_aligned_to(ret, 8));
+ assert(globalMemCounter.new_called == new_called);
+ assert(globalMemCounter.checkDeleteCalledEq(0));
+}
+
+int main()
+{
+ test_construction_with_default_resource();
+ test_construction_does_not_allocate();
+ test_equality();
+ test_allocate_deallocate();
+ test_overaligned_single_allocation();
+ test_reuse();
+}
Index: test/std/experimental/memory/memory.resource.pool/synchronized_pool.pass.cpp
===================================================================
--- /dev/null
+++ test/std/experimental/memory/memory.resource.pool/synchronized_pool.pass.cpp
@@ -0,0 +1,203 @@
+//===----------------------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++98, c++03
+
+// <experimental/memory_resource>
+
+// class synchronized_pool_resource
+
+#include <experimental/memory_resource>
+#include <new>
+#include <type_traits>
+#include <cassert>
+
+#include "count_new.hpp"
+
+struct assert_on_compare : public std::experimental::pmr::memory_resource
+{
+protected:
+    virtual void * do_allocate(size_t, size_t)
+    { assert(false); return nullptr; }
+
+    virtual void do_deallocate(void *, size_t, size_t)
+    { assert(false); }
+
+    virtual bool do_is_equal(std::experimental::pmr::memory_resource const &) const noexcept
+    { assert(false); return true; }
+};
+
+static bool is_aligned_to(void *p, size_t alignment)
+{
+ void *p2 = p;
+ size_t space = 1;
+ void *result = std::align(alignment, 1, p2, space);
+ return (result == p);
+}
+
+void test_construction_with_default_resource()
+{
+ std::experimental::pmr::memory_resource *expected = std::experimental::pmr::null_memory_resource();
+ std::experimental::pmr::set_default_resource(expected);
+ {
+ std::experimental::pmr::pool_options opts { 0, 0 };
+ std::experimental::pmr::synchronized_pool_resource r1;
+ std::experimental::pmr::synchronized_pool_resource r2(opts);
+ assert(r1.upstream_resource() == expected);
+ assert(r2.upstream_resource() == expected);
+ }
+
+ expected = std::experimental::pmr::new_delete_resource();
+ std::experimental::pmr::set_default_resource(expected);
+ {
+ std::experimental::pmr::pool_options opts { 1024, 2048 };
+ std::experimental::pmr::synchronized_pool_resource r1;
+ std::experimental::pmr::synchronized_pool_resource r2(opts);
+ assert(r1.upstream_resource() == expected);
+ assert(r2.upstream_resource() == expected);
+ }
+}
+
+void test_construction_does_not_allocate()
+{
+ // Constructing a synchronized_pool_resource should not cause allocations
+ // by itself; the resource should wait to allocate until an allocation is
+ // requested.
+
+ globalMemCounter.reset();
+ std::experimental::pmr::set_default_resource(std::experimental::pmr::new_delete_resource());
+
+ std::experimental::pmr::synchronized_pool_resource r1;
+ assert(globalMemCounter.checkNewCalledEq(0));
+
+ std::experimental::pmr::synchronized_pool_resource r2(std::experimental::pmr::pool_options{ 1024, 2048 });
+ assert(globalMemCounter.checkNewCalledEq(0));
+
+ std::experimental::pmr::synchronized_pool_resource r3(std::experimental::pmr::pool_options{ 1024, 2048 }, std::experimental::pmr::new_delete_resource());
+ assert(globalMemCounter.checkNewCalledEq(0));
+}
+
+void test_equality()
+{
+ // Same object
+ {
+ std::experimental::pmr::synchronized_pool_resource r1;
+ std::experimental::pmr::synchronized_pool_resource r2;
+ assert(r1 == r1);
+ assert(r1 != r2);
+
+ std::experimental::pmr::memory_resource & p1 = r1;
+ std::experimental::pmr::memory_resource & p2 = r2;
+ assert(p1 == p1);
+ assert(p1 != p2);
+ assert(p1 == r1);
+ assert(r1 == p1);
+ assert(p1 != r2);
+ assert(r2 != p1);
+ }
+ // Different types
+ {
+ std::experimental::pmr::synchronized_pool_resource sync1;
+ std::experimental::pmr::memory_resource & r1 = sync1;
+ assert_on_compare c;
+ std::experimental::pmr::memory_resource & r2 = c;
+ assert(r1 != r2);
+ assert(!(r1 == r2));
+ }
+}
+
+void test_allocate_deallocate()
+{
+ {
+ globalMemCounter.reset();
+
+ std::experimental::pmr::synchronized_pool_resource sync1(std::experimental::pmr::new_delete_resource());
+ std::experimental::pmr::memory_resource & r1 = sync1;
+
+ void *ret = r1.allocate(50);
+ assert(ret);
+ assert(globalMemCounter.checkNewCalledGreaterThan(0));
+ assert(globalMemCounter.checkDeleteCalledEq(0));
+
+ r1.deallocate(ret, 50);
+ sync1.release();
+ assert(globalMemCounter.checkDeleteCalledGreaterThan(0));
+ assert(globalMemCounter.checkOutstandingNewEq(0));
+
+ globalMemCounter.reset();
+
+ ret = r1.allocate(500);
+ assert(ret);
+ assert(globalMemCounter.checkNewCalledGreaterThan(0));
+ assert(globalMemCounter.checkDeleteCalledEq(0));
+
+ // Check that the destructor calls release()
+ }
+ assert(globalMemCounter.checkDeleteCalledGreaterThan(0));
+ assert(globalMemCounter.checkOutstandingNewEq(0));
+}
+
+void test_overaligned_single_allocation()
+{
+ globalMemCounter.reset();
+ std::experimental::pmr::pool_options opts { 1, 1024 };
+ std::experimental::pmr::synchronized_pool_resource sync1(opts, std::experimental::pmr::new_delete_resource());
+ std::experimental::pmr::memory_resource & r1 = sync1;
+
+ constexpr size_t big_alignment = 8 * alignof(std::max_align_t);
+    static_assert(big_alignment > 4, "");
+
+ assert(globalMemCounter.checkNewCalledEq(0));
+
+ void *ret = r1.allocate(2048, big_alignment);
+ assert(ret != nullptr);
+ assert(is_aligned_to(ret, big_alignment));
+ assert(globalMemCounter.checkNewCalledGreaterThan(0));
+
+ ret = r1.allocate(16, 4);
+ assert(ret != nullptr);
+ assert(is_aligned_to(ret, 4));
+ assert(globalMemCounter.checkNewCalledGreaterThan(1));
+}
+
+void test_reuse()
+{
+ globalMemCounter.reset();
+ std::experimental::pmr::pool_options opts { 1, 256 };
+ std::experimental::pmr::synchronized_pool_resource sync1(opts, std::experimental::pmr::new_delete_resource());
+ std::experimental::pmr::memory_resource & r1 = sync1;
+
+ void *ret = r1.allocate(8);
+ assert(ret != nullptr);
+ assert(is_aligned_to(ret, 8));
+ assert(globalMemCounter.checkNewCalledGreaterThan(0));
+ int new_called = globalMemCounter.new_called;
+
+ // After deallocation, the pool for 8-byte blocks should have at least one vacancy.
+ r1.deallocate(ret, 8);
+ assert(globalMemCounter.new_called == new_called);
+ assert(globalMemCounter.checkDeleteCalledEq(0));
+
+ // This should return an existing block from the pool: no new allocations.
+ ret = r1.allocate(8);
+ assert(ret != nullptr);
+ assert(is_aligned_to(ret, 8));
+ assert(globalMemCounter.new_called == new_called);
+ assert(globalMemCounter.checkDeleteCalledEq(0));
+}
+
+int main()
+{
+ test_construction_with_default_resource();
+ test_construction_does_not_allocate();
+ test_equality();
+ test_allocate_deallocate();
+ test_overaligned_single_allocation();
+ test_reuse();
+}
Index: src/experimental/memory_resource.cpp
===================================================================
--- src/experimental/memory_resource.cpp
+++ src/experimental/memory_resource.cpp
@@ -252,4 +252,327 @@
return try_allocate_from_chunk(__original_.__next_, bytes, align);
}
+// 23.12.5, mem.res.pool
+
+struct __pool_resource_adhoc_pool_header {
+ size_t bytes;
+ size_t alignment;
+ void *allocation;
+ __pool_resource_adhoc_pool_header *next;
+};
+
+void __pool_resource_adhoc_pool::release(memory_resource *upstream)
+{
+ while (__first_ != nullptr) {
+ __header *next = __first_->next;
+ upstream->deallocate(__first_->allocation, __first_->bytes, __first_->alignment);
+ __first_ = next;
+ }
+}
+
+void *__pool_resource_adhoc_pool::do_allocate(memory_resource *upstream, size_t bytes, size_t align)
+{
+ const size_t header_size = sizeof(__header);
+ const size_t header_align = alignof(__header);
+
+ if (align < header_align) {
+ align = header_align;
+ }
+
+ size_t aligned_capacity = roundup(bytes, header_align) + header_size;
+
+ void *result = upstream->allocate(aligned_capacity, align);
+
+ __header *h = (__header *)((char *)result + aligned_capacity - header_size);
+ h->allocation = result;
+ h->bytes = aligned_capacity;
+ h->alignment = align;
+ h->next = __first_;
+ __first_ = h;
+ return result;
+}
+
+void __pool_resource_adhoc_pool::do_deallocate(memory_resource *upstream, void *p, size_t bytes, size_t align)
+{
+ _LIBCPP_ASSERT(__first_ != nullptr, "deallocating a block that was not allocated with this allocator");
+    if (__first_->allocation == p) {
+        __header *next = __first_->next;
+        upstream->deallocate(p, __first_->bytes, __first_->alignment);
+        __first_ = next;
+ } else {
+ for (__header *h = __first_; h != nullptr; h = h->next) {
+            if (h->next != nullptr && h->next->allocation == p) {
+                __header *next = h->next->next;
+                upstream->deallocate(p, h->next->bytes, h->next->alignment);
+                h->next = next;
+                return;
+            }
+ }
+ _LIBCPP_ASSERT(false, "deallocating a block that was not allocated with this allocator");
+ }
+}
+
+struct __pool_resource_vacancy_header {
+ __pool_resource_vacancy_header *next_vacancy;
+};
+
+struct __pool_resource_fixed_pool_header {
+ __pool_resource_vacancy_header *first_vacancy;
+ size_t bytes;
+ size_t alignment;
+ void *allocation;
+ __pool_resource_fixed_pool_header *next;
+
+ bool allocation_contains(const char *p) const {
+ // TODO: This part technically relies on undefined behavior.
+ return allocation <= p && p < ((char*)allocation + bytes);
+ }
+};
+
+class __pool_resource_fixed_pool {
+ using __header = __pool_resource_fixed_pool_header;
+ __header *__first_;
+
+public:
+ explicit __pool_resource_fixed_pool() : __first_(nullptr) {}
+ void release(memory_resource *upstream);
+ void *try_allocate_from_vacancies();
+ void *do_allocate_with_new_chunk(memory_resource *upstream, size_t block_size, size_t chunk_size);
+ void do_evacuate(void *__p);
+
+ size_t previous_chunk_size_in_bytes() const {
+ return __first_ ? __first_->bytes : 0;
+ }
+
+ static const size_t __default_alignment = alignof(max_align_t);
+};
+
+void __pool_resource_fixed_pool::release(memory_resource *upstream)
+{
+ while (__first_ != nullptr) {
+ __header *next = __first_->next;
+ upstream->deallocate(__first_->allocation, __first_->bytes, __first_->alignment);
+ __first_ = next;
+ }
+}
+
+void *__pool_resource_fixed_pool::try_allocate_from_vacancies()
+{
+ for (__header *h = __first_; h != nullptr; h = h->next) {
+ if (h->first_vacancy != nullptr) {
+ void *result = h->first_vacancy;
+ h->first_vacancy = h->first_vacancy->next_vacancy;
+ return result;
+ }
+ }
+ return nullptr;
+}
+
+void *__pool_resource_fixed_pool::do_allocate_with_new_chunk(memory_resource *upstream, size_t block_size, size_t chunk_size)
+{
+ static_assert(__default_alignment >= alignof(std::max_align_t), "");
+ static_assert(__default_alignment >= alignof(__header), "");
+ static_assert(__default_alignment >= alignof(__pool_resource_vacancy_header), "");
+
+ const size_t header_size = sizeof(__header);
+
+ size_t aligned_capacity = roundup(chunk_size, alignof(__header)) + header_size;
+
+ void *result = upstream->allocate(aligned_capacity, __default_alignment);
+
+ __header *h = (__header *)((char *)result + aligned_capacity - header_size);
+ h->allocation = result;
+ h->bytes = aligned_capacity;
+ h->alignment = __default_alignment;
+ h->next = __first_;
+ __first_ = h;
+
+ if (chunk_size > block_size) {
+ auto vacancy_header = [&](size_t i) -> __pool_resource_vacancy_header& {
+ return *(__pool_resource_vacancy_header *)((char *)(result) + (i * block_size));
+ };
+ h->first_vacancy = &vacancy_header(1);
+ for (size_t i = 1; i != chunk_size / block_size; ++i) {
+ if (i + 1 == chunk_size / block_size) {
+ vacancy_header(i).next_vacancy = nullptr;
+ } else {
+ vacancy_header(i).next_vacancy = &vacancy_header(i+1);
+ }
+ }
+ } else {
+ h->first_vacancy = nullptr;
+ }
+
+ return result;
+}
+
+void __pool_resource_fixed_pool::do_evacuate(void *p)
+{
+ _LIBCPP_ASSERT(__first_ != nullptr, "deallocating a block that was not allocated with this allocator");
+ for (__header *h = __first_; h != nullptr; h = h->next) {
+ if (h->allocation_contains((char*)p)) {
+ __pool_resource_vacancy_header *v = (__pool_resource_vacancy_header *)(p);
+ v->next_vacancy = h->first_vacancy;
+ h->first_vacancy = v;
+ return;
+ }
+ }
+ _LIBCPP_ASSERT(false, "deallocating a block that was not allocated with this allocator");
+}
+
+size_t unsynchronized_pool_resource::__pool_block_size(int i) const
+{
+ return size_t(1) << __log2_pool_block_size(i);
+}
+
+int unsynchronized_pool_resource::__log2_pool_block_size(int i) const
+{
+ return (i + __log2_smallest_block_size);
+}
+
+int unsynchronized_pool_resource::__pool_index(size_t bytes, size_t align) const
+{
+    if (align > alignof(std::max_align_t) || bytes > __pool_block_size(__num_fixed_pools_ - 1)) {
+ return __num_fixed_pools_;
+ } else {
+ int i = 0;
+ bytes = (bytes > align) ? bytes : align;
+ bytes -= 1;
+ bytes >>= __log2_smallest_block_size;
+ while (bytes != 0) {
+ bytes >>= 1;
+ i += 1;
+ }
+ return i;
+ }
+}
+
+unsynchronized_pool_resource::unsynchronized_pool_resource(const pool_options& opts, memory_resource* upstream)
+ : __res_(upstream), __fixed_pools_(nullptr), __options_(opts)
+{
+ if (__options_.largest_required_pool_block == 0) {
+ __options_.largest_required_pool_block = __default_largest_block_size;
+ } else if (__options_.largest_required_pool_block < __smallest_block_size) {
+ __options_.largest_required_pool_block = __smallest_block_size;
+ } else if (__options_.largest_required_pool_block > __max_largest_block_size) {
+ __options_.largest_required_pool_block = __max_largest_block_size;
+ }
+
+ if (__options_.max_blocks_per_chunk == 0) {
+ __options_.max_blocks_per_chunk = __max_blocks_per_chunk;
+ } else if (__options_.max_blocks_per_chunk < __min_blocks_per_chunk) {
+ __options_.max_blocks_per_chunk = __min_blocks_per_chunk;
+ } else if (__options_.max_blocks_per_chunk > __max_blocks_per_chunk) {
+ __options_.max_blocks_per_chunk = __max_blocks_per_chunk;
+ }
+
+ __num_fixed_pools_ = 1;
+ size_t capacity = __smallest_block_size;
+ while (capacity < __options_.largest_required_pool_block) {
+ capacity <<= 1;
+ __num_fixed_pools_ += 1;
+ }
+}
+
+unsynchronized_pool_resource::~unsynchronized_pool_resource()
+{
+ release();
+}
+
+void unsynchronized_pool_resource::release()
+{
+ __adhoc_pool_.release(__res_);
+ if (__fixed_pools_ != nullptr) {
+ const int n = __num_fixed_pools_;
+ for (int i=0; i < n; ++i) {
+ __fixed_pools_[i].release(__res_);
+ }
+ __res_->deallocate(__fixed_pools_, __num_fixed_pools_ * sizeof(__pool_resource_fixed_pool), alignof(__pool_resource_fixed_pool));
+ __fixed_pools_ = nullptr;
+ }
+}
+
+void* unsynchronized_pool_resource::do_allocate(size_t bytes, size_t align)
+{
+ // A pointer to allocated storage (6.6.4.4.1) with a size of at least bytes.
+ // The size and alignment of the allocated memory shall meet the requirements for
+ // a class derived from memory_resource (23.12).
+ // If the pool selected for a block of size bytes is unable to satisfy the memory request
+ // from its own internal data structures, it will call upstream_resource()->allocate()
+ // to obtain more memory. If bytes is larger than that which the largest pool can handle,
+ // then memory will be allocated using upstream_resource()->allocate().
+
+ int i = __pool_index(bytes, align);
+ if (i == __num_fixed_pools_) {
+ return __adhoc_pool_.do_allocate(__res_, bytes, align);
+ } else {
+ if (__fixed_pools_ == nullptr) {
+ using P = __pool_resource_fixed_pool;
+ __fixed_pools_ = (P*)__res_->allocate(__num_fixed_pools_ * sizeof(P), alignof(P));
+ P *first = __fixed_pools_;
+ P *last = __fixed_pools_ + __num_fixed_pools_;
+ for (P *pool = first; pool != last; ++pool) {
+ ::new((void*)pool) P;
+ }
+ }
+ void *result = __fixed_pools_[i].try_allocate_from_vacancies();
+ if (result == nullptr) {
+ static_assert((__max_bytes_per_chunk*5)/4 > __max_bytes_per_chunk, "unsigned overflow is possible");
+ auto min = [](size_t a, size_t b) { return a < b ? a : b; };
+ auto max = [](size_t a, size_t b) { return a < b ? b : a; };
+
+ size_t prev_chunk_size_in_bytes = __fixed_pools_[i].previous_chunk_size_in_bytes();
+ size_t prev_chunk_size_in_blocks = prev_chunk_size_in_bytes >> __log2_pool_block_size(i);
+
+ size_t chunk_size_in_blocks;
+
+ if (prev_chunk_size_in_blocks == 0) {
+ size_t min_blocks_per_chunk = max(
+ __min_bytes_per_chunk >> __log2_pool_block_size(i),
+ __min_blocks_per_chunk
+ );
+ chunk_size_in_blocks = min_blocks_per_chunk;
+ } else {
+ chunk_size_in_blocks = (prev_chunk_size_in_blocks*5)/4;
+ }
+
+
+ size_t max_blocks_per_chunk = min(
+ (__max_bytes_per_chunk >> __log2_pool_block_size(i)),
+ min(
+ __max_blocks_per_chunk,
+ __options_.max_blocks_per_chunk
+ )
+ );
+ if (chunk_size_in_blocks > max_blocks_per_chunk) {
+ chunk_size_in_blocks = max_blocks_per_chunk;
+ }
+
+ size_t block_size = __pool_block_size(i);
+
+ size_t chunk_size_in_bytes = (chunk_size_in_blocks << __log2_pool_block_size(i));
+ result = __fixed_pools_[i].do_allocate_with_new_chunk(__res_, block_size, chunk_size_in_bytes);
+ }
+ return result;
+ }
+}
+
+void unsynchronized_pool_resource::do_deallocate(void* p, size_t bytes, size_t align)
+{
+ // Returns the memory at p to the pool. It is unspecified if, or under what circumstances,
+ // this operation will result in a call to upstream_resource()->deallocate().
+
+ int i = __pool_index(bytes, align);
+ if (i == __num_fixed_pools_) {
+ return __adhoc_pool_.do_deallocate(__res_, p, bytes, align);
+ } else {
+ _LIBCPP_ASSERT(__fixed_pools_ != nullptr, "deallocating a block that was not allocated with this allocator");
+ __fixed_pools_[i].do_evacuate(p);
+ }
+}
+
+synchronized_pool_resource::~synchronized_pool_resource()
+{
+}
+
_LIBCPP_END_NAMESPACE_LFTS_PMR
Index: include/experimental/memory_resource
===================================================================
--- include/experimental/memory_resource
+++ include/experimental/memory_resource
@@ -69,6 +69,9 @@
#include <experimental/__memory>
#include <limits>
#include <memory>
+#if !defined(_LIBCPP_HAS_NO_THREADS)
+#include <mutex>
+#endif
#include <new>
#include <stdexcept>
#include <tuple>
@@ -487,6 +490,166 @@
size_t __next_buffer_size_;
};
+// 23.12.5, mem.res.pool
+
+// 23.12.5.2, mem.res.pool.options
+
+struct pool_options {
+ size_t max_blocks_per_chunk = 0;
+ size_t largest_required_pool_block = 0;
+};
+
+// 23.12.5.1, mem.res.pool.overview
+
+struct __pool_resource_adhoc_pool_header;
+
+class __pool_resource_adhoc_pool {
+ using __header = __pool_resource_adhoc_pool_header;
+ __header *__first_;
+
+public:
+ _LIBCPP_INLINE_VISIBILITY
+ explicit __pool_resource_adhoc_pool() : __first_(nullptr) {}
+ void release(memory_resource *__upstream);
+ void *do_allocate(memory_resource *__upstream, size_t __bytes, size_t __align);
+ void do_deallocate(memory_resource *__upstream, void *__p, size_t __bytes, size_t __align);
+};
+
+class __pool_resource_fixed_pool;
+
+class _LIBCPP_TYPE_VIS unsynchronized_pool_resource : public memory_resource
+{
+ static const size_t __min_blocks_per_chunk = 16;
+ static const size_t __min_bytes_per_chunk = 1024;
+ static const size_t __max_blocks_per_chunk = (size_t(1) << 20);
+ static const size_t __max_bytes_per_chunk = (size_t(1) << 30);
+
+ static const int __log2_smallest_block_size = 3;
+ static const size_t __smallest_block_size = 8;
+ static const size_t __default_largest_block_size = (size_t(1) << 20);
+ static const size_t __max_largest_block_size = (size_t(1) << 30);
+
+ size_t __pool_block_size(int __i) const;
+ int __log2_pool_block_size(int __i) const;
+ int __pool_index(size_t __bytes, size_t __align) const;
+
+public:
+ unsynchronized_pool_resource(const pool_options& __opts, memory_resource* __upstream);
+
+ _LIBCPP_INLINE_VISIBILITY
+ unsynchronized_pool_resource()
+ : unsynchronized_pool_resource(pool_options(), get_default_resource()) {}
+
+ _LIBCPP_INLINE_VISIBILITY
+ explicit unsynchronized_pool_resource(memory_resource* __upstream)
+ : unsynchronized_pool_resource(pool_options(), __upstream) {}
+
+ _LIBCPP_INLINE_VISIBILITY
+ explicit unsynchronized_pool_resource(const pool_options& __opts)
+ : unsynchronized_pool_resource(__opts, get_default_resource()) {}
+
+ unsynchronized_pool_resource(const unsynchronized_pool_resource&) = delete;
+
+ virtual ~unsynchronized_pool_resource();
+
+ unsynchronized_pool_resource& operator=(const unsynchronized_pool_resource&) = delete;
+
+ void release();
+
+ _LIBCPP_INLINE_VISIBILITY
+ memory_resource* upstream_resource() const
+ { return __res_; }
+
+ _LIBCPP_INLINE_VISIBILITY
+ pool_options options() const
+ { return __options_; }
+
+protected:
+ void* do_allocate(size_t __bytes, size_t __align);
+
+ void do_deallocate(void* __p, size_t __bytes, size_t __align);
+
+ _LIBCPP_INLINE_VISIBILITY
+ bool do_is_equal(const memory_resource& __other) const _NOEXCEPT
+ { return this == _VSTD::addressof(__other); }
+
+private:
+ memory_resource *__res_;
+ __pool_resource_adhoc_pool __adhoc_pool_;
+ __pool_resource_fixed_pool *__fixed_pools_;
+ int __num_fixed_pools_;
+ pool_options __options_;
+};
+
+class _LIBCPP_TYPE_VIS synchronized_pool_resource : public memory_resource
+{
+public:
+ _LIBCPP_INLINE_VISIBILITY
+ synchronized_pool_resource(const pool_options& __opts, memory_resource* __upstream)
+ : __unsync_(__opts, __upstream) {}
+
+ _LIBCPP_INLINE_VISIBILITY
+ synchronized_pool_resource()
+ : synchronized_pool_resource(pool_options(), get_default_resource()) {}
+
+ _LIBCPP_INLINE_VISIBILITY
+ explicit synchronized_pool_resource(memory_resource* __upstream)
+ : synchronized_pool_resource(pool_options(), __upstream) {}
+
+ _LIBCPP_INLINE_VISIBILITY
+ explicit synchronized_pool_resource(const pool_options& __opts)
+ : synchronized_pool_resource(__opts, get_default_resource()) {}
+
+ synchronized_pool_resource(const synchronized_pool_resource&) = delete;
+
+ virtual ~synchronized_pool_resource();
+
+ synchronized_pool_resource& operator=(const synchronized_pool_resource&) = delete;
+
+ _LIBCPP_INLINE_VISIBILITY
+ void release() {
+#if !defined(_LIBCPP_HAS_NO_THREADS)
+ unique_lock<mutex> __lk(__mut_);
+#endif
+ __unsync_.release();
+ }
+
+ _LIBCPP_INLINE_VISIBILITY
+ memory_resource* upstream_resource() const
+ { return __unsync_.upstream_resource(); }
+
+ _LIBCPP_INLINE_VISIBILITY
+ pool_options options() const
+ { return __unsync_.options(); }
+
+protected:
+ _LIBCPP_INLINE_VISIBILITY
+ void* do_allocate(size_t __bytes, size_t __align) {
+#if !defined(_LIBCPP_HAS_NO_THREADS)
+ unique_lock<mutex> __lk(__mut_);
+#endif
+ return __unsync_.allocate(__bytes, __align);
+ }
+
+ _LIBCPP_INLINE_VISIBILITY
+ void do_deallocate(void* __p, size_t __bytes, size_t __align) {
+#if !defined(_LIBCPP_HAS_NO_THREADS)
+ unique_lock<mutex> __lk(__mut_);
+#endif
+ return __unsync_.deallocate(__p, __bytes, __align);
+ }
+
+ _LIBCPP_INLINE_VISIBILITY
+ bool do_is_equal(const memory_resource& __other) const _NOEXCEPT
+ { return this == _VSTD::addressof(__other); }
+
+private:
+#if !defined(_LIBCPP_HAS_NO_THREADS)
+ mutex __mut_;
+#endif
+ unsynchronized_pool_resource __unsync_;
+};
+
_LIBCPP_END_NAMESPACE_LFTS_PMR
_LIBCPP_POP_MACROS
_______________________________________________
cfe-commits mailing list
[email protected]
http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits