Quuxplusone updated this revision to Diff 148542.
Quuxplusone retitled this revision from "Implement monotonic_buffer_resource in
<experimental/memory_resource>" to "<experimental/memory_resource>: Implement
monotonic_buffer_resource.".
Quuxplusone added a blocking reviewer: EricWF.
Quuxplusone added a comment.
Fix one visibility macro.
Repository:
rCXX libc++
https://reviews.llvm.org/D47111
Files:
include/experimental/memory_resource
src/experimental/memory_resource.cpp
test/std/experimental/memory/memory.resource.monotonic.buffer/monotonic_buffer.pass.cpp
Index: test/std/experimental/memory/memory.resource.monotonic.buffer/monotonic_buffer.pass.cpp
===================================================================
--- /dev/null
+++ test/std/experimental/memory/memory.resource.monotonic.buffer/monotonic_buffer.pass.cpp
@@ -0,0 +1,408 @@
+//===----------------------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++98, c++03
+
+// <experimental/memory_resource>
+
+// class monotonic_buffer_resource
+
+#include <experimental/memory_resource>
+#include <new>
+#include <type_traits>
+#include <cassert>
+
+#include "count_new.hpp"
+
+struct assert_on_compare : public std::experimental::pmr::memory_resource
+{
+protected:
+ virtual void * do_allocate(size_t, size_t)
+ { assert(false); return nullptr; }
+
+ virtual void do_deallocate(void *, size_t, size_t)
+ { assert(false); }
+
+ virtual bool do_is_equal(std::experimental::pmr::memory_resource const &) const noexcept
+ { assert(false); return false; }
+};
+
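+// A test resource whose upstream can be swapped out mid-test, e.g. to make
+// later upstream allocations fail after earlier ones succeeded.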
+struct repointable_resource : public std::experimental::pmr::memory_resource
+{
+ std::experimental::pmr::memory_resource *which;
+
+ explicit repointable_resource(std::experimental::pmr::memory_resource *res) : which(res) {}
+
+protected:
+ virtual void *do_allocate(size_t size, size_t align)
+ { return which->allocate(size, align); }
+
+ virtual void do_deallocate(void *p, size_t size, size_t align)
+ { return which->deallocate(p, size, align); }
+
+ virtual bool do_is_equal(std::experimental::pmr::memory_resource const &rhs) const noexcept
+ { return which->is_equal(rhs); }
+};
+
+void test_construction_with_default_resource()
+{
+ std::experimental::pmr::memory_resource *expected = std::experimental::pmr::null_memory_resource();
+ std::experimental::pmr::set_default_resource(expected);
+ {
+ char buffer[16];
+ std::experimental::pmr::monotonic_buffer_resource r1;
+ std::experimental::pmr::monotonic_buffer_resource r2(16);
+ std::experimental::pmr::monotonic_buffer_resource r3(buffer, sizeof buffer);
+ assert(r1.upstream_resource() == expected);
+ assert(r2.upstream_resource() == expected);
+ assert(r3.upstream_resource() == expected);
+ }
+
+ expected = std::experimental::pmr::new_delete_resource();
+ std::experimental::pmr::set_default_resource(expected);
+ {
+ char buffer[16];
+ std::experimental::pmr::monotonic_buffer_resource r1;
+ std::experimental::pmr::monotonic_buffer_resource r2(16);
+ std::experimental::pmr::monotonic_buffer_resource r3(buffer, sizeof buffer);
+ assert(r1.upstream_resource() == expected);
+ assert(r2.upstream_resource() == expected);
+ assert(r3.upstream_resource() == expected);
+ }
+}
+
+void test_construction_without_buffer()
+{
+ // Constructing a monotonic_buffer_resource should not cause allocations
+ // by itself; the resource should wait to allocate until an allocation is
+ // requested.
+
+ globalMemCounter.reset();
+ std::experimental::pmr::set_default_resource(std::experimental::pmr::new_delete_resource());
+
+ std::experimental::pmr::monotonic_buffer_resource r1;
+ assert(globalMemCounter.checkNewCalledEq(0));
+
+ std::experimental::pmr::monotonic_buffer_resource r2(1024);
+ assert(globalMemCounter.checkNewCalledEq(0));
+
+ std::experimental::pmr::monotonic_buffer_resource r3(1024, std::experimental::pmr::new_delete_resource());
+ assert(globalMemCounter.checkNewCalledEq(0));
+}
+
+void test_equality()
+{
+ // Same object
+ {
+ std::experimental::pmr::monotonic_buffer_resource r1;
+ std::experimental::pmr::monotonic_buffer_resource r2;
+ assert(r1 == r1);
+ assert(r1 != r2);
+
+ std::experimental::pmr::memory_resource & p1 = r1;
+ std::experimental::pmr::memory_resource & p2 = r2;
+ assert(p1 == p1);
+ assert(p1 != p2);
+ assert(p1 == r1);
+ assert(r1 == p1);
+ assert(p1 != r2);
+ assert(r2 != p1);
+ }
+ // Different types
+ {
+ std::experimental::pmr::monotonic_buffer_resource mono1;
+ std::experimental::pmr::memory_resource & r1 = mono1;
+ assert_on_compare c;
+ std::experimental::pmr::memory_resource & r2 = c;
+ assert(r1 != r2);
+ assert(!(r1 == r2));
+ }
+}
+
+void test_allocate_deallocate()
+{
+ {
+ globalMemCounter.reset();
+
+ std::experimental::pmr::monotonic_buffer_resource mono1(std::experimental::pmr::new_delete_resource());
+ std::experimental::pmr::memory_resource & r1 = mono1;
+
+ void *ret = r1.allocate(50);
+ assert(ret);
+ assert(globalMemCounter.checkNewCalledGreaterThan(0));
+ assert(globalMemCounter.checkDeleteCalledEq(0));
+
+ // mem.res.monotonic.buffer 1.2
+ // A call to deallocate has no effect, thus the amount of memory
+ // consumed increases monotonically until the resource is destroyed.
+ // Check that deallocate is a no-op
+ r1.deallocate(ret, 50);
+ assert(globalMemCounter.checkDeleteCalledEq(0));
+
+ mono1.release();
+ assert(globalMemCounter.checkDeleteCalledEq(1));
+ assert(globalMemCounter.checkOutstandingNewEq(0));
+
+ globalMemCounter.reset();
+
+ ret = r1.allocate(500);
+ assert(ret);
+ assert(globalMemCounter.checkNewCalledGreaterThan(0));
+ assert(globalMemCounter.checkDeleteCalledEq(0));
+
+ // Check that the destructor calls release()
+ }
+ assert(globalMemCounter.checkDeleteCalledEq(1));
+ assert(globalMemCounter.checkOutstandingNewEq(0));
+}
+
+void test_allocate_deallocate_from_original_buffer()
+{
+ globalMemCounter.reset();
+ char buffer[100];
+ std::experimental::pmr::monotonic_buffer_resource mono1((void *)buffer, sizeof buffer, std::experimental::pmr::new_delete_resource());
+ std::experimental::pmr::memory_resource & r1 = mono1;
+
+ // Check that construction with a buffer does not allocate anything from the upstream
+ assert(globalMemCounter.checkNewCalledEq(0));
+
+ // Check that an allocation that fits in the buffer does not allocate anything from the upstream
+ void *ret = r1.allocate(50);
+ assert(ret);
+ assert(globalMemCounter.checkNewCalledEq(0));
+
+ // Check a second allocation
+ ret = r1.allocate(20);
+ assert(ret);
+ assert(globalMemCounter.checkNewCalledEq(0));
+
+ r1.deallocate(ret, 20);
+ assert(globalMemCounter.checkDeleteCalledEq(0));
+
+ // Check an allocation that doesn't fit in the original buffer
+ ret = r1.allocate(50);
+ assert(ret);
+ assert(globalMemCounter.checkNewCalledEq(1));
+
+ r1.deallocate(ret, 50);
+ assert(globalMemCounter.checkDeleteCalledEq(0));
+
+ mono1.release();
+ assert(globalMemCounter.checkDeleteCalledEq(1));
+ assert(globalMemCounter.checkOutstandingNewEq(0));
+}
+
+void test_geometric_progression()
+{
+ // mem.res.monotonic.buffer 1.3
+ // Each additional buffer is larger than the previous one, following a
+ // geometric progression.
+
+ globalMemCounter.reset();
+ std::experimental::pmr::monotonic_buffer_resource mono1(100, std::experimental::pmr::new_delete_resource());
+ std::experimental::pmr::memory_resource & r1 = mono1;
+
+ assert(globalMemCounter.checkNewCalledEq(0));
+ size_t next_buffer_size = 100;
+ void *ret = r1.allocate(10, 1);
+ assert(ret != nullptr);
+ assert(globalMemCounter.checkNewCalledEq(1));
+ assert(globalMemCounter.last_new_size >= next_buffer_size);
+ next_buffer_size = globalMemCounter.last_new_size + 1;
+
+ int new_called = 1;
+ while (new_called < 5) {
+ ret = r1.allocate(10, 1);
+ if (globalMemCounter.new_called > new_called) {
+ assert(globalMemCounter.new_called == new_called + 1);
+ assert(globalMemCounter.last_new_size >= next_buffer_size);
+ next_buffer_size = globalMemCounter.last_new_size + 1;
+ new_called += 1;
+ }
+ }
+}
+
+void test_geometric_progression_after_release()
+{
+ // mem.res.monotonic.buffer 1.3
+ // Each additional buffer is larger than the previous one, following a
+ // geometric progression.
+
+ // mem.res.monotonic.buffer.mem 1
+ // release() calls upstream_rsrc->deallocate() as necessary to release all allocated memory.
+
+ // Implicitly: release() does not reset the geometric progression of next_buffer_size.
+
+ globalMemCounter.reset();
+ std::experimental::pmr::monotonic_buffer_resource mono1;
+ std::experimental::pmr::memory_resource & r1 = mono1;
+
+ void *ret = r1.allocate(100, 1);
+ assert(ret != nullptr);
+ assert(globalMemCounter.checkNewCalledEq(1));
+ size_t last_new_size = globalMemCounter.last_new_size;
+
+ r1.allocate(last_new_size, 1);
+ assert(globalMemCounter.checkNewCalledEq(2));
+ assert(globalMemCounter.last_new_size > last_new_size);
+ last_new_size = globalMemCounter.last_new_size;
+
+ mono1.release();
+ assert(globalMemCounter.checkDeleteCalledEq(2));
+
+ // We expect to see a large upstream allocation corresponding
+ // to this small request, because the upstream allocation must
+ // be at least `next_buffer_size` bytes.
+ r1.allocate(10, 1);
+ assert(globalMemCounter.checkNewCalledEq(3));
+ assert(globalMemCounter.last_new_size >= 10);
+ assert(globalMemCounter.last_new_size > 100);
+ assert(globalMemCounter.last_new_size > last_new_size);
+}
+
+void test_zero_sized_initial_buffer()
+{
+ globalMemCounter.reset();
+ {
+ char buffer[100];
+ std::experimental::pmr::monotonic_buffer_resource mono1((void *)buffer, 0, std::experimental::pmr::new_delete_resource());
+ std::experimental::pmr::memory_resource & r1 = mono1;
+
+ void *ret = r1.allocate(1, 1);
+ assert(ret != nullptr);
+ assert(globalMemCounter.checkNewCalledEq(1));
+ }
+ assert(globalMemCounter.checkDeleteCalledEq(1));
+
+ globalMemCounter.reset();
+ {
+ std::experimental::pmr::monotonic_buffer_resource mono1(nullptr, 0, std::experimental::pmr::new_delete_resource());
+ std::experimental::pmr::memory_resource & r1 = mono1;
+
+ void *ret = r1.allocate(1, 1);
+ assert(ret != nullptr);
+ assert(globalMemCounter.checkNewCalledEq(1));
+ }
+ assert(globalMemCounter.checkDeleteCalledEq(1));
+}
+
+void test_underaligned_initial_buffer()
+{
+ globalMemCounter.reset();
+ {
+ alignas(4) char buffer[17];
+ std::experimental::pmr::monotonic_buffer_resource mono1(buffer + 1, 16, std::experimental::pmr::new_delete_resource());
+ std::experimental::pmr::memory_resource & r1 = mono1;
+
+ void *ret = r1.allocate(1, 1);
+ assert(ret == buffer + 1);
+ mono1.release();
+
+ ret = r1.allocate(1, 2);
+ assert(ret == buffer + 2);
+ mono1.release();
+
+ ret = r1.allocate(1, 4);
+ assert(ret == buffer + 4);
+ mono1.release();
+
+ // Test a size that is just big enough to fit in the buffer,
+ // but can't fit if it's aligned.
+ ret = r1.allocate(16, 1);
+ assert(ret == buffer + 1);
+ mono1.release();
+
+ assert(globalMemCounter.checkNewCalledEq(0));
+ ret = r1.allocate(16, 2);
+ assert(globalMemCounter.checkNewCalledEq(1));
+ assert(globalMemCounter.last_new_size >= 16);
+ // assert(globalMemCounter.last_new_align >= 2);
+ mono1.release();
+ assert(globalMemCounter.checkDeleteCalledEq(1));
+ // assert(globalMemCounter.last_new_align == globalMemCounter.last_delete_align);
+ }
+}
+
+void test_overaligned_single_allocation()
+{
+ globalMemCounter.reset();
+ std::experimental::pmr::monotonic_buffer_resource mono1(1024, std::experimental::pmr::new_delete_resource());
+ std::experimental::pmr::memory_resource & r1 = mono1;
+
+ constexpr size_t big_alignment = 8 * alignof(std::max_align_t);
+ static_assert(big_alignment > 4, "");
+
+ void *ret = r1.allocate(2048, big_alignment);
+ assert(ret != nullptr);
+ assert(globalMemCounter.checkNewCalledEq(1));
+ assert(globalMemCounter.last_new_size >= 2048);
+ // assert(globalMemCounter.last_new_align >= big_alignment);
+
+ // Check that a single highly aligned allocation request doesn't
+ // permanently "poison" the resource to allocate only super-aligned
+ // blocks of memory.
+ ret = r1.allocate(globalMemCounter.last_new_size, 4);
+ assert(ret != nullptr);
+ assert(globalMemCounter.checkNewCalledEq(2));
+ // assert(globalMemCounter.last_new_align >= 4);
+ // assert(globalMemCounter.last_new_align < big_alignment);
+}
+
+void test_exception_safety()
+{
+ globalMemCounter.reset();
+ repointable_resource upstream(std::experimental::pmr::new_delete_resource());
+ alignas(16) char buffer[64];
+ std::experimental::pmr::monotonic_buffer_resource mono1(buffer, sizeof buffer, &upstream);
+ std::experimental::pmr::memory_resource & r1 = mono1;
+
+ void *res = r1.allocate(64, 16);
+ assert(res == buffer);
+ assert(globalMemCounter.checkNewCalledEq(0));
+
+ res = r1.allocate(64, 16);
+ assert(res != buffer);
+ assert(globalMemCounter.checkNewCalledEq(1));
+ assert(globalMemCounter.checkDeleteCalledEq(0));
+
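+ // Repoint the upstream to a resource that always throws, so the next
+ // allocation that needs a new chunk must propagate bad_alloc without
+ // leaking or corrupting the chunk list.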
+ upstream.which = std::experimental::pmr::null_memory_resource();
+ try {
+ res = r1.allocate(globalMemCounter.last_new_size, 16);
+ assert(false);
+ } catch (const std::bad_alloc&) {
+ // we expect this
+ }
+ assert(globalMemCounter.checkNewCalledEq(1));
+ assert(globalMemCounter.checkDeleteCalledEq(0));
+
+ upstream.which = std::experimental::pmr::new_delete_resource();
+ res = r1.allocate(64, 16);
+ assert(res != buffer);
+ assert(globalMemCounter.checkNewCalledEq(2));
+ assert(globalMemCounter.checkDeleteCalledEq(0));
+
+ mono1.release();
+ assert(globalMemCounter.checkNewCalledEq(2));
+ assert(globalMemCounter.checkDeleteCalledEq(2));
+}
+
+int main()
+{
+ test_construction_with_default_resource();
+ test_construction_without_buffer();
+ test_equality();
+ test_allocate_deallocate();
+ test_allocate_deallocate_from_original_buffer();
+ test_geometric_progression();
+ test_geometric_progression_after_release();
+ test_zero_sized_initial_buffer();
+ test_underaligned_initial_buffer();
+ test_overaligned_single_allocation();
+ test_exception_safety();
+}
Index: src/experimental/memory_resource.cpp
===================================================================
--- src/experimental/memory_resource.cpp
+++ src/experimental/memory_resource.cpp
@@ -160,4 +160,96 @@
return __default_memory_resource(true, __new_res);
}
+// 23.12.6, mem.res.monotonic.buffer
+
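+// Round count up to the next multiple of alignment (alignment must be a power of two).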
+static size_t roundup(size_t count, size_t alignment)
+{
+ size_t mask = alignment - 1;
+ return (count + mask) & ~mask;
+}
+
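+// Try to carve `bytes` bytes aligned to `align` out of the chunk described by
+// `header`; return nullptr if the chunk cannot satisfy the request.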
+static void *try_allocate_from_chunk(__monotonic_buffer_header *header, size_t bytes, size_t align)
+{
+ if (!header || !header->__start_) return nullptr;
+ if (header->__capacity_ < bytes) return nullptr;
+ void *new_ptr = static_cast<char *>(header->__start_) + header->__used_;
+ size_t new_capacity = (header->__capacity_ - header->__used_);
+ void *aligned_ptr = _VSTD::align(align, bytes, new_ptr, new_capacity);
+ if (aligned_ptr == nullptr) {
+ return nullptr;
+ }
+ header->__used_ = (header->__capacity_ - new_capacity) + bytes;
+ return aligned_ptr;
+}
+
+monotonic_buffer_resource::monotonic_buffer_resource(void* buffer, size_t buffer_size, memory_resource* upstream)
+ : __res_(upstream)
+{
+ __original_.__start_ = buffer;
+ __original_.__next_ = nullptr;
+ __original_.__capacity_ = buffer_size;
+ __original_.__alignment_ = 1;
+ __original_.__used_ = 0;
+ __next_buffer_size_ = buffer_size >= 1 ? buffer_size : 1;
+}
+
+monotonic_buffer_resource::~monotonic_buffer_resource()
+{
+ release();
+}
+
+void monotonic_buffer_resource::release()
+{
+ const size_t header_size = sizeof(__monotonic_buffer_header);
+
+ __original_.__used_ = 0;
+ while (__original_.__next_ != nullptr) {
+ __monotonic_buffer_header *header = __original_.__next_;
+ __monotonic_buffer_header *next_header = header->__next_;
+ size_t aligned_capacity = header->__capacity_ + header_size;
+ __res_->deallocate(header->__start_, aligned_capacity, header->__alignment_);
+ __original_.__next_ = next_header;
+ }
+}
+
+void* monotonic_buffer_resource::do_allocate(size_t bytes, size_t align)
+{
+ if (void *result = try_allocate_from_chunk(&__original_, bytes, align)) {
+ return result;
+ }
+ if (void *result = try_allocate_from_chunk(__original_.__next_, bytes, align)) {
+ return result;
+ }
+ // Allocate a brand-new chunk.
+ const size_t header_size = sizeof(__monotonic_buffer_header);
+ const size_t header_align = alignof(__monotonic_buffer_header);
+
+ if (align < header_align) {
+ align = header_align;
+ }
+
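+ // Size the new chunk to hold the request plus a trailing header, rounded up
+ // so the header itself is suitably aligned, and at least __next_buffer_size_.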
+ size_t aligned_capacity = roundup(bytes, header_align) + header_size;
+
+ if (aligned_capacity < __next_buffer_size_) {
+ aligned_capacity = roundup(__next_buffer_size_ - header_size, header_align) + header_size;
+ }
+
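+ // Get one chunk from upstream and place its bookkeeping header at the very
+ // end of the chunk; the usable region runs from `result` up to the header.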
+ void *result = __res_->allocate(aligned_capacity, align);
+ __monotonic_buffer_header *header = (__monotonic_buffer_header *)((char *)result + aligned_capacity - header_size);
+ header->__start_ = result;
+ header->__capacity_ = aligned_capacity - header_size;
+ header->__alignment_ = align;
+ header->__used_ = 0;
+ header->__next_ = __original_.__next_;
+ __original_.__next_ = header;
+
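+ // Grow the target size for the next chunk geometrically (factor 5/4),
+ // always increasing by at least one byte so the progression cannot stall.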
+ size_t prospective_next_buffer_size = (__next_buffer_size_ * 5) / 4;
+ if (prospective_next_buffer_size <= __next_buffer_size_) {
+ prospective_next_buffer_size = __next_buffer_size_ + 1;
+ }
+ __next_buffer_size_ = prospective_next_buffer_size;
+
+ return try_allocate_from_chunk(__original_.__next_, bytes, align);
+}
+
_LIBCPP_END_NAMESPACE_LFTS_PMR
Index: include/experimental/memory_resource
===================================================================
--- include/experimental/memory_resource
+++ include/experimental/memory_resource
@@ -420,6 +420,73 @@
typename allocator_traits<_Alloc>::template rebind_alloc<char>
>;
+// 23.12.6, mem.res.monotonic.buffer
+
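+// Each chunk obtained from the upstream resource ends with one of these
+// headers describing it; monotonic_buffer_resource keeps the chunks on a
+// singly linked list headed by __original_, which describes the
+// caller-supplied initial buffer (if any).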
+struct __monotonic_buffer_header {
+ void *__start_;
+ __monotonic_buffer_header *__next_;
+ size_t __capacity_;
+ size_t __alignment_;
+ size_t __used_;
+};
+
+class _LIBCPP_TYPE_VIS monotonic_buffer_resource : public memory_resource
+{
+ static const size_t __default_buffer_capacity = 1024;
+ static const size_t __default_buffer_alignment = 16;
+
+public:
+ _LIBCPP_INLINE_VISIBILITY
+ explicit monotonic_buffer_resource(memory_resource* __upstream)
+ : monotonic_buffer_resource(nullptr, __default_buffer_capacity, __upstream) {}
+
+ _LIBCPP_INLINE_VISIBILITY
+ monotonic_buffer_resource(size_t __initial_size, memory_resource* __upstream)
+ : monotonic_buffer_resource(nullptr, __initial_size, __upstream) {}
+
+ monotonic_buffer_resource(void* __buffer, size_t __buffer_size, memory_resource* __upstream);
+
+ _LIBCPP_INLINE_VISIBILITY
+ monotonic_buffer_resource()
+ : monotonic_buffer_resource(get_default_resource()) {}
+
+ _LIBCPP_INLINE_VISIBILITY
+ explicit monotonic_buffer_resource(size_t __initial_size)
+ : monotonic_buffer_resource(__initial_size, get_default_resource()) {}
+
+ _LIBCPP_INLINE_VISIBILITY
+ monotonic_buffer_resource(void* __buffer, size_t __buffer_size)
+ : monotonic_buffer_resource(__buffer, __buffer_size, get_default_resource()) {}
+
+ monotonic_buffer_resource(const monotonic_buffer_resource&) = delete;
+
+ virtual ~monotonic_buffer_resource();
+
+ monotonic_buffer_resource& operator=(const monotonic_buffer_resource&) = delete;
+
+ void release();
+
+ _LIBCPP_INLINE_VISIBILITY
+ memory_resource* upstream_resource() const
+ { return __res_; }
+
+protected:
+ void* do_allocate(size_t __bytes, size_t __alignment);
+
+ _LIBCPP_INLINE_VISIBILITY
+ void do_deallocate(void*, size_t, size_t)
+ {}
+
+ _LIBCPP_INLINE_VISIBILITY
+ bool do_is_equal(const memory_resource& __other) const _NOEXCEPT
+ { return this == _VSTD::addressof(__other); }
+
+private:
+ __monotonic_buffer_header __original_;
+ memory_resource* __res_;
+ size_t __next_buffer_size_;
+};
+
_LIBCPP_END_NAMESPACE_LFTS_PMR
_LIBCPP_POP_MACROS