Finish release 0.5-3
foonathan committed Apr 17, 2016
2 parents a86588b + a555ddf commit 4123c1c
Showing 10 changed files with 120 additions and 53 deletions.
14 changes: 14 additions & 0 deletions include/foonathan/memory/detail/free_list.hpp
@@ -48,6 +48,13 @@ namespace foonathan { namespace memory
// pre: size != 0
void insert(void *mem, std::size_t size) FOONATHAN_NOEXCEPT;

// returns the usable size
// i.e. how much memory will actually be inserted and usable on a call to insert()
std::size_t usable_size(std::size_t size) const FOONATHAN_NOEXCEPT
{
return size;
}

// returns a single block from the list
// pre: !empty()
void* allocate() FOONATHAN_NOEXCEPT;
@@ -131,6 +138,13 @@ namespace foonathan { namespace memory
// pre: size != 0
void insert(void *mem, std::size_t size) FOONATHAN_NOEXCEPT;

// returns the usable size
// i.e. how much memory will actually be inserted and usable on a call to insert()
std::size_t usable_size(std::size_t size) const FOONATHAN_NOEXCEPT
{
return size;
}

// returns a single block from the list
// pre: !empty()
void* allocate() FOONATHAN_NOEXCEPT;
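A minimal sketch (not part of the commit) of the new usable_size() contract. free_memory_list is an internal detail class, and the single-argument constructor taking the node size is an assumption for illustration:

#include <cassert>
#include <cstddef>

#include <foonathan/memory/detail/free_list.hpp>

void usable_size_contract()
{
    using foonathan::memory::detail::free_memory_list;
    free_memory_list list(16u); // assumed: constructor takes the node size

    // for the plain free lists every inserted byte remains usable,
    // so usable_size() is the identity function
    alignas(std::max_align_t) static char storage[4096];
    assert(list.usable_size(sizeof(storage)) == sizeof(storage));
    list.insert(storage, sizeof(storage));
}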
13 changes: 10 additions & 3 deletions include/foonathan/memory/detail/small_free_list.hpp
@@ -69,11 +69,15 @@ namespace foonathan { namespace memory
// mem must be aligned for maximum alignment
void insert(void *mem, std::size_t size) FOONATHAN_NOEXCEPT;

// returns the usable size
// i.e. how much memory will actually be inserted and usable on a call to insert()
std::size_t usable_size(std::size_t size) const FOONATHAN_NOEXCEPT;

// allocates a node big enough for the node size
// pre: !empty()
void* allocate() FOONATHAN_NOEXCEPT;

// must not be called
// always returns nullptr, because array allocations are not supported
void* allocate(std::size_t) FOONATHAN_NOEXCEPT
{
return nullptr;
@@ -82,8 +86,11 @@
// deallocates the node previously allocated via allocate()
void deallocate(void *node) FOONATHAN_NOEXCEPT;

// must not be called
void deallocate(void *, std::size_t) FOONATHAN_NOEXCEPT {}
// forwards to insert()
void deallocate(void *mem, std::size_t size) FOONATHAN_NOEXCEPT
{
insert(mem, size);
}

// hint for allocate() to be prepared to allocate n nodes
// it searches for a chunk that has n nodes free
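A hedged sketch of the changed array semantics. small_free_memory_list is again an internal detail class, and its constructor argument is an assumption:

#include <cstddef>

#include <foonathan/memory/detail/small_free_list.hpp>

void small_list_semantics(void* mem, std::size_t size)
{
    using foonathan::memory::detail::small_free_memory_list;
    small_free_memory_list list(8u); // assumed: node size 8
    list.insert(mem, size);          // pre: mem aligned for maximum alignment

    void* node = list.allocate();    // single nodes are supported
    void* arr  = list.allocate(64);  // arrays are not: always returns nullptr
    // deallocate() with a size is no longer forbidden,
    // it simply forwards the memory back to insert()
    list.deallocate(node);
    (void)arr;
}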
22 changes: 19 additions & 3 deletions include/foonathan/memory/memory_arena.hpp
@@ -111,6 +111,9 @@ namespace foonathan { namespace memory
// the inserted block slightly smaller to allow for the fixup value
using inserted_mb = memory_block;

// how much an inserted block is smaller
static const std::size_t implementation_offset;

// pushes a memory block
void push(allocated_mb block) FOONATHAN_NOEXCEPT;

@@ -169,6 +172,12 @@
{
return cached_.size();
}

std::size_t cached_block_size() const FOONATHAN_NOEXCEPT
{
return cached_.top().size;
}

bool take_from_cache(detail::memory_block_stack &used) FOONATHAN_NOEXCEPT
{
if (cached_.empty())
@@ -214,6 +223,11 @@
return 0u;
}

std::size_t cached_block_size() const FOONATHAN_NOEXCEPT
{
return 0u;
}

bool take_from_cache(detail::memory_block_stack &) FOONATHAN_NOEXCEPT
{
return false;
@@ -362,11 +376,13 @@
}

/// \returns The size of the next memory block,
/// i.e. of the next call to \ref allocate_block() if it does not use the cache.
/// This simply forwards to the \concept{concept_blockallocator,BlockAllocator}.
/// i.e. of the next call to \ref allocate_block().
/// If there are blocks in the cache, returns the size of the next one.
/// Otherwise forwards to the \concept{concept_blockallocator,BlockAllocator} and subtracts an implementation offset.
std::size_t next_block_size() const FOONATHAN_NOEXCEPT
{
return allocator_type::next_block_size();
return this->cache_empty() ? allocator_type::next_block_size() - detail::memory_block_stack::implementation_offset
: this->cached_block_size();
}

/// \returns A reference of the \concept{concept_blockallocator,BlockAllocator} object.
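A usage sketch of the new next_block_size() logic; the block allocator and the sizes below are assumptions:

#include <foonathan/memory/memory_arena.hpp>

void arena_cache_sketch()
{
    using namespace foonathan::memory;
    memory_arena<growing_block_allocator<>> arena(4096u);

    auto block = arena.allocate_block();
    // cache empty: the allocator's next block size minus the
    // implementation_offset lost to the bookkeeping node
    auto uncached = arena.next_block_size();

    arena.deallocate_block(); // the block is cached, not returned
    // cache non-empty: the size of the cached block that will be reused
    auto cached = arena.next_block_size();
    (void)block; (void)uncached; (void)cached;
}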
7 changes: 3 additions & 4 deletions include/foonathan/memory/memory_pool.hpp
@@ -153,12 +153,11 @@ namespace foonathan { namespace memory
}

/// \returns The size of the next memory block after the free list gets empty and the arena grows.
/// This function just forwards to the \ref memory_arena.
/// \note Due to fence memory, alignment buffers and the like this may not be the exact result \ref capacity_left() will return,
/// but it is an upper bound to it.
/// \ref capacity_left() will increase by this amount.
/// \note Due to fence memory in debug mode, this cannot simply be divided by the \ref node_size() to get the number of nodes.
std::size_t next_capacity() const FOONATHAN_NOEXCEPT
{
return arena_.next_block_size();
return free_list_.usable_size(arena_.next_block_size());
}

/// \returns A reference to the \concept{concept_blockallocator,BlockAllocator} used for managing the arena.
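A sketch of what the changed next_capacity() now reports; the pool parameters are assumptions:

#include <foonathan/memory/memory_pool.hpp>

void pool_capacity_sketch()
{
    using namespace foonathan::memory;
    memory_pool<> pool(16u, 4096u); // node size 16, first block 4096 bytes

    // next_capacity() is now the usable size of the next arena block:
    // arena_.next_block_size() already subtracts the arena bookkeeping,
    // and usable_size() is the identity for the ordinary node free list
    auto next = pool.next_capacity();
    (void)next;
}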
55 changes: 31 additions & 24 deletions include/foonathan/memory/memory_pool_collection.hpp
@@ -123,8 +123,14 @@ namespace foonathan { namespace memory
detail::check_allocation_size<bad_node_size>(node_size, [&]{return max_node_size();}, info());
auto& pool = pools_.get(node_size);
if (pool.empty())
reserve_impl(pool, def_capacity());
return pool.allocate();
{
auto block = reserve_memory(pool, def_capacity());
pool.insert(block.memory, block.size);
}

auto mem = pool.allocate();
FOONATHAN_MEMORY_ASSERT(mem);
return mem;
}

/// \effects Allocates an \concept{concept_array,array} of nodes by searching for \c n continuous nodes on the appropriate free list and removing them.
@@ -139,20 +145,22 @@
void* allocate_array(std::size_t count, std::size_t node_size)
{
detail::check_allocation_size<bad_node_size>(node_size, [&]{return max_node_size();}, info());
detail::check_allocation_size<bad_array_size>(count * node_size,
[&]{return PoolType::value ? next_capacity() : 0u;},
info());

auto& pool = pools_.get(node_size);
if (pool.empty())
reserve_impl(pool, def_capacity());
auto mem = pool.allocate(count * node_size);

// try allocating if not empty
// for pools without array allocation support, allocate() will always return nullptr
auto mem = pool.empty() ? nullptr : pool.allocate(count * node_size);
if (!mem)
{
reserve_impl(pool, count * node_size);
mem = pool.allocate(count * node_size);
if (!mem)
FOONATHAN_THROW(bad_array_size(info(), count * node_size, next_capacity()));
// use stack for allocation
detail::check_allocation_size<bad_array_size>(count * node_size,
[&]{return next_capacity() - pool.alignment() + 1;},
info());
mem = reserve_memory(pool, count * node_size).memory;
FOONATHAN_MEMORY_ASSERT(mem);
}

return mem;
}

@@ -169,8 +177,9 @@
/// i.e. either this allocator object or a new object created by moving this to it.
void deallocate_array(void *ptr, std::size_t count, std::size_t node_size) FOONATHAN_NOEXCEPT
{
FOONATHAN_MEMORY_ASSERT_MSG(PoolType::value, "array allocations not supported");
auto& pool = pools_.get(node_size);
// deallocate array
// for pools without array allocation support, this will simply forward to insert()
pool.deallocate(ptr, count * node_size);
}

@@ -185,7 +194,7 @@
{
FOONATHAN_MEMORY_ASSERT_MSG(node_size <= max_node_size(), "node_size too big");
auto& pool = pools_.get(node_size);
reserve_impl(pool, capacity);
reserve_memory(pool, capacity);
}

/// \returns The maximum node size for which there is a free list.
@@ -199,7 +208,7 @@
/// as defined over the \c BucketDistribution.
/// This is the number of nodes that can be allocated without the free list requesting more memory from the arena.
/// \note Array allocations may lead to a growth even if the capacity_left is big enough.
std::size_t pool_capacity(std::size_t node_size) const FOONATHAN_NOEXCEPT
std::size_t pool_capacity_left(std::size_t node_size) const FOONATHAN_NOEXCEPT
{
FOONATHAN_MEMORY_ASSERT_MSG(node_size <= max_node_size(), "node_size too big");
return pools_.get(node_size).capacity();
@@ -208,16 +217,15 @@
/// \returns The amount of memory available in the arena not inside the free lists.
/// This is the number of bytes that can be inserted into the free lists
/// without requesting more memory from the \concept{concept_blockallocator,BlockAllocator}.
/// \note Array allocations may lead to a growth even if the capacity_left is big enough.
std::size_t capacity() const FOONATHAN_NOEXCEPT
/// \note Array allocations may lead to a growth even if the capacity is big enough.
std::size_t capacity_left() const FOONATHAN_NOEXCEPT
{
return std::size_t(block_end() - stack_.top());
}

/// \returns The size of the next memory block after the free list gets empty and the arena grows.
/// This function just forwards to the \ref memory_arena.
/// \note Due to fence memory, alignment buffers and the like this may not be the exact result \ref capacity() will return,
/// but it is an upper bound to it.
/// \returns The size of the next memory block after \ref capacity_left() is exhausted and the arena grows.
/// This is the amount of memory that can be distributed in the pools.
/// \note If the `PoolType` is \ref small_node_pool, the exact usable memory is lower than that.
std::size_t next_capacity() const FOONATHAN_NOEXCEPT
{
return arena_.next_block_size();
@@ -252,7 +260,7 @@
return static_cast<const char*>(block.memory) + block.size;
}

void reserve_impl(typename pool_type::type &pool, std::size_t capacity)
memory_block reserve_memory(typename pool_type::type &pool, std::size_t capacity)
{
auto mem = stack_.allocate(block_end(), capacity, detail::max_alignment);
if (!mem)
@@ -275,8 +283,7 @@
mem = stack_.allocate(block_end(), capacity, detail::max_alignment);
FOONATHAN_MEMORY_ASSERT(mem);
}
// insert new
pool.insert(mem, capacity);
return {mem, capacity};
}

memory_arena<allocator_type, false> arena_;
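A usage sketch of the reworked allocation paths; the pool type, bucket distribution, and sizes are assumptions:

#include <foonathan/memory/memory_pool_collection.hpp>

void pools_sketch()
{
    using namespace foonathan::memory;
    memory_pool_collection<node_pool, identity_buckets> pools(16u, 4096u);

    auto node = pools.allocate_node(8); // empty pool: memory is reserved from
                                        // the arena stack and inserted first
    auto arr = pools.allocate_array(10, 8);
    // node_pool serves the array from its free list; for small_node_pool the
    // inner allocate(count * node_size) returns nullptr, so the array is
    // carved directly from the arena stack instead
    pools.deallocate_array(arr, 10, 8);
    pools.deallocate_node(node, 8);
}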
29 changes: 13 additions & 16 deletions include/foonathan/memory/memory_stack.hpp
@@ -78,27 +78,24 @@ namespace foonathan { namespace memory
/// \requires \c size and \c alignment must be valid.
void* allocate(std::size_t size, std::size_t alignment)
{
detail::check_allocation_size<bad_allocation_size>(size, next_capacity(), info());

auto fence = detail::debug_fence_size;
auto offset = detail::align_offset(stack_.top() + fence, alignment);

if (fence + offset + size + fence <= std::size_t(block_end() - stack_.top()))
{
stack_.bump(fence, debug_magic::fence_memory);
stack_.bump(offset, debug_magic::alignment_memory);
}
else
if (!stack_.top() || fence + offset + size + fence > std::size_t(block_end() - stack_.top()))
{
// need to grow
auto block = arena_.allocate_block();
FOONATHAN_MEMORY_ASSERT_MSG(fence + size + fence <= block.size, "new block size not big enough");

stack_ = detail::fixed_memory_stack(block.memory);
// no need to align, block should be aligned for maximum
stack_.bump(fence, debug_magic::fence_memory);

// new alignment required for over-aligned types
offset = detail::align_offset(stack_.top() + fence, alignment);

auto needed = fence + offset + size + fence;
detail::check_allocation_size<bad_allocation_size>(needed, block.size, info());
}

FOONATHAN_MEMORY_ASSERT(detail::is_aligned(stack_.top(), alignment));
stack_.bump(fence, debug_magic::fence_memory);
stack_.bump(offset, debug_magic::alignment_memory);
auto mem = stack_.bump_return(size);
stack_.bump(fence, debug_magic::fence_memory);
return mem;
@@ -172,10 +169,10 @@
return std::size_t(block_end() - stack_.top());
}

/// \returns The size of the next memory block after the free list gets empty and the arena grows.
/// \returns The size of the next memory block after the current block is exhausted and the arena grows.
/// This function just forwards to the \ref memory_arena.
/// \note Due to fence memory, alignment buffers and the like this may not be the exact result \ref capacity_left() will return,
/// but it is an upper bound to it.
/// \note All of it is available for the stack to use, but due to fences and alignment buffers,
/// this may not be the exact amount of memory usable for the user.
std::size_t next_capacity() const FOONATHAN_NOEXCEPT
{
return arena_.next_block_size();
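A sketch of the reworked growth path in allocate(); the sizes are assumptions:

#include <foonathan/memory/memory_stack.hpp>

void stack_growth_sketch()
{
    using namespace foonathan::memory;
    memory_stack<> stack(4096u);

    auto a = stack.allocate(1024, 8); // fits into the current block
    auto b = stack.allocate(3500, 8); // does not fit: the arena grows first and
                                      // the size check runs against the size
                                      // of the freshly allocated block
    (void)a; (void)b;
}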
11 changes: 11 additions & 0 deletions src/detail/small_free_list.cpp
@@ -277,6 +277,17 @@ void small_free_memory_list::insert(void *mem, std::size_t size) FOONATHAN_NOEXCEPT
capacity_ += new_nodes;
}

std::size_t small_free_memory_list::usable_size(std::size_t size) const FOONATHAN_NOEXCEPT
{
auto actual_size = node_size_ + 2 * fence_size();
auto total_chunk_size = chunk::memory_offset + actual_size * chunk::max_nodes;
auto no_chunks = size / total_chunk_size;
auto remainder = size % total_chunk_size;

return no_chunks * chunk::max_nodes * actual_size
+ (remainder > chunk::memory_offset ? remainder - chunk::memory_offset : 0u);
}

void* small_free_memory_list::allocate() FOONATHAN_NOEXCEPT
{
auto chunk = find_chunk_impl(1);
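A worked example of the computation above, with assumed values for the internal constants:

// assume node_size_ = 16, fence_size() = 0 (release mode),
// chunk::memory_offset = 16, chunk::max_nodes = 255:
//   actual_size      = 16 + 2 * 0    = 16
//   total_chunk_size = 16 + 16 * 255 = 4096
// for size = 10000:
//   no_chunks = 10000 / 4096 = 2, remainder = 10000 % 4096 = 1808
//   usable    = 2 * 255 * 16 + (1808 - 16) = 8160 + 1792 = 9952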
2 changes: 2 additions & 0 deletions src/memory_arena.cpp
@@ -15,6 +15,8 @@ const std::size_t memory_block_stack::node::div_alignment = sizeof(memory_block_stack::node) / max_alignment;
const std::size_t memory_block_stack::node::mod_offset = sizeof(memory_block_stack::node) % max_alignment != 0u;
const std::size_t memory_block_stack::node::offset = (div_alignment + mod_offset) * max_alignment;

const std::size_t memory_block_stack::implementation_offset = memory_block_stack::node::offset;

void memory_block_stack::push(allocated_mb block) FOONATHAN_NOEXCEPT
{
FOONATHAN_MEMORY_ASSERT(is_aligned(block.memory, max_alignment));
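The definition rounds sizeof(node) up to the next multiple of max_alignment; a worked example with assumed values:

// assume sizeof(memory_block_stack::node) = 24 and max_alignment = 16:
//   div_alignment = 24 / 16        = 1
//   mod_offset    = (24 % 16 != 0) = 1
//   offset        = (1 + 1) * 16   = 32
// implementation_offset is thus the fixup value every inserted block loses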
6 changes: 3 additions & 3 deletions test/memory_pool_collection.cpp
@@ -24,12 +24,12 @@ TEST_CASE("memory_pool_collection", "[pool]")
const auto max_size = 16u;
pools pool(max_size, 1000, alloc);
REQUIRE(pool.max_node_size() == max_size);
REQUIRE(pool.capacity() <= 1000u);
REQUIRE(pool.capacity_left() <= 1000u);
REQUIRE(pool.next_capacity() >= 1000u);
REQUIRE(alloc.no_allocated() == 1u);

for (auto i = 0u; i != max_size; ++i)
REQUIRE(pool.pool_capacity(i) == 0u);
REQUIRE(pool.pool_capacity_left(i) == 0u);

SECTION("normal alloc/dealloc")
{
Expand All @@ -40,7 +40,7 @@ TEST_CASE("memory_pool_collection", "[pool]")
b.push_back(pool.allocate_node(5));
}
REQUIRE(alloc.no_allocated() == 1u);
REQUIRE(pool.capacity() <= 1000u);
REQUIRE(pool.capacity_left() <= 1000u);

std::shuffle(a.begin(), a.end(), std::mt19937{});
std::shuffle(b.begin(), b.end(), std::mt19937{});
14 changes: 14 additions & 0 deletions test/memory_stack.cpp
@@ -74,5 +74,19 @@ TEST_CASE("memory_stack", "[stack]")
REQUIRE(alloc.no_allocated() == 1u);
REQUIRE(alloc.no_deallocated() == 1u);
}
SECTION("move")
{
auto other = detail::move(stack);
auto m = other.top();
other.allocate(10, 1);
REQUIRE(alloc.no_allocated() == 1u);

stack.allocate(10, 1);
REQUIRE(alloc.no_allocated() == 2u);

stack = detail::move(other);
REQUIRE(alloc.no_allocated() == 1u);
stack.unwind(m);
}
}
