diff --git a/src/snmalloc/backend/backend.h b/src/snmalloc/backend/backend.h
index ce5e757ed..f5af27a8c 100644
--- a/src/snmalloc/backend/backend.h
+++ b/src/snmalloc/backend/backend.h
@@ -173,6 +173,14 @@ namespace snmalloc
       local_state.get_object_range()->dealloc_range(arena, size);
     }
 
+    SNMALLOC_FAST_PATH static capptr::Alloc<void>
+    capptr_rederive_alloc(capptr::Alloc<void> a, size_t objsize)
+    {
+      return capptr_to_user_address_control(
+        Aal::capptr_bound<void, capptr::bounds::AllocFull>(
+          Authmap::amplify(a), objsize));
+    }
+
     template<bool potentially_out_of_range = false>
     SNMALLOC_FAST_PATH static const PagemapEntry& get_metaentry(address_t p)
     {
diff --git a/src/snmalloc/mem/corealloc.h b/src/snmalloc/mem/corealloc.h
index 7ffb73db5..9ba5bc7c7 100644
--- a/src/snmalloc/mem/corealloc.h
+++ b/src/snmalloc/mem/corealloc.h
@@ -577,7 +577,7 @@ namespace snmalloc
       }
       else
       {
-        auto nelem = RemoteMessage::ring_size(
+        auto nelem = RemoteMessage::template ring_size<Config>(
          msg,
          freelist::Object::key_root,
          entry.get_slab_metadata()->as_key_tweak(),
@@ -770,8 +770,14 @@ namespace snmalloc
        is_start_of_object(entry.get_sizeclass(), address_cast(msg)),
        "Not deallocating start of an object");
 
-      auto [curr, length] = RemoteMessage::open_free_ring(
-        msg, freelist::Object::key_root, meta->as_key_tweak(), domesticate);
+      size_t objsize = sizeclass_full_to_size(entry.get_sizeclass());
+
+      auto [curr, length] = RemoteMessage::template open_free_ring<Config>(
+        msg,
+        objsize,
+        freelist::Object::key_root,
+        meta->as_key_tweak(),
+        domesticate);
 
       // Update the head and the next pointer in the free list.
       meta->free_queue.append_segment(
diff --git a/src/snmalloc/mem/remoteallocator.h b/src/snmalloc/mem/remoteallocator.h
index 61631888d..cef343dbb 100644
--- a/src/snmalloc/mem/remoteallocator.h
+++ b/src/snmalloc/mem/remoteallocator.h
@@ -37,14 +37,6 @@ namespace snmalloc
       sizeof(free_ring.next_object) >= sizeof(void*),
       "RemoteMessage bitpacking needs sizeof(void*) in next_object");
 
-    static auto decode_next(uintptr_t encoded, capptr::Alloc<RemoteMessage> m)
-    {
-      return capptr_rewild(
-               pointer_offset_signed(
-                 m, static_cast<ptrdiff_t>(encoded) >> MAX_CAPACITY_BITS))
-        .as_static<freelist::Object::T<capptr::bounds::AllocWild>>();
-    }
-
   public:
     static auto emplace_in_alloc(capptr::Alloc<void> alloc)
     {
@@ -69,7 +61,9 @@ namespace snmalloc
         new (last.unsafe_ptr()) RemoteMessage());
       self->free_ring.prev = last_prev;
 
-      // XXX CHERI
+      // XXX On CHERI, we could do a fair bit better if we had a primitive for
+      // extracting and discarding the offset.  That probably beats the dance
+      // done below, but it should work as it stands.
       auto n = freelist::HeadPtr::unsafe_from(
         unsafe_from_uintptr<freelist::Object::T<>>(
@@ -99,9 +93,11 @@ namespace snmalloc
         .as_reinterpret<RemoteMessage>();
     }
 
-    template<typename Domesticator_queue>
-    static std::pair<freelist::HeadPtr, uint16_t> open_free_ring(
+    template<typename Config, typename Domesticator_queue>
+    SNMALLOC_FAST_PATH static std::pair<freelist::HeadPtr, uint16_t>
+    open_free_ring(
       capptr::Alloc<RemoteMessage> m,
+      size_t objsize,
       const FreeListKey& key,
       address_t key_tweak,
       Domesticator_queue domesticate)
@@ -112,7 +108,20 @@ namespace snmalloc
       uint16_t decoded_size = encoded & bits::mask_bits(MAX_CAPACITY_BITS);
       static_assert(sizeof(decoded_size) * 8 > MAX_CAPACITY_BITS);
 
-      auto next = domesticate(decode_next(encoded, m));
+      /*
+       * Derive an out-of-bounds pointer to the next allocation, then use the
+       * authmap to reconstruct an in-bounds version, which we then
+       * immediately bound, rewild, and domesticate (how strange).
+       *
+       * XXX See above re: doing better on CHERI.
+       */
+      auto next = domesticate(
+        capptr_rewild(
+          Config::Backend::capptr_rederive_alloc(
+            pointer_offset_signed(
+              m, static_cast<ptrdiff_t>(encoded) >> MAX_CAPACITY_BITS),
+            objsize))
+          .template as_static<freelist::Object::T<capptr::bounds::AllocWild>>());
 
       if constexpr (mitigations(freelist_backward_edge))
       {
@@ -130,7 +139,7 @@ namespace snmalloc
       return {next.template as_static<freelist::Object::T<>>(), decoded_size};
     }
 
-    template<typename Domesticator_queue>
+    template<typename Config, typename Domesticator_queue>
     static uint16_t ring_size(
       capptr::Alloc<RemoteMessage> m,
       const FreeListKey& key,
@@ -145,7 +154,18 @@ namespace snmalloc
 
       if constexpr (mitigations(freelist_backward_edge))
       {
-        auto next = domesticate(decode_next(encoded, m));
+        /*
+         * Like above, but we don't strictly need to rebound the pointer,
+         * since it's only used internally.  Still, it doesn't hurt to bound
+         * it to the free list linkage.
+         */
+        auto next = domesticate(
+          capptr_rewild(
+            Config::Backend::capptr_rederive_alloc(
+              pointer_offset_signed(
+                m, static_cast<ptrdiff_t>(encoded) >> MAX_CAPACITY_BITS),
+              sizeof(freelist::Object::T<>)))
+            .template as_static<freelist::Object::T<capptr::bounds::AllocWild>>());
         next->check_prev(
           signed_prev(address_cast(m), address_cast(next), key, key_tweak));
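For context: the ring's next_object field bit-packs a signed offset to the first ring element (in the high bits) together with the ring's length (in the low MAX_CAPACITY_BITS), which is what the `encoded & bits::mask_bits(MAX_CAPACITY_BITS)` / `static_cast<ptrdiff_t>(encoded) >> MAX_CAPACITY_BITS` pair above decodes. The sketch below shows only that arithmetic in isolation; it is not snmalloc code, uses an illustrative MAX_CAPACITY_BITS value and plain pointers rather than CapPtr, and omits the CHERI concern the patch actually addresses, namely that the decoded address must be re-derived into a valid in-bounds capability (the job of the new capptr_rederive_alloc).

// Stand-alone sketch of the ring's next_object bit-packing (illustrative only).
// Low MAX_CAPACITY_BITS: ring length; remaining high bits: signed byte offset
// from the message to the first ring element.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <utility>

constexpr unsigned MAX_CAPACITY_BITS = 13; // illustrative value, not snmalloc's

constexpr uintptr_t mask_bits(unsigned n)
{
  return (uintptr_t(1) << n) - 1;
}

// Pack (first - self) and the element count into one word.  The offset is
// assumed small enough to survive the left shift (elements live near the
// message in the same slab).
uintptr_t encode(const void* self, const void* first, uint16_t size)
{
  assert(size < (1u << MAX_CAPACITY_BITS));
  ptrdiff_t offset =
    reinterpret_cast<const char*>(first) - reinterpret_cast<const char*>(self);
  return (static_cast<uintptr_t>(offset) << MAX_CAPACITY_BITS) + size;
}

// Recover the address of the first element and the element count.
std::pair<const void*, uint16_t> decode(const void* self, uintptr_t encoded)
{
  uint16_t size = encoded & mask_bits(MAX_CAPACITY_BITS);
  ptrdiff_t offset = static_cast<ptrdiff_t>(encoded) >> MAX_CAPACITY_BITS;
  return {reinterpret_cast<const char*>(self) + offset, size};
}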