mmap: Remove support for MAP_ALIGNED_CHERI
Kernel reservation management code takes care of alignment (and size)
rounding for CheriABI binaries.  Hybrid binaries that need CHERI
alignment can do their own alignment rounding, and must do their own
length rounding, in code that will already be CHERI-aware.
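For a hybrid (non-CheriABI) binary that previously passed MAP_ALIGNED_CHERI, the rounding described above could look roughly like the sketch below. It reuses the CHERI_REPRESENTABLE_LENGTH, CHERI_REPRESENTABLE_ALIGNMENT, and CHERI_ALIGN_SHIFT macros that appear in the removed kern_mmap() code further down this page; that they are reachable from userspace (e.g. via <cheri/cheric.h>) is an assumption of the sketch, not something this commit states, and the helper name is illustrative.

#include <sys/param.h>
#include <sys/mman.h>
#include <stddef.h>

/*
 * Sketch only: map an anonymous region whose bounds will be
 * representable as a precise capability, doing the length padding and
 * alignment request in the application instead of the kernel.
 * Assumes the CHERI_REPRESENTABLE_* and CHERI_ALIGN_SHIFT macros are
 * visible to userspace (e.g. via <cheri/cheric.h>).
 */
static void *
mmap_cheri_aligned(size_t len)
{
	int flags = MAP_ANON;

	/* Pad the length until the resulting bounds are representable. */
	len = CHERI_REPRESENTABLE_LENGTH(len);

	/* Request extra alignment only when the padded length needs it. */
	if (CHERI_REPRESENTABLE_ALIGNMENT(len) > PAGE_SIZE)
		flags |= MAP_ALIGNED(CHERI_ALIGN_SHIFT(len));

	return (mmap(NULL, len, PROT_READ | PROT_WRITE, flags, -1, 0));
}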
brooksdavis authored and bsdjhb committed Sep 13, 2024
1 parent 91aa338 commit c3d7e5b
Showing 4 changed files with 1 addition and 97 deletions.
7 changes: 0 additions & 7 deletions bin/cheribsdtest/cheribsdtest_vm.c
@@ -893,13 +893,6 @@ CHERIBSDTEST(vm_reservation_align,
"mmap failed to align representable region with requested "
"alignment %lx for %p", align_shift + 1, map);

/* Explicit cheri alignment */
map = CHERIBSDTEST_CHECK_SYSCALL(mmap(NULL, len,
PROT_READ | PROT_WRITE, MAP_ANON | MAP_ALIGNED_CHERI, -1, 0));
CHERIBSDTEST_VERIFY2(((ptraddr_t)(map) & align_mask) == 0,
"mmap failed to align representable region with requested "
"cheri alignment for %p", map);

cheribsdtest_success();
}

25 changes: 0 additions & 25 deletions lib/libsys/mmap.2
@@ -198,23 +198,6 @@ will fail.
The
.Fa n
argument specifies the binary logarithm of the desired alignment.
.It Dv MAP_ALIGNED_CHERI
Align the region as required to allow a CHERI capability to be created.
If a suitable region cannot be found or the address of
.Fa addr
or the length in
.Fa len
is not representable as a precise capability,
.Fn mmap
will fail.
The
.Dv MAP_ALIGNED_CHERI
flag is assumed for CheriABI programs when address space is being
reserved.
On architectures without CHERI support or where all capabilities are
precise,
.Dv MAP_ALIGNED_CHERI
has no effect.
.It Dv MAP_ALIGNED_SUPER
Align the region to maximize the potential use of large
.Pq Dq super
@@ -593,14 +576,6 @@ was specified, but
.Dv MAP_FIXED
was not.
.It Bq Er EINVAL
.Dv MAP_ALIGNED_CHERI
(implied on CheriABI)
was specified and
.Fa addr
or
.Fa size
was not sufficently aligned for the current architecture.
.It Bq Er EINVAL
.Dv MAP_GUARD
was specified, but the
.Fa offset
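The retained MAP_ALIGNED(n) interface described in the mmap.2 text above is what callers fall back on for explicit alignment; n is the binary logarithm of the desired alignment. A minimal usage example requesting a 2 MiB (1 << 21) aligned anonymous mapping follows; the helper name is illustrative.

#include <sys/mman.h>
#include <stddef.h>

/* Request an anonymous mapping aligned to 2 MiB (2^21 bytes). */
static void *
map_2m_aligned(size_t len)
{
	return (mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_ALIGNED(21), -1, 0));
}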
8 changes: 0 additions & 8 deletions sys/sys/mman.h
@@ -120,14 +120,6 @@
#define MAP_ALIGNMENT_MASK MAP_ALIGNED(0xff)
#define MAP_ALIGNED_SUPER MAP_ALIGNED(1) /* align on a superpage */

/*
* CHERI specific flags and alignment constraints.
*
* MAP_ALIGNED_CHERI returns memory aligned appropriately for the requested
* length or fails. Passing an under-rounded length fails.
*/
#define MAP_ALIGNED_CHERI MAP_ALIGNED(2) /* align for CHERI data */

/*
* Flags provided to shm_rename
*/
58 changes: 1 addition & 57 deletions sys/vm/vm_mmap.c
@@ -390,33 +390,6 @@ sys_mmap(struct thread *td, struct mmap_args *uap)
return (EPROT);
}

if ((flags & MAP_ALIGNMENT_MASK) == MAP_ALIGNED_SUPER) {
#if VM_NRESERVLEVEL > 0
/*
* pmap_align_superpage() is a no-op for allocations
* less than a super page so request data alignment
* in that case.
*
* In practice this is a no-op as super-pages are
* precisely representable.
*/
if (uap->len < (1UL << (VM_LEVEL_0_ORDER + PAGE_SHIFT)) &&
CHERI_REPRESENTABLE_ALIGNMENT(uap->len) > (1UL << PAGE_SHIFT)) {
flags &= ~MAP_ALIGNMENT_MASK;
flags |= MAP_ALIGNED_CHERI;
}
#endif
}
else if ((flags & MAP_ALIGNMENT_MASK) != MAP_ALIGNED(0) &&
(flags & MAP_ALIGNMENT_MASK) != MAP_ALIGNED(3) && /* MAP_ALIGNED_CHERI_SEAL */
(flags & MAP_ALIGNMENT_MASK) != MAP_ALIGNED_CHERI) {
/* Reject nonsensical sub-page alignment requests */
if ((flags >> MAP_ALIGNMENT_SHIFT) < PAGE_SHIFT) {
SYSERRCAUSE("subpage alignment request");
return (EINVAL);
}
}

/*
* NOTE: If this architecture requires an alignment constraint, it is
* set at this point. A simple assert is not easy to contruct...
@@ -626,37 +599,8 @@ kern_mmap(struct thread *td, const struct mmap_req *mrp)
if (len > size)
return (ENOMEM);

align = flags & MAP_ALIGNMENT_MASK;
#if !__has_feature(capabilities)
/* In the non-CHERI case, remove the alignment request. */
if (align == MAP_ALIGNED_CHERI) {
flags &= ~MAP_ALIGNMENT_MASK;
align = 0;
}
#else /* __has_feature(capabilities) */
/*
* Convert MAP_ALIGNED_CHERI into explicit alignment
* requests and pad lengths. The combination of alignment (via
* the updated, explicit alignment flags) and padding is required
* for any request that would otherwise be unrepresentable due
* to compressed capability bounds.
*/
if (align == MAP_ALIGNED_CHERI) {
flags &= ~MAP_ALIGNMENT_MASK;
if (CHERI_REPRESENTABLE_ALIGNMENT(size) > PAGE_SIZE) {
flags |= MAP_ALIGNED(CHERI_ALIGN_SHIFT(size));

if (size != CHERI_REPRESENTABLE_LENGTH(size))
size = CHERI_REPRESENTABLE_LENGTH(size);

if (CHERI_ALIGN_MASK(size) != 0)
addr_mask = CHERI_ALIGN_MASK(size);
}
align = flags & MAP_ALIGNMENT_MASK;
}
#endif

/* Ensure alignment is at least a page and fits in a pointer. */
align = flags & MAP_ALIGNMENT_MASK;
if (align != 0 && align != MAP_ALIGNED_SUPER &&
(align >> MAP_ALIGNMENT_SHIFT >= sizeof(void *) * NBBY ||
align >> MAP_ALIGNMENT_SHIFT < PAGE_SHIFT)) {
