diff --git a/clang/include/clang/AST/Type.h b/clang/include/clang/AST/Type.h
index e0ad28331f0a..1c2f7b4cd450 100644
--- a/clang/include/clang/AST/Type.h
+++ b/clang/include/clang/AST/Type.h
@@ -2145,6 +2145,9 @@ class alignas(8) Type : public ExtQualsTypeCommonBase {
   /// pointers.
   bool isCHERICapabilityType(const ASTContext &Context,
                              bool IncludeIntCap = true) const;
+  /// Returns true if this is a struct/union type that contains exactly one
+  /// capability element.
+  bool isSingleCapabilityRecord(const ASTContext &Context) const;
   /// Returns true for __uintcap_t or __intcap_t (and enums/_Atomic with that
   /// underlying type)
   bool isIntCapType() const;
diff --git a/clang/lib/AST/Type.cpp b/clang/lib/AST/Type.cpp
index 65463faf6ae5..449084f3af70 100644
--- a/clang/lib/AST/Type.cpp
+++ b/clang/lib/AST/Type.cpp
@@ -649,6 +649,18 @@ bool Type::isCHERICapabilityType(const ASTContext &Context,
   return false;
 }
 
+bool Type::isSingleCapabilityRecord(const ASTContext &Context) const {
+  if (const auto *RT = getAs<RecordType>())
+    return Context.containsCapabilities(RT->getDecl()) &&
+           Context.getTypeSize(this) ==
+               Context.getTargetInfo().getCHERICapabilityWidth();
+  if (const AtomicType *AT = getAs<AtomicType>())
+    return AT->getValueType()->isSingleCapabilityRecord(Context);
+  if (const AttributedType *AT = getAs<AttributedType>())
+    return AT->getModifiedType()->isSingleCapabilityRecord(Context);
+  return false;
+}
+
 bool Type::isIntCapType() const {
   if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
     return BT->getKind() == BuiltinType::IntCap ||
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index abbed954580c..953a185517b3 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -146,7 +146,8 @@ bool isAtomicStoreOp(AtomicExpr::AtomicOp Op) {
     }
     UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
         AtomicSizeInBits, C.toBits(lvalue.getAlignment()),
-        AtomicTy->isCHERICapabilityType(CGF.CGM.getContext()));
+        AtomicTy->isCHERICapabilityType(CGF.getContext()) ||
+            AtomicTy->isSingleCapabilityRecord(CGF.getContext()));
   }
 
   QualType getAtomicType() const { return AtomicTy; }
@@ -547,7 +548,8 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
   bool PostOpMinMax = false;
   unsigned PostOp = 0;
   QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
-  bool IsCheriCap = AtomicTy->isCHERICapabilityType(CGF.CGM.getContext());
+  bool IsCheriCap = AtomicTy->isCHERICapabilityType(CGF.getContext()) ||
+                    AtomicTy->isSingleCapabilityRecord(CGF.getContext());
 
   switch (E->getOp()) {
   case AtomicExpr::AO__c11_atomic_init:
@@ -811,12 +813,14 @@ static void
 AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                   bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
                   SourceLocation Loc, CharUnits SizeInChars) {
+  bool IsCapTy = ValTy->isCHERICapabilityType(CGF.getContext()) ||
+                 ValTy->isSingleCapabilityRecord(CGF.getContext());
   if (UseOptimizedLibcall) {
     // Load value and pass it to the function directly.
     CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy);
     int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
     llvm::Type *ITy;
-    if (ValTy->isCHERICapabilityType(CGF.getContext())) {
+    if (IsCapTy) {
       ValTy = CGF.getContext().getPointerType(CGF.getContext().VoidTy,
                                               PIK_Capability);
       ITy = CGF.Int8CheriCapTy;
@@ -836,7 +840,7 @@ AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
   } else {
     // Non-optimized functions always take a reference.
     // NB: Capabilities must be passed directly to the optimized libcall
-    assert(!ValTy->isCHERICapabilityType(CGF.getContext()) &&
+    assert(!IsCapTy &&
            "Capabilities should not be passed to the generic libcall");
     Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
              CGF.getContext().VoidPtrTy);
@@ -866,7 +870,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
   uint64_t Size = TInfo.Width.getQuantity();
   unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
 
-  bool IsCheriCap = AtomicTy->isCHERICapabilityType(CGM.getContext());
+  bool IsCheriCap = AtomicTy->isCHERICapabilityType(CGM.getContext()) ||
+                    AtomicTy->isSingleCapabilityRecord(CGM.getContext());
   bool Oversized =
       (!IsCheriCap && getContext().toBits(TInfo.Width) > MaxInlineWidthInBits) ||
       (IsCheriCap && MaxInlineWidthInBits == 0);
@@ -1519,14 +1524,16 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
 
 Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
   llvm::Type *ty;
-  if (AtomicTy->isCHERICapabilityType(CGF.getContext())) {
+  if (AtomicTy->isCHERICapabilityType(CGF.getContext()) ||
+      AtomicTy->isSingleCapabilityRecord(CGF.getContext())) {
     // If capability atomics are natively supported the instruction expects
     // a capability type. We also pass capabilities directly to the atomic
     // libcalls (i.e. always use optimized ones) since this is required to
     // support the RMW operations and special-casing the load/store/xchg to
    // use the generic libcalls (with mutex+memcpy) adds unnecessary complexity.
-    if (!UseLibcall) {
-      // If we aren't using a libcall there is no need to cast to i8*
+    if (!UseLibcall && !AtomicTy->isSingleCapabilityRecord(CGF.getContext())) {
+      // If we aren't using a libcall and aren't using a single-capability
+      // struct, there is no need to cast to i8*
       return CGF.Builder.CreateElementBitCast(
           addr, getAtomicAddress().getElementType());
     }
diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp
index e0aa94a3e031..3a65be11272b 100644
--- a/clang/lib/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CodeGen/TargetInfo.cpp
@@ -11536,11 +11536,7 @@ ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
 
   uint64_t Size = getContext().getTypeSize(Ty);
 
-  bool IsSingleCapRecord = false;
-  if (auto *RT = Ty->getAs<RecordType>())
-    IsSingleCapRecord = Size == getTarget().getCHERICapabilityWidth() &&
-                        getContext().containsCapabilities(RT->getDecl());
-
+  bool IsSingleCapRecord = Ty->isSingleCapabilityRecord(getContext());
   bool IsCapability = Ty->isCHERICapabilityType(getContext()) ||
                       IsSingleCapRecord;
 
@@ -11687,13 +11683,7 @@ Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
 
   auto TInfo = getContext().getTypeInfoInChars(Ty);
 
-  bool IsSingleCapRecord = false;
-  CharUnits CapabilityWidth =
-      CharUnits::fromQuantity(getTarget().getCHERICapabilityWidth() / 8);
-  if (const auto *RT = Ty->getAs<RecordType>())
-    IsSingleCapRecord = TInfo.Width == CapabilityWidth &&
-                        getContext().containsCapabilities(RT->getDecl());
-
+  bool IsSingleCapRecord = Ty->isSingleCapabilityRecord(getContext());
   bool IsCapability = Ty->isCHERICapabilityType(getContext()) ||
                       IsSingleCapRecord;
 
diff --git a/clang/test/CodeGen/cheri/c11-atomic-caps-struct.c b/clang/test/CodeGen/cheri/c11-atomic-caps-struct.c
new file mode 100644
index 000000000000..5a5338353303
--- /dev/null
+++ b/clang/test/CodeGen/cheri/c11-atomic-caps-struct.c
@@ -0,0 +1,343 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature
+// RUN: %riscv64_cheri_cc1 -target-feature +a -std=c11 -o - -emit-llvm -disable-O0-optnone %s \
+// RUN:   | opt -S -mem2reg | FileCheck --check-prefix=HYBRID %s
+// RUN: %riscv64_cheri_purecap_cc1 -target-feature +a -std=c11 -o - -emit-llvm -disable-O0-optnone %s \
+// RUN:   | opt -S -mem2reg | FileCheck --check-prefix=PURECAP %s
+// RUN: %riscv64_cheri_purecap_cc1 -target-feature +a -std=c11 -o - -emit-llvm -disable-O0-optnone %s -no-opaque-pointers \
+// RUN:   | opt -S -mem2reg | FileCheck --check-prefix=PURECAP-TYPED-POINTERS %s
+
+typedef struct capstruct {
+  unsigned __intcap value;
+} capstruct;
+
+// HYBRID-LABEL: define {{[^@]+}}@test_init
+// HYBRID-SAME: (ptr noundef [[F:%.*]], ptr addrspace(200) [[VALUE_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
+// HYBRID-NEXT: entry:
+// HYBRID-NEXT: [[VALUE:%.*]] = alloca [[STRUCT_CAPSTRUCT:%.*]], align 16
+// HYBRID-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_CAPSTRUCT]], ptr [[VALUE]], i32 0, i32 0
+// HYBRID-NEXT: store ptr addrspace(200) [[VALUE_COERCE]], ptr [[COERCE_DIVE]], align 16
+// HYBRID-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[F]], ptr align 16 [[VALUE]], i64 16, i1 false)
+// HYBRID-NEXT: ret void
+//
+// PURECAP-LABEL: define {{[^@]+}}@test_init
+// PURECAP-SAME: (ptr addrspace(200) noundef [[F:%.*]], ptr addrspace(200) [[VALUE_COERCE:%.*]]) addrspace(200) #[[ATTR0:[0-9]+]] {
+// PURECAP-NEXT: entry:
+// PURECAP-NEXT: [[VALUE:%.*]] = alloca [[STRUCT_CAPSTRUCT:%.*]], align 16, addrspace(200)
+// PURECAP-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_CAPSTRUCT]], ptr addrspace(200) [[VALUE]], i32 0, i32 0
+// PURECAP-NEXT: store ptr addrspace(200) [[VALUE_COERCE]], ptr addrspace(200) [[COERCE_DIVE]], align 16
+// PURECAP-NEXT: call void @llvm.memcpy.p200.p200.i64(ptr addrspace(200) align 16 [[F]], ptr addrspace(200) align 16 [[VALUE]], i64 16, i1 false)
+// PURECAP-NEXT: ret void
+//
+// PURECAP-TYPED-POINTERS-LABEL: define {{[^@]+}}@test_init
+// PURECAP-TYPED-POINTERS-SAME: ([[STRUCT_CAPSTRUCT:%.*]] addrspace(200)* noundef [[F:%.*]], i8 addrspace(200)* [[VALUE_COERCE:%.*]]) addrspace(200) #[[ATTR0:[0-9]+]] {
+// PURECAP-TYPED-POINTERS-NEXT: entry:
+// PURECAP-TYPED-POINTERS-NEXT: [[VALUE:%.*]] = alloca [[STRUCT_CAPSTRUCT]], align 16, addrspace(200)
+// PURECAP-TYPED-POINTERS-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_CAPSTRUCT]], [[STRUCT_CAPSTRUCT]] addrspace(200)* [[VALUE]], i32 0, i32 0
+// PURECAP-TYPED-POINTERS-NEXT: store i8 addrspace(200)* [[VALUE_COERCE]], i8 addrspace(200)* addrspace(200)* [[COERCE_DIVE]], align 16
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP0:%.*]] = bitcast [[STRUCT_CAPSTRUCT]] addrspace(200)* [[F]] to i8 addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP1:%.*]] = bitcast [[STRUCT_CAPSTRUCT]] addrspace(200)* [[VALUE]] to i8 addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: call void @llvm.memcpy.p200i8.p200i8.i64(i8 addrspace(200)* align 16 [[TMP0]], i8 addrspace(200)* align 16 [[TMP1]], i64 16, i1 false)
+// PURECAP-TYPED-POINTERS-NEXT: ret void
+//
+void test_init(_Atomic(capstruct) *f, capstruct value) {
+  __c11_atomic_init(f, value);
+}
+
+// HYBRID-LABEL: define {{[^@]+}}@test_load
+// HYBRID-SAME: (ptr noundef [[F:%.*]]) #[[ATTR0]] {
+// HYBRID-NEXT: entry:
+// HYBRID-NEXT: [[RETVAL:%.*]] = alloca [[STRUCT_CAPSTRUCT:%.*]], align 16
+// HYBRID-NEXT: [[ATOMIC_TEMP:%.*]] = alloca [[STRUCT_CAPSTRUCT]], align 16
+// HYBRID-NEXT: [[TMP0:%.*]] = load atomic ptr addrspace(200), ptr [[F]] seq_cst, align 16
+// HYBRID-NEXT: store ptr addrspace(200) [[TMP0]], ptr [[ATOMIC_TEMP]], align 16
+// HYBRID-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[ATOMIC_TEMP]], i64 16, i1 false)
+// HYBRID-NEXT: [[TMP1:%.*]] = load [[STRUCT_CAPSTRUCT]], ptr [[RETVAL]], align 16
+// HYBRID-NEXT: ret [[STRUCT_CAPSTRUCT]] [[TMP1]]
+//
+// PURECAP-LABEL: define {{[^@]+}}@test_load
+// PURECAP-SAME: (ptr addrspace(200) noundef [[F:%.*]]) addrspace(200) #[[ATTR0]] {
+// PURECAP-NEXT: entry:
+// PURECAP-NEXT: [[RETVAL:%.*]] = alloca [[STRUCT_CAPSTRUCT:%.*]], align 16, addrspace(200)
+// PURECAP-NEXT: [[ATOMIC_TEMP:%.*]] = alloca [[STRUCT_CAPSTRUCT]], align 16, addrspace(200)
+// PURECAP-NEXT: [[TMP0:%.*]] = load atomic ptr addrspace(200), ptr addrspace(200) [[F]] seq_cst, align 16
+// PURECAP-NEXT: store ptr addrspace(200) [[TMP0]], ptr addrspace(200) [[ATOMIC_TEMP]], align 16
+// PURECAP-NEXT: call void @llvm.memcpy.p200.p200.i64(ptr addrspace(200) align 16 [[RETVAL]], ptr addrspace(200) align 16 [[ATOMIC_TEMP]], i64 16, i1 false)
+// PURECAP-NEXT: [[TMP1:%.*]] = load [[STRUCT_CAPSTRUCT]], ptr addrspace(200) [[RETVAL]], align 16
+// PURECAP-NEXT: ret [[STRUCT_CAPSTRUCT]] [[TMP1]]
+//
+// PURECAP-TYPED-POINTERS-LABEL: define {{[^@]+}}@test_load
+// PURECAP-TYPED-POINTERS-SAME: ([[STRUCT_CAPSTRUCT:%.*]] addrspace(200)* noundef [[F:%.*]]) addrspace(200) #[[ATTR0]] {
+// PURECAP-TYPED-POINTERS-NEXT: entry:
+// PURECAP-TYPED-POINTERS-NEXT: [[RETVAL:%.*]] = alloca [[STRUCT_CAPSTRUCT]], align 16, addrspace(200)
+// PURECAP-TYPED-POINTERS-NEXT: [[ATOMIC_TEMP:%.*]] = alloca [[STRUCT_CAPSTRUCT]], align 16, addrspace(200)
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP0:%.*]] = bitcast [[STRUCT_CAPSTRUCT]] addrspace(200)* [[F]] to i8 addrspace(200)* addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP1:%.*]] = bitcast [[STRUCT_CAPSTRUCT]] addrspace(200)* [[ATOMIC_TEMP]] to i8 addrspace(200)* addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP2:%.*]] = load atomic i8 addrspace(200)*, i8 addrspace(200)* addrspace(200)* [[TMP0]] seq_cst, align 16
+// PURECAP-TYPED-POINTERS-NEXT: store i8 addrspace(200)* [[TMP2]], i8 addrspace(200)* addrspace(200)* [[TMP1]], align 16
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP3:%.*]] = bitcast i8 addrspace(200)* addrspace(200)* [[TMP1]] to [[STRUCT_CAPSTRUCT]] addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP4:%.*]] = bitcast [[STRUCT_CAPSTRUCT]] addrspace(200)* [[RETVAL]] to i8 addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP5:%.*]] = bitcast [[STRUCT_CAPSTRUCT]] addrspace(200)* [[TMP3]] to i8 addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: call void @llvm.memcpy.p200i8.p200i8.i64(i8 addrspace(200)* align 16 [[TMP4]], i8 addrspace(200)* align 16 [[TMP5]], i64 16, i1 false)
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP6:%.*]] = load [[STRUCT_CAPSTRUCT]], [[STRUCT_CAPSTRUCT]] addrspace(200)* [[RETVAL]], align 16
+// PURECAP-TYPED-POINTERS-NEXT: ret [[STRUCT_CAPSTRUCT]] [[TMP6]]
+//
+capstruct test_load(_Atomic(capstruct) *f) {
+  return __c11_atomic_load(f, __ATOMIC_SEQ_CST);
+}
+
+// HYBRID-LABEL: define {{[^@]+}}@test_store
+// HYBRID-SAME: (ptr noundef [[F:%.*]], ptr addrspace(200) [[VALUE_COERCE:%.*]]) #[[ATTR0]] {
+// HYBRID-NEXT: entry:
+// HYBRID-NEXT: [[VALUE:%.*]] = alloca [[STRUCT_CAPSTRUCT:%.*]], align 16
+// HYBRID-NEXT: [[DOTATOMICTMP:%.*]] = alloca [[STRUCT_CAPSTRUCT]], align 16
+// HYBRID-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_CAPSTRUCT]], ptr [[VALUE]], i32 0, i32 0
+// HYBRID-NEXT: store ptr addrspace(200) [[VALUE_COERCE]], ptr [[COERCE_DIVE]], align 16
+// HYBRID-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[DOTATOMICTMP]], ptr align 16 [[VALUE]], i64 16, i1 false)
+// HYBRID-NEXT: [[TMP0:%.*]] = load ptr addrspace(200), ptr [[DOTATOMICTMP]], align 16
+// HYBRID-NEXT: store atomic ptr addrspace(200) [[TMP0]], ptr [[F]] seq_cst, align 16
+// HYBRID-NEXT: ret void
+//
+// PURECAP-LABEL: define {{[^@]+}}@test_store
+// PURECAP-SAME: (ptr addrspace(200) noundef [[F:%.*]], ptr addrspace(200) [[VALUE_COERCE:%.*]]) addrspace(200) #[[ATTR0]] {
+// PURECAP-NEXT: entry:
+// PURECAP-NEXT: [[VALUE:%.*]] = alloca [[STRUCT_CAPSTRUCT:%.*]], align 16, addrspace(200)
+// PURECAP-NEXT: [[DOTATOMICTMP:%.*]] = alloca [[STRUCT_CAPSTRUCT]], align 16, addrspace(200)
+// PURECAP-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_CAPSTRUCT]], ptr addrspace(200) [[VALUE]], i32 0, i32 0
+// PURECAP-NEXT: store ptr addrspace(200) [[VALUE_COERCE]], ptr addrspace(200) [[COERCE_DIVE]], align 16
+// PURECAP-NEXT: call void @llvm.memcpy.p200.p200.i64(ptr addrspace(200) align 16 [[DOTATOMICTMP]], ptr addrspace(200) align 16 [[VALUE]], i64 16, i1 false)
+// PURECAP-NEXT: [[TMP0:%.*]] = load ptr addrspace(200), ptr addrspace(200) [[DOTATOMICTMP]], align 16
+// PURECAP-NEXT: store atomic ptr addrspace(200) [[TMP0]], ptr addrspace(200) [[F]] seq_cst, align 16
+// PURECAP-NEXT: ret void
+//
+// PURECAP-TYPED-POINTERS-LABEL: define {{[^@]+}}@test_store
+// PURECAP-TYPED-POINTERS-SAME: ([[STRUCT_CAPSTRUCT:%.*]] addrspace(200)* noundef [[F:%.*]], i8 addrspace(200)* [[VALUE_COERCE:%.*]]) addrspace(200) #[[ATTR0]] {
+// PURECAP-TYPED-POINTERS-NEXT: entry:
+// PURECAP-TYPED-POINTERS-NEXT: [[VALUE:%.*]] = alloca [[STRUCT_CAPSTRUCT]], align 16, addrspace(200)
+// PURECAP-TYPED-POINTERS-NEXT: [[DOTATOMICTMP:%.*]] = alloca [[STRUCT_CAPSTRUCT]], align 16, addrspace(200)
+// PURECAP-TYPED-POINTERS-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_CAPSTRUCT]], [[STRUCT_CAPSTRUCT]] addrspace(200)* [[VALUE]], i32 0, i32 0
+// PURECAP-TYPED-POINTERS-NEXT: store i8 addrspace(200)* [[VALUE_COERCE]], i8 addrspace(200)* addrspace(200)* [[COERCE_DIVE]], align 16
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP0:%.*]] = bitcast [[STRUCT_CAPSTRUCT]] addrspace(200)* [[DOTATOMICTMP]] to i8 addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP1:%.*]] = bitcast [[STRUCT_CAPSTRUCT]] addrspace(200)* [[VALUE]] to i8 addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: call void @llvm.memcpy.p200i8.p200i8.i64(i8 addrspace(200)* align 16 [[TMP0]], i8 addrspace(200)* align 16 [[TMP1]], i64 16, i1 false)
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP2:%.*]] = bitcast [[STRUCT_CAPSTRUCT]] addrspace(200)* [[F]] to i8 addrspace(200)* addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP3:%.*]] = bitcast [[STRUCT_CAPSTRUCT]] addrspace(200)* [[DOTATOMICTMP]] to i8 addrspace(200)* addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP4:%.*]] = load i8 addrspace(200)*, i8 addrspace(200)* addrspace(200)* [[TMP3]], align 16
+// PURECAP-TYPED-POINTERS-NEXT: store atomic i8 addrspace(200)* [[TMP4]], i8 addrspace(200)* addrspace(200)* [[TMP2]] seq_cst, align 16
+// PURECAP-TYPED-POINTERS-NEXT: ret void
+//
+void test_store(_Atomic(capstruct) *f, capstruct value) {
+  __c11_atomic_store(f, value, __ATOMIC_SEQ_CST);
+}
+
+// HYBRID-LABEL: define {{[^@]+}}@test_xchg
+// HYBRID-SAME: (ptr noundef [[F:%.*]], ptr addrspace(200) [[VALUE_COERCE:%.*]]) #[[ATTR0]] {
+// HYBRID-NEXT: entry:
+// HYBRID-NEXT: [[RETVAL:%.*]] = alloca [[STRUCT_CAPSTRUCT:%.*]], align 16
+// HYBRID-NEXT: [[VALUE:%.*]] = alloca [[STRUCT_CAPSTRUCT]], align 16
+// HYBRID-NEXT: [[DOTATOMICTMP:%.*]] = alloca [[STRUCT_CAPSTRUCT]], align 16
+// HYBRID-NEXT: [[ATOMIC_TEMP:%.*]] = alloca [[STRUCT_CAPSTRUCT]], align 16
+// HYBRID-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_CAPSTRUCT]], ptr [[VALUE]], i32 0, i32 0
+// HYBRID-NEXT: store ptr addrspace(200) [[VALUE_COERCE]], ptr [[COERCE_DIVE]], align 16
+// HYBRID-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[DOTATOMICTMP]], ptr align 16 [[VALUE]], i64 16, i1 false)
+// HYBRID-NEXT: [[TMP0:%.*]] = load ptr addrspace(200), ptr [[DOTATOMICTMP]], align 16
+// HYBRID-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[F]], ptr addrspace(200) [[TMP0]] seq_cst, align 16
+// HYBRID-NEXT: store ptr addrspace(200) [[TMP1]], ptr [[ATOMIC_TEMP]], align 16
+// HYBRID-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[RETVAL]], ptr align 16 [[ATOMIC_TEMP]], i64 16, i1 false)
+// HYBRID-NEXT: [[TMP2:%.*]] = load [[STRUCT_CAPSTRUCT]], ptr [[RETVAL]], align 16
+// HYBRID-NEXT: ret [[STRUCT_CAPSTRUCT]] [[TMP2]]
+//
+// PURECAP-LABEL: define {{[^@]+}}@test_xchg
+// PURECAP-SAME: (ptr addrspace(200) noundef [[F:%.*]], ptr addrspace(200) [[VALUE_COERCE:%.*]]) addrspace(200) #[[ATTR0]] {
+// PURECAP-NEXT: entry:
+// PURECAP-NEXT: [[RETVAL:%.*]] = alloca [[STRUCT_CAPSTRUCT:%.*]], align 16, addrspace(200)
+// PURECAP-NEXT: [[VALUE:%.*]] = alloca [[STRUCT_CAPSTRUCT]], align 16, addrspace(200)
+// PURECAP-NEXT: [[DOTATOMICTMP:%.*]] = alloca [[STRUCT_CAPSTRUCT]], align 16, addrspace(200)
+// PURECAP-NEXT: [[ATOMIC_TEMP:%.*]] = alloca [[STRUCT_CAPSTRUCT]], align 16, addrspace(200)
+// PURECAP-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_CAPSTRUCT]], ptr addrspace(200) [[VALUE]], i32 0, i32 0
+// PURECAP-NEXT: store ptr addrspace(200) [[VALUE_COERCE]], ptr addrspace(200) [[COERCE_DIVE]], align 16
+// PURECAP-NEXT: call void @llvm.memcpy.p200.p200.i64(ptr addrspace(200) align 16 [[DOTATOMICTMP]], ptr addrspace(200) align 16 [[VALUE]], i64 16, i1 false)
+// PURECAP-NEXT: [[TMP0:%.*]] = load ptr addrspace(200), ptr addrspace(200) [[DOTATOMICTMP]], align 16
+// PURECAP-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr addrspace(200) [[F]], ptr addrspace(200) [[TMP0]] seq_cst, align 16
+// PURECAP-NEXT: store ptr addrspace(200) [[TMP1]], ptr addrspace(200) [[ATOMIC_TEMP]], align 16
+// PURECAP-NEXT: call void @llvm.memcpy.p200.p200.i64(ptr addrspace(200) align 16 [[RETVAL]], ptr addrspace(200) align 16 [[ATOMIC_TEMP]], i64 16, i1 false)
+// PURECAP-NEXT: [[TMP2:%.*]] = load [[STRUCT_CAPSTRUCT]], ptr addrspace(200) [[RETVAL]], align 16
+// PURECAP-NEXT: ret [[STRUCT_CAPSTRUCT]] [[TMP2]]
+//
+// PURECAP-TYPED-POINTERS-LABEL: define {{[^@]+}}@test_xchg
+// PURECAP-TYPED-POINTERS-SAME: ([[STRUCT_CAPSTRUCT:%.*]] addrspace(200)* noundef [[F:%.*]], i8 addrspace(200)* [[VALUE_COERCE:%.*]]) addrspace(200) #[[ATTR0]] {
+// PURECAP-TYPED-POINTERS-NEXT: entry:
+// PURECAP-TYPED-POINTERS-NEXT: [[RETVAL:%.*]] = alloca [[STRUCT_CAPSTRUCT]], align 16, addrspace(200)
+// PURECAP-TYPED-POINTERS-NEXT: [[VALUE:%.*]] = alloca [[STRUCT_CAPSTRUCT]], align 16, addrspace(200)
+// PURECAP-TYPED-POINTERS-NEXT: [[DOTATOMICTMP:%.*]] = alloca [[STRUCT_CAPSTRUCT]], align 16, addrspace(200)
+// PURECAP-TYPED-POINTERS-NEXT: [[ATOMIC_TEMP:%.*]] = alloca [[STRUCT_CAPSTRUCT]], align 16, addrspace(200)
+// PURECAP-TYPED-POINTERS-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_CAPSTRUCT]], [[STRUCT_CAPSTRUCT]] addrspace(200)* [[VALUE]], i32 0, i32 0
+// PURECAP-TYPED-POINTERS-NEXT: store i8 addrspace(200)* [[VALUE_COERCE]], i8 addrspace(200)* addrspace(200)* [[COERCE_DIVE]], align 16
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP0:%.*]] = bitcast [[STRUCT_CAPSTRUCT]] addrspace(200)* [[DOTATOMICTMP]] to i8 addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP1:%.*]] = bitcast [[STRUCT_CAPSTRUCT]] addrspace(200)* [[VALUE]] to i8 addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: call void @llvm.memcpy.p200i8.p200i8.i64(i8 addrspace(200)* align 16 [[TMP0]], i8 addrspace(200)* align 16 [[TMP1]], i64 16, i1 false)
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP2:%.*]] = bitcast [[STRUCT_CAPSTRUCT]] addrspace(200)* [[F]] to i8 addrspace(200)* addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP3:%.*]] = bitcast [[STRUCT_CAPSTRUCT]] addrspace(200)* [[DOTATOMICTMP]] to i8 addrspace(200)* addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP4:%.*]] = bitcast [[STRUCT_CAPSTRUCT]] addrspace(200)* [[ATOMIC_TEMP]] to i8 addrspace(200)* addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP5:%.*]] = load i8 addrspace(200)*, i8 addrspace(200)* addrspace(200)* [[TMP3]], align 16
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP6:%.*]] = atomicrmw xchg i8 addrspace(200)* addrspace(200)* [[TMP2]], i8 addrspace(200)* [[TMP5]] seq_cst, align 16
+// PURECAP-TYPED-POINTERS-NEXT: store i8 addrspace(200)* [[TMP6]], i8 addrspace(200)* addrspace(200)* [[TMP4]], align 16
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP7:%.*]] = bitcast i8 addrspace(200)* addrspace(200)* [[TMP4]] to [[STRUCT_CAPSTRUCT]] addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP8:%.*]] = bitcast [[STRUCT_CAPSTRUCT]] addrspace(200)* [[RETVAL]] to i8 addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP9:%.*]] = bitcast [[STRUCT_CAPSTRUCT]] addrspace(200)* [[TMP7]] to i8 addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: call void @llvm.memcpy.p200i8.p200i8.i64(i8 addrspace(200)* align 16 [[TMP8]], i8 addrspace(200)* align 16 [[TMP9]], i64 16, i1 false)
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP10:%.*]] = load [[STRUCT_CAPSTRUCT]], [[STRUCT_CAPSTRUCT]] addrspace(200)* [[RETVAL]], align 16
+// PURECAP-TYPED-POINTERS-NEXT: ret [[STRUCT_CAPSTRUCT]] [[TMP10]]
+//
+capstruct test_xchg(_Atomic(capstruct) *f, capstruct value) {
+  return __c11_atomic_exchange(f, value, __ATOMIC_SEQ_CST);
+}
+
+// HYBRID-LABEL: define {{[^@]+}}@test_cmpxchg_weak
+// HYBRID-SAME: (ptr noundef [[F:%.*]], ptr noundef [[EXP:%.*]], ptr addrspace(200) [[NEW_COERCE:%.*]]) #[[ATTR0]] {
+// HYBRID-NEXT: entry:
+// HYBRID-NEXT: [[NEW:%.*]] = alloca [[STRUCT_CAPSTRUCT:%.*]], align 16
+// HYBRID-NEXT: [[DOTATOMICTMP:%.*]] = alloca [[STRUCT_CAPSTRUCT]], align 16
+// HYBRID-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_CAPSTRUCT]], ptr [[NEW]], i32 0, i32 0
+// HYBRID-NEXT: store ptr addrspace(200) [[NEW_COERCE]], ptr [[COERCE_DIVE]], align 16
+// HYBRID-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[DOTATOMICTMP]], ptr align 16 [[NEW]], i64 16, i1 false)
+// HYBRID-NEXT: [[TMP0:%.*]] = load ptr addrspace(200), ptr [[EXP]], align 16
+// HYBRID-NEXT: [[TMP1:%.*]] = load ptr addrspace(200), ptr [[DOTATOMICTMP]], align 16
+// HYBRID-NEXT: [[TMP2:%.*]] = cmpxchg weak ptr [[F]], ptr addrspace(200) [[TMP0]], ptr addrspace(200) [[TMP1]] monotonic monotonic, align 16
+// HYBRID-NEXT: [[TMP3:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP2]], 0
+// HYBRID-NEXT: [[TMP4:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP2]], 1
+// HYBRID-NEXT: br i1 [[TMP4]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
+// HYBRID: cmpxchg.store_expected:
+// HYBRID-NEXT: store ptr addrspace(200) [[TMP3]], ptr [[EXP]], align 16
+// HYBRID-NEXT: br label [[CMPXCHG_CONTINUE]]
+// HYBRID: cmpxchg.continue:
+// HYBRID-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP4]] to i8
+// HYBRID-NEXT: [[TOBOOL:%.*]] = trunc i8 [[FROMBOOL]] to i1
+// HYBRID-NEXT: ret i1 [[TOBOOL]]
+//
+// PURECAP-LABEL: define {{[^@]+}}@test_cmpxchg_weak
+// PURECAP-SAME: (ptr addrspace(200) noundef [[F:%.*]], ptr addrspace(200) noundef [[EXP:%.*]], ptr addrspace(200) [[NEW_COERCE:%.*]]) addrspace(200) #[[ATTR0]] {
+// PURECAP-NEXT: entry:
+// PURECAP-NEXT: [[NEW:%.*]] = alloca [[STRUCT_CAPSTRUCT:%.*]], align 16, addrspace(200)
+// PURECAP-NEXT: [[DOTATOMICTMP:%.*]] = alloca [[STRUCT_CAPSTRUCT]], align 16, addrspace(200)
+// PURECAP-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_CAPSTRUCT]], ptr addrspace(200) [[NEW]], i32 0, i32 0
+// PURECAP-NEXT: store ptr addrspace(200) [[NEW_COERCE]], ptr addrspace(200) [[COERCE_DIVE]], align 16
+// PURECAP-NEXT: call void @llvm.memcpy.p200.p200.i64(ptr addrspace(200) align 16 [[DOTATOMICTMP]], ptr addrspace(200) align 16 [[NEW]], i64 16, i1 false)
+// PURECAP-NEXT: [[TMP0:%.*]] = load ptr addrspace(200), ptr addrspace(200) [[EXP]], align 16
+// PURECAP-NEXT: [[TMP1:%.*]] = load ptr addrspace(200), ptr addrspace(200) [[DOTATOMICTMP]], align 16
+// PURECAP-NEXT: [[TMP2:%.*]] = cmpxchg weak ptr addrspace(200) [[F]], ptr addrspace(200) [[TMP0]], ptr addrspace(200) [[TMP1]] monotonic monotonic, align 16
+// PURECAP-NEXT: [[TMP3:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP2]], 0
+// PURECAP-NEXT: [[TMP4:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP2]], 1
+// PURECAP-NEXT: br i1 [[TMP4]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
+// PURECAP: cmpxchg.store_expected:
+// PURECAP-NEXT: store ptr addrspace(200) [[TMP3]], ptr addrspace(200) [[EXP]], align 16
+// PURECAP-NEXT: br label [[CMPXCHG_CONTINUE]]
+// PURECAP: cmpxchg.continue:
+// PURECAP-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP4]] to i8
+// PURECAP-NEXT: [[TOBOOL:%.*]] = trunc i8 [[FROMBOOL]] to i1
+// PURECAP-NEXT: ret i1 [[TOBOOL]]
+//
+// PURECAP-TYPED-POINTERS-LABEL: define {{[^@]+}}@test_cmpxchg_weak
+// PURECAP-TYPED-POINTERS-SAME: ([[STRUCT_CAPSTRUCT:%.*]] addrspace(200)* noundef [[F:%.*]], [[STRUCT_CAPSTRUCT]] addrspace(200)* noundef [[EXP:%.*]], i8 addrspace(200)* [[NEW_COERCE:%.*]]) addrspace(200) #[[ATTR0]] {
+// PURECAP-TYPED-POINTERS-NEXT: entry:
+// PURECAP-TYPED-POINTERS-NEXT: [[NEW:%.*]] = alloca [[STRUCT_CAPSTRUCT]], align 16, addrspace(200)
+// PURECAP-TYPED-POINTERS-NEXT: [[DOTATOMICTMP:%.*]] = alloca [[STRUCT_CAPSTRUCT]], align 16, addrspace(200)
+// PURECAP-TYPED-POINTERS-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_CAPSTRUCT]], [[STRUCT_CAPSTRUCT]] addrspace(200)* [[NEW]], i32 0, i32 0
+// PURECAP-TYPED-POINTERS-NEXT: store i8 addrspace(200)* [[NEW_COERCE]], i8 addrspace(200)* addrspace(200)* [[COERCE_DIVE]], align 16
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP0:%.*]] = bitcast [[STRUCT_CAPSTRUCT]] addrspace(200)* [[DOTATOMICTMP]] to i8 addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP1:%.*]] = bitcast [[STRUCT_CAPSTRUCT]] addrspace(200)* [[NEW]] to i8 addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: call void @llvm.memcpy.p200i8.p200i8.i64(i8 addrspace(200)* align 16 [[TMP0]], i8 addrspace(200)* align 16 [[TMP1]], i64 16, i1 false)
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP2:%.*]] = bitcast [[STRUCT_CAPSTRUCT]] addrspace(200)* [[F]] to i8 addrspace(200)* addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP3:%.*]] = bitcast [[STRUCT_CAPSTRUCT]] addrspace(200)* [[EXP]] to i8 addrspace(200)* addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP4:%.*]] = bitcast [[STRUCT_CAPSTRUCT]] addrspace(200)* [[DOTATOMICTMP]] to i8 addrspace(200)* addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP5:%.*]] = load i8 addrspace(200)*, i8 addrspace(200)* addrspace(200)* [[TMP3]], align 16
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP6:%.*]] = load i8 addrspace(200)*, i8 addrspace(200)* addrspace(200)* [[TMP4]], align 16
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP7:%.*]] = cmpxchg weak i8 addrspace(200)* addrspace(200)* [[TMP2]], i8 addrspace(200)* [[TMP5]], i8 addrspace(200)* [[TMP6]] monotonic monotonic, align 16
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP8:%.*]] = extractvalue { i8 addrspace(200)*, i1 } [[TMP7]], 0
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP9:%.*]] = extractvalue { i8 addrspace(200)*, i1 } [[TMP7]], 1
+// PURECAP-TYPED-POINTERS-NEXT: br i1 [[TMP9]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
+// PURECAP-TYPED-POINTERS: cmpxchg.store_expected:
+// PURECAP-TYPED-POINTERS-NEXT: store i8 addrspace(200)* [[TMP8]], i8 addrspace(200)* addrspace(200)* [[TMP3]], align 16
+// PURECAP-TYPED-POINTERS-NEXT: br label [[CMPXCHG_CONTINUE]]
+// PURECAP-TYPED-POINTERS: cmpxchg.continue:
+// PURECAP-TYPED-POINTERS-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP9]] to i8
+// PURECAP-TYPED-POINTERS-NEXT: [[TOBOOL:%.*]] = trunc i8 [[FROMBOOL]] to i1
+// PURECAP-TYPED-POINTERS-NEXT: ret i1 [[TOBOOL]]
+//
+_Bool test_cmpxchg_weak(_Atomic(capstruct) *f, capstruct *exp, capstruct new) {
+  return __c11_atomic_compare_exchange_weak(f, exp, new, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+}
+
+// HYBRID-LABEL: define {{[^@]+}}@test_cmpxchg_strong
+// HYBRID-SAME: (ptr noundef [[F:%.*]], ptr noundef [[EXP:%.*]], ptr addrspace(200) [[NEW_COERCE:%.*]]) #[[ATTR0]] {
+// HYBRID-NEXT: entry:
+// HYBRID-NEXT: [[NEW:%.*]] = alloca [[STRUCT_CAPSTRUCT:%.*]], align 16
+// HYBRID-NEXT: [[DOTATOMICTMP:%.*]] = alloca [[STRUCT_CAPSTRUCT]], align 16
+// HYBRID-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_CAPSTRUCT]], ptr [[NEW]], i32 0, i32 0
+// HYBRID-NEXT: store ptr addrspace(200) [[NEW_COERCE]], ptr [[COERCE_DIVE]], align 16
+// HYBRID-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[DOTATOMICTMP]], ptr align 16 [[NEW]], i64 16, i1 false)
+// HYBRID-NEXT: [[TMP0:%.*]] = load ptr addrspace(200), ptr [[EXP]], align 16
+// HYBRID-NEXT: [[TMP1:%.*]] = load ptr addrspace(200), ptr [[DOTATOMICTMP]], align 16
+// HYBRID-NEXT: [[TMP2:%.*]] = cmpxchg ptr [[F]], ptr addrspace(200) [[TMP0]], ptr addrspace(200) [[TMP1]] monotonic monotonic, align 16
+// HYBRID-NEXT: [[TMP3:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP2]], 0
+// HYBRID-NEXT: [[TMP4:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP2]], 1
+// HYBRID-NEXT: br i1 [[TMP4]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
+// HYBRID: cmpxchg.store_expected:
+// HYBRID-NEXT: store ptr addrspace(200) [[TMP3]], ptr [[EXP]], align 16
+// HYBRID-NEXT: br label [[CMPXCHG_CONTINUE]]
+// HYBRID: cmpxchg.continue:
+// HYBRID-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP4]] to i8
+// HYBRID-NEXT: [[TOBOOL:%.*]] = trunc i8 [[FROMBOOL]] to i1
+// HYBRID-NEXT: ret i1 [[TOBOOL]]
+//
+// PURECAP-LABEL: define {{[^@]+}}@test_cmpxchg_strong
+// PURECAP-SAME: (ptr addrspace(200) noundef [[F:%.*]], ptr addrspace(200) noundef [[EXP:%.*]], ptr addrspace(200) [[NEW_COERCE:%.*]]) addrspace(200) #[[ATTR0]] {
+// PURECAP-NEXT: entry:
+// PURECAP-NEXT: [[NEW:%.*]] = alloca [[STRUCT_CAPSTRUCT:%.*]], align 16, addrspace(200)
+// PURECAP-NEXT: [[DOTATOMICTMP:%.*]] = alloca [[STRUCT_CAPSTRUCT]], align 16, addrspace(200)
+// PURECAP-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_CAPSTRUCT]], ptr addrspace(200) [[NEW]], i32 0, i32 0
+// PURECAP-NEXT: store ptr addrspace(200) [[NEW_COERCE]], ptr addrspace(200) [[COERCE_DIVE]], align 16
+// PURECAP-NEXT: call void @llvm.memcpy.p200.p200.i64(ptr addrspace(200) align 16 [[DOTATOMICTMP]], ptr addrspace(200) align 16 [[NEW]], i64 16, i1 false)
+// PURECAP-NEXT: [[TMP0:%.*]] = load ptr addrspace(200), ptr addrspace(200) [[EXP]], align 16
+// PURECAP-NEXT: [[TMP1:%.*]] = load ptr addrspace(200), ptr addrspace(200) [[DOTATOMICTMP]], align 16
+// PURECAP-NEXT: [[TMP2:%.*]] = cmpxchg ptr addrspace(200) [[F]], ptr addrspace(200) [[TMP0]], ptr addrspace(200) [[TMP1]] monotonic monotonic, align 16
+// PURECAP-NEXT: [[TMP3:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP2]], 0
+// PURECAP-NEXT: [[TMP4:%.*]] = extractvalue { ptr addrspace(200), i1 } [[TMP2]], 1
+// PURECAP-NEXT: br i1 [[TMP4]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
+// PURECAP: cmpxchg.store_expected:
+// PURECAP-NEXT: store ptr addrspace(200) [[TMP3]], ptr addrspace(200) [[EXP]], align 16
+// PURECAP-NEXT: br label [[CMPXCHG_CONTINUE]]
+// PURECAP: cmpxchg.continue:
+// PURECAP-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP4]] to i8
+// PURECAP-NEXT: [[TOBOOL:%.*]] = trunc i8 [[FROMBOOL]] to i1
+// PURECAP-NEXT: ret i1 [[TOBOOL]]
+//
+// PURECAP-TYPED-POINTERS-LABEL: define {{[^@]+}}@test_cmpxchg_strong
+// PURECAP-TYPED-POINTERS-SAME: ([[STRUCT_CAPSTRUCT:%.*]] addrspace(200)* noundef [[F:%.*]], [[STRUCT_CAPSTRUCT]] addrspace(200)* noundef [[EXP:%.*]], i8 addrspace(200)* [[NEW_COERCE:%.*]]) addrspace(200) #[[ATTR0]] {
+// PURECAP-TYPED-POINTERS-NEXT: entry:
+// PURECAP-TYPED-POINTERS-NEXT: [[NEW:%.*]] = alloca [[STRUCT_CAPSTRUCT]], align 16, addrspace(200)
+// PURECAP-TYPED-POINTERS-NEXT: [[DOTATOMICTMP:%.*]] = alloca [[STRUCT_CAPSTRUCT]], align 16, addrspace(200)
+// PURECAP-TYPED-POINTERS-NEXT: [[COERCE_DIVE:%.*]] = getelementptr inbounds [[STRUCT_CAPSTRUCT]], [[STRUCT_CAPSTRUCT]] addrspace(200)* [[NEW]], i32 0, i32 0
+// PURECAP-TYPED-POINTERS-NEXT: store i8 addrspace(200)* [[NEW_COERCE]], i8 addrspace(200)* addrspace(200)* [[COERCE_DIVE]], align 16
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP0:%.*]] = bitcast [[STRUCT_CAPSTRUCT]] addrspace(200)* [[DOTATOMICTMP]] to i8 addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP1:%.*]] = bitcast [[STRUCT_CAPSTRUCT]] addrspace(200)* [[NEW]] to i8 addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: call void @llvm.memcpy.p200i8.p200i8.i64(i8 addrspace(200)* align 16 [[TMP0]], i8 addrspace(200)* align 16 [[TMP1]], i64 16, i1 false)
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP2:%.*]] = bitcast [[STRUCT_CAPSTRUCT]] addrspace(200)* [[F]] to i8 addrspace(200)* addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP3:%.*]] = bitcast [[STRUCT_CAPSTRUCT]] addrspace(200)* [[EXP]] to i8 addrspace(200)* addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP4:%.*]] = bitcast [[STRUCT_CAPSTRUCT]] addrspace(200)* [[DOTATOMICTMP]] to i8 addrspace(200)* addrspace(200)*
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP5:%.*]] = load i8 addrspace(200)*, i8 addrspace(200)* addrspace(200)* [[TMP3]], align 16
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP6:%.*]] = load i8 addrspace(200)*, i8 addrspace(200)* addrspace(200)* [[TMP4]], align 16
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP7:%.*]] = cmpxchg i8 addrspace(200)* addrspace(200)* [[TMP2]], i8 addrspace(200)* [[TMP5]], i8 addrspace(200)* [[TMP6]] monotonic monotonic, align 16
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP8:%.*]] = extractvalue { i8 addrspace(200)*, i1 } [[TMP7]], 0
+// PURECAP-TYPED-POINTERS-NEXT: [[TMP9:%.*]] = extractvalue { i8 addrspace(200)*, i1 } [[TMP7]], 1
+// PURECAP-TYPED-POINTERS-NEXT: br i1 [[TMP9]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
+// PURECAP-TYPED-POINTERS: cmpxchg.store_expected:
+// PURECAP-TYPED-POINTERS-NEXT: store i8 addrspace(200)* [[TMP8]], i8 addrspace(200)* addrspace(200)* [[TMP3]], align 16
+// PURECAP-TYPED-POINTERS-NEXT: br label [[CMPXCHG_CONTINUE]]
+// PURECAP-TYPED-POINTERS: cmpxchg.continue:
+// PURECAP-TYPED-POINTERS-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP9]] to i8
+// PURECAP-TYPED-POINTERS-NEXT: [[TOBOOL:%.*]] = trunc i8 [[FROMBOOL]] to i1
+// PURECAP-TYPED-POINTERS-NEXT: ret i1 [[TOBOOL]]
+//
+_Bool test_cmpxchg_strong(_Atomic(capstruct) *f, capstruct *exp, capstruct new) {
+  return __c11_atomic_compare_exchange_strong(f, exp, new, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+}
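To illustrate what this change enables at the source level: a struct wrapping exactly one capability now behaves like a bare capability for C11 atomics, so whole-struct operations lower to native capability instructions (or the optimized capability libcalls) instead of the generic mutex-based __atomic_* libcalls. The sketch below is illustrative only, not part of the patch or its tests; it assumes a CHERI C compiler (such as the riscv64 purecap configuration exercised above), and the names cap_box, slot, and try_publish are invented for the example.

/* Illustrative sketch, assuming a CHERI target with capability atomics. */
#include <stdatomic.h>
#include <stdbool.h>

typedef struct cap_box {    /* hypothetical name */
  unsigned __intcap value;  /* exactly one capability => single-capability record */
} cap_box;

static _Atomic(cap_box) slot;

/* With this patch, the whole-struct compare-and-swap below compiles to a
   native capability cmpxchg (compare the test_cmpxchg_strong checks above)
   rather than taking the generic locked libcall path. */
bool try_publish(cap_box *expected, cap_box desired) {
  return atomic_compare_exchange_strong(&slot, expected, desired);
}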