// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
//
// Test GNU atomic builtins for __int128 aligned to 16 bytes. Because the
// alignment matches the size, the front end should expand these directly to
// LLVM IR atomic instructions rather than emit libatomic calls.
#include <stdatomic.h>
#include <stdint.h>
__int128 Ptr __attribute__((aligned(16)));
__int128 Ret __attribute__((aligned(16)));
__int128 Val __attribute__((aligned(16)));
__int128 Exp __attribute__((aligned(16)));
__int128 Des __attribute__((aligned(16)));
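// f1/f2: __atomic_load_n and __atomic_load. With the natural 16-byte
// alignment, both expand to a single `load atomic i128 ... seq_cst, align 16`.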
// CHECK-LABEL: @f1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load atomic i128, ptr @Ptr seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP0]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2:![0-9]+]]
// CHECK-NEXT: ret void
//
__int128 f1() {
  return __atomic_load_n(&Ptr, memory_order_seq_cst);
}
// CHECK-LABEL: @f2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load atomic i128, ptr @Ptr seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP0]], ptr @Ret, align 16
// CHECK-NEXT: store i128 [[TMP0]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f2() {
  __atomic_load(&Ptr, &Ret, memory_order_seq_cst);
  return Ret;
}
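// f3/f4: __atomic_store_n and __atomic_store expand to a single
// `store atomic i128 ... seq_cst, align 16`.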
// CHECK-LABEL: @f3(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: store atomic i128 [[TMP0]], ptr @Ptr seq_cst, align 16
// CHECK-NEXT: ret void
//
void f3() {
  __atomic_store_n(&Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16
// CHECK-NEXT: store atomic i128 [[TMP0]], ptr @Ptr seq_cst, align 16
// CHECK-NEXT: ret void
//
void f4() {
  __atomic_store(&Ptr, &Val, memory_order_seq_cst);
}
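// f5/f6: __atomic_exchange_n and __atomic_exchange expand to
// `atomicrmw xchg` on the i128 value.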
// CHECK-LABEL: @f5(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f5() {
  return __atomic_exchange_n(&Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f6(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr @Ret, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f6() {
  __atomic_exchange(&Ptr, &Val, &Ret, memory_order_seq_cst);
  return Ret;
}
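// f7/f8: __atomic_compare_exchange_n and __atomic_compare_exchange expand to
// `cmpxchg`. On failure, the separate cmpxchg.store_expected block writes the
// observed value back to Exp, matching the GNU builtin semantics.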
// CHECK-LABEL: @f7(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Des, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr @Exp, align 16
// CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr @Ptr, i128 [[TMP1]], i128 [[TMP0]] seq_cst seq_cst, align 16
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i128, i1 } [[TMP2]], 1
// CHECK-NEXT: br i1 [[TMP3]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
// CHECK: cmpxchg.store_expected:
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i128, i1 } [[TMP2]], 0
// CHECK-NEXT: store i128 [[TMP4]], ptr @Exp, align 16
// CHECK-NEXT: br label [[CMPXCHG_CONTINUE]]
// CHECK: cmpxchg.continue:
// CHECK-NEXT: ret i1 [[TMP3]]
//
_Bool f7() {
  return __atomic_compare_exchange_n(&Ptr, &Exp, Des, 0,
                                     memory_order_seq_cst, memory_order_seq_cst);
}
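// A typical retry loop built on this builtin might look as follows (an
// illustrative sketch only; it is not part of the checked output, and the
// increment is an arbitrary choice):
//
//   __int128 old = __atomic_load_n(&Ptr, memory_order_relaxed);
//   while (!__atomic_compare_exchange_n(&Ptr, &old, old + 1, /*weak=*/0,
//                                       memory_order_seq_cst,
//                                       memory_order_seq_cst))
//     ; // on failure, `old` now holds the observed value; retry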
// CHECK-LABEL: @f8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Exp, align 16
// CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr @Des, align 16
// CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr @Ptr, i128 [[TMP0]], i128 [[TMP1]] seq_cst seq_cst, align 16
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i128, i1 } [[TMP2]], 1
// CHECK-NEXT: br i1 [[TMP3]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
// CHECK: cmpxchg.store_expected:
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i128, i1 } [[TMP2]], 0
// CHECK-NEXT: store i128 [[TMP4]], ptr @Exp, align 16
// CHECK-NEXT: br label [[CMPXCHG_CONTINUE]]
// CHECK: cmpxchg.continue:
// CHECK-NEXT: ret i1 [[TMP3]]
//
_Bool f8() {
  return __atomic_compare_exchange(&Ptr, &Exp, &Des, 0,
                                   memory_order_seq_cst, memory_order_seq_cst);
}
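// f9-f14: the __atomic_OP_fetch forms expand to `atomicrmw OP` followed by
// replaying the operation on the returned (old) value to produce the new
// value. For nand, the replay is an `and` followed by `xor -1`.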
// CHECK-LABEL: @f9(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw add ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: [[TMP2:%.*]] = add i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f9() {
  return __atomic_add_fetch(&Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f10(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw sub ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: [[TMP2:%.*]] = sub i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f10() {
  return __atomic_sub_fetch(&Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f11(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw and ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: [[TMP2:%.*]] = and i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f11() {
  return __atomic_and_fetch(&Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f12(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xor ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: [[TMP2:%.*]] = xor i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f12() {
  return __atomic_xor_fetch(&Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f13(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw or ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: [[TMP2:%.*]] = or i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f13() {
  return __atomic_or_fetch(&Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f14(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw nand ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: [[TMP2:%.*]] = and i128 [[TMP1]], [[TMP0]]
// CHECK-NEXT: [[TMP3:%.*]] = xor i128 [[TMP2]], -1
// CHECK-NEXT: store i128 [[TMP3]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f14() {
  return __atomic_nand_fetch(&Ptr, Val, memory_order_seq_cst);
}
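// f15-f20: the __atomic_fetch_OP forms expand to a bare `atomicrmw OP`,
// returning the old value directly.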
// CHECK-LABEL: @f15(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw add ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f15() {
  return __atomic_fetch_add(&Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw sub ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f16() {
  return __atomic_fetch_sub(&Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f17(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw and ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f17() {
  return __atomic_fetch_and(&Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f18(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xor ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f18() {
  return __atomic_fetch_xor(&Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f19(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw or ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f19() {
  return __atomic_fetch_or(&Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f20(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw nand ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
// CHECK-NEXT: ret void
//
__int128 f20() {
  return __atomic_fetch_nand(&Ptr, Val, memory_order_seq_cst);
}