// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
//
// Test GNU atomic builtins for int32_t (a naturally aligned 32-bit type).
#include <stdatomic.h>
#include <stdint.h>
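// At -O1, each builtin below is expected to lower to a single atomic IR
// operation: 'load atomic' / 'store atomic' for the loads and stores,
// 'atomicrmw' for exchange and the read-modify-write forms, and 'cmpxchg'
// for compare-and-swap. int32_t is naturally aligned here, so everything
// stays inline (no __atomic_* libcalls).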
// CHECK-LABEL: @f1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load atomic i32, ptr [[PTR:%.*]] seq_cst, align 4
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t f1(int32_t *Ptr) {
return __atomic_load_n(Ptr, memory_order_seq_cst);
}
// CHECK-LABEL: @f2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load atomic i32, ptr [[PTR:%.*]] seq_cst, align 4
// CHECK-NEXT: store i32 [[TMP0]], ptr [[RET:%.*]], align 4
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t f2(int32_t *Ptr, int32_t *Ret) {
__atomic_load(Ptr, Ret, memory_order_seq_cst);
return *Ret;
}
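// f1/f2 show the two flavors of each builtin: the _n form takes and returns
// values directly, while the unsuffixed form passes them through pointers
// (here, the result is written to *Ret). Both lower to the same
// 'load atomic'; the same convention applies to the store, exchange, and
// compare-exchange pairs below.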
// CHECK-LABEL: @f3(
// CHECK-NEXT: entry:
// CHECK-NEXT: store atomic i32 [[VAL:%.*]], ptr [[PTR:%.*]] seq_cst, align 4
// CHECK-NEXT: ret void
//
void f3(int32_t *Ptr, int32_t Val) {
__atomic_store_n(Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[VAL:%.*]], align 4
// CHECK-NEXT: store atomic i32 [[TMP0]], ptr [[PTR:%.*]] seq_cst, align 4
// CHECK-NEXT: ret void
//
void f4(int32_t *Ptr, int32_t *Val) {
__atomic_store(Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f5(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t f5(int32_t *Ptr, int32_t Val) {
return __atomic_exchange_n(Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f6(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[VAL:%.*]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i32 [[TMP0]] seq_cst, align 4
// CHECK-NEXT: store i32 [[TMP1]], ptr [[RET:%.*]], align 4
// CHECK-NEXT: ret i32 [[TMP1]]
//
int32_t f6(int32_t *Ptr, int32_t *Val, int32_t *Ret) {
__atomic_exchange(Ptr, Val, Ret, memory_order_seq_cst);
return *Ret;
}
// CHECK-LABEL: @f7(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[EXP:%.*]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = cmpxchg ptr [[PTR:%.*]], i32 [[TMP0]], i32 [[DES:%.*]] seq_cst seq_cst, align 4
// CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
// CHECK-NEXT: br i1 [[TMP2]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
// CHECK: cmpxchg.store_expected:
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i32, i1 } [[TMP1]], 0
// CHECK-NEXT: store i32 [[TMP3]], ptr [[EXP]], align 4
// CHECK-NEXT: br label [[CMPXCHG_CONTINUE]]
// CHECK: cmpxchg.continue:
// CHECK-NEXT: ret i1 [[TMP2]]
//
_Bool f7(int32_t *Ptr, int32_t *Exp, int32_t Des) {
return __atomic_compare_exchange_n(Ptr, Exp, Des, 0,
memory_order_seq_cst, memory_order_seq_cst);
}
// CHECK-LABEL: @f8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[EXP:%.*]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[DES:%.*]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr [[PTR:%.*]], i32 [[TMP0]], i32 [[TMP1]] seq_cst seq_cst, align 4
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
// CHECK-NEXT: br i1 [[TMP3]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
// CHECK: cmpxchg.store_expected:
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i1 } [[TMP2]], 0
// CHECK-NEXT: store i32 [[TMP4]], ptr [[EXP]], align 4
// CHECK-NEXT: br label [[CMPXCHG_CONTINUE]]
// CHECK: cmpxchg.continue:
// CHECK-NEXT: ret i1 [[TMP3]]
//
_Bool f8(int32_t *Ptr, int32_t *Exp, int32_t *Des) {
return __atomic_compare_exchange(Ptr, Exp, Des, 0,
memory_order_seq_cst, memory_order_seq_cst);
}
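// f7/f8: the strong compare-exchange (weak = 0) becomes a 'cmpxchg'
// returning an {old value, success} pair; on failure, clang stores the
// observed value back to *Exp (the cmpxchg.store_expected block) so the
// caller can retry without reloading.
//
// Illustration only -- a hypothetical helper, not part of the autogenerated
// checks: a typical retry loop built on these builtins. A weak exchange is
// fine here because the loop already retries on spurious failure. Being
// unused and static inline, it emits no IR and does not disturb FileCheck.
static inline int32_t atomic_max_i32(int32_t *Ptr, int32_t Val) {
  int32_t Old = __atomic_load_n(Ptr, memory_order_relaxed);
  // On failure, __atomic_compare_exchange_n refreshes Old from *Ptr.
  while (Old < Val &&
         !__atomic_compare_exchange_n(Ptr, &Old, Val, /*weak=*/1,
                                      memory_order_seq_cst,
                                      memory_order_relaxed))
    ;
  return Old;
}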
// CHECK-LABEL: @f9(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw add ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
// CHECK-NEXT: [[TMP1:%.*]] = add i32 [[TMP0]], [[VAL]]
// CHECK-NEXT: ret i32 [[TMP1]]
//
int32_t f9(int32_t *Ptr, int32_t Val) {
return __atomic_add_fetch(Ptr, Val, memory_order_seq_cst);
}
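// f9-f13: the OP_fetch builtins return the *new* value, but 'atomicrmw'
// yields the *old* one, so clang reapplies the operation afterwards (here,
// the plain 'add' following the 'atomicrmw add').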
// CHECK-LABEL: @f10(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw sub ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
// CHECK-NEXT: [[TMP1:%.*]] = sub i32 [[TMP0]], [[VAL]]
// CHECK-NEXT: ret i32 [[TMP1]]
//
int32_t f10(int32_t *Ptr, int32_t Val) {
return __atomic_sub_fetch(Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f11(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw and ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
// CHECK-NEXT: [[TMP1:%.*]] = and i32 [[TMP0]], [[VAL]]
// CHECK-NEXT: ret i32 [[TMP1]]
//
int32_t f11(int32_t *Ptr, int32_t Val) {
return __atomic_and_fetch(Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f12(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw xor ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
// CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[TMP0]], [[VAL]]
// CHECK-NEXT: ret i32 [[TMP1]]
//
int32_t f12(int32_t *Ptr, int32_t Val) {
return __atomic_xor_fetch(Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f13(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw or ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
// CHECK-NEXT: [[TMP1:%.*]] = or i32 [[TMP0]], [[VAL]]
// CHECK-NEXT: ret i32 [[TMP1]]
//
int32_t f13(int32_t *Ptr, int32_t Val) {
return __atomic_or_fetch(Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f14(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw nand ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
// CHECK-NEXT: [[TMP1:%.*]] = and i32 [[TMP0]], [[VAL]]
// CHECK-NEXT: [[TMP2:%.*]] = xor i32 [[TMP1]], -1
// CHECK-NEXT: ret i32 [[TMP2]]
//
int32_t f14(int32_t *Ptr, int32_t Val) {
return __atomic_nand_fetch(Ptr, Val, memory_order_seq_cst);
}
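// f14: GCC defines __atomic_nand_fetch as ~(*Ptr & Val), so recomputing the
// new value from the old one takes two instructions: 'and' followed by
// 'xor ..., -1'.
//
// f15-f20 cover the fetch_OP forms, which return the value *Ptr held before
// the operation -- exactly what 'atomicrmw' yields -- so no fixup arithmetic
// is emitted (compare f15 with f9).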
// CHECK-LABEL: @f15(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw add ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t f15(int32_t *Ptr, int32_t Val) {
return __atomic_fetch_add(Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw sub ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t f16(int32_t *Ptr, int32_t Val) {
return __atomic_fetch_sub(Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f17(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw and ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t f17(int32_t *Ptr, int32_t Val) {
return __atomic_fetch_and(Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f18(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw xor ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t f18(int32_t *Ptr, int32_t Val) {
return __atomic_fetch_xor(Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f19(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw or ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t f19(int32_t *Ptr, int32_t Val) {
return __atomic_fetch_or(Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f20(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw nand ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
// CHECK-NEXT: ret i32 [[TMP0]]
//
int32_t f20(int32_t *Ptr, int32_t Val) {
return __atomic_fetch_nand(Ptr, Val, memory_order_seq_cst);
}