// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
//
// Test GNU atomic builtins for int16_t. Since 2-byte atomics are lock-free
// on SystemZ, all of these should lower to native atomic IR (atomic
// load/store, atomicrmw, cmpxchg) rather than __atomic_* libcalls.
#include <stdatomic.h>
#include <stdint.h>
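
// f1 and f2 cover the two atomic load flavors: __atomic_load_n returns the
// loaded value directly, while __atomic_load writes it through an output
// pointer. Both should lower to a single "load atomic ... seq_cst".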
// CHECK-LABEL: @f1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load atomic i16, ptr [[PTR:%.*]] seq_cst, align 2
// CHECK-NEXT: ret i16 [[TMP0]]
//
int16_t f1(int16_t *Ptr) {
  return __atomic_load_n(Ptr, memory_order_seq_cst);
}
// CHECK-LABEL: @f2(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load atomic i16, ptr [[PTR:%.*]] seq_cst, align 2
// CHECK-NEXT: store i16 [[TMP0]], ptr [[RET:%.*]], align 2
// CHECK-NEXT: ret i16 [[TMP0]]
//
int16_t f2(int16_t *Ptr, int16_t *Ret) {
  __atomic_load(Ptr, Ret, memory_order_seq_cst);
  return *Ret;
}
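
// f3 and f4 cover the matching store flavors: __atomic_store_n takes the
// value by copy, __atomic_store reads it through a pointer. Both should
// lower to a single "store atomic ... seq_cst".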
// CHECK-LABEL: @f3(
// CHECK-NEXT: entry:
// CHECK-NEXT: store atomic i16 [[VAL:%.*]], ptr [[PTR:%.*]] seq_cst, align 2
// CHECK-NEXT: ret void
//
void f3(int16_t *Ptr, int16_t Val) {
  __atomic_store_n(Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f4(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[VAL:%.*]], align 2
// CHECK-NEXT: store atomic i16 [[TMP0]], ptr [[PTR:%.*]] seq_cst, align 2
// CHECK-NEXT: ret void
//
void f4(int16_t *Ptr, int16_t *Val) {
  __atomic_store(Ptr, Val, memory_order_seq_cst);
}
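
// f5 and f6 cover the exchange flavors; both should lower to
// "atomicrmw xchg", with f6 additionally spilling the old value to *Ret.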
// CHECK-LABEL: @f5(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
// CHECK-NEXT: ret i16 [[TMP0]]
//
int16_t f5(int16_t *Ptr, int16_t Val) {
  return __atomic_exchange_n(Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f6(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[VAL:%.*]], align 2
// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i16 [[TMP0]] seq_cst, align 2
// CHECK-NEXT: store i16 [[TMP1]], ptr [[RET:%.*]], align 2
// CHECK-NEXT: ret i16 [[TMP1]]
//
int16_t f6(int16_t *Ptr, int16_t *Val, int16_t *Ret) {
  __atomic_exchange(Ptr, Val, Ret, memory_order_seq_cst);
  return *Ret;
}
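
// f7 and f8 cover strong compare-exchange (weak == 0). Both should lower to
// a "cmpxchg ... seq_cst seq_cst" plus a branch that stores the observed
// value back to *Exp on failure and returns the success flag.
//
// Illustrative usage pattern only (not part of the test): a hypothetical
// caller of this builtin would typically retry in a loop, e.g.
//   int16_t expected = __atomic_load_n(Ptr, memory_order_relaxed);
//   while (!__atomic_compare_exchange_n(Ptr, &expected, expected + 1, 0,
//                                       memory_order_seq_cst,
//                                       memory_order_seq_cst))
//     ; // expected was refreshed with the current value; retry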
// CHECK-LABEL: @f7(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[EXP:%.*]], align 2
// CHECK-NEXT: [[TMP1:%.*]] = cmpxchg ptr [[PTR:%.*]], i16 [[TMP0]], i16 [[DES:%.*]] seq_cst seq_cst, align 2
// CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i16, i1 } [[TMP1]], 1
// CHECK-NEXT: br i1 [[TMP2]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
// CHECK: cmpxchg.store_expected:
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i16, i1 } [[TMP1]], 0
// CHECK-NEXT: store i16 [[TMP3]], ptr [[EXP]], align 2
// CHECK-NEXT: br label [[CMPXCHG_CONTINUE]]
// CHECK: cmpxchg.continue:
// CHECK-NEXT: ret i1 [[TMP2]]
//
_Bool f7(int16_t *Ptr, int16_t *Exp, int16_t Des) {
  return __atomic_compare_exchange_n(Ptr, Exp, Des, 0,
                                     memory_order_seq_cst, memory_order_seq_cst);
}
// CHECK-LABEL: @f8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[EXP:%.*]], align 2
// CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr [[DES:%.*]], align 2
// CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr [[PTR:%.*]], i16 [[TMP0]], i16 [[TMP1]] seq_cst seq_cst, align 2
// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i16, i1 } [[TMP2]], 1
// CHECK-NEXT: br i1 [[TMP3]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
// CHECK: cmpxchg.store_expected:
// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i16, i1 } [[TMP2]], 0
// CHECK-NEXT: store i16 [[TMP4]], ptr [[EXP]], align 2
// CHECK-NEXT: br label [[CMPXCHG_CONTINUE]]
// CHECK: cmpxchg.continue:
// CHECK-NEXT: ret i1 [[TMP3]]
//
_Bool f8(int16_t *Ptr, int16_t *Exp, int16_t *Des) {
  return __atomic_compare_exchange(Ptr, Exp, Des, 0,
                                   memory_order_seq_cst, memory_order_seq_cst);
}
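
// f9 through f14 cover the <op>_fetch forms, which return the *new* value.
// atomicrmw yields the old value, so the IR recomputes the result by
// reapplying the operation to the returned value.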
// CHECK-LABEL: @f9(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw add ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
// CHECK-NEXT: [[TMP1:%.*]] = add i16 [[TMP0]], [[VAL]]
// CHECK-NEXT: ret i16 [[TMP1]]
//
int16_t f9(int16_t *Ptr, int16_t Val) {
  return __atomic_add_fetch(Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f10(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw sub ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
// CHECK-NEXT: [[TMP1:%.*]] = sub i16 [[TMP0]], [[VAL]]
// CHECK-NEXT: ret i16 [[TMP1]]
//
int16_t f10(int16_t *Ptr, int16_t Val) {
  return __atomic_sub_fetch(Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f11(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw and ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
// CHECK-NEXT: [[TMP1:%.*]] = and i16 [[TMP0]], [[VAL]]
// CHECK-NEXT: ret i16 [[TMP1]]
//
int16_t f11(int16_t *Ptr, int16_t Val) {
  return __atomic_and_fetch(Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f12(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw xor ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
// CHECK-NEXT: [[TMP1:%.*]] = xor i16 [[TMP0]], [[VAL]]
// CHECK-NEXT: ret i16 [[TMP1]]
//
int16_t f12(int16_t *Ptr, int16_t Val) {
  return __atomic_xor_fetch(Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f13(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw or ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
// CHECK-NEXT: [[TMP1:%.*]] = or i16 [[TMP0]], [[VAL]]
// CHECK-NEXT: ret i16 [[TMP1]]
//
int16_t f13(int16_t *Ptr, int16_t Val) {
  return __atomic_or_fetch(Ptr, Val, memory_order_seq_cst);
}
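
// Note for f14: __atomic_nand_fetch computes ~(*Ptr & Val), so the fixup on
// the old value returned by "atomicrmw nand" is an "and" followed by
// "xor -1" rather than a single instruction.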
// CHECK-LABEL: @f14(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw nand ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
// CHECK-NEXT: [[TMP1:%.*]] = and i16 [[TMP0]], [[VAL]]
// CHECK-NEXT: [[TMP2:%.*]] = xor i16 [[TMP1]], -1
// CHECK-NEXT: ret i16 [[TMP2]]
//
int16_t f14(int16_t *Ptr, int16_t Val) {
  return __atomic_nand_fetch(Ptr, Val, memory_order_seq_cst);
}
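
// f15 through f20 cover the fetch_<op> forms, which return the *old* value
// and therefore map to a bare atomicrmw with no fixup arithmetic.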
// CHECK-LABEL: @f15(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw add ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
// CHECK-NEXT: ret i16 [[TMP0]]
//
int16_t f15(int16_t *Ptr, int16_t Val) {
  return __atomic_fetch_add(Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw sub ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
// CHECK-NEXT: ret i16 [[TMP0]]
//
int16_t f16(int16_t *Ptr, int16_t Val) {
  return __atomic_fetch_sub(Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f17(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw and ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
// CHECK-NEXT: ret i16 [[TMP0]]
//
int16_t f17(int16_t *Ptr, int16_t Val) {
  return __atomic_fetch_and(Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f18(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw xor ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
// CHECK-NEXT: ret i16 [[TMP0]]
//
int16_t f18(int16_t *Ptr, int16_t Val) {
  return __atomic_fetch_xor(Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f19(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw or ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
// CHECK-NEXT: ret i16 [[TMP0]]
//
int16_t f19(int16_t *Ptr, int16_t Val) {
  return __atomic_fetch_or(Ptr, Val, memory_order_seq_cst);
}
// CHECK-LABEL: @f20(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw nand ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
// CHECK-NEXT: ret i16 [[TMP0]]
//
int16_t f20(int16_t *Ptr, int16_t Val) {
  return __atomic_fetch_nand(Ptr, Val, memory_order_seq_cst);
}