llvm/clang/test/CodeGen/X86/x86-atomic-double.c

// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
// RUN: %clang_cc1 -triple x86_64-linux-gnu -target-cpu core2 %s -emit-llvm -o - | FileCheck -check-prefixes=X64 %s
// RUN: %clang_cc1 -triple i686-linux-gnu -target-cpu core2 %s -emit-llvm -o - | FileCheck -check-prefixes=X86 %s
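//
// Exercise pre/post increment and decrement on an _Atomic double and check
// that each lowers to a seq_cst atomicrmw fadd/fsub on the 8-byte-aligned
// atomic variable, for both x86-64 and i686.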


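// Post-increment: the atomicrmw result (the old value) is returned directly.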
// X64-LABEL: define dso_local double @test_double_post_inc(
// X64-SAME: ) #[[ATTR0:[0-9]+]] {
// X64-NEXT:  entry:
// X64-NEXT:    [[RETVAL:%.*]] = alloca double, align 8
// X64-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr @test_double_post_inc.n, double 1.000000e+00 seq_cst, align 8
// X64-NEXT:    store double [[TMP0]], ptr [[RETVAL]], align 8
// X64-NEXT:    [[TMP1:%.*]] = load double, ptr [[RETVAL]], align 8
// X64-NEXT:    ret double [[TMP1]]
//
// X86-LABEL: define dso_local double @test_double_post_inc(
// X86-SAME: ) #[[ATTR0:[0-9]+]] {
// X86-NEXT:  entry:
// X86-NEXT:    [[RETVAL:%.*]] = alloca double, align 4
// X86-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr @test_double_post_inc.n, double 1.000000e+00 seq_cst, align 8
// X86-NEXT:    store double [[TMP0]], ptr [[RETVAL]], align 4
// X86-NEXT:    [[TMP1:%.*]] = load double, ptr [[RETVAL]], align 4
// X86-NEXT:    ret double [[TMP1]]
//
double test_double_post_inc()
{
    static _Atomic double n;
    return n++;
}

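// Post-decrement: the atomicrmw fsub result (the old value) is returned directly.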
// X64-LABEL: define dso_local double @test_double_post_dc(
// X64-SAME: ) #[[ATTR0]] {
// X64-NEXT:  entry:
// X64-NEXT:    [[RETVAL:%.*]] = alloca double, align 8
// X64-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr @test_double_post_dc.n, double 1.000000e+00 seq_cst, align 8
// X64-NEXT:    store double [[TMP0]], ptr [[RETVAL]], align 8
// X64-NEXT:    [[TMP1:%.*]] = load double, ptr [[RETVAL]], align 8
// X64-NEXT:    ret double [[TMP1]]
//
// X86-LABEL: define dso_local double @test_double_post_dc(
// X86-SAME: ) #[[ATTR0]] {
// X86-NEXT:  entry:
// X86-NEXT:    [[RETVAL:%.*]] = alloca double, align 4
// X86-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr @test_double_post_dc.n, double 1.000000e+00 seq_cst, align 8
// X86-NEXT:    store double [[TMP0]], ptr [[RETVAL]], align 4
// X86-NEXT:    [[TMP1:%.*]] = load double, ptr [[RETVAL]], align 4
// X86-NEXT:    ret double [[TMP1]]
//
double test_double_post_dc()
{
    static _Atomic double n;
    return n--;
}

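// Pre-decrement: the old value returned by atomicrmw fsub is decremented
// again with a plain fsub to produce the updated value.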
// X64-LABEL: define dso_local double @test_double_pre_dc(
// X64-SAME: ) #[[ATTR0]] {
// X64-NEXT:  entry:
// X64-NEXT:    [[RETVAL:%.*]] = alloca double, align 8
// X64-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr @test_double_pre_dc.n, double 1.000000e+00 seq_cst, align 8
// X64-NEXT:    [[TMP1:%.*]] = fsub double [[TMP0]], 1.000000e+00
// X64-NEXT:    store double [[TMP1]], ptr [[RETVAL]], align 8
// X64-NEXT:    [[TMP2:%.*]] = load double, ptr [[RETVAL]], align 8
// X64-NEXT:    ret double [[TMP2]]
//
// X86-LABEL: define dso_local double @test_double_pre_dc(
// X86-SAME: ) #[[ATTR0]] {
// X86-NEXT:  entry:
// X86-NEXT:    [[RETVAL:%.*]] = alloca double, align 4
// X86-NEXT:    [[TMP0:%.*]] = atomicrmw fsub ptr @test_double_pre_dc.n, double 1.000000e+00 seq_cst, align 8
// X86-NEXT:    [[TMP1:%.*]] = fsub double [[TMP0]], 1.000000e+00
// X86-NEXT:    store double [[TMP1]], ptr [[RETVAL]], align 4
// X86-NEXT:    [[TMP2:%.*]] = load double, ptr [[RETVAL]], align 4
// X86-NEXT:    ret double [[TMP2]]
//
double test_double_pre_dc()
{
    static _Atomic double n;
    return --n;
}

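// Pre-increment: the old value returned by atomicrmw fadd is incremented
// again with a plain fadd to produce the updated value.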
// X64-LABEL: define dso_local double @test_double_pre_inc(
// X64-SAME: ) #[[ATTR0]] {
// X64-NEXT:  entry:
// X64-NEXT:    [[RETVAL:%.*]] = alloca double, align 8
// X64-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr @test_double_pre_inc.n, double 1.000000e+00 seq_cst, align 8
// X64-NEXT:    [[TMP1:%.*]] = fadd double [[TMP0]], 1.000000e+00
// X64-NEXT:    store double [[TMP1]], ptr [[RETVAL]], align 8
// X64-NEXT:    [[TMP2:%.*]] = load double, ptr [[RETVAL]], align 8
// X64-NEXT:    ret double [[TMP2]]
//
// X86-LABEL: define dso_local double @test_double_pre_inc(
// X86-SAME: ) #[[ATTR0]] {
// X86-NEXT:  entry:
// X86-NEXT:    [[RETVAL:%.*]] = alloca double, align 4
// X86-NEXT:    [[TMP0:%.*]] = atomicrmw fadd ptr @test_double_pre_inc.n, double 1.000000e+00 seq_cst, align 8
// X86-NEXT:    [[TMP1:%.*]] = fadd double [[TMP0]], 1.000000e+00
// X86-NEXT:    store double [[TMP1]], ptr [[RETVAL]], align 4
// X86-NEXT:    [[TMP2:%.*]] = load double, ptr [[RETVAL]], align 4
// X86-NEXT:    ret double [[TMP2]]
//
double test_double_pre_inc()
{
    static _Atomic double n;
    return ++n;
}